forked from cerc-io/plugeth
eth/fetcher: don't spend too much time on transaction inclusion (#25524)
* eth/fetcher: introduce some lag in tx fetching
* eth/fetcher: change conditions a bit
* eth/fetcher: use per-batch quota check
* eth/fetcher: fix some comments
* eth/fetcher: address review concerns
* eth/fetcher: fix panic + add warn log
* eth/fetcher: fix log
* eth/fetcher: fix log
* cmd/devp2p/internal/ethtest: fix ignoring tx announcements from prev. tests
* cmd/devp2p/internal/ethtest: fix TestLargeTxRequest

This increases the number of tx relay messages the test waits for. Since go-ethereum now processes incoming txs in smaller batches, the announcement messages it sends are also smaller.

Co-authored-by: Felix Lange <fjl@twurst.com>
This commit is contained in:
parent ac7ad811b4
commit 0ce494b60c
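The core of the change is visible in the eth/fetcher hunks below: instead of handing every delivered transaction to the pool in one go, the fetcher now works in chunks of 128 and aborts a delivery (with a short back-off) once a chunk contains too many rejected transactions. The following standalone sketch illustrates that batching-with-quota idea; it is not geth's code, and names such as enqueueBatched, addToPool and batchSize are made up for illustration.

// A standalone sketch (not geth's code) of the batching idea behind this commit:
// add incoming transactions to the pool in fixed-size chunks, count the per-batch
// rejections, and bail out with a small delay when a batch looks useless.
// txPool-style details are faked; addToPool and batchSize are assumptions.
package main

import (
	"errors"
	"fmt"
	"time"
)

const (
	batchSize   = 128                    // txs handed to the pool per batch
	rejectQuota = batchSize / 4          // >25% "other" rejects aborts the delivery
	griefDelay  = 200 * time.Millisecond // back-off applied after an aborted delivery
)

var errUseless = errors.New("tx rejected") // placeholder for pool-specific errors

// addToPool stands in for the real pool insertion; it returns one error per tx.
func addToPool(batch []int) []error {
	errs := make([]error, len(batch))
	for i, tx := range batch {
		if tx%3 == 0 { // pretend every third tx is stale, unknown-account, etc.
			errs[i] = errUseless
		}
	}
	return errs
}

// enqueueBatched mirrors the shape of the new TxFetcher.Enqueue loop.
func enqueueBatched(txs []int) (accepted int, delay time.Duration) {
	for i := 0; i < len(txs); i += batchSize {
		end := i + batchSize
		if end > len(txs) {
			end = len(txs)
		}
		var otherreject int
		for _, err := range addToPool(txs[i:end]) {
			if err != nil {
				otherreject++
				continue
			}
			accepted++
		}
		// A batch with too many useless txs means we are out of sync or being
		// griefed: stop spending time on this delivery and sleep briefly later.
		if otherreject > rejectQuota {
			delay = griefDelay
			fmt.Printf("aborting delivery: %d txs ignored\n", len(txs)-end)
			break
		}
	}
	return accepted, delay
}

func main() {
	txs := make([]int, 1000)
	for i := range txs {
		txs[i] = i
	}
	accepted, delay := enqueueBatched(txs)
	fmt.Println("accepted:", accepted, "delay:", delay)
	time.Sleep(delay) // the real fetcher sleeps after notifying its cleanup loop
}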
@@ -357,9 +357,13 @@ func (s *Suite) waitAnnounce(conn *Conn, blockAnnouncement *NewBlock) error {
 				return fmt.Errorf("wrong block hash in announcement: expected %v, got %v", blockAnnouncement.Block.Hash(), hashes[0].Hash)
 			}
 			return nil
-		case *NewPooledTransactionHashes:
+
 		// ignore tx announcements from previous tests
+		case *NewPooledTransactionHashes:
 			continue
+		case *Transactions:
+			continue
+
 		default:
 			return fmt.Errorf("unexpected: %s", pretty.Sdump(msg))
 		}
@@ -544,9 +544,13 @@ func (s *Suite) TestNewPooledTxs(t *utesting.T) {
 				t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsPacket))
 			}
 			return
+
 		// ignore propagated txs from previous tests
 		case *NewPooledTransactionHashes:
 			continue
+		case *Transactions:
+			continue
+
 		// ignore block announcements from previous tests
 		case *NewBlockHashes:
 			continue
@@ -192,10 +192,10 @@ func sendMultipleSuccessfulTxs(t *utesting.T, s *Suite, txs []*types.Transaction
 	nonce = txs[len(txs)-1].Nonce()
 
 	// Wait for the transaction announcement(s) and make sure all sent txs are being propagated.
-	// all txs should be announced within 3 announcements.
+	// all txs should be announced within a couple announcements.
 	recvHashes := make([]common.Hash, 0)
 
-	for i := 0; i < 3; i++ {
+	for i := 0; i < 20; i++ {
 		switch msg := recvConn.readAndServe(s.chain, timeout).(type) {
 		case *Transactions:
 			for _, tx := range *msg {
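The loop bound above grows from 3 to 20 because, with the fetcher now adding transactions to the pool in smaller batches, the announcements a node emits can be spread over more, smaller messages; the test simply has to keep reading until every sent hash has appeared. A minimal, self-contained sketch of that collect-until-complete pattern follows; makeReader is a made-up stand-in for recvConn.readAndServe plus the message type switch.

// Sketch only: gather announced hashes across several small messages until all
// sent hashes have been seen, or the read attempts run out.
package main

import "fmt"

// makeReader fakes a peer that announces the sent hashes two at a time.
func makeReader(sent []string) func() []string {
	i := 0
	return func() []string {
		if i >= len(sent) {
			return nil
		}
		end := i + 2 // pretend each announcement carries only two hashes
		if end > len(sent) {
			end = len(sent)
		}
		batch := sent[i:end]
		i = end
		return batch
	}
}

func main() {
	sent := []string{"a", "b", "c", "d", "e"}
	read := makeReader(sent)

	// Keep reading until every sent hash has been announced or attempts run out.
	seen := make(map[string]bool)
	for i := 0; i < 20 && len(seen) < len(sent); i++ {
		for _, h := range read() {
			seen[h] = true
		}
	}
	for _, h := range sent {
		if !seen[h] {
			fmt.Println("missing announcement for", h)
		}
	}
	fmt.Println("announced:", len(seen), "of", len(sent))
}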
@@ -262,22 +262,40 @@ func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error {
 // direct request replies. The differentiation is important so the fetcher can
 // re-schedule missing transactions as soon as possible.
 func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool) error {
-	// Keep track of all the propagated transactions
-	if direct {
-		txReplyInMeter.Mark(int64(len(txs)))
-	} else {
-		txBroadcastInMeter.Mark(int64(len(txs)))
+	var (
+		inMeter          = txReplyInMeter
+		knownMeter       = txReplyKnownMeter
+		underpricedMeter = txReplyUnderpricedMeter
+		otherRejectMeter = txReplyOtherRejectMeter
+	)
+	if !direct {
+		inMeter = txBroadcastInMeter
+		knownMeter = txBroadcastKnownMeter
+		underpricedMeter = txBroadcastUnderpricedMeter
+		otherRejectMeter = txBroadcastOtherRejectMeter
 	}
+	// Keep track of all the propagated transactions
+	inMeter.Mark(int64(len(txs)))
 
 	// Push all the transactions into the pool, tracking underpriced ones to avoid
 	// re-requesting them and dropping the peer in case of malicious transfers.
 	var (
 		added = make([]common.Hash, 0, len(txs))
+		delay time.Duration
 	)
+	// proceed in batches
+	for i := 0; i < len(txs); i += 128 {
+		end := i + 128
+		if end > len(txs) {
+			end = len(txs)
+		}
+		var (
+			duplicate   int64
+			underpriced int64
+			otherreject int64
+		)
-	errs := f.addTxs(txs)
-	for i, err := range errs {
+		batch := txs[i:end]
+		for j, err := range f.addTxs(batch) {
 			// Track the transaction hash if the price is too low for us.
 			// Avoid re-request this transaction when we receive another
 			// announcement.
@@ -285,7 +303,7 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool)
 				for f.underpriced.Cardinality() >= maxTxUnderpricedSetSize {
 					f.underpriced.Pop()
 				}
-				f.underpriced.Add(txs[i].Hash())
+				f.underpriced.Add(batch[j].Hash())
 			}
 			// Track a few interesting failure types
 			switch {
@@ -300,19 +318,23 @@
 			default:
 				otherreject++
 			}
-			added = append(added, txs[i].Hash())
+			added = append(added, batch[j].Hash())
 		}
-	if direct {
-		txReplyKnownMeter.Mark(duplicate)
-		txReplyUnderpricedMeter.Mark(underpriced)
-		txReplyOtherRejectMeter.Mark(otherreject)
-	} else {
-		txBroadcastKnownMeter.Mark(duplicate)
-		txBroadcastUnderpricedMeter.Mark(underpriced)
-		txBroadcastOtherRejectMeter.Mark(otherreject)
-	}
+		knownMeter.Mark(duplicate)
+		underpricedMeter.Mark(underpriced)
+		otherRejectMeter.Mark(otherreject)
+
+		// If 'other reject' is >25% of the deliveries in any batch, abort. Either we are
+		// out of sync with the chain or the peer is griefing us.
+		if otherreject > 128/4 {
+			delay = 200 * time.Millisecond
+			log.Warn("Peer delivering useless transactions", "peer", peer, "ignored", len(txs)-end)
+			break
+		}
+	}
 	select {
 	case f.cleanup <- &txDelivery{origin: peer, hashes: added, direct: direct}:
+		time.Sleep(delay)
 		return nil
 	case <-f.quit:
 		return errTerminated
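A side refactor in the Enqueue hunks above replaces the repeated if direct {...} else {...} metering branches with meters chosen once at the top of the function. A small sketch of the same pattern, using plain counters instead of geth's metrics package (all identifiers here are hypothetical):

// Sketch only: pick the counters once based on the delivery type, then mark
// them unconditionally further down, instead of branching at every call site.
package main

import "fmt"

// Counter is a stand-in for a metrics meter.
type Counter struct{ n int64 }

func (c *Counter) Mark(v int64) { c.n += v }

var (
	replyKnown     = new(Counter)
	broadcastKnown = new(Counter)
)

// record selects the right counter set up front, mirroring the inMeter /
// knownMeter selection at the top of the new Enqueue.
func record(direct bool, duplicate int64) {
	knownMeter := replyKnown
	if !direct {
		knownMeter = broadcastKnown
	}
	knownMeter.Mark(duplicate)
}

func main() {
	record(true, 3)  // counted against the "reply" meter
	record(false, 5) // counted against the "broadcast" meter
	fmt.Println("reply known:", replyKnown.n, "broadcast known:", broadcastKnown.n)
}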