core/types: support for optional blob sidecar in BlobTx (#27841)
This PR removes the newly added txpool.Transaction wrapper type, and instead adds a way of keeping the blob sidecar within types.Transaction. It's better this way because most code in go-ethereum does not care about blob transactions, and probably never will. This will start mattering especially on the client side of RPC, where all APIs are based on types.Transaction. Users need to be able to use the same signing flows they already have.

However, since blobs are only allowed in some places but not others, we will now need to add checks to avoid creating invalid blocks. I'm still trying to figure out the best place to do some of these. The way I have it currently is as follows:

- In block validation (import), txs are verified not to have a blob sidecar.
- In miner, we strip off the sidecar when committing the transaction into the block.
- In TxPool validation, txs must have a sidecar to be added into the blobpool.
  - Note there is a special case here: when transactions are re-added because of a chain reorg, we cannot use the transactions gathered from the old chain blocks as-is, because they will be missing their blobs. This was previously handled by storing the blobs into the 'blobpool limbo'. The code has now changed to store the full transaction in the limbo instead, but it might be confusing for code readers why we're not simply adding the types.Transaction we already have.

Code changes summary:

- txpool.Transaction removed and all uses replaced by types.Transaction again
- blobpool now stores types.Transaction instead of defining its own blobTx format for storage
- the blobpool limbo now stores types.Transaction instead of storing only the blobs
- checks to validate the presence/absence of the blob sidecar added in certain critical places
Parent: 68860063fb
Commit: 2a6beb6a39
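Illustrative sketch only, not part of this commit: how a blob transaction might be built so that the sidecar travels inside types.Transaction, and how the "no sidecar inside a block" rule described above can be checked. The throwaway key, the locally tweaked copy of params.TestChainConfig, the zero-valued KZG blob/commitment/proof and the made-up versioned hash are assumptions for the sake of a self-contained example; real code must supply valid KZG data and a Cancun-enabled chain configuration.

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/crypto/kzg4844"
    "github.com/ethereum/go-ethereum/params"
    "github.com/holiman/uint256"
)

func main() {
    // Assumed setup: a throwaway key and a copy of params.TestChainConfig with
    // the time-based forks switched on so the latest signer accepts blob txs.
    key, _ := crypto.GenerateKey()
    config := *params.TestChainConfig
    zero := uint64(0)
    config.ShanghaiTime, config.CancunTime = &zero, &zero
    signer := types.LatestSigner(&config)

    // The sidecar (blobs, commitments, proofs) now lives inside the BlobTx itself.
    // Zero-valued KZG data and a made-up versioned hash are placeholders only.
    sidecar := &types.BlobTxSidecar{
        Blobs:       []kzg4844.Blob{{}},
        Commitments: []kzg4844.Commitment{{}},
        Proofs:      []kzg4844.Proof{{}},
    }
    tx := types.MustSignNewTx(key, signer, &types.BlobTx{
        ChainID:    uint256.MustFromBig(config.ChainID),
        Nonce:      0,
        GasTipCap:  uint256.NewInt(1),
        GasFeeCap:  uint256.NewInt(1),
        Gas:        21000,
        BlobFeeCap: uint256.NewInt(1),
        BlobHashes: []common.Hash{{0x01}}, // placeholder versioned hash
        Value:      uint256.NewInt(0),
        Sidecar:    sidecar,
    })

    // Pool side: the sidecar is reachable straight from the transaction,
    // which is what the blobpool now requires on admission.
    fmt.Println("has sidecar:", tx.BlobTxSidecar() != nil)

    // Block side: the same rule the validator enforces below -- a transaction
    // inside a block must not carry its sidecar.
    txs := types.Transactions{tx}
    for i, btx := range txs {
        if btx.Type() == types.BlobTxType && btx.BlobTxSidecar() != nil {
            fmt.Printf("unexpected blob sidecar in transaction at index %d\n", i)
        }
    }
}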
core/block_validator.go

@@ -68,6 +68,7 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
     if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash {
         return fmt.Errorf("transaction root hash mismatch (header value %x, calculated %x)", header.TxHash, hash)
     }
+
     // Withdrawals are present after the Shanghai fork.
     if header.WithdrawalsHash != nil {
         // Withdrawals list must be present in body after Shanghai.
@@ -81,14 +82,23 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
         // Withdrawals are not allowed prior to Shanghai fork
         return errors.New("withdrawals present in block body")
     }
+
     // Blob transactions may be present after the Cancun fork.
     var blobs int
-    for _, tx := range block.Transactions() {
+    for i, tx := range block.Transactions() {
         // Count the number of blobs to validate against the header's blobGasUsed
         blobs += len(tx.BlobHashes())
+
+        // If the tx is a blob tx, it must NOT have a sidecar attached to be valid in a block.
+        if tx.BlobTxSidecar() != nil {
+            return fmt.Errorf("unexpected blob sidecar in transaction at index %d", i)
+        }
+
         // The individual checks for blob validity (version-check + not empty)
-        // happens in the state_transition check.
+        // happens in StateTransition.
     }
+
+    // Check blob gas usage.
     if header.BlobGasUsed != nil {
         if want := *header.BlobGasUsed / params.BlobTxBlobGasPerBlob; uint64(blobs) != want { // div because the header is surely good vs the body might be bloated
             return fmt.Errorf("blob gas used mismatch (header %v, calculated %v)", *header.BlobGasUsed, blobs*params.BlobTxBlobGasPerBlob)
@@ -98,6 +108,8 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
             return errors.New("data blobs present in block body")
         }
     }
+
+    // Ancestor block must be known.
     if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {
         if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {
             return consensus.ErrUnknownAncestor
core/blockchain.go

@@ -1085,19 +1085,30 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
         ancientReceipts, liveReceipts []types.Receipts
     )
     // Do a sanity check that the provided chain is actually ordered and linked
-    for i := 0; i < len(blockChain); i++ {
+    for i, block := range blockChain {
         if i != 0 {
-            if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
-                log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
-                    "prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
-                return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", i-1, blockChain[i-1].NumberU64(),
-                    blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
+            prev := blockChain[i-1]
+            if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
+                log.Error("Non contiguous receipt insert",
+                    "number", block.Number(), "hash", block.Hash(), "parent", block.ParentHash(),
+                    "prevnumber", prev.Number(), "prevhash", prev.Hash())
+                return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])",
+                    i-1, prev.NumberU64(), prev.Hash().Bytes()[:4],
+                    i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
             }
         }
-        if blockChain[i].NumberU64() <= ancientLimit {
-            ancientBlocks, ancientReceipts = append(ancientBlocks, blockChain[i]), append(ancientReceipts, receiptChain[i])
+        if block.NumberU64() <= ancientLimit {
+            ancientBlocks, ancientReceipts = append(ancientBlocks, block), append(ancientReceipts, receiptChain[i])
         } else {
-            liveBlocks, liveReceipts = append(liveBlocks, blockChain[i]), append(liveReceipts, receiptChain[i])
+            liveBlocks, liveReceipts = append(liveBlocks, block), append(liveReceipts, receiptChain[i])
+        }
+
+        // Here we also validate that blob transactions in the block do not contain a sidecar.
+        // While the sidecar does not affect the block hash / tx hash, sending blobs within a block is not allowed.
+        for txIndex, tx := range block.Transactions() {
+            if tx.Type() == types.BlobTxType && tx.BlobTxSidecar() != nil {
+                return 0, fmt.Errorf("block #%d contains unexpected blob sidecar in tx at index %d", block.NumberU64(), txIndex)
+            }
         }
     }
 
core/txpool/blobpool/blobpool.go

@@ -19,6 +19,7 @@ package blobpool
 
 import (
     "container/heap"
+    "errors"
     "fmt"
     "math"
     "math/big"
@@ -35,7 +36,6 @@ import (
     "github.com/ethereum/go-ethereum/core/state"
     "github.com/ethereum/go-ethereum/core/txpool"
     "github.com/ethereum/go-ethereum/core/types"
-    "github.com/ethereum/go-ethereum/crypto/kzg4844"
     "github.com/ethereum/go-ethereum/event"
     "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/metrics"
@@ -83,16 +83,6 @@ const (
     limboedTransactionStore = "limbo"
 )
 
-// blobTx is a wrapper around types.BlobTx which also contains the literal blob
-// data along with all the transaction metadata.
-type blobTx struct {
-    Tx *types.Transaction
-
-    Blobs   []kzg4844.Blob
-    Commits []kzg4844.Commitment
-    Proofs  []kzg4844.Proof
-}
-
 // blobTxMeta is the minimal subset of types.BlobTx necessary to validate and
 // schedule the blob transactions into the following blocks. Only ever add the
 // bare minimum needed fields to keep the size down (and thus number of entries
@@ -455,22 +445,27 @@ func (p *BlobPool) Close() error {
 // parseTransaction is a callback method on pool creation that gets called for
 // each transaction on disk to create the in-memory metadata index.
 func (p *BlobPool) parseTransaction(id uint64, size uint32, blob []byte) error {
-    item := new(blobTx)
-    if err := rlp.DecodeBytes(blob, item); err != nil {
+    tx := new(types.Transaction)
+    if err := rlp.DecodeBytes(blob, tx); err != nil {
         // This path is impossible unless the disk data representation changes
         // across restarts. For that ever unprobable case, recover gracefully
         // by ignoring this data entry.
         log.Error("Failed to decode blob pool entry", "id", id, "err", err)
         return err
     }
-    meta := newBlobTxMeta(id, size, item.Tx)
+    if tx.BlobTxSidecar() == nil {
+        log.Error("Missing sidecar in blob pool entry", "id", id, "hash", tx.Hash())
+        return errors.New("missing blob sidecar")
+    }
 
-    sender, err := p.signer.Sender(item.Tx)
+    meta := newBlobTxMeta(id, size, tx)
+
+    sender, err := p.signer.Sender(tx)
     if err != nil {
         // This path is impossible unless the signature validity changes across
         // restarts. For that ever unprobable case, recover gracefully by ignoring
         // this data entry.
-        log.Error("Failed to recover blob tx sender", "id", id, "hash", item.Tx.Hash(), "err", err)
+        log.Error("Failed to recover blob tx sender", "id", id, "hash", tx.Hash(), "err", err)
         return err
     }
     if _, ok := p.index[sender]; !ok {
@@ -718,17 +713,17 @@ func (p *BlobPool) offload(addr common.Address, nonce uint64, id uint64, inclusi
         log.Error("Blobs missing for included transaction", "from", addr, "nonce", nonce, "id", id, "err", err)
         return
     }
-    item := new(blobTx)
-    if err = rlp.DecodeBytes(data, item); err != nil {
+    var tx types.Transaction
+    if err = rlp.DecodeBytes(data, &tx); err != nil {
         log.Error("Blobs corrupted for included transaction", "from", addr, "nonce", nonce, "id", id, "err", err)
         return
     }
-    block, ok := inclusions[item.Tx.Hash()]
+    block, ok := inclusions[tx.Hash()]
     if !ok {
         log.Warn("Blob transaction swapped out by signer", "from", addr, "nonce", nonce, "id", id)
         return
     }
-    if err := p.limbo.push(item.Tx.Hash(), block, item.Blobs, item.Commits, item.Proofs); err != nil {
+    if err := p.limbo.push(&tx, block); err != nil {
         log.Warn("Failed to offload blob tx into limbo", "err", err)
         return
     }
@@ -760,7 +755,7 @@ func (p *BlobPool) Reset(oldHead, newHead *types.Header) {
     for addr, txs := range reinject {
         // Blindly push all the lost transactions back into the pool
         for _, tx := range txs {
-            p.reinject(addr, tx)
+            p.reinject(addr, tx.Hash())
         }
         // Recheck the account's pooled transactions to drop included and
         // invalidated one
@@ -920,16 +915,19 @@ func (p *BlobPool) reorg(oldHead, newHead *types.Header) (map[common.Address][]*
 // Note, the method will not initialize the eviction cache values as those will
 // be done once for all transactions belonging to an account after all individual
 // transactions are injected back into the pool.
-func (p *BlobPool) reinject(addr common.Address, tx *types.Transaction) {
+func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) {
     // Retrieve the associated blob from the limbo. Without the blobs, we cannot
     // add the transaction back into the pool as it is not mineable.
-    blobs, commits, proofs, err := p.limbo.pull(tx.Hash())
+    tx, err := p.limbo.pull(txhash)
     if err != nil {
         log.Error("Blobs unavailable, dropping reorged tx", "err", err)
         return
     }
-    // Serialize the transaction back into the primary datastore
-    blob, err := rlp.EncodeToBytes(&blobTx{Tx: tx, Blobs: blobs, Commits: commits, Proofs: proofs})
+    // TODO: seems like an easy optimization here would be getting the serialized tx
+    // from limbo instead of re-serializing it here.
+
+    // Serialize the transaction back into the primary datastore.
+    blob, err := rlp.EncodeToBytes(tx)
     if err != nil {
         log.Error("Failed to encode transaction for storage", "hash", tx.Hash(), "err", err)
         return
@@ -939,9 +937,9 @@ func (p *BlobPool) reinject(addr common.Address, tx *types.Transaction) {
         log.Error("Failed to write transaction into storage", "hash", tx.Hash(), "err", err)
         return
     }
 
     // Update the indixes and metrics
     meta := newBlobTxMeta(id, p.store.Size(id), tx)
     if _, ok := p.index[addr]; !ok {
         if err := p.reserve(addr, true); err != nil {
             log.Warn("Failed to reserve account for blob pool", "tx", tx.Hash(), "from", addr, "err", err)
@@ -1023,7 +1021,7 @@ func (p *BlobPool) SetGasTip(tip *big.Int) {
 
 // validateTx checks whether a transaction is valid according to the consensus
 // rules and adheres to some heuristic limits of the local node (price and size).
-func (p *BlobPool) validateTx(tx *types.Transaction, blobs []kzg4844.Blob, commits []kzg4844.Commitment, proofs []kzg4844.Proof) error {
+func (p *BlobPool) validateTx(tx *types.Transaction) error {
     // Ensure the transaction adheres to basic pool filters (type, size, tip) and
     // consensus rules
     baseOpts := &txpool.ValidationOptions{
@@ -1032,7 +1030,7 @@ func (p *BlobPool) validateTx(tx *types.Transaction, blobs []kzg4844.Blob, commi
         MaxSize: txMaxSize,
         MinTip:  p.gasTip.ToBig(),
     }
-    if err := txpool.ValidateTransaction(tx, blobs, commits, proofs, p.head, p.signer, baseOpts); err != nil {
+    if err := txpool.ValidateTransaction(tx, p.head, p.signer, baseOpts); err != nil {
         return err
     }
     // Ensure the transaction adheres to the stateful pool filters (nonce, balance)
@@ -1117,7 +1115,7 @@ func (p *BlobPool) Has(hash common.Hash) bool {
 }
 
 // Get returns a transaction if it is contained in the pool, or nil otherwise.
-func (p *BlobPool) Get(hash common.Hash) *txpool.Transaction {
+func (p *BlobPool) Get(hash common.Hash) *types.Transaction {
     // Track the amount of time waiting to retrieve a fully resolved blob tx from
     // the pool and the amount of time actually spent on pulling the data from disk.
     getStart := time.Now()
@@ -1139,32 +1137,27 @@ func (p *BlobPool) Get(hash common.Hash) *txpool.Transaction {
         log.Error("Tracked blob transaction missing from store", "hash", hash, "id", id, "err", err)
         return nil
     }
-    item := new(blobTx)
+    item := new(types.Transaction)
     if err = rlp.DecodeBytes(data, item); err != nil {
         log.Error("Blobs corrupted for traced transaction", "hash", hash, "id", id, "err", err)
         return nil
     }
-    return &txpool.Transaction{
-        Tx:            item.Tx,
-        BlobTxBlobs:   item.Blobs,
-        BlobTxCommits: item.Commits,
-        BlobTxProofs:  item.Proofs,
-    }
+    return item
 }
 
 // Add inserts a set of blob transactions into the pool if they pass validation (both
 // consensus validity and pool restictions).
-func (p *BlobPool) Add(txs []*txpool.Transaction, local bool, sync bool) []error {
+func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error {
     errs := make([]error, len(txs))
     for i, tx := range txs {
-        errs[i] = p.add(tx.Tx, tx.BlobTxBlobs, tx.BlobTxCommits, tx.BlobTxProofs)
+        errs[i] = p.add(tx)
     }
     return errs
 }
 
 // Add inserts a new blob transaction into the pool if it passes validation (both
 // consensus validity and pool restictions).
-func (p *BlobPool) add(tx *types.Transaction, blobs []kzg4844.Blob, commits []kzg4844.Commitment, proofs []kzg4844.Proof) (err error) {
+func (p *BlobPool) add(tx *types.Transaction) (err error) {
     // The blob pool blocks on adding a transaction. This is because blob txs are
     // only even pulled form the network, so this method will act as the overload
     // protection for fetches.
@@ -1178,7 +1171,7 @@ func (p *BlobPool) add(tx *types.Transaction, blobs []kzg4844.Blob, commits []kz
     }(time.Now())
 
     // Ensure the transaction is valid from all perspectives
-    if err := p.validateTx(tx, blobs, commits, proofs); err != nil {
+    if err := p.validateTx(tx); err != nil {
         log.Trace("Transaction validation failed", "hash", tx.Hash(), "err", err)
         return err
     }
@@ -1203,7 +1196,7 @@ func (p *BlobPool) add(tx *types.Transaction, blobs []kzg4844.Blob, commits []kz
     }
     // Transaction permitted into the pool from a nonce and cost perspective,
     // insert it into the database and update the indices
-    blob, err := rlp.EncodeToBytes(&blobTx{Tx: tx, Blobs: blobs, Commits: commits, Proofs: proofs})
+    blob, err := rlp.EncodeToBytes(tx)
     if err != nil {
         log.Error("Failed to encode transaction for storage", "hash", tx.Hash(), "err", err)
         return err
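A minimal sketch of the storage change visible in the blobpool hunks above, assuming only what the diff shows: the pool now RLP-encodes the full types.Transaction (sidecar included) and rejects stored entries that come back without one. The package name and helper are made up for illustration.

package blobsketch

import (
    "errors"

    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/rlp"
)

// roundTrip mirrors what the blobpool's on-disk format now does (compare
// parseTransaction above): the transaction is RLP-encoded as-is, sidecar
// included, and a decoded entry without a sidecar is treated as invalid.
func roundTrip(tx *types.Transaction) (*types.Transaction, error) {
    blob, err := rlp.EncodeToBytes(tx)
    if err != nil {
        return nil, err
    }
    out := new(types.Transaction)
    if err := rlp.DecodeBytes(blob, out); err != nil {
        return nil, err
    }
    if out.BlobTxSidecar() == nil {
        return nil, errors.New("missing blob sidecar")
    }
    return out, nil
}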
core/txpool/blobpool/blobpool_test.go

@@ -193,8 +193,8 @@ func makeAddressReserver() txpool.AddressReserver {
 // with a valid key, only setting the interesting fields from the perspective of
 // the blob pool.
 func makeTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64, key *ecdsa.PrivateKey) *types.Transaction {
-    tx, _ := types.SignNewTx(key, types.LatestSigner(testChainConfig), makeUnsignedTx(nonce, gasTipCap, gasFeeCap, blobFeeCap))
-    return tx
+    blobtx := makeUnsignedTx(nonce, gasTipCap, gasFeeCap, blobFeeCap)
+    return types.MustSignNewTx(key, types.LatestSigner(testChainConfig), blobtx)
 }
 
 // makeUnsignedTx is a utility method to construct a random blob tranasaction
@@ -209,6 +209,11 @@ func makeUnsignedTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap
         BlobFeeCap: uint256.NewInt(blobFeeCap),
         BlobHashes: []common.Hash{emptyBlobVHash},
         Value:      uint256.NewInt(100),
+        Sidecar: &types.BlobTxSidecar{
+            Blobs:       []kzg4844.Blob{emptyBlob},
+            Commitments: []kzg4844.Commitment{emptyBlobCommit},
+            Proofs:      []kzg4844.Proof{emptyBlobProof},
+        },
     }
 }
 
@@ -341,7 +346,7 @@ func TestOpenDrops(t *testing.T) {
         R: new(uint256.Int),
         S: new(uint256.Int),
     })
-    blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+    blob, _ := rlp.EncodeToBytes(tx)
     badsig, _ := store.Put(blob)
 
     // Insert a sequence of transactions with a nonce gap in between to verify
@@ -354,7 +359,7 @@ func TestOpenDrops(t *testing.T) {
     )
     for _, nonce := range []uint64{0, 1, 3, 4, 6, 7} { // first gap at #2, another at #5
         tx := makeTx(nonce, 1, 1, 1, gapper)
-        blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+        blob, _ := rlp.EncodeToBytes(tx)
 
         id, _ := store.Put(blob)
         if nonce < 2 {
@@ -371,7 +376,7 @@ func TestOpenDrops(t *testing.T) {
     )
     for _, nonce := range []uint64{1, 2, 3} { // first gap at #0, all set dangling
         tx := makeTx(nonce, 1, 1, 1, dangler)
-        blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+        blob, _ := rlp.EncodeToBytes(tx)
 
         id, _ := store.Put(blob)
         dangling[id] = struct{}{}
@@ -384,7 +389,7 @@ func TestOpenDrops(t *testing.T) {
     )
     for _, nonce := range []uint64{0, 1, 2} { // account nonce at 3, all set filled
         tx := makeTx(nonce, 1, 1, 1, filler)
-        blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+        blob, _ := rlp.EncodeToBytes(tx)
 
         id, _ := store.Put(blob)
         filled[id] = struct{}{}
@@ -397,7 +402,7 @@ func TestOpenDrops(t *testing.T) {
     )
     for _, nonce := range []uint64{0, 1, 2, 3} { // account nonce at 2, half filled
         tx := makeTx(nonce, 1, 1, 1, overlapper)
-        blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+        blob, _ := rlp.EncodeToBytes(tx)
 
         id, _ := store.Put(blob)
         if nonce >= 2 {
@@ -419,7 +424,7 @@ func TestOpenDrops(t *testing.T) {
         } else {
             tx = makeTx(uint64(i), 1, 1, 1, underpayer)
         }
-        blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+        blob, _ := rlp.EncodeToBytes(tx)
 
         id, _ := store.Put(blob)
         underpaid[id] = struct{}{}
@@ -438,7 +443,7 @@ func TestOpenDrops(t *testing.T) {
         } else {
             tx = makeTx(uint64(i), 1, 1, 1, outpricer)
         }
-        blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+        blob, _ := rlp.EncodeToBytes(tx)
 
         id, _ := store.Put(blob)
         if i < 2 {
@@ -460,7 +465,7 @@ func TestOpenDrops(t *testing.T) {
         } else {
             tx = makeTx(nonce, 1, 1, 1, exceeder)
         }
-        blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+        blob, _ := rlp.EncodeToBytes(tx)
 
         id, _ := store.Put(blob)
         exceeded[id] = struct{}{}
@@ -478,7 +483,7 @@ func TestOpenDrops(t *testing.T) {
         } else {
             tx = makeTx(nonce, 1, 1, 1, overdrafter)
         }
-        blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+        blob, _ := rlp.EncodeToBytes(tx)
 
         id, _ := store.Put(blob)
         if nonce < 1 {
@@ -494,7 +499,7 @@ func TestOpenDrops(t *testing.T) {
         overcapped = make(map[uint64]struct{})
     )
     for nonce := uint64(0); nonce < maxTxsPerAccount+3; nonce++ {
-        blob, _ := rlp.EncodeToBytes(&blobTx{Tx: makeTx(nonce, 1, 1, 1, overcapper)})
+        blob, _ := rlp.EncodeToBytes(makeTx(nonce, 1, 1, 1, overcapper))
 
         id, _ := store.Put(blob)
         if nonce < maxTxsPerAccount {
@@ -625,7 +630,7 @@ func TestOpenIndex(t *testing.T) {
     )
     for _, i := range []int{5, 3, 4, 2, 0, 1} { // Randomize the tx insertion order to force sorting on load
         tx := makeTx(uint64(i), txExecTipCaps[i], txExecFeeCaps[i], txBlobFeeCaps[i], key)
-        blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+        blob, _ := rlp.EncodeToBytes(tx)
         store.Put(blob)
     }
     store.Close()
@@ -718,9 +723,9 @@ func TestOpenHeap(t *testing.T) {
         tx2 = makeTx(0, 1, 800, 70, key2)
         tx3 = makeTx(0, 1, 1500, 110, key3)
 
-        blob1, _ = rlp.EncodeToBytes(&blobTx{Tx: tx1})
-        blob2, _ = rlp.EncodeToBytes(&blobTx{Tx: tx2})
-        blob3, _ = rlp.EncodeToBytes(&blobTx{Tx: tx3})
+        blob1, _ = rlp.EncodeToBytes(tx1)
+        blob2, _ = rlp.EncodeToBytes(tx2)
+        blob3, _ = rlp.EncodeToBytes(tx3)
 
         heapOrder = []common.Address{addr2, addr1, addr3}
         heapIndex = map[common.Address]int{addr2: 0, addr1: 1, addr3: 2}
@@ -794,9 +799,9 @@ func TestOpenCap(t *testing.T) {
         tx2 = makeTx(0, 1, 800, 70, key2)
         tx3 = makeTx(0, 1, 1500, 110, key3)
 
-        blob1, _ = rlp.EncodeToBytes(&blobTx{Tx: tx1, Blobs: []kzg4844.Blob{emptyBlob}, Commits: []kzg4844.Commitment{emptyBlobCommit}, Proofs: []kzg4844.Proof{emptyBlobProof}})
-        blob2, _ = rlp.EncodeToBytes(&blobTx{Tx: tx2, Blobs: []kzg4844.Blob{emptyBlob}, Commits: []kzg4844.Commitment{emptyBlobCommit}, Proofs: []kzg4844.Proof{emptyBlobProof}})
-        blob3, _ = rlp.EncodeToBytes(&blobTx{Tx: tx3, Blobs: []kzg4844.Blob{emptyBlob}, Commits: []kzg4844.Commitment{emptyBlobCommit}, Proofs: []kzg4844.Proof{emptyBlobProof}})
+        blob1, _ = rlp.EncodeToBytes(tx1)
+        blob2, _ = rlp.EncodeToBytes(tx2)
+        blob3, _ = rlp.EncodeToBytes(tx3)
 
         keep = []common.Address{addr1, addr3}
         drop = []common.Address{addr2}
@@ -1210,10 +1215,8 @@ func TestAdd(t *testing.T) {
 
             // Sign the seed transactions and store them in the data store
             for _, tx := range seed.txs {
-                var (
-                    signed, _ = types.SignNewTx(keys[acc], types.LatestSigner(testChainConfig), tx)
-                    blob, _   = rlp.EncodeToBytes(&blobTx{Tx: signed, Blobs: []kzg4844.Blob{emptyBlob}, Commits: []kzg4844.Commitment{emptyBlobCommit}, Proofs: []kzg4844.Proof{emptyBlobProof}})
-                )
+                signed := types.MustSignNewTx(keys[acc], types.LatestSigner(testChainConfig), tx)
+                blob, _ := rlp.EncodeToBytes(signed)
                 store.Put(blob)
             }
         }
@@ -1236,7 +1239,7 @@ func TestAdd(t *testing.T) {
             // Add each transaction one by one, verifying the pool internals in between
             for j, add := range tt.adds {
                 signed, _ := types.SignNewTx(keys[add.from], types.LatestSigner(testChainConfig), add.tx)
-                if err := pool.add(signed, []kzg4844.Blob{emptyBlob}, []kzg4844.Commitment{emptyBlobCommit}, []kzg4844.Proof{emptyBlobProof}); !errors.Is(err, add.err) {
+                if err := pool.add(signed); !errors.Is(err, add.err) {
                     t.Errorf("test %d, tx %d: adding transaction error mismatch: have %v, want %v", i, j, err, add.err)
                 }
                 verifyPoolInternals(t, pool)
core/txpool/blobpool/limbo.go

@@ -21,7 +21,6 @@ import (
 
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core/types"
-    "github.com/ethereum/go-ethereum/crypto/kzg4844"
     "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/rlp"
     "github.com/holiman/billy"
@@ -31,12 +30,9 @@ import (
 // to which it belongs as well as the block number in which it was included for
 // finality eviction.
 type limboBlob struct {
-    Owner common.Hash // Owner transaction's hash to support resurrecting reorged txs
+    TxHash common.Hash // Owner transaction's hash to support resurrecting reorged txs
     Block uint64      // Block in which the blob transaction was included
+    Tx    *types.Transaction
-
-    Blobs   []kzg4844.Blob       // The opaque blobs originally part of the transaction
-    Commits []kzg4844.Commitment // The commitments for the original blobs
-    Proofs  []kzg4844.Proof      // The proofs verifying the commitments
 }
 
 // limbo is a light, indexed database to temporarily store recently included
@@ -98,19 +94,19 @@ func (l *limbo) parseBlob(id uint64, data []byte) error {
         log.Error("Failed to decode blob limbo entry", "id", id, "err", err)
         return err
     }
-    if _, ok := l.index[item.Owner]; ok {
+    if _, ok := l.index[item.TxHash]; ok {
         // This path is impossible, unless due to a programming error a blob gets
         // inserted into the limbo which was already part of if. Recover gracefully
         // by ignoring this data entry.
-        log.Error("Dropping duplicate blob limbo entry", "owner", item.Owner, "id", id)
+        log.Error("Dropping duplicate blob limbo entry", "owner", item.TxHash, "id", id)
         return errors.New("duplicate blob")
     }
-    l.index[item.Owner] = id
+    l.index[item.TxHash] = id
 
     if _, ok := l.groups[item.Block]; !ok {
         l.groups[item.Block] = make(map[uint64]common.Hash)
     }
-    l.groups[item.Block][id] = item.Owner
+    l.groups[item.Block][id] = item.TxHash
 
     return nil
 }
@@ -139,14 +135,14 @@ func (l *limbo) finalize(final *types.Header) {
 
 // push stores a new blob transaction into the limbo, waiting until finality for
 // it to be automatically evicted.
-func (l *limbo) push(tx common.Hash, block uint64, blobs []kzg4844.Blob, commits []kzg4844.Commitment, proofs []kzg4844.Proof) error {
+func (l *limbo) push(tx *types.Transaction, block uint64) error {
     // If the blobs are already tracked by the limbo, consider it a programming
     // error. There's not much to do against it, but be loud.
-    if _, ok := l.index[tx]; ok {
+    if _, ok := l.index[tx.Hash()]; ok {
         log.Error("Limbo cannot push already tracked blobs", "tx", tx)
         return errors.New("already tracked blob transaction")
     }
-    if err := l.setAndIndex(tx, block, blobs, commits, proofs); err != nil {
+    if err := l.setAndIndex(tx, block); err != nil {
         log.Error("Failed to set and index liboed blobs", "tx", tx, "err", err)
         return err
     }
@@ -156,21 +152,21 @@ func (l *limbo) push(tx common.Hash, block uint64, blobs []kzg4844.Blob, commits
 // pull retrieves a previously pushed set of blobs back from the limbo, removing
 // it at the same time. This method should be used when a previously included blob
 // transaction gets reorged out.
-func (l *limbo) pull(tx common.Hash) ([]kzg4844.Blob, []kzg4844.Commitment, []kzg4844.Proof, error) {
+func (l *limbo) pull(tx common.Hash) (*types.Transaction, error) {
     // If the blobs are not tracked by the limbo, there's not much to do. This
     // can happen for example if a blob transaction is mined without pushing it
     // into the network first.
     id, ok := l.index[tx]
     if !ok {
         log.Trace("Limbo cannot pull non-tracked blobs", "tx", tx)
-        return nil, nil, nil, errors.New("unseen blob transaction")
+        return nil, errors.New("unseen blob transaction")
     }
     item, err := l.getAndDrop(id)
     if err != nil {
         log.Error("Failed to get and drop limboed blobs", "tx", tx, "id", id, "err", err)
-        return nil, nil, nil, err
+        return nil, err
     }
-    return item.Blobs, item.Commits, item.Proofs, nil
+    return item.Tx, nil
 }
 
 // update changes the block number under which a blob transaction is tracked. This
@@ -180,33 +176,33 @@ func (l *limbo) pull(tx common.Hash) ([]kzg4844.Blob, []kzg4844.Commitment, []kz
 // any of it since there's no clear error case. Some errors may be due to coding
 // issues, others caused by signers mining MEV stuff or swapping transactions. In
 // all cases, the pool needs to continue operating.
-func (l *limbo) update(tx common.Hash, block uint64) {
+func (l *limbo) update(txhash common.Hash, block uint64) {
     // If the blobs are not tracked by the limbo, there's not much to do. This
     // can happen for example if a blob transaction is mined without pushing it
     // into the network first.
-    id, ok := l.index[tx]
+    id, ok := l.index[txhash]
     if !ok {
-        log.Trace("Limbo cannot update non-tracked blobs", "tx", tx)
+        log.Trace("Limbo cannot update non-tracked blobs", "tx", txhash)
         return
     }
     // If there was no change in the blob's inclusion block, don't mess around
     // with heavy database operations.
     if _, ok := l.groups[block][id]; ok {
-        log.Trace("Blob transaction unchanged in limbo", "tx", tx, "block", block)
+        log.Trace("Blob transaction unchanged in limbo", "tx", txhash, "block", block)
         return
     }
     // Retrieve the old blobs from the data store and write tehm back with a new
     // block number. IF anything fails, there's not much to do, go on.
     item, err := l.getAndDrop(id)
     if err != nil {
-        log.Error("Failed to get and drop limboed blobs", "tx", tx, "id", id, "err", err)
+        log.Error("Failed to get and drop limboed blobs", "tx", txhash, "id", id, "err", err)
         return
     }
-    if err := l.setAndIndex(tx, block, item.Blobs, item.Commits, item.Proofs); err != nil {
-        log.Error("Failed to set and index limboed blobs", "tx", tx, "err", err)
+    if err := l.setAndIndex(item.Tx, block); err != nil {
+        log.Error("Failed to set and index limboed blobs", "tx", txhash, "err", err)
         return
     }
-    log.Trace("Blob transaction updated in limbo", "tx", tx, "old-block", item.Block, "new-block", block)
+    log.Trace("Blob transaction updated in limbo", "tx", txhash, "old-block", item.Block, "new-block", block)
 }
 
 // getAndDrop retrieves a blob item from the limbo store and deletes it both from
@@ -220,7 +216,7 @@ func (l *limbo) getAndDrop(id uint64) (*limboBlob, error) {
     if err = rlp.DecodeBytes(data, item); err != nil {
         return nil, err
     }
-    delete(l.index, item.Owner)
+    delete(l.index, item.TxHash)
    delete(l.groups[item.Block], id)
     if len(l.groups[item.Block]) == 0 {
         delete(l.groups, item.Block)
@@ -233,13 +229,12 @@ func (l *limbo) getAndDrop(id uint64) (*limboBlob, error) {
 
 // setAndIndex assembles a limbo blob database entry and stores it, also updating
 // the in-memory indices.
-func (l *limbo) setAndIndex(tx common.Hash, block uint64, blobs []kzg4844.Blob, commits []kzg4844.Commitment, proofs []kzg4844.Proof) error {
+func (l *limbo) setAndIndex(tx *types.Transaction, block uint64) error {
+    txhash := tx.Hash()
     item := &limboBlob{
-        Owner: tx,
+        TxHash: txhash,
         Block: block,
-        Blobs:   blobs,
-        Commits: commits,
-        Proofs:  proofs,
+        Tx:     tx,
     }
     data, err := rlp.EncodeToBytes(item)
     if err != nil {
@@ -249,10 +244,10 @@ func (l *limbo) setAndIndex(tx common.Hash, block uint64, blobs []kzg4844.Blob,
     if err != nil {
         return err
     }
-    l.index[tx] = id
+    l.index[txhash] = id
     if _, ok := l.groups[block]; !ok {
         l.groups[block] = make(map[uint64]common.Hash)
     }
-    l.groups[block][id] = tx
+    l.groups[block][id] = txhash
     return nil
 }
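Illustrative sketch only (the type and package names are made up; the real limbo above is billy-backed and indexed by inclusion block): the essential point of the limbo change is that it must retain the complete *types.Transaction, because a reorged block's transactions come back without their sidecars and would otherwise be unmineable on reinjection.

package limbosketch

import (
    "errors"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
)

// miniLimbo keeps full transactions keyed by hash, so that a reorged-out
// blob transaction can be resurrected with its sidecar intact.
type miniLimbo struct {
    entries map[common.Hash]*types.Transaction
}

func newMiniLimbo() *miniLimbo {
    return &miniLimbo{entries: make(map[common.Hash]*types.Transaction)}
}

// push stores an included blob transaction until finality evicts it.
func (l *miniLimbo) push(tx *types.Transaction) error {
    if _, ok := l.entries[tx.Hash()]; ok {
        return errors.New("already tracked blob transaction")
    }
    l.entries[tx.Hash()] = tx
    return nil
}

// pull hands a reorged-out transaction back to the pool, sidecar and all.
func (l *miniLimbo) pull(hash common.Hash) (*types.Transaction, error) {
    tx, ok := l.entries[hash]
    if !ok {
        return nil, errors.New("unseen blob transaction")
    }
    delete(l.entries, hash)
    return tx, nil
}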
core/txpool/legacypool/legacypool.go

@@ -535,7 +535,7 @@ func (pool *LegacyPool) Pending(enforceTips bool) map[common.Address][]*txpool.L
             lazies[i] = &txpool.LazyTransaction{
                 Pool:      pool,
                 Hash:      txs[i].Hash(),
-                Tx:        &txpool.Transaction{Tx: txs[i]},
+                Tx:        txs[i],
                 Time:      txs[i].Time(),
                 GasFeeCap: txs[i].GasFeeCap(),
                 GasTipCap: txs[i].GasTipCap(),
@@ -588,7 +588,7 @@ func (pool *LegacyPool) validateTxBasics(tx *types.Transaction, local bool) erro
     if local {
         opts.MinTip = new(big.Int)
     }
-    if err := txpool.ValidateTransaction(tx, nil, nil, nil, pool.currentHead.Load(), pool.signer, opts); err != nil {
+    if err := txpool.ValidateTransaction(tx, pool.currentHead.Load(), pool.signer, opts); err != nil {
         return err
     }
     return nil
@@ -900,26 +900,13 @@ func (pool *LegacyPool) promoteTx(addr common.Address, hash common.Hash, tx *typ
     return true
 }
 
-// Add enqueues a batch of transactions into the pool if they are valid. Depending
-// on the local flag, full pricing contraints will or will not be applied.
-//
-// If sync is set, the method will block until all internal maintenance related
-// to the add is finished. Only use this during tests for determinism!
-func (pool *LegacyPool) Add(txs []*txpool.Transaction, local bool, sync bool) []error {
-    unwrapped := make([]*types.Transaction, len(txs))
-    for i, tx := range txs {
-        unwrapped[i] = tx.Tx
-    }
-    return pool.addTxs(unwrapped, local, sync)
-}
-
 // addLocals enqueues a batch of transactions into the pool if they are valid, marking the
 // senders as a local ones, ensuring they go around the local pricing constraints.
 //
 // This method is used to add transactions from the RPC API and performs synchronous pool
 // reorganization and event propagation.
 func (pool *LegacyPool) addLocals(txs []*types.Transaction) []error {
-    return pool.addTxs(txs, !pool.config.NoLocals, true)
+    return pool.Add(txs, !pool.config.NoLocals, true)
 }
 
 // addLocal enqueues a single local transaction into the pool if it is valid. This is
@@ -935,7 +922,7 @@ func (pool *LegacyPool) addLocal(tx *types.Transaction) error {
 // This method is used to add transactions from the p2p network and does not wait for pool
 // reorganization and internal event propagation.
 func (pool *LegacyPool) addRemotes(txs []*types.Transaction) []error {
-    return pool.addTxs(txs, false, false)
+    return pool.Add(txs, false, false)
 }
 
 // addRemote enqueues a single transaction into the pool if it is valid. This is a convenience
@@ -947,16 +934,20 @@ func (pool *LegacyPool) addRemote(tx *types.Transaction) error {
 
 // addRemotesSync is like addRemotes, but waits for pool reorganization. Tests use this method.
 func (pool *LegacyPool) addRemotesSync(txs []*types.Transaction) []error {
-    return pool.addTxs(txs, false, true)
+    return pool.Add(txs, false, true)
 }
 
 // This is like addRemotes with a single transaction, but waits for pool reorganization. Tests use this method.
 func (pool *LegacyPool) addRemoteSync(tx *types.Transaction) error {
-    return pool.addTxs([]*types.Transaction{tx}, false, true)[0]
+    return pool.Add([]*types.Transaction{tx}, false, true)[0]
 }
 
-// addTxs attempts to queue a batch of transactions if they are valid.
-func (pool *LegacyPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
+// Add enqueues a batch of transactions into the pool if they are valid. Depending
+// on the local flag, full pricing contraints will or will not be applied.
+//
+// If sync is set, the method will block until all internal maintenance related
+// to the add is finished. Only use this during tests for determinism!
+func (pool *LegacyPool) Add(txs []*types.Transaction, local, sync bool) []error {
     // Filter out known ones without obtaining the pool lock or recovering signatures
     var (
         errs = make([]error, len(txs))
@@ -1042,12 +1033,12 @@ func (pool *LegacyPool) Status(hash common.Hash) txpool.TxStatus {
 }
 
 // Get returns a transaction if it is contained in the pool and nil otherwise.
-func (pool *LegacyPool) Get(hash common.Hash) *txpool.Transaction {
+func (pool *LegacyPool) Get(hash common.Hash) *types.Transaction {
     tx := pool.get(hash)
     if tx == nil {
         return nil
     }
-    return &txpool.Transaction{Tx: tx}
+    return tx
 }
 
 // get returns a transaction if it is contained in the pool and nil otherwise.
|
@@ -23,27 +23,16 @@ import (
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core"
     "github.com/ethereum/go-ethereum/core/types"
-    "github.com/ethereum/go-ethereum/crypto/kzg4844"
     "github.com/ethereum/go-ethereum/event"
 )
 
-// Transaction is a helper struct to group together a canonical transaction with
-// satellite data items that are needed by the pool but are not part of the chain.
-type Transaction struct {
-    Tx *types.Transaction // Canonical transaction
-
-    BlobTxBlobs   []kzg4844.Blob       // Blobs needed by the blob pool
-    BlobTxCommits []kzg4844.Commitment // Commitments needed by the blob pool
-    BlobTxProofs  []kzg4844.Proof      // Proofs needed by the blob pool
-}
-
 // LazyTransaction contains a small subset of the transaction properties that is
 // enough for the miner and other APIs to handle large batches of transactions;
 // and supports pulling up the entire transaction when really needed.
 type LazyTransaction struct {
     Pool SubPool     // Transaction subpool to pull the real transaction up
     Hash common.Hash // Transaction hash to pull up if needed
-    Tx   *Transaction // Transaction if already resolved
+    Tx   *types.Transaction // Transaction if already resolved
 
     Time      time.Time // Time when the transaction was first seen
     GasFeeCap *big.Int  // Maximum fee per gas the transaction may consume
@@ -52,7 +41,7 @@ type LazyTransaction struct {
 
 // Resolve retrieves the full transaction belonging to a lazy handle if it is still
 // maintained by the transaction pool.
-func (ltx *LazyTransaction) Resolve() *Transaction {
+func (ltx *LazyTransaction) Resolve() *types.Transaction {
     if ltx.Tx == nil {
         ltx.Tx = ltx.Pool.Get(ltx.Hash)
     }
@@ -99,12 +88,12 @@ type SubPool interface {
     Has(hash common.Hash) bool
 
     // Get returns a transaction if it is contained in the pool, or nil otherwise.
-    Get(hash common.Hash) *Transaction
+    Get(hash common.Hash) *types.Transaction
 
     // Add enqueues a batch of transactions into the pool if they are valid. Due
     // to the large transaction churn, add may postpone fully integrating the tx
     // to a later point to batch multiple ones together.
-    Add(txs []*Transaction, local bool, sync bool) []error
+    Add(txs []*types.Transaction, local bool, sync bool) []error
 
     // Pending retrieves all currently processable transactions, grouped by origin
     // account and sorted by nonce.
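Aside (illustrative, not part of this diff): with Resolve now returning *types.Transaction, a batch consumer no longer unwraps a wrapper's Tx field. A minimal sketch under that assumption; the package name and the resolveAll helper are hypothetical.

// example.go (illustrative only, not from this commit)
package example

import (
    "github.com/ethereum/go-ethereum/core/txpool"
    "github.com/ethereum/go-ethereum/core/types"
)

// resolveAll expands a batch of pool handles into full transactions.
func resolveAll(lazies []*txpool.LazyTransaction) []*types.Transaction {
    txs := make([]*types.Transaction, 0, len(lazies))
    for _, lazy := range lazies {
        // Resolve pulls the full transaction from the owning subpool if it is still there.
        if tx := lazy.Resolve(); tx != nil {
            txs = append(txs, tx)
        }
    }
    return txs
}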
@@ -249,7 +249,7 @@ func (p *TxPool) Has(hash common.Hash) bool {
 }
 
 // Get returns a transaction if it is contained in the pool, or nil otherwise.
-func (p *TxPool) Get(hash common.Hash) *Transaction {
+func (p *TxPool) Get(hash common.Hash) *types.Transaction {
     for _, subpool := range p.subpools {
         if tx := subpool.Get(hash); tx != nil {
             return tx
@@ -261,14 +261,14 @@ func (p *TxPool) Get(hash common.Hash) *Transaction {
 // Add enqueues a batch of transactions into the pool if they are valid. Due
 // to the large transaction churn, add may postpone fully integrating the tx
 // to a later point to batch multiple ones together.
-func (p *TxPool) Add(txs []*Transaction, local bool, sync bool) []error {
+func (p *TxPool) Add(txs []*types.Transaction, local bool, sync bool) []error {
     // Split the input transactions between the subpools. It shouldn't really
     // happen that we receive merged batches, but better graceful than strange
     // errors.
     //
     // We also need to track how the transactions were split across the subpools,
     // so we can piece back the returned errors into the original order.
-    txsets := make([][]*Transaction, len(p.subpools))
+    txsets := make([][]*types.Transaction, len(p.subpools))
     splits := make([]int, len(txs))
 
     for i, tx := range txs {
@@ -277,7 +277,7 @@ func (p *TxPool) Add(txs []*Transaction, local bool, sync bool) []error {
 
         // Try to find a subpool that accepts the transaction
         for j, subpool := range p.subpools {
-            if subpool.Filter(tx.Tx) {
+            if subpool.Filter(tx) {
                 txsets[j] = append(txsets[j], tx)
                 splits[i] = j
                 break
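Aside (illustrative, not part of this diff): the unwrapped pool API now passes and returns *types.Transaction directly. A minimal sketch, assuming a configured *txpool.TxPool; the package name and submitAndFetch helper are hypothetical.

// example.go (illustrative only, not from this commit)
package example

import (
    "github.com/ethereum/go-ethereum/core/txpool"
    "github.com/ethereum/go-ethereum/core/types"
)

// submitAndFetch adds a signed transaction and reads it back by hash. A blob
// transaction must already carry its sidecar, otherwise the blobpool rejects it
// during validation.
func submitAndFetch(pool *txpool.TxPool, signedTx *types.Transaction) (*types.Transaction, error) {
    if err := pool.Add([]*types.Transaction{signedTx}, true, false)[0]; err != nil {
        return nil, err
    }
    return pool.Get(signedTx.Hash()), nil
}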
@@ -46,7 +46,7 @@ type ValidationOptions struct {
 //
 // This check is public to allow different transaction pools to check the basic
 // rules without duplicating code and running the risk of missed updates.
-func ValidateTransaction(tx *types.Transaction, blobs []kzg4844.Blob, commits []kzg4844.Commitment, proofs []kzg4844.Proof, head *types.Header, signer types.Signer, opts *ValidationOptions) error {
+func ValidateTransaction(tx *types.Transaction, head *types.Header, signer types.Signer, opts *ValidationOptions) error {
     // Ensure transactions not implemented by the calling pool are rejected
     if opts.Accept&(1<<tx.Type()) == 0 {
         return fmt.Errorf("%w: tx type %v not supported by this pool", core.ErrTxTypeNotSupported, tx.Type())
@@ -110,6 +110,10 @@ func ValidateTransaction(tx *types.Transaction, blobs []kzg4844.Blob, commits []
     }
     // Ensure blob transactions have valid commitments
     if tx.Type() == types.BlobTxType {
+        sidecar := tx.BlobTxSidecar()
+        if sidecar == nil {
+            return fmt.Errorf("missing sidecar in blob transaction")
+        }
         // Ensure the number of items in the blob transaction and vairous side
         // data match up before doing any expensive validations
         hashes := tx.BlobHashes()
@@ -119,37 +123,44 @@ func ValidateTransaction(tx *types.Transaction, blobs []kzg4844.Blob, commits []
         if len(hashes) > params.BlobTxMaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob {
             return fmt.Errorf("too many blobs in transaction: have %d, permitted %d", len(hashes), params.BlobTxMaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob)
         }
-        if len(blobs) != len(hashes) {
-            return fmt.Errorf("invalid number of %d blobs compared to %d blob hashes", len(blobs), len(hashes))
-        }
-        if len(commits) != len(hashes) {
-            return fmt.Errorf("invalid number of %d blob commitments compared to %d blob hashes", len(commits), len(hashes))
-        }
-        if len(proofs) != len(hashes) {
-            return fmt.Errorf("invalid number of %d blob proofs compared to %d blob hashes", len(proofs), len(hashes))
-        }
-        // Blob quantities match up, validate that the provers match with the
-        // transaction hash before getting to the cryptography
-        hasher := sha256.New()
-        for i, want := range hashes {
-            hasher.Write(commits[i][:])
-            hash := hasher.Sum(nil)
-            hasher.Reset()
-
-            var vhash common.Hash
-            vhash[0] = params.BlobTxHashVersion
-            copy(vhash[1:], hash[1:])
-
-            if vhash != want {
-                return fmt.Errorf("blob %d: computed hash %#x mismatches transaction one %#x", i, vhash, want)
-            }
-        }
-        // Blob commitments match with the hashes in the transaction, verify the
-        // blobs themselves via KZG
-        for i := range blobs {
-            if err := kzg4844.VerifyBlobProof(blobs[i], commits[i], proofs[i]); err != nil {
-                return fmt.Errorf("invalid blob %d: %v", i, err)
-            }
+        if err := validateBlobSidecar(hashes, sidecar); err != nil {
+            return err
         }
     }
     return nil
 }
+
+func validateBlobSidecar(hashes []common.Hash, sidecar *types.BlobTxSidecar) error {
+    if len(sidecar.Blobs) != len(hashes) {
+        return fmt.Errorf("invalid number of %d blobs compared to %d blob hashes", len(sidecar.Blobs), len(hashes))
+    }
+    if len(sidecar.Commitments) != len(hashes) {
+        return fmt.Errorf("invalid number of %d blob commitments compared to %d blob hashes", len(sidecar.Commitments), len(hashes))
+    }
+    if len(sidecar.Proofs) != len(hashes) {
+        return fmt.Errorf("invalid number of %d blob proofs compared to %d blob hashes", len(sidecar.Proofs), len(hashes))
+    }
+    // Blob quantities match up, validate that the provers match with the
+    // transaction hash before getting to the cryptography
+    hasher := sha256.New()
+    for i, want := range hashes {
+        hasher.Write(sidecar.Commitments[i][:])
+        hash := hasher.Sum(nil)
+        hasher.Reset()
+
+        var vhash common.Hash
+        vhash[0] = params.BlobTxHashVersion
+        copy(vhash[1:], hash[1:])
+
+        if vhash != want {
+            return fmt.Errorf("blob %d: computed hash %#x mismatches transaction one %#x", i, vhash, want)
+        }
+    }
+    // Blob commitments match with the hashes in the transaction, verify the
+    // blobs themselves via KZG
+    for i := range sidecar.Blobs {
+        if err := kzg4844.VerifyBlobProof(sidecar.Blobs[i], sidecar.Commitments[i], sidecar.Proofs[i]); err != nil {
+            return fmt.Errorf("invalid blob %d: %v", i, err)
+        }
+    }
+    return nil
+}
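Aside (illustrative, not part of this diff): the hash comparison inside validateBlobSidecar can be stated on its own. A minimal sketch; the package name and versionedHash helper are hypothetical, the constants and types come from go-ethereum.

// example.go (illustrative only, not from this commit)
package example

import (
    "crypto/sha256"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/crypto/kzg4844"
    "github.com/ethereum/go-ethereum/params"
)

// versionedHash reproduces the per-blob check above: a blob hash listed in the
// transaction must equal the SHA-256 of its commitment with the first byte
// replaced by the blob hash version.
func versionedHash(commit kzg4844.Commitment) common.Hash {
    sum := sha256.Sum256(commit[:])
    var vhash common.Hash
    copy(vhash[:], sum[:])
    vhash[0] = params.BlobTxHashVersion
    return vhash
}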
@@ -82,9 +82,6 @@ type TxData interface {
     value() *big.Int
     nonce() uint64
     to() *common.Address
-    blobGas() uint64
-    blobGasFeeCap() *big.Int
-    blobHashes() []common.Hash
 
     rawSignatureValues() (v, r, s *big.Int)
     setSignatureValues(chainID, v, r, s *big.Int)
@@ -96,6 +93,9 @@ type TxData interface {
     // copy of the computed value, i.e. callers are allowed to mutate the result.
     // Method implementations can use 'dst' to store the result.
     effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int
+
+    encode(*bytes.Buffer) error
+    decode([]byte) error
 }
 
 // EncodeRLP implements rlp.Encoder
@@ -116,7 +116,7 @@ func (tx *Transaction) EncodeRLP(w io.Writer) error {
 // encodeTyped writes the canonical encoding of a typed transaction to w.
 func (tx *Transaction) encodeTyped(w *bytes.Buffer) error {
     w.WriteByte(tx.Type())
-    return rlp.Encode(w, tx.inner)
+    return tx.inner.encode(w)
 }
 
 // MarshalBinary returns the canonical encoding of the transaction.
@@ -186,22 +186,19 @@ func (tx *Transaction) decodeTyped(b []byte) (TxData, error) {
     if len(b) <= 1 {
         return nil, errShortTypedTx
     }
+    var inner TxData
     switch b[0] {
     case AccessListTxType:
-        var inner AccessListTx
-        err := rlp.DecodeBytes(b[1:], &inner)
-        return &inner, err
+        inner = new(AccessListTx)
     case DynamicFeeTxType:
-        var inner DynamicFeeTx
-        err := rlp.DecodeBytes(b[1:], &inner)
-        return &inner, err
+        inner = new(DynamicFeeTx)
     case BlobTxType:
-        var inner BlobTx
-        err := rlp.DecodeBytes(b[1:], &inner)
-        return &inner, err
+        inner = new(BlobTx)
     default:
         return nil, ErrTxTypeNotSupported
     }
+    err := inner.decode(b[1:])
+    return inner, err
 }
 
 // setDecoded sets the inner transaction and size after decoding.
@@ -288,15 +285,6 @@ func (tx *Transaction) GasTipCap() *big.Int { return new(big.Int).Set(tx.inner.g
 // GasFeeCap returns the fee cap per gas of the transaction.
 func (tx *Transaction) GasFeeCap() *big.Int { return new(big.Int).Set(tx.inner.gasFeeCap()) }
 
-// BlobGas returns the blob gas limit of the transaction for blob transactions, 0 otherwise.
-func (tx *Transaction) BlobGas() uint64 { return tx.inner.blobGas() }
-
-// BlobGasFeeCap returns the blob gas fee cap per blob gas of the transaction for blob transactions, nil otherwise.
-func (tx *Transaction) BlobGasFeeCap() *big.Int { return tx.inner.blobGasFeeCap() }
-
-// BlobHashes returns the hases of the blob commitments for blob transactions, nil otherwise.
-func (tx *Transaction) BlobHashes() []common.Hash { return tx.inner.blobHashes() }
-
 // Value returns the ether amount of the transaction.
 func (tx *Transaction) Value() *big.Int { return new(big.Int).Set(tx.inner.value()) }
 
@@ -383,14 +371,66 @@ func (tx *Transaction) EffectiveGasTipIntCmp(other *big.Int, baseFee *big.Int) i
     return tx.EffectiveGasTipValue(baseFee).Cmp(other)
 }
 
+// BlobGas returns the blob gas limit of the transaction for blob transactions, 0 otherwise.
+func (tx *Transaction) BlobGas() uint64 {
+    if blobtx, ok := tx.inner.(*BlobTx); ok {
+        return blobtx.blobGas()
+    }
+    return 0
+}
+
+// BlobGasFeeCap returns the blob gas fee cap per blob gas of the transaction for blob transactions, nil otherwise.
+func (tx *Transaction) BlobGasFeeCap() *big.Int {
+    if blobtx, ok := tx.inner.(*BlobTx); ok {
+        return blobtx.BlobFeeCap.ToBig()
+    }
+    return nil
+}
+
+// BlobHashes returns the hases of the blob commitments for blob transactions, nil otherwise.
+func (tx *Transaction) BlobHashes() []common.Hash {
+    if blobtx, ok := tx.inner.(*BlobTx); ok {
+        return blobtx.BlobHashes
+    }
+    return nil
+}
+
+// BlobTxSidecar returns the sidecar of a blob transaction, nil otherwise.
+func (tx *Transaction) BlobTxSidecar() *BlobTxSidecar {
+    if blobtx, ok := tx.inner.(*BlobTx); ok {
+        return blobtx.Sidecar
+    }
+    return nil
+}
+
 // BlobGasFeeCapCmp compares the blob fee cap of two transactions.
 func (tx *Transaction) BlobGasFeeCapCmp(other *Transaction) int {
-    return tx.inner.blobGasFeeCap().Cmp(other.inner.blobGasFeeCap())
+    return tx.BlobGasFeeCap().Cmp(other.BlobGasFeeCap())
 }
 
 // BlobGasFeeCapIntCmp compares the blob fee cap of the transaction against the given blob fee cap.
 func (tx *Transaction) BlobGasFeeCapIntCmp(other *big.Int) int {
-    return tx.inner.blobGasFeeCap().Cmp(other)
+    return tx.BlobGasFeeCap().Cmp(other)
 }
+
+// WithoutBlobTxSidecar returns a copy of tx with the blob sidecar removed.
+func (tx *Transaction) WithoutBlobTxSidecar() *Transaction {
+    blobtx, ok := tx.inner.(*BlobTx)
+    if !ok {
+        return tx
+    }
+    cpy := &Transaction{
+        inner: blobtx.withoutSidecar(),
+        time:  tx.time,
+    }
+    // Note: tx.size cache not carried over because the sidecar is included in size!
+    if h := tx.hash.Load(); h != nil {
+        cpy.hash.Store(h)
+    }
+    if f := tx.from.Load(); f != nil {
+        cpy.from.Store(f)
+    }
+    return cpy
+}
 
 // SetTime sets the decoding time of a transaction. This is used by tests to set
@@ -428,13 +468,24 @@ func (tx *Transaction) Size() uint64 {
     if size := tx.size.Load(); size != nil {
         return size.(uint64)
     }
+
+    // Cache miss, encode and cache.
+    // Note we rely on the assumption that all tx.inner values are RLP-encoded!
     c := writeCounter(0)
     rlp.Encode(&c, &tx.inner)
 
     size := uint64(c)
-    if tx.Type() != LegacyTxType {
-        size += 1
+
+    // For blob transactions, add the size of the blob content and the outer list of the
+    // tx + sidecar encoding.
+    if sc := tx.BlobTxSidecar(); sc != nil {
+        size += rlp.ListSize(sc.encodedSize())
     }
+
+    // For typed transactions, the encoding also includes the leading type byte.
+    if tx.Type() != LegacyTxType {
+        size += 1
+    }
+
     tx.size.Store(size)
     return size
 }
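Aside (illustrative, not part of this diff): the new accessors let callers drop the sidecar without disturbing hash or signature, which is the operation the miner performs before committing a blob transaction into a block. A minimal sketch; the package name and stripSidecar helper are hypothetical.

// example.go (illustrative only, not from this commit)
package example

import "github.com/ethereum/go-ethereum/core/types"

// stripSidecar returns a block-ready copy of a blob transaction: the sidecar is
// dropped, while hash and signature stay the same because the sidecar is not part
// of the signed payload.
func stripSidecar(tx *types.Transaction) *types.Transaction {
    if tx.BlobTxSidecar() == nil {
        return tx // non-blob transactions (and already-stripped ones) pass through
    }
    return tx.WithoutBlobTxSidecar()
}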
@@ -17,9 +17,11 @@
 package types
 
 import (
+    "bytes"
     "math/big"
 
     "github.com/ethereum/go-ethereum/common"
+    "github.com/ethereum/go-ethereum/rlp"
 )
 
 //go:generate go run github.com/fjl/gencodec -type AccessTuple -out gen_access_tuple.go
@@ -94,20 +96,17 @@ func (tx *AccessListTx) copy() TxData {
 }
 
 // accessors for innerTx.
 func (tx *AccessListTx) txType() byte           { return AccessListTxType }
 func (tx *AccessListTx) chainID() *big.Int      { return tx.ChainID }
 func (tx *AccessListTx) accessList() AccessList { return tx.AccessList }
 func (tx *AccessListTx) data() []byte           { return tx.Data }
 func (tx *AccessListTx) gas() uint64            { return tx.Gas }
 func (tx *AccessListTx) gasPrice() *big.Int     { return tx.GasPrice }
 func (tx *AccessListTx) gasTipCap() *big.Int    { return tx.GasPrice }
 func (tx *AccessListTx) gasFeeCap() *big.Int    { return tx.GasPrice }
 func (tx *AccessListTx) value() *big.Int        { return tx.Value }
 func (tx *AccessListTx) nonce() uint64          { return tx.Nonce }
 func (tx *AccessListTx) to() *common.Address    { return tx.To }
-func (tx *AccessListTx) blobGas() uint64           { return 0 }
-func (tx *AccessListTx) blobGasFeeCap() *big.Int   { return nil }
-func (tx *AccessListTx) blobHashes() []common.Hash { return nil }
 
 func (tx *AccessListTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int {
     return dst.Set(tx.GasPrice)
@@ -120,3 +119,11 @@ func (tx *AccessListTx) rawSignatureValues() (v, r, s *big.Int) {
 func (tx *AccessListTx) setSignatureValues(chainID, v, r, s *big.Int) {
     tx.ChainID, tx.V, tx.R, tx.S = chainID, v, r, s
 }
+
+func (tx *AccessListTx) encode(b *bytes.Buffer) error {
+    return rlp.Encode(b, tx)
+}
+
+func (tx *AccessListTx) decode(input []byte) error {
+    return rlp.DecodeBytes(input, tx)
+}
@@ -17,10 +17,14 @@
 package types
 
 import (
+    "bytes"
+    "crypto/sha256"
     "math/big"
 
     "github.com/ethereum/go-ethereum/common"
+    "github.com/ethereum/go-ethereum/crypto/kzg4844"
     "github.com/ethereum/go-ethereum/params"
+    "github.com/ethereum/go-ethereum/rlp"
     "github.com/holiman/uint256"
 )
 
@@ -38,12 +42,56 @@ type BlobTx struct {
     BlobFeeCap *uint256.Int // a.k.a. maxFeePerBlobGas
     BlobHashes []common.Hash
 
+    // A blob transaction can optionally contain blobs. This field must be set when BlobTx
+    // is used to create a transaction for sigining.
+    Sidecar *BlobTxSidecar `rlp:"-"`
+
     // Signature values
     V *uint256.Int `json:"v" gencodec:"required"`
     R *uint256.Int `json:"r" gencodec:"required"`
     S *uint256.Int `json:"s" gencodec:"required"`
 }
 
+// BlobTxSidecar contains the blobs of a blob transaction.
+type BlobTxSidecar struct {
+    Blobs       []kzg4844.Blob       // Blobs needed by the blob pool
+    Commitments []kzg4844.Commitment // Commitments needed by the blob pool
+    Proofs      []kzg4844.Proof      // Proofs needed by the blob pool
+}
+
+// BlobHashes computes the blob hashes of the given blobs.
+func (sc *BlobTxSidecar) BlobHashes() []common.Hash {
+    h := make([]common.Hash, len(sc.Commitments))
+    for i := range sc.Blobs {
+        h[i] = blobHash(&sc.Commitments[i])
+    }
+    return h
+}
+
+// encodedSize computes the RLP size of the sidecar elements. This does NOT return the
+// encoded size of the BlobTxSidecar, it's just a helper for tx.Size().
+func (sc *BlobTxSidecar) encodedSize() uint64 {
+    var blobs, commitments, proofs uint64
+    for i := range sc.Blobs {
+        blobs += rlp.BytesSize(sc.Blobs[i][:])
+    }
+    for i := range sc.Commitments {
+        commitments += rlp.BytesSize(sc.Commitments[i][:])
+    }
+    for i := range sc.Proofs {
+        proofs += rlp.BytesSize(sc.Proofs[i][:])
+    }
+    return rlp.ListSize(blobs) + rlp.ListSize(commitments) + rlp.ListSize(proofs)
+}
+
+// blobTxWithBlobs is used for encoding of transactions when blobs are present.
+type blobTxWithBlobs struct {
+    BlobTx      *BlobTx
+    Blobs       []kzg4844.Blob
+    Commitments []kzg4844.Commitment
+    Proofs      []kzg4844.Proof
+}
+
 // copy creates a deep copy of the transaction data and initializes all fields.
 func (tx *BlobTx) copy() TxData {
     cpy := &BlobTx{
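Aside (illustrative, not part of this diff): a sidecar is typically built from the blobs themselves, and BlobTx.BlobHashes is then derived from it via the sidecar's BlobHashes helper. A minimal sketch under that assumption; the package name and makeSidecar helper are hypothetical.

// example.go (illustrative only, not from this commit)
package example

import (
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/crypto/kzg4844"
)

// makeSidecar builds a BlobTxSidecar for the given blobs and returns the versioned
// hashes that belong in the BlobHashes field of the transaction.
func makeSidecar(blobs []kzg4844.Blob) (*types.BlobTxSidecar, []common.Hash, error) {
    sc := &types.BlobTxSidecar{Blobs: blobs}
    for i := range blobs {
        commit, err := kzg4844.BlobToCommitment(blobs[i])
        if err != nil {
            return nil, nil, err
        }
        proof, err := kzg4844.ComputeBlobProof(blobs[i], commit)
        if err != nil {
            return nil, nil, err
        }
        sc.Commitments = append(sc.Commitments, commit)
        sc.Proofs = append(sc.Proofs, proof)
    }
    return sc, sc.BlobHashes(), nil
}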
@@ -90,24 +138,29 @@ func (tx *BlobTx) copy() TxData {
     if tx.S != nil {
         cpy.S.Set(tx.S)
     }
+    if tx.Sidecar != nil {
+        cpy.Sidecar = &BlobTxSidecar{
+            Blobs:       append([]kzg4844.Blob(nil), tx.Sidecar.Blobs...),
+            Commitments: append([]kzg4844.Commitment(nil), tx.Sidecar.Commitments...),
+            Proofs:      append([]kzg4844.Proof(nil), tx.Sidecar.Proofs...),
+        }
+    }
     return cpy
 }
 
 // accessors for innerTx.
 func (tx *BlobTx) txType() byte           { return BlobTxType }
 func (tx *BlobTx) chainID() *big.Int      { return tx.ChainID.ToBig() }
 func (tx *BlobTx) accessList() AccessList { return tx.AccessList }
 func (tx *BlobTx) data() []byte           { return tx.Data }
 func (tx *BlobTx) gas() uint64            { return tx.Gas }
 func (tx *BlobTx) gasFeeCap() *big.Int    { return tx.GasFeeCap.ToBig() }
 func (tx *BlobTx) gasTipCap() *big.Int    { return tx.GasTipCap.ToBig() }
 func (tx *BlobTx) gasPrice() *big.Int     { return tx.GasFeeCap.ToBig() }
 func (tx *BlobTx) value() *big.Int        { return tx.Value.ToBig() }
 func (tx *BlobTx) nonce() uint64          { return tx.Nonce }
 func (tx *BlobTx) to() *common.Address    { tmp := tx.To; return &tmp }
 func (tx *BlobTx) blobGas() uint64        { return params.BlobTxBlobGasPerBlob * uint64(len(tx.BlobHashes)) }
-func (tx *BlobTx) blobGasFeeCap() *big.Int { return tx.BlobFeeCap.ToBig() }
-func (tx *BlobTx) blobHashes() []common.Hash { return tx.BlobHashes }
 
 func (tx *BlobTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int {
     if baseFee == nil {
@@ -130,3 +183,64 @@ func (tx *BlobTx) setSignatureValues(chainID, v, r, s *big.Int) {
     tx.R.SetFromBig(r)
     tx.S.SetFromBig(s)
 }
+
+func (tx *BlobTx) withoutSidecar() *BlobTx {
+    cpy := *tx
+    cpy.Sidecar = nil
+    return &cpy
+}
+
+func (tx *BlobTx) encode(b *bytes.Buffer) error {
+    if tx.Sidecar == nil {
+        return rlp.Encode(b, tx)
+    }
+    inner := &blobTxWithBlobs{
+        BlobTx:      tx,
+        Blobs:       tx.Sidecar.Blobs,
+        Commitments: tx.Sidecar.Commitments,
+        Proofs:      tx.Sidecar.Proofs,
+    }
+    return rlp.Encode(b, inner)
+}
+
+func (tx *BlobTx) decode(input []byte) error {
+    // Here we need to support two formats: the network protocol encoding of the tx (with
+    // blobs) or the canonical encoding without blobs.
+    //
+    // The two encodings can be distinguished by checking whether the first element of the
+    // input list is itself a list.
+
+    outerList, _, err := rlp.SplitList(input)
+    if err != nil {
+        return err
+    }
+    firstElemKind, _, _, err := rlp.Split(outerList)
+    if err != nil {
+        return err
+    }
+
+    if firstElemKind != rlp.List {
+        return rlp.DecodeBytes(input, tx)
+    }
+    // It's a tx with blobs.
+    var inner blobTxWithBlobs
+    if err := rlp.DecodeBytes(input, &inner); err != nil {
+        return err
+    }
+    *tx = *inner.BlobTx
+    tx.Sidecar = &BlobTxSidecar{
+        Blobs:       inner.Blobs,
+        Commitments: inner.Commitments,
+        Proofs:      inner.Proofs,
+    }
+    return nil
+}
+
+func blobHash(commit *kzg4844.Commitment) common.Hash {
+    hasher := sha256.New()
+    hasher.Write(commit[:])
+    var vhash common.Hash
+    hasher.Sum(vhash[:0])
+    vhash[0] = params.BlobTxHashVersion
+    return vhash
+}
core/types/tx_blob_test.go (new file, 90 lines)
@@ -0,0 +1,90 @@
+package types
+
+import (
+    "crypto/ecdsa"
+    "testing"
+
+    "github.com/ethereum/go-ethereum/common"
+    "github.com/ethereum/go-ethereum/crypto"
+    "github.com/ethereum/go-ethereum/crypto/kzg4844"
+    "github.com/holiman/uint256"
+)
+
+// This test verifies that tx.Hash() is not affected by presence of a BlobTxSidecar.
+func TestBlobTxHashing(t *testing.T) {
+    key, _ := crypto.GenerateKey()
+    withBlobs := createEmptyBlobTx(key, true)
+    withBlobsStripped := withBlobs.WithoutBlobTxSidecar()
+    withoutBlobs := createEmptyBlobTx(key, false)
+
+    hash := withBlobs.Hash()
+    t.Log("tx hash:", hash)
+
+    if h := withBlobsStripped.Hash(); h != hash {
+        t.Fatal("wrong tx hash after WithoutBlobTxSidecar:", h)
+    }
+    if h := withoutBlobs.Hash(); h != hash {
+        t.Fatal("wrong tx hash on tx created without sidecar:", h)
+    }
+}
+
+// This test verifies that tx.Size() takes BlobTxSidecar into account.
+func TestBlobTxSize(t *testing.T) {
+    key, _ := crypto.GenerateKey()
+    withBlobs := createEmptyBlobTx(key, true)
+    withBlobsStripped := withBlobs.WithoutBlobTxSidecar()
+    withoutBlobs := createEmptyBlobTx(key, false)
+
+    withBlobsEnc, _ := withBlobs.MarshalBinary()
+    withoutBlobsEnc, _ := withoutBlobs.MarshalBinary()
+
+    size := withBlobs.Size()
+    t.Log("size with blobs:", size)
+
+    sizeNoBlobs := withoutBlobs.Size()
+    t.Log("size without blobs:", sizeNoBlobs)
+
+    if size != uint64(len(withBlobsEnc)) {
+        t.Error("wrong size with blobs:", size, "encoded length:", len(withBlobsEnc))
+    }
+    if sizeNoBlobs != uint64(len(withoutBlobsEnc)) {
+        t.Error("wrong size without blobs:", sizeNoBlobs, "encoded length:", len(withoutBlobsEnc))
+    }
+    if sizeNoBlobs >= size {
+        t.Error("size without blobs >= size with blobs")
+    }
+    if sz := withBlobsStripped.Size(); sz != sizeNoBlobs {
+        t.Fatal("wrong size on tx after WithoutBlobTxSidecar:", sz)
+    }
+}
+
+var (
+    emptyBlob          = kzg4844.Blob{}
+    emptyBlobCommit, _ = kzg4844.BlobToCommitment(emptyBlob)
+    emptyBlobProof, _  = kzg4844.ComputeBlobProof(emptyBlob, emptyBlobCommit)
+)
+
+func createEmptyBlobTx(key *ecdsa.PrivateKey, withSidecar bool) *Transaction {
+    sidecar := &BlobTxSidecar{
+        Blobs:       []kzg4844.Blob{emptyBlob},
+        Commitments: []kzg4844.Commitment{emptyBlobCommit},
+        Proofs:      []kzg4844.Proof{emptyBlobProof},
+    }
+    blobtx := &BlobTx{
+        ChainID:    uint256.NewInt(1),
+        Nonce:      5,
+        GasTipCap:  uint256.NewInt(22),
+        GasFeeCap:  uint256.NewInt(5),
+        Gas:        25000,
+        To:         common.Address{0x03, 0x04, 0x05},
+        Value:      uint256.NewInt(99),
+        Data:       make([]byte, 50),
+        BlobFeeCap: uint256.NewInt(15),
+        BlobHashes: sidecar.BlobHashes(),
+    }
+    if withSidecar {
+        blobtx.Sidecar = sidecar
+    }
+    signer := NewCancunSigner(blobtx.ChainID.ToBig())
+    return MustSignNewTx(key, signer, blobtx)
+}
@@ -17,9 +17,11 @@
 package types
 
 import (
+    "bytes"
     "math/big"
 
     "github.com/ethereum/go-ethereum/common"
+    "github.com/ethereum/go-ethereum/rlp"
 )
 
 // DynamicFeeTx represents an EIP-1559 transaction.
@@ -83,20 +85,17 @@ func (tx *DynamicFeeTx) copy() TxData {
 }
 
 // accessors for innerTx.
 func (tx *DynamicFeeTx) txType() byte           { return DynamicFeeTxType }
 func (tx *DynamicFeeTx) chainID() *big.Int      { return tx.ChainID }
 func (tx *DynamicFeeTx) accessList() AccessList { return tx.AccessList }
 func (tx *DynamicFeeTx) data() []byte           { return tx.Data }
 func (tx *DynamicFeeTx) gas() uint64            { return tx.Gas }
 func (tx *DynamicFeeTx) gasFeeCap() *big.Int    { return tx.GasFeeCap }
 func (tx *DynamicFeeTx) gasTipCap() *big.Int    { return tx.GasTipCap }
 func (tx *DynamicFeeTx) gasPrice() *big.Int     { return tx.GasFeeCap }
 func (tx *DynamicFeeTx) value() *big.Int        { return tx.Value }
 func (tx *DynamicFeeTx) nonce() uint64          { return tx.Nonce }
 func (tx *DynamicFeeTx) to() *common.Address    { return tx.To }
-func (tx *DynamicFeeTx) blobGas() uint64           { return 0 }
-func (tx *DynamicFeeTx) blobGasFeeCap() *big.Int   { return nil }
-func (tx *DynamicFeeTx) blobHashes() []common.Hash { return nil }
 
 func (tx *DynamicFeeTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int {
     if baseFee == nil {
@@ -116,3 +115,11 @@ func (tx *DynamicFeeTx) rawSignatureValues() (v, r, s *big.Int) {
 func (tx *DynamicFeeTx) setSignatureValues(chainID, v, r, s *big.Int) {
     tx.ChainID, tx.V, tx.R, tx.S = chainID, v, r, s
 }
+
+func (tx *DynamicFeeTx) encode(b *bytes.Buffer) error {
+    return rlp.Encode(b, tx)
+}
+
+func (tx *DynamicFeeTx) decode(input []byte) error {
+    return rlp.DecodeBytes(input, tx)
+}
@@ -17,6 +17,7 @@
 package types
 
 import (
+    "bytes"
     "math/big"
 
     "github.com/ethereum/go-ethereum/common"
@@ -91,20 +92,17 @@ func (tx *LegacyTx) copy() TxData {
 }
 
 // accessors for innerTx.
 func (tx *LegacyTx) txType() byte           { return LegacyTxType }
 func (tx *LegacyTx) chainID() *big.Int      { return deriveChainId(tx.V) }
 func (tx *LegacyTx) accessList() AccessList { return nil }
 func (tx *LegacyTx) data() []byte           { return tx.Data }
 func (tx *LegacyTx) gas() uint64            { return tx.Gas }
 func (tx *LegacyTx) gasPrice() *big.Int     { return tx.GasPrice }
 func (tx *LegacyTx) gasTipCap() *big.Int    { return tx.GasPrice }
 func (tx *LegacyTx) gasFeeCap() *big.Int    { return tx.GasPrice }
 func (tx *LegacyTx) value() *big.Int        { return tx.Value }
 func (tx *LegacyTx) nonce() uint64          { return tx.Nonce }
 func (tx *LegacyTx) to() *common.Address    { return tx.To }
-func (tx *LegacyTx) blobGas() uint64           { return 0 }
-func (tx *LegacyTx) blobGasFeeCap() *big.Int   { return nil }
-func (tx *LegacyTx) blobHashes() []common.Hash { return nil }
 
 func (tx *LegacyTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int {
     return dst.Set(tx.GasPrice)
@@ -117,3 +115,11 @@ func (tx *LegacyTx) rawSignatureValues() (v, r, s *big.Int) {
 func (tx *LegacyTx) setSignatureValues(chainID, v, r, s *big.Int) {
     tx.V, tx.R, tx.S = v, r, s
 }
+
+func (tx *LegacyTx) encode(*bytes.Buffer) error {
+    panic("encode called on LegacyTx")
+}
+
+func (tx *LegacyTx) decode([]byte) error {
+    panic("decode called on LegacyTx)")
+}
@@ -294,7 +294,7 @@ func (b *EthAPIBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscri
 }
 
 func (b *EthAPIBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error {
-    return b.eth.txPool.Add([]*txpool.Transaction{{Tx: signedTx}}, true, false)[0]
+    return b.eth.txPool.Add([]*types.Transaction{signedTx}, true, false)[0]
 }
 
 func (b *EthAPIBackend) GetPoolTransactions() (types.Transactions, error) {
@@ -303,7 +303,7 @@ func (b *EthAPIBackend) GetPoolTransactions() (types.Transactions, error) {
     for _, batch := range pending {
         for _, lazy := range batch {
             if tx := lazy.Resolve(); tx != nil {
-                txs = append(txs, tx.Tx)
+                txs = append(txs, tx)
             }
         }
     }
@@ -311,10 +311,7 @@ func (b *EthAPIBackend) GetPoolTransactions() (types.Transactions, error) {
 }
 
 func (b *EthAPIBackend) GetPoolTransaction(hash common.Hash) *types.Transaction {
-    if tx := b.eth.txPool.Get(hash); tx != nil {
-        return tx.Tx
-    }
-    return nil
+    return b.eth.txPool.Get(hash)
 }
 
 func (b *EthAPIBackend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) {
@@ -35,7 +35,6 @@ import (
     beaconConsensus "github.com/ethereum/go-ethereum/consensus/beacon"
     "github.com/ethereum/go-ethereum/consensus/ethash"
     "github.com/ethereum/go-ethereum/core"
-    "github.com/ethereum/go-ethereum/core/txpool"
     "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/crypto"
     "github.com/ethereum/go-ethereum/crypto/kzg4844"
@@ -108,7 +107,7 @@ func TestEth2AssembleBlock(t *testing.T) {
     if err != nil {
         t.Fatalf("error signing transaction, err=%v", err)
     }
-    ethservice.TxPool().Add([]*txpool.Transaction{{Tx: tx}}, true, false)
+    ethservice.TxPool().Add([]*types.Transaction{tx}, true, false)
     blockParams := engine.PayloadAttributes{
         Timestamp: blocks[9].Time() + 5,
     }
@@ -145,11 +144,7 @@ func TestEth2AssembleBlockWithAnotherBlocksTxs(t *testing.T) {
 
     // Put the 10th block's tx in the pool and produce a new block
     txs := blocks[9].Transactions()
-    wrapped := make([]*txpool.Transaction, len(txs))
-    for i, tx := range txs {
-        wrapped[i] = &txpool.Transaction{Tx: tx}
-    }
-    api.eth.TxPool().Add(wrapped, false, true)
+    api.eth.TxPool().Add(txs, false, true)
     blockParams := engine.PayloadAttributes{
         Timestamp: blocks[8].Time() + 5,
     }
@@ -189,11 +184,7 @@ func TestEth2PrepareAndGetPayload(t *testing.T) {
 
     // Put the 10th block's tx in the pool and produce a new block
     txs := blocks[9].Transactions()
-    wrapped := make([]*txpool.Transaction, len(txs))
-    for i, tx := range txs {
-        wrapped[i] = &txpool.Transaction{Tx: tx}
-    }
-    ethservice.TxPool().Add(wrapped, true, false)
+    ethservice.TxPool().Add(txs, true, false)
     blockParams := engine.PayloadAttributes{
         Timestamp: blocks[8].Time() + 5,
     }
@@ -315,7 +306,7 @@ func TestEth2NewBlock(t *testing.T) {
     statedb, _ := ethservice.BlockChain().StateAt(parent.Root())
     nonce := statedb.GetNonce(testAddr)
     tx, _ := types.SignTx(types.NewContractCreation(nonce, new(big.Int), 1000000, big.NewInt(2*params.InitialBaseFee), logCode), types.LatestSigner(ethservice.BlockChain().Config()), testKey)
-    ethservice.TxPool().Add([]*txpool.Transaction{{Tx: tx}}, true, false)
+    ethservice.TxPool().Add([]*types.Transaction{tx}, true, false)
 
     execData, err := assembleWithTransactions(api, parent.Hash(), &engine.PayloadAttributes{
         Timestamp: parent.Time() + 5,
@@ -484,7 +475,7 @@ func TestFullAPI(t *testing.T) {
     statedb, _ := ethservice.BlockChain().StateAt(parent.Root)
     nonce := statedb.GetNonce(testAddr)
     tx, _ := types.SignTx(types.NewContractCreation(nonce, new(big.Int), 1000000, big.NewInt(2*params.InitialBaseFee), logCode), types.LatestSigner(ethservice.BlockChain().Config()), testKey)
-    ethservice.TxPool().Add([]*txpool.Transaction{{Tx: tx}}, true, false)
+    ethservice.TxPool().Add([]*types.Transaction{tx}, true, false)
     }
 
     setupBlocks(t, ethservice, 10, parent, callback, nil)
@@ -610,7 +601,7 @@ func TestNewPayloadOnInvalidChain(t *testing.T) {
         GasPrice: big.NewInt(2 * params.InitialBaseFee),
         Data:     logCode,
     })
-    ethservice.TxPool().Add([]*txpool.Transaction{{Tx: tx}}, false, true)
+    ethservice.TxPool().Add([]*types.Transaction{tx}, false, true)
     var (
         params = engine.PayloadAttributes{
             Timestamp: parent.Time + 1,
@@ -1284,7 +1275,7 @@ func setupBodies(t *testing.T) (*node.Node, *eth.Ethereum, []*types.Block) {
     statedb, _ := ethservice.BlockChain().StateAt(parent.Root)
     nonce := statedb.GetNonce(testAddr)
     tx, _ := types.SignTx(types.NewContractCreation(nonce, new(big.Int), 1000000, big.NewInt(2*params.InitialBaseFee), logCode), types.LatestSigner(ethservice.BlockChain().Config()), testKey)
-    ethservice.TxPool().Add([]*txpool.Transaction{{Tx: tx}}, true, false)
+    ethservice.TxPool().Add([]*types.Transaction{tx}, false, false)
     }
 
     withdrawals := make([][]*types.Withdrawal, 10)
@@ -798,7 +798,7 @@ func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListH
         }
     }
     // Blocks must have a number of blobs corresponding to the header gas usage,
-    // and zero before the Cancun hardfork
+    // and zero before the Cancun hardfork.
     var blobs int
     for _, tx := range txLists[index] {
         // Count the number of blobs to validate against the header's blobGasUsed
@@ -814,6 +814,9 @@ func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListH
                 return errInvalidBody
             }
         }
+        if tx.BlobTxSidecar() != nil {
+            return errInvalidBody
+        }
     }
     }
     if header.BlobGasUsed != nil {
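Aside (illustrative, not part of this diff): the downloader-side rule added above is that transactions delivered in block bodies travel without their blobs. A minimal standalone sketch of the same check; the package name and rejectSidecars helper are hypothetical.

// example.go (illustrative only, not from this commit)
package example

import (
    "errors"

    "github.com/ethereum/go-ethereum/core/types"
)

// rejectSidecars fails if any transaction in a delivered body still carries a sidecar.
func rejectSidecars(txs []*types.Transaction) error {
    for _, tx := range txs {
        if tx.BlobTxSidecar() != nil {
            return errors.New("unexpected blob sidecar in block body transaction")
        }
    }
    return nil
}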
@@ -169,9 +169,9 @@ type TxFetcher struct {
     alternates map[common.Hash]map[string]struct{} // In-flight transaction alternate origins if retrieval fails
 
     // Callbacks
     hasTx    func(common.Hash) bool // Retrieves a tx from the local txpool
-    addTxs   func([]*txpool.Transaction) []error // Insert a batch of transactions into local txpool
+    addTxs   func([]*types.Transaction) []error // Insert a batch of transactions into local txpool
     fetchTxs func(string, []common.Hash) error // Retrieves a set of txs from a remote peer
 
     step  chan struct{} // Notification channel when the fetcher loop iterates
     clock mclock.Clock  // Time wrapper to simulate in tests
@@ -180,14 +180,14 @@ type TxFetcher struct {
 
 // NewTxFetcher creates a transaction fetcher to retrieve transaction
 // based on hash announcements.
-func NewTxFetcher(hasTx func(common.Hash) bool, addTxs func([]*txpool.Transaction) []error, fetchTxs func(string, []common.Hash) error) *TxFetcher {
+func NewTxFetcher(hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error) *TxFetcher {
     return NewTxFetcherForTests(hasTx, addTxs, fetchTxs, mclock.System{}, nil)
 }
 
 // NewTxFetcherForTests is a testing method to mock out the realtime clock with
 // a simulated version and the internal randomness with a deterministic one.
 func NewTxFetcherForTests(
-    hasTx func(common.Hash) bool, addTxs func([]*txpool.Transaction) []error, fetchTxs func(string, []common.Hash) error,
+    hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error,
     clock mclock.Clock, rand *mrand.Rand) *TxFetcher {
     return &TxFetcher{
         notify: make(chan *txAnnounce),
@@ -295,11 +295,7 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool)
     )
     batch := txs[i:end]
 
-    wrapped := make([]*txpool.Transaction, len(batch))
-    for j, tx := range batch {
-        wrapped[j] = &txpool.Transaction{Tx: tx}
-    }
-    for j, err := range f.addTxs(wrapped) {
+    for j, err := range f.addTxs(batch) {
         // Track the transaction hash if the price is too low for us.
         // Avoid re-request this transaction when we receive another
         // announcement.
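Aside (illustrative, not part of this diff): with the callbacks now taking *types.Transaction, a fetcher can be wired straight to the pool's methods without a wrapping layer. A minimal sketch; the package name and newPoolFetcher helper are hypothetical.

// example.go (illustrative only, not from this commit)
package example

import (
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/txpool"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/eth/fetcher"
)

// newPoolFetcher builds a TxFetcher that checks and inserts transactions via the pool.
func newPoolFetcher(pool *txpool.TxPool, fetchTxs func(string, []common.Hash) error) *fetcher.TxFetcher {
    addTxs := func(txs []*types.Transaction) []error {
        // Remote transactions are non-local and added without forced sync.
        return pool.Add(txs, false, false)
    }
    return fetcher.NewTxFetcher(pool.Has, addTxs, fetchTxs)
}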
@@ -378,7 +378,7 @@ func TestTransactionFetcherCleanup(t *testing.T) {
     init: func() *TxFetcher {
         return NewTxFetcher(
             func(common.Hash) bool { return false },
-            func(txs []*txpool.Transaction) []error {
+            func(txs []*types.Transaction) []error {
                 return make([]error, len(txs))
             },
             func(string, []common.Hash) error { return nil },
@@ -417,7 +417,7 @@ func TestTransactionFetcherCleanupEmpty(t *testing.T) {
     init: func() *TxFetcher {
         return NewTxFetcher(
             func(common.Hash) bool { return false },
-            func(txs []*txpool.Transaction) []error {
+            func(txs []*types.Transaction) []error {
                 return make([]error, len(txs))
             },
             func(string, []common.Hash) error { return nil },
@@ -455,7 +455,7 @@ func TestTransactionFetcherMissingRescheduling(t *testing.T) {
     init: func() *TxFetcher {
         return NewTxFetcher(
             func(common.Hash) bool { return false },
-            func(txs []*txpool.Transaction) []error {
+            func(txs []*types.Transaction) []error {
                 return make([]error, len(txs))
             },
             func(string, []common.Hash) error { return nil },
@@ -501,7 +501,7 @@ func TestTransactionFetcherMissingCleanup(t *testing.T) {
     init: func() *TxFetcher {
         return NewTxFetcher(
             func(common.Hash) bool { return false },
-            func(txs []*txpool.Transaction) []error {
+            func(txs []*types.Transaction) []error {
                 return make([]error, len(txs))
             },
             func(string, []common.Hash) error { return nil },
@@ -539,7 +539,7 @@ func TestTransactionFetcherBroadcasts(t *testing.T) {
     init: func() *TxFetcher {
         return NewTxFetcher(
             func(common.Hash) bool { return false },
-            func(txs []*txpool.Transaction) []error {
+            func(txs []*types.Transaction) []error {
                 return make([]error, len(txs))
             },
             func(string, []common.Hash) error { return nil },
@@ -644,7 +644,7 @@ func TestTransactionFetcherTimeoutRescheduling(t *testing.T) {
     init: func() *TxFetcher {
         return NewTxFetcher(
             func(common.Hash) bool { return false },
-            func(txs []*txpool.Transaction) []error {
+            func(txs []*types.Transaction) []error {
                 return make([]error, len(txs))
             },
             func(string, []common.Hash) error { return nil },
@@ -865,7 +865,7 @@ func TestTransactionFetcherUnderpricedDedup(t *testing.T) {
     init: func() *TxFetcher {
         return NewTxFetcher(
             func(common.Hash) bool { return false },
-            func(txs []*txpool.Transaction) []error {
+            func(txs []*types.Transaction) []error {
                 errs := make([]error, len(txs))
                 for i := 0; i < len(errs); i++ {
                     if i%2 == 0 {
@@ -938,7 +938,7 @@ func TestTransactionFetcherUnderpricedDoSProtection(t *testing.T) {
     init: func() *TxFetcher {
         return NewTxFetcher(
             func(common.Hash) bool { return false },
-            func(txs []*txpool.Transaction) []error {
+            func(txs []*types.Transaction) []error {
                 errs := make([]error, len(txs))
                 for i := 0; i < len(errs); i++ {
                     errs[i] = txpool.ErrUnderpriced
@@ -964,7 +964,7 @@ func TestTransactionFetcherOutOfBoundDeliveries(t *testing.T) {
     init: func() *TxFetcher {
         return NewTxFetcher(
             func(common.Hash) bool { return false },
-            func(txs []*txpool.Transaction) []error {
+            func(txs []*types.Transaction) []error {
                 return make([]error, len(txs))
             },
             func(string, []common.Hash) error { return nil },
@@ -1017,7 +1017,7 @@ func TestTransactionFetcherDrop(t *testing.T) {
     init: func() *TxFetcher {
         return NewTxFetcher(
             func(common.Hash) bool { return false },
-            func(txs []*txpool.Transaction) []error {
+            func(txs []*types.Transaction) []error {
                 return make([]error, len(txs))
             },
             func(string, []common.Hash) error { return nil },
@@ -1083,7 +1083,7 @@ func TestTransactionFetcherDropRescheduling(t *testing.T) {
     init: func() *TxFetcher {
         return NewTxFetcher(
             func(common.Hash) bool { return false },
-            func(txs []*txpool.Transaction) []error {
+            func(txs []*types.Transaction) []error {
                 return make([]error, len(txs))
            },
             func(string, []common.Hash) error { return nil },
@@ -1128,7 +1128,7 @@ func TestTransactionFetcherFuzzCrash01(t *testing.T) {
     init: func() *TxFetcher {
         return NewTxFetcher(
             func(common.Hash) bool { return false },
-            func(txs []*txpool.Transaction) []error {
+            func(txs []*types.Transaction) []error {
                 return make([]error, len(txs))
             },
             func(string, []common.Hash) error { return nil },
@@ -1155,7 +1155,7 @@ func TestTransactionFetcherFuzzCrash02(t *testing.T) {
     init: func() *TxFetcher {
         return NewTxFetcher(
             func(common.Hash) bool { return false },
|
func(common.Hash) bool { return false },
|
||||||
func(txs []*txpool.Transaction) []error {
|
func(txs []*types.Transaction) []error {
|
||||||
return make([]error, len(txs))
|
return make([]error, len(txs))
|
||||||
},
|
},
|
||||||
func(string, []common.Hash) error { return nil },
|
func(string, []common.Hash) error { return nil },
|
||||||
@ -1184,7 +1184,7 @@ func TestTransactionFetcherFuzzCrash03(t *testing.T) {
|
|||||||
init: func() *TxFetcher {
|
init: func() *TxFetcher {
|
||||||
return NewTxFetcher(
|
return NewTxFetcher(
|
||||||
func(common.Hash) bool { return false },
|
func(common.Hash) bool { return false },
|
||||||
func(txs []*txpool.Transaction) []error {
|
func(txs []*types.Transaction) []error {
|
||||||
return make([]error, len(txs))
|
return make([]error, len(txs))
|
||||||
},
|
},
|
||||||
func(string, []common.Hash) error { return nil },
|
func(string, []common.Hash) error { return nil },
|
||||||
@ -1217,7 +1217,7 @@ func TestTransactionFetcherFuzzCrash04(t *testing.T) {
|
|||||||
init: func() *TxFetcher {
|
init: func() *TxFetcher {
|
||||||
return NewTxFetcher(
|
return NewTxFetcher(
|
||||||
func(common.Hash) bool { return false },
|
func(common.Hash) bool { return false },
|
||||||
func(txs []*txpool.Transaction) []error {
|
func(txs []*types.Transaction) []error {
|
||||||
return make([]error, len(txs))
|
return make([]error, len(txs))
|
||||||
},
|
},
|
||||||
func(string, []common.Hash) error {
|
func(string, []common.Hash) error {
|
||||||
|
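Every fetcher test above wires the same three callbacks, with the delivery callback now taking []*types.Transaction. For reference, a minimal sketch of that wiring against the public eth/fetcher API (not part of this diff; the ethsketch package name and the placeholder callbacks are illustrative only):

package ethsketch

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/fetcher"
)

// newTxFetcher builds a fetcher with the post-change callback shapes:
// delivered transactions arrive as plain *types.Transaction values.
func newTxFetcher(addToPool func([]*types.Transaction) []error) *fetcher.TxFetcher {
	hasTx := func(common.Hash) bool { return false }                         // placeholder: pool membership check
	fetchTxs := func(peer string, hashes []common.Hash) error { return nil } // placeholder: network request
	return fetcher.NewTxFetcher(hasTx, addToPool, fetchTxs)
}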
@@ -68,10 +68,10 @@ type txPool interface {

 // Get retrieves the transaction from local txpool with given
 // tx hash.
-Get(hash common.Hash) *txpool.Transaction
+Get(hash common.Hash) *types.Transaction

 // Add should add the given transactions to the pool.
-Add(txs []*txpool.Transaction, local bool, sync bool) []error
+Add(txs []*types.Transaction, local bool, sync bool) []error

 // Pending should return pending transactions.
 // The slice should be modifiable by the caller.
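The handler-side pool interface is the heart of the change: both lookup and insertion now traffic in bare *types.Transaction. A minimal usage sketch (not part of the diff; the interface is re-declared here only to keep the example self-contained):

package ethsketch

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// txPool mirrors the reduced interface above: no wrapper type on either side.
type txPool interface {
	Get(hash common.Hash) *types.Transaction
	Add(txs []*types.Transaction, local bool, sync bool) []error
}

// readdRemote hands remote transactions to the pool exactly as received.
func readdRemote(pool txPool, txs []*types.Transaction) []error {
	return pool.Add(txs, false, false)
}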
@@ -287,7 +287,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
 }
 return p.RequestTxs(hashes)
 }
-addTxs := func(txs []*txpool.Transaction) []error {
+addTxs := func(txs []*types.Transaction) []error {
 return h.txpool.Add(txs, false, false)
 }
 h.txFetcher = fetcher.NewTxFetcher(h.txpool.Has, addTxs, fetchTx)

@@ -28,7 +28,6 @@ import (
 "github.com/ethereum/go-ethereum/core"
 "github.com/ethereum/go-ethereum/core/forkid"
 "github.com/ethereum/go-ethereum/core/rawdb"
-"github.com/ethereum/go-ethereum/core/txpool"
 "github.com/ethereum/go-ethereum/core/types"
 "github.com/ethereum/go-ethereum/core/vm"
 "github.com/ethereum/go-ethereum/eth/downloader"
@@ -308,12 +307,11 @@ func testSendTransactions(t *testing.T, protocol uint) {
 handler := newTestHandler()
 defer handler.close()

-insert := make([]*txpool.Transaction, 100)
+insert := make([]*types.Transaction, 100)
 for nonce := range insert {
 tx := types.NewTransaction(uint64(nonce), common.Address{}, big.NewInt(0), 100000, big.NewInt(0), make([]byte, 10240))
 tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)
-insert[nonce] = &txpool.Transaction{Tx: tx}
+insert[nonce] = tx
 }
 go handler.txpool.Add(insert, false, false) // Need goroutine to not block on feed
 time.Sleep(250 * time.Millisecond) // Wait until tx events get out of the system (can't use events, tx broadcaster races with peer join)
@@ -376,8 +374,8 @@ func testSendTransactions(t *testing.T, protocol uint) {
 }
 }
 for _, tx := range insert {
-if _, ok := seen[tx.Tx.Hash()]; !ok {
-t.Errorf("missing transaction: %x", tx.Tx.Hash())
+if _, ok := seen[tx.Hash()]; !ok {
+t.Errorf("missing transaction: %x", tx.Hash())
 }
 }
 }
@@ -434,12 +432,11 @@ func testTransactionPropagation(t *testing.T, protocol uint) {
 defer sub.Unsubscribe()
 }
 // Fill the source pool with transactions and wait for them at the sinks
-txs := make([]*txpool.Transaction, 1024)
+txs := make([]*types.Transaction, 1024)
 for nonce := range txs {
 tx := types.NewTransaction(uint64(nonce), common.Address{}, big.NewInt(0), 100000, big.NewInt(0), nil)
 tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)
-txs[nonce] = &txpool.Transaction{Tx: tx}
+txs[nonce] = tx
 }
 source.txpool.Add(txs, false, false)
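With the wrapper gone, the protocol tests build their fixtures as plain signed transactions and hand them straight to the pool. A compact sketch of that pattern (not part of the diff; key generation and the helper name are illustrative):

package ethsketch

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

// makeTestTxs mirrors what the reworked tests do: sign plain legacy
// transactions and keep them as []*types.Transaction, with no wrapping step.
func makeTestTxs(n int) ([]*types.Transaction, error) {
	key, err := crypto.GenerateKey()
	if err != nil {
		return nil, err
	}
	txs := make([]*types.Transaction, n)
	for nonce := range txs {
		tx := types.NewTransaction(uint64(nonce), common.Address{}, big.NewInt(0), 100000, big.NewInt(0), nil)
		signed, err := types.SignTx(tx, types.HomesteadSigner{}, key)
		if err != nil {
			return nil, err
		}
		txs[nonce] = signed
	}
	return txs, nil
}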
@@ -72,32 +72,23 @@ func (p *testTxPool) Has(hash common.Hash) bool {

 // Get retrieves the transaction from local txpool with given
 // tx hash.
-func (p *testTxPool) Get(hash common.Hash) *txpool.Transaction {
+func (p *testTxPool) Get(hash common.Hash) *types.Transaction {
 p.lock.Lock()
 defer p.lock.Unlock()
-if tx := p.pool[hash]; tx != nil {
-return &txpool.Transaction{Tx: tx}
-}
-return nil
+return p.pool[hash]
 }

 // Add appends a batch of transactions to the pool, and notifies any
 // listeners if the addition channel is non nil
-func (p *testTxPool) Add(txs []*txpool.Transaction, local bool, sync bool) []error {
-unwrapped := make([]*types.Transaction, len(txs))
-for i, tx := range txs {
-unwrapped[i] = tx.Tx
-}
+func (p *testTxPool) Add(txs []*types.Transaction, local bool, sync bool) []error {
 p.lock.Lock()
 defer p.lock.Unlock()

-for _, tx := range unwrapped {
+for _, tx := range txs {
 p.pool[tx.Hash()] = tx
 }
-p.txFeed.Send(core.NewTxsEvent{Txs: unwrapped})
-return make([]error, len(unwrapped))
+p.txFeed.Send(core.NewTxsEvent{Txs: txs})
+return make([]error, len(txs))
 }

 // Pending returns all the transactions known to the pool
@@ -118,7 +109,7 @@ func (p *testTxPool) Pending(enforceTips bool) map[common.Address][]*txpool.Lazy
 for _, tx := range batch {
 pending[addr] = append(pending[addr], &txpool.LazyTransaction{
 Hash: tx.Hash(),
-Tx: &txpool.Transaction{Tx: tx},
+Tx: tx,
 Time: tx.Time(),
 GasFeeCap: tx.GasFeeCap(),
 GasTipCap: tx.GasTipCap(),
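The LazyTransaction construction above is where the simplification is most visible: Tx now holds the *types.Transaction itself. The same construction as a standalone helper, for reference (not part of the diff; only the fields exercised in this commit are set, anything else is left at its zero value):

package ethsketch

import (
	"github.com/ethereum/go-ethereum/core/txpool"
	"github.com/ethereum/go-ethereum/core/types"
)

// toLazy builds a pool-facing lazy handle whose Tx field is the transaction
// itself rather than a txpool.Transaction wrapper.
func toLazy(tx *types.Transaction) *txpool.LazyTransaction {
	return &txpool.LazyTransaction{
		Hash:      tx.Hash(),
		Tx:        tx,
		Time:      tx.Time(),
		GasFeeCap: tx.GasFeeCap(),
		GasTipCap: tx.GasTipCap(),
	}
}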
@@ -81,8 +81,8 @@ func (p *Peer) broadcastTransactions() {
 )
 for i := 0; i < len(queue) && size < maxTxPacketSize; i++ {
 if tx := p.txpool.Get(queue[i]); tx != nil {
-txs = append(txs, tx.Tx)
-size += common.StorageSize(tx.Tx.Size())
+txs = append(txs, tx)
+size += common.StorageSize(tx.Size())
 }
 hashesCount++
 }
@@ -151,8 +151,8 @@ func (p *Peer) announceTransactions() {
 for count = 0; count < len(queue) && size < maxTxPacketSize; count++ {
 if tx := p.txpool.Get(queue[count]); tx != nil {
 pending = append(pending, queue[count])
-pendingTypes = append(pendingTypes, tx.Tx.Type())
-pendingSizes = append(pendingSizes, uint32(tx.Tx.Size()))
+pendingTypes = append(pendingTypes, tx.Type())
+pendingSizes = append(pendingSizes, uint32(tx.Size()))
 size += common.HashLength
 }
 }
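On the broadcast and announce paths the only change is that size and type metadata are read straight off the returned transaction. A sketch of the selection loop in isolation (not part of the diff; the getter and the size cap are parameters here rather than peer state):

package ethsketch

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// pickForBroadcast gathers pooled transactions up to a soft size limit,
// reading the encoded size directly from each *types.Transaction.
func pickForBroadcast(get func(common.Hash) *types.Transaction, queue []common.Hash, maxSize common.StorageSize) []*types.Transaction {
	var (
		txs  []*types.Transaction
		size common.StorageSize
	)
	for i := 0; i < len(queue) && size < maxSize; i++ {
		if tx := get(queue[i]); tx != nil {
			txs = append(txs, tx)
			size += common.StorageSize(tx.Size())
		}
	}
	return txs
}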
@@ -23,7 +23,7 @@ import (

 "github.com/ethereum/go-ethereum/common"
 "github.com/ethereum/go-ethereum/core"
-"github.com/ethereum/go-ethereum/core/txpool"
+"github.com/ethereum/go-ethereum/core/types"
 "github.com/ethereum/go-ethereum/metrics"
 "github.com/ethereum/go-ethereum/p2p"
 "github.com/ethereum/go-ethereum/p2p/enode"
@@ -90,7 +90,7 @@ type Backend interface {
 // TxPool defines the methods needed by the protocol handler to serve transactions.
 type TxPool interface {
 // Get retrieves the transaction from the local txpool with the given hash.
-Get(hash common.Hash) *txpool.Transaction
+Get(hash common.Hash) *types.Transaction
 }

 // MakeProtocols constructs the P2P protocol definitions for `eth`.

@@ -503,7 +503,7 @@ func answerGetPooledTransactions(backend Backend, query GetPooledTransactionsPac
 continue
 }
 // If known, encode and queue for response packet
-if encoded, err := rlp.EncodeToBytes(tx.Tx); err != nil {
+if encoded, err := rlp.EncodeToBytes(tx); err != nil {
 log.Error("Failed to encode transaction", "err", err)
 } else {
 hashes = append(hashes, hash)

@@ -518,7 +518,7 @@ func handleSendTx(msg Decoder) (serveRequestFn, uint64, uint64, error) {
 hash := tx.Hash()
 stats[i] = txStatus(backend, hash)
 if stats[i].Status == txpool.TxStatusUnknown {
-if errs := backend.TxPool().Add([]*txpool.Transaction{{Tx: tx}}, false, backend.AddTxsSync()); errs[0] != nil {
+if errs := backend.TxPool().Add([]*types.Transaction{tx}, false, backend.AddTxsSync()); errs[0] != nil {
 stats[i].Error = errs[0].Error()
 continue
 }
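Serving pooled-transaction queries gets simpler too: what the pool returns is already the canonical types.Transaction, so it can be RLP-encoded for the reply without unwrapping. A sketch of that serving loop (not part of the diff; the getter function stands in for the backend's pool lookup):

package ethsketch

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

// encodePooled collects the hashes it could serve and their RLP-encoded
// transactions, skipping anything the pool no longer knows about.
func encodePooled(get func(common.Hash) *types.Transaction, hashes []common.Hash) ([]common.Hash, []rlp.RawValue) {
	var (
		served  []common.Hash
		encoded []rlp.RawValue
	)
	for _, hash := range hashes {
		tx := get(hash)
		if tx == nil {
			continue
		}
		if blob, err := rlp.EncodeToBytes(tx); err == nil {
			served = append(served, hash)
			encoded = append(encoded, blob)
		}
	}
	return served, encoded
}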
@@ -88,7 +88,7 @@ func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) {
 }
 groups[addr] = append(groups[addr], &txpool.LazyTransaction{
 Hash: tx.Hash(),
-Tx: &txpool.Transaction{Tx: tx},
+Tx: tx,
 Time: tx.Time(),
 GasFeeCap: tx.GasFeeCap(),
 GasTipCap: tx.GasTipCap(),
@@ -101,7 +101,7 @@ func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) {

 txs := types.Transactions{}
 for tx := txset.Peek(); tx != nil; tx = txset.Peek() {
-txs = append(txs, tx.Tx.Tx)
+txs = append(txs, tx.Tx)
 txset.Shift()
 }
 if len(txs) != expectedCount {
@@ -153,7 +153,7 @@ func TestTransactionTimeSort(t *testing.T) {

 groups[addr] = append(groups[addr], &txpool.LazyTransaction{
 Hash: tx.Hash(),
-Tx: &txpool.Transaction{Tx: tx},
+Tx: tx,
 Time: tx.Time(),
 GasFeeCap: tx.GasFeeCap(),
 GasTipCap: tx.GasTipCap(),
@@ -164,7 +164,7 @@ func TestTransactionTimeSort(t *testing.T) {

 txs := types.Transactions{}
 for tx := txset.Peek(); tx != nil; tx = txset.Peek() {
-txs = append(txs, tx.Tx.Tx)
+txs = append(txs, tx.Tx)
 txset.Shift()
 }
 if len(txs) != len(keys) {
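The ordering tests now unwrap one level less when draining the price/nonce-sorted set. A sketch of the drain loop against a minimal interface (not part of the diff; the miner's actual set type is unexported, so only the Peek/Shift shape used above is assumed):

package ethsketch

import (
	"github.com/ethereum/go-ethereum/core/txpool"
	"github.com/ethereum/go-ethereum/core/types"
)

// lazyQueue is a stand-in for the miner's ordered transaction set.
type lazyQueue interface {
	Peek() *txpool.LazyTransaction
	Shift()
}

// drain pops every lazy entry; tx.Tx is already the *types.Transaction.
func drain(q lazyQueue) types.Transactions {
	txs := types.Transactions{}
	for tx := q.Peek(); tx != nil; tx = q.Peek() {
		txs = append(txs, tx.Tx)
		q.Shift()
	}
	return txs
}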
@@ -30,7 +30,6 @@ import (
 "github.com/ethereum/go-ethereum/common"
 "github.com/ethereum/go-ethereum/common/fdlimit"
 "github.com/ethereum/go-ethereum/core"
-"github.com/ethereum/go-ethereum/core/txpool"
 "github.com/ethereum/go-ethereum/core/txpool/legacypool"
 "github.com/ethereum/go-ethereum/core/types"
 "github.com/ethereum/go-ethereum/crypto"
@@ -133,7 +132,7 @@ func main() {
 if err != nil {
 panic(err)
 }
-if err := backend.TxPool().Add([]*txpool.Transaction{{Tx: tx}}, true, false); err != nil {
+if err := backend.TxPool().Add([]*types.Transaction{tx}, true, false); err != nil {
 panic(err)
 }
 nonces[index]++

@@ -539,7 +539,7 @@ func (w *worker) mainLoop() {
 acc, _ := types.Sender(w.current.signer, tx)
 txs[acc] = append(txs[acc], &txpool.LazyTransaction{
 Hash: tx.Hash(),
-Tx: &txpool.Transaction{Tx: tx},
+Tx: tx.WithoutBlobTxSidecar(),
 Time: tx.Time(),
 GasFeeCap: tx.GasFeeCap(),
 GasTipCap: tx.GasTipCap(),
@@ -734,18 +734,18 @@ func (w *worker) updateSnapshot(env *environment) {
 w.snapshotState = env.state.Copy()
 }

-func (w *worker) commitTransaction(env *environment, tx *txpool.Transaction) ([]*types.Log, error) {
+func (w *worker) commitTransaction(env *environment, tx *types.Transaction) ([]*types.Log, error) {
 var (
 snap = env.state.Snapshot()
 gp = env.gasPool.Gas()
 )
-receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &env.coinbase, env.gasPool, env.state, env.header, tx.Tx, &env.header.GasUsed, *w.chain.GetVMConfig())
+receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &env.coinbase, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, *w.chain.GetVMConfig())
 if err != nil {
 env.state.RevertToSnapshot(snap)
 env.gasPool.SetGas(gp)
 return nil, err
 }
-env.txs = append(env.txs, tx.Tx)
+env.txs = append(env.txs, tx)
 env.receipts = append(env.receipts, receipt)

 return receipt.Logs, nil
@@ -778,30 +778,30 @@ func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAn
 tx := ltx.Resolve()
 if tx == nil {
 log.Warn("Ignoring evicted transaction")

 txs.Pop()
 continue
 }

 // Error may be ignored here. The error has already been checked
 // during transaction acceptance is the transaction pool.
-from, _ := types.Sender(env.signer, tx.Tx)
+from, _ := types.Sender(env.signer, tx)

 // Check whether the tx is replay protected. If we're not in the EIP155 hf
 // phase, start ignoring the sender until we do.
-if tx.Tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) {
-log.Trace("Ignoring reply protected transaction", "hash", tx.Tx.Hash(), "eip155", w.chainConfig.EIP155Block)
+if tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) {
+log.Trace("Ignoring reply protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block)

 txs.Pop()
 continue
 }

 // Start executing the transaction
-env.state.SetTxContext(tx.Tx.Hash(), env.tcount)
+env.state.SetTxContext(tx.Hash(), env.tcount)

 logs, err := w.commitTransaction(env, tx)
 switch {
 case errors.Is(err, core.ErrNonceTooLow):
 // New head notification data race between the transaction pool and miner, shift
-log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Tx.Nonce())
+log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
 txs.Shift()

 case errors.Is(err, nil):
@@ -813,7 +813,7 @@ func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAn
 default:
 // Transaction is regarded as invalid, drop all consecutive transactions from
 // the same sender because of `nonce-too-high` clause.
-log.Debug("Transaction failed, account skipped", "hash", tx.Tx.Hash(), "err", err)
+log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
 txs.Pop()
 }
 }
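This is the miner-side rule from the change description: the pool keeps the blob sidecar, but the copy committed into a block must not carry it, hence tx.WithoutBlobTxSidecar() when the LazyTransaction is built, and a plain *types.Transaction parameter on commitTransaction. The stripping step on its own, as a sketch (not part of the diff; the helper name is illustrative):

package ethsketch

import (
	"github.com/ethereum/go-ethereum/core/types"
)

// stripSidecars detaches any blob sidecar from each transaction before the
// transactions are queued for block inclusion; non-blob transactions are
// returned unchanged.
func stripSidecars(txs []*types.Transaction) []*types.Transaction {
	out := make([]*types.Transaction, 0, len(txs))
	for _, tx := range txs {
		out = append(out, tx.WithoutBlobTxSidecar())
	}
	return out
}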
@@ -63,7 +63,7 @@ var (
 testUserAddress = crypto.PubkeyToAddress(testUserKey.PublicKey)

 // Test transactions
-pendingTxs []*txpool.Transaction
+pendingTxs []*types.Transaction
 newTxs []*types.Transaction

 testConfig = &Config{
@@ -93,7 +93,7 @@ func init() {
 Gas: params.TxGas,
 GasPrice: big.NewInt(params.InitialBaseFee),
 })
-pendingTxs = append(pendingTxs, &txpool.Transaction{Tx: tx1})
+pendingTxs = append(pendingTxs, tx1)

 tx2 := types.MustSignNewTx(testBankKey, signer, &types.LegacyTx{
 Nonce: 1,
@@ -194,8 +194,8 @@ func TestGenerateAndImportBlock(t *testing.T) {
 w.start()

 for i := 0; i < 5; i++ {
-b.txPool.Add([]*txpool.Transaction{{Tx: b.newRandomTx(true)}}, true, false)
-b.txPool.Add([]*txpool.Transaction{{Tx: b.newRandomTx(false)}}, true, false)
+b.txPool.Add([]*types.Transaction{b.newRandomTx(true)}, true, false)
+b.txPool.Add([]*types.Transaction{b.newRandomTx(false)}, true, false)

 select {
 case ev := <-sub.Chan():

@@ -25,7 +25,6 @@ import (

 "github.com/ethereum/go-ethereum/common"
 "github.com/ethereum/go-ethereum/common/mclock"
-"github.com/ethereum/go-ethereum/core/txpool"
 "github.com/ethereum/go-ethereum/core/types"
 "github.com/ethereum/go-ethereum/eth/fetcher"
 )
@@ -80,7 +79,7 @@ func Fuzz(input []byte) int {

 f := fetcher.NewTxFetcherForTests(
 func(common.Hash) bool { return false },
-func(txs []*txpool.Transaction) []error {
+func(txs []*types.Transaction) []error {
 return make([]error, len(txs))
 },
 func(string, []common.Hash) error { return nil },