diff --git a/.gitignore b/.gitignore
index e24e1d167..3f27cdc00 100644
--- a/.gitignore
+++ b/.gitignore
@@ -47,4 +47,6 @@ profile.cov
/dashboard/assets/package-lock.json
**/yarn-error.log
-logs/
\ No newline at end of file
+logs/
+
+tests/spec-tests/
diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index dbdcd1782..854997648 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -606,8 +606,7 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
if call.GasPrice != nil && (call.GasFeeCap != nil || call.GasTipCap != nil) {
return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
}
- head := b.blockchain.CurrentHeader()
- if !b.blockchain.Config().IsLondon(head.Number) {
+ if !b.blockchain.Config().IsLondon(header.Number) {
// If there's no basefee, then it must be a non-1559 execution
if call.GasPrice == nil {
call.GasPrice = new(big.Int)
@@ -629,13 +628,13 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
// Backfill the legacy gasPrice for EVM execution, unless we're all zeroes
call.GasPrice = new(big.Int)
if call.GasFeeCap.BitLen() > 0 || call.GasTipCap.BitLen() > 0 {
- call.GasPrice = math.BigMin(new(big.Int).Add(call.GasTipCap, head.BaseFee), call.GasFeeCap)
+ call.GasPrice = math.BigMin(new(big.Int).Add(call.GasTipCap, header.BaseFee), call.GasFeeCap)
}
}
}
// Ensure message is initialized properly.
if call.Gas == 0 {
- call.Gas = 50000000
+ call.Gas = 10 * header.GasLimit
}
if call.Value == nil {
call.Value = new(big.Int)
diff --git a/accounts/scwallet/README.md b/accounts/scwallet/README.md
index 4313d9c6b..28079c474 100644
--- a/accounts/scwallet/README.md
+++ b/accounts/scwallet/README.md
@@ -8,7 +8,7 @@
## Preparing the smartcard
- **WARNING: FOILLOWING THESE INSTRUCTIONS WILL DESTROY THE MASTER KEY ON YOUR CARD. ONLY PROCEED IF NO FUNDS ARE ASSOCIATED WITH THESE ACCOUNTS**
+ **WARNING: FOLLOWING THESE INSTRUCTIONS WILL DESTROY THE MASTER KEY ON YOUR CARD. ONLY PROCEED IF NO FUNDS ARE ASSOCIATED WITH THESE ACCOUNTS**
You can use status' [keycard-cli](https://github.com/status-im/keycard-cli) and you should get _at least_ version 2.1.1 of their [smartcard application](https://github.com/status-im/status-keycard/releases/download/2.2.1/keycard_v2.2.1.cap)
diff --git a/cmd/devp2p/internal/ethtest/chain_test.go b/cmd/devp2p/internal/ethtest/chain_test.go
index 67221923a..de6acfdcd 100644
--- a/cmd/devp2p/internal/ethtest/chain_test.go
+++ b/cmd/devp2p/internal/ethtest/chain_test.go
@@ -145,7 +145,7 @@ func TestChain_GetHeaders(t *testing.T) {
}{
{
req: GetBlockHeaders{
- GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+ GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{Number: uint64(2)},
Amount: uint64(5),
Skip: 1,
@@ -162,7 +162,7 @@ func TestChain_GetHeaders(t *testing.T) {
},
{
req: GetBlockHeaders{
- GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+ GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{Number: uint64(chain.Len() - 1)},
Amount: uint64(3),
Skip: 0,
@@ -177,7 +177,7 @@ func TestChain_GetHeaders(t *testing.T) {
},
{
req: GetBlockHeaders{
- GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+ GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{Hash: chain.Head().Hash()},
Amount: uint64(1),
Skip: 0,
diff --git a/cmd/devp2p/internal/ethtest/helpers.go b/cmd/devp2p/internal/ethtest/helpers.go
index bc901bdeb..a0339b88c 100644
--- a/cmd/devp2p/internal/ethtest/helpers.go
+++ b/cmd/devp2p/internal/ethtest/helpers.go
@@ -62,7 +62,6 @@ func (s *Suite) dial() (*Conn, error) {
}
// set default p2p capabilities
conn.caps = []p2p.Cap{
- {Name: "eth", Version: 66},
{Name: "eth", Version: 67},
{Name: "eth", Version: 68},
}
@@ -237,8 +236,8 @@ func (c *Conn) readAndServe(chain *Chain, timeout time.Duration) Message {
return errorf("could not get headers for inbound header request: %v", err)
}
resp := &BlockHeaders{
- RequestId: msg.ReqID(),
- BlockHeadersPacket: eth.BlockHeadersPacket(headers),
+ RequestId: msg.ReqID(),
+ BlockHeadersRequest: eth.BlockHeadersRequest(headers),
}
if err := c.Write(resp); err != nil {
return errorf("could not write to connection: %v", err)
@@ -267,7 +266,7 @@ func (c *Conn) headersRequest(request *GetBlockHeaders, chain *Chain, reqID uint
if !ok {
return nil, fmt.Errorf("unexpected message received: %s", pretty.Sdump(msg))
}
- headers := []*types.Header(resp.BlockHeadersPacket)
+ headers := []*types.Header(resp.BlockHeadersRequest)
return headers, nil
}
@@ -379,7 +378,7 @@ func (s *Suite) waitForBlockImport(conn *Conn, block *types.Block) error {
conn.SetReadDeadline(time.Now().Add(20 * time.Second))
// create request
req := &GetBlockHeaders{
- GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+ GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{Hash: block.Hash()},
Amount: 1,
},
@@ -604,8 +603,8 @@ func (s *Suite) hashAnnounce() error {
pretty.Sdump(blockHeaderReq))
}
err = sendConn.Write(&BlockHeaders{
- RequestId: blockHeaderReq.ReqID(),
- BlockHeadersPacket: eth.BlockHeadersPacket{nextBlock.Header()},
+ RequestId: blockHeaderReq.ReqID(),
+ BlockHeadersRequest: eth.BlockHeadersRequest{nextBlock.Header()},
})
if err != nil {
return fmt.Errorf("failed to write to connection: %v", err)
diff --git a/cmd/devp2p/internal/ethtest/snap.go b/cmd/devp2p/internal/ethtest/snap.go
index f947e4bc9..ea528e5e2 100644
--- a/cmd/devp2p/internal/ethtest/snap.go
+++ b/cmd/devp2p/internal/ethtest/snap.go
@@ -27,8 +27,8 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/protocols/snap"
"github.com/ethereum/go-ethereum/internal/utesting"
- "github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
"golang.org/x/crypto/sha3"
)
@@ -530,11 +530,11 @@ func (s *Suite) snapGetAccountRange(t *utesting.T, tc *accRangeTest) error {
for i, key := range hashes {
keys[i] = common.CopyBytes(key[:])
}
- nodes := make(light.NodeList, len(proof))
+ nodes := make(trienode.ProofList, len(proof))
for i, node := range proof {
nodes[i] = node
}
- proofdb := nodes.NodeSet()
+ proofdb := nodes.Set()
var end []byte
if len(keys) > 0 {
diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go
index 815353be7..0b56c8cf4 100644
--- a/cmd/devp2p/internal/ethtest/suite.go
+++ b/cmd/devp2p/internal/ethtest/suite.go
@@ -112,7 +112,7 @@ func (s *Suite) TestGetBlockHeaders(t *utesting.T) {
}
// write request
req := &GetBlockHeaders{
- GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+ GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{Hash: s.chain.blocks[1].Hash()},
Amount: 2,
Skip: 1,
@@ -150,7 +150,7 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) {
// create two requests
req1 := &GetBlockHeaders{
RequestId: uint64(111),
- GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+ GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{
Hash: s.chain.blocks[1].Hash(),
},
@@ -161,7 +161,7 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) {
}
req2 := &GetBlockHeaders{
RequestId: uint64(222),
- GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+ GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{
Hash: s.chain.blocks[1].Hash(),
},
@@ -201,10 +201,10 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) {
if err != nil {
t.Fatalf("failed to get expected headers for request 2: %v", err)
}
- if !headersMatch(expected1, headers1.BlockHeadersPacket) {
+ if !headersMatch(expected1, headers1.BlockHeadersRequest) {
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1)
}
- if !headersMatch(expected2, headers2.BlockHeadersPacket) {
+ if !headersMatch(expected2, headers2.BlockHeadersRequest) {
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2)
}
}
@@ -224,7 +224,7 @@ func (s *Suite) TestSameRequestID(t *utesting.T) {
reqID := uint64(1234)
request1 := &GetBlockHeaders{
RequestId: reqID,
- GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+ GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{
Number: 1,
},
@@ -233,7 +233,7 @@ func (s *Suite) TestSameRequestID(t *utesting.T) {
}
request2 := &GetBlockHeaders{
RequestId: reqID,
- GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+ GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{
Number: 33,
},
@@ -270,10 +270,10 @@ func (s *Suite) TestSameRequestID(t *utesting.T) {
if err != nil {
t.Fatalf("failed to get expected block headers: %v", err)
}
- if !headersMatch(expected1, headers1.BlockHeadersPacket) {
+ if !headersMatch(expected1, headers1.BlockHeadersRequest) {
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1)
}
- if !headersMatch(expected2, headers2.BlockHeadersPacket) {
+ if !headersMatch(expected2, headers2.BlockHeadersRequest) {
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2)
}
}
@@ -290,7 +290,7 @@ func (s *Suite) TestZeroRequestID(t *utesting.T) {
t.Fatalf("peering failed: %v", err)
}
req := &GetBlockHeaders{
- GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+ GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{Number: 0},
Amount: 2,
},
@@ -322,7 +322,7 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) {
// create block bodies request
req := &GetBlockBodies{
RequestId: uint64(55),
- GetBlockBodiesPacket: eth.GetBlockBodiesPacket{
+ GetBlockBodiesRequest: eth.GetBlockBodiesRequest{
s.chain.blocks[54].Hash(),
s.chain.blocks[75].Hash(),
},
@@ -336,11 +336,11 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) {
if !ok {
t.Fatalf("unexpected: %s", pretty.Sdump(msg))
}
- bodies := resp.BlockBodiesPacket
+ bodies := resp.BlockBodiesResponse
t.Logf("received %d block bodies", len(bodies))
- if len(bodies) != len(req.GetBlockBodiesPacket) {
+ if len(bodies) != len(req.GetBlockBodiesRequest) {
t.Fatalf("wrong bodies in response: expected %d bodies, "+
- "got %d", len(req.GetBlockBodiesPacket), len(bodies))
+ "got %d", len(req.GetBlockBodiesRequest), len(bodies))
}
}
@@ -481,8 +481,8 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) {
hashes = append(hashes, hash)
}
getTxReq := &GetPooledTransactions{
- RequestId: 1234,
- GetPooledTransactionsPacket: hashes,
+ RequestId: 1234,
+ GetPooledTransactionsRequest: hashes,
}
if err = conn.Write(getTxReq); err != nil {
t.Fatalf("could not write to conn: %v", err)
@@ -490,7 +490,7 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) {
// check that all received transactions match those that were sent to node
switch msg := conn.waitForResponse(s.chain, timeout, getTxReq.RequestId).(type) {
case *PooledTransactions:
- for _, gotTx := range msg.PooledTransactionsPacket {
+ for _, gotTx := range msg.PooledTransactionsResponse {
if _, exists := hashMap[gotTx.Hash()]; !exists {
t.Fatalf("unexpected tx received: %v", gotTx.Hash())
}
@@ -547,8 +547,8 @@ func (s *Suite) TestNewPooledTxs(t *utesting.T) {
msg := conn.readAndServe(s.chain, timeout)
switch msg := msg.(type) {
case *GetPooledTransactions:
- if len(msg.GetPooledTransactionsPacket) != len(hashes) {
- t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsPacket))
+ if len(msg.GetPooledTransactionsRequest) != len(hashes) {
+ t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsRequest))
}
return
diff --git a/cmd/devp2p/internal/ethtest/types.go b/cmd/devp2p/internal/ethtest/types.go
index afa9a9c8c..805d7a81b 100644
--- a/cmd/devp2p/internal/ethtest/types.go
+++ b/cmd/devp2p/internal/ethtest/types.go
@@ -99,24 +99,24 @@ func (msg Transactions) Code() int { return 18 }
func (msg Transactions) ReqID() uint64 { return 18 }
// GetBlockHeaders represents a block header query.
-type GetBlockHeaders eth.GetBlockHeadersPacket66
+type GetBlockHeaders eth.GetBlockHeadersPacket
func (msg GetBlockHeaders) Code() int { return 19 }
func (msg GetBlockHeaders) ReqID() uint64 { return msg.RequestId }
-type BlockHeaders eth.BlockHeadersPacket66
+type BlockHeaders eth.BlockHeadersPacket
func (msg BlockHeaders) Code() int { return 20 }
func (msg BlockHeaders) ReqID() uint64 { return msg.RequestId }
// GetBlockBodies represents a GetBlockBodies request
-type GetBlockBodies eth.GetBlockBodiesPacket66
+type GetBlockBodies eth.GetBlockBodiesPacket
func (msg GetBlockBodies) Code() int { return 21 }
func (msg GetBlockBodies) ReqID() uint64 { return msg.RequestId }
// BlockBodies is the network packet for block content distribution.
-type BlockBodies eth.BlockBodiesPacket66
+type BlockBodies eth.BlockBodiesPacket
func (msg BlockBodies) Code() int { return 22 }
func (msg BlockBodies) ReqID() uint64 { return msg.RequestId }
@@ -128,7 +128,7 @@ func (msg NewBlock) Code() int { return 23 }
func (msg NewBlock) ReqID() uint64 { return 0 }
// NewPooledTransactionHashes66 is the network packet for the tx hash propagation message.
-type NewPooledTransactionHashes66 eth.NewPooledTransactionHashesPacket66
+type NewPooledTransactionHashes66 eth.NewPooledTransactionHashesPacket67
func (msg NewPooledTransactionHashes66) Code() int { return 24 }
func (msg NewPooledTransactionHashes66) ReqID() uint64 { return 0 }
@@ -139,12 +139,12 @@ type NewPooledTransactionHashes eth.NewPooledTransactionHashesPacket68
func (msg NewPooledTransactionHashes) Code() int { return 24 }
func (msg NewPooledTransactionHashes) ReqID() uint64 { return 0 }
-type GetPooledTransactions eth.GetPooledTransactionsPacket66
+type GetPooledTransactions eth.GetPooledTransactionsPacket
func (msg GetPooledTransactions) Code() int { return 25 }
func (msg GetPooledTransactions) ReqID() uint64 { return msg.RequestId }
-type PooledTransactions eth.PooledTransactionsPacket66
+type PooledTransactions eth.PooledTransactionsPacket
func (msg PooledTransactions) Code() int { return 26 }
func (msg PooledTransactions) ReqID() uint64 { return msg.RequestId }
@@ -180,25 +180,25 @@ func (c *Conn) Read() Message {
case (Status{}).Code():
msg = new(Status)
case (GetBlockHeaders{}).Code():
- ethMsg := new(eth.GetBlockHeadersPacket66)
+ ethMsg := new(eth.GetBlockHeadersPacket)
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
return errorf("could not rlp decode message: %v", err)
}
return (*GetBlockHeaders)(ethMsg)
case (BlockHeaders{}).Code():
- ethMsg := new(eth.BlockHeadersPacket66)
+ ethMsg := new(eth.BlockHeadersPacket)
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
return errorf("could not rlp decode message: %v", err)
}
return (*BlockHeaders)(ethMsg)
case (GetBlockBodies{}).Code():
- ethMsg := new(eth.GetBlockBodiesPacket66)
+ ethMsg := new(eth.GetBlockBodiesPacket)
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
return errorf("could not rlp decode message: %v", err)
}
return (*GetBlockBodies)(ethMsg)
case (BlockBodies{}).Code():
- ethMsg := new(eth.BlockBodiesPacket66)
+ ethMsg := new(eth.BlockBodiesPacket)
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
return errorf("could not rlp decode message: %v", err)
}
@@ -217,13 +217,13 @@ func (c *Conn) Read() Message {
}
msg = new(NewPooledTransactionHashes66)
case (GetPooledTransactions{}.Code()):
- ethMsg := new(eth.GetPooledTransactionsPacket66)
+ ethMsg := new(eth.GetPooledTransactionsPacket)
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
return errorf("could not rlp decode message: %v", err)
}
return (*GetPooledTransactions)(ethMsg)
case (PooledTransactions{}.Code()):
- ethMsg := new(eth.PooledTransactionsPacket66)
+ ethMsg := new(eth.PooledTransactionsPacket)
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
return errorf("could not rlp decode message: %v", err)
}
diff --git a/cmd/evm/internal/t8ntool/block.go b/cmd/evm/internal/t8ntool/block.go
index 09dca8984..5c0e28e28 100644
--- a/cmd/evm/internal/t8ntool/block.go
+++ b/cmd/evm/internal/t8ntool/block.go
@@ -37,33 +37,38 @@ import (
//go:generate go run github.com/fjl/gencodec -type header -field-override headerMarshaling -out gen_header.go
type header struct {
- ParentHash common.Hash `json:"parentHash"`
- OmmerHash *common.Hash `json:"sha3Uncles"`
- Coinbase *common.Address `json:"miner"`
- Root common.Hash `json:"stateRoot" gencodec:"required"`
- TxHash *common.Hash `json:"transactionsRoot"`
- ReceiptHash *common.Hash `json:"receiptsRoot"`
- Bloom types.Bloom `json:"logsBloom"`
- Difficulty *big.Int `json:"difficulty"`
- Number *big.Int `json:"number" gencodec:"required"`
- GasLimit uint64 `json:"gasLimit" gencodec:"required"`
- GasUsed uint64 `json:"gasUsed"`
- Time uint64 `json:"timestamp" gencodec:"required"`
- Extra []byte `json:"extraData"`
- MixDigest common.Hash `json:"mixHash"`
- Nonce *types.BlockNonce `json:"nonce"`
- BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"`
- WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
+ ParentHash common.Hash `json:"parentHash"`
+ OmmerHash *common.Hash `json:"sha3Uncles"`
+ Coinbase *common.Address `json:"miner"`
+ Root common.Hash `json:"stateRoot" gencodec:"required"`
+ TxHash *common.Hash `json:"transactionsRoot"`
+ ReceiptHash *common.Hash `json:"receiptsRoot"`
+ Bloom types.Bloom `json:"logsBloom"`
+ Difficulty *big.Int `json:"difficulty"`
+ Number *big.Int `json:"number" gencodec:"required"`
+ GasLimit uint64 `json:"gasLimit" gencodec:"required"`
+ GasUsed uint64 `json:"gasUsed"`
+ Time uint64 `json:"timestamp" gencodec:"required"`
+ Extra []byte `json:"extraData"`
+ MixDigest common.Hash `json:"mixHash"`
+ Nonce *types.BlockNonce `json:"nonce"`
+ BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"`
+ WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
+ BlobGasUsed *uint64 `json:"blobGasUsed" rlp:"optional"`
+ ExcessBlobGas *uint64 `json:"excessBlobGas" rlp:"optional"`
+ ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
}
type headerMarshaling struct {
- Difficulty *math.HexOrDecimal256
- Number *math.HexOrDecimal256
- GasLimit math.HexOrDecimal64
- GasUsed math.HexOrDecimal64
- Time math.HexOrDecimal64
- Extra hexutil.Bytes
- BaseFee *math.HexOrDecimal256
+ Difficulty *math.HexOrDecimal256
+ Number *math.HexOrDecimal256
+ GasLimit math.HexOrDecimal64
+ GasUsed math.HexOrDecimal64
+ Time math.HexOrDecimal64
+ Extra hexutil.Bytes
+ BaseFee *math.HexOrDecimal256
+ BlobGasUsed *math.HexOrDecimal64
+ ExcessBlobGas *math.HexOrDecimal64
}
type bbInput struct {
@@ -113,22 +118,25 @@ func (c *cliqueInput) UnmarshalJSON(input []byte) error {
// ToBlock converts i into a *types.Block
func (i *bbInput) ToBlock() *types.Block {
header := &types.Header{
- ParentHash: i.Header.ParentHash,
- UncleHash: types.EmptyUncleHash,
- Coinbase: common.Address{},
- Root: i.Header.Root,
- TxHash: types.EmptyTxsHash,
- ReceiptHash: types.EmptyReceiptsHash,
- Bloom: i.Header.Bloom,
- Difficulty: common.Big0,
- Number: i.Header.Number,
- GasLimit: i.Header.GasLimit,
- GasUsed: i.Header.GasUsed,
- Time: i.Header.Time,
- Extra: i.Header.Extra,
- MixDigest: i.Header.MixDigest,
- BaseFee: i.Header.BaseFee,
- WithdrawalsHash: i.Header.WithdrawalsHash,
+ ParentHash: i.Header.ParentHash,
+ UncleHash: types.EmptyUncleHash,
+ Coinbase: common.Address{},
+ Root: i.Header.Root,
+ TxHash: types.EmptyTxsHash,
+ ReceiptHash: types.EmptyReceiptsHash,
+ Bloom: i.Header.Bloom,
+ Difficulty: common.Big0,
+ Number: i.Header.Number,
+ GasLimit: i.Header.GasLimit,
+ GasUsed: i.Header.GasUsed,
+ Time: i.Header.Time,
+ Extra: i.Header.Extra,
+ MixDigest: i.Header.MixDigest,
+ BaseFee: i.Header.BaseFee,
+ WithdrawalsHash: i.Header.WithdrawalsHash,
+ BlobGasUsed: i.Header.BlobGasUsed,
+ ExcessBlobGas: i.Header.ExcessBlobGas,
+ ParentBeaconRoot: i.Header.ParentBeaconBlockRoot,
}
// Fill optional values.
@@ -150,7 +158,7 @@ func (i *bbInput) ToBlock() *types.Block {
if i.Header.Nonce != nil {
header.Nonce = *i.Header.Nonce
}
- if header.Difficulty != nil {
+ if i.Header.Difficulty != nil {
header.Difficulty = i.Header.Difficulty
}
return types.NewBlockWithHeader(header).WithBody(i.Txs, i.Ommers).WithWithdrawals(i.Withdrawals)
diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go
index bb14ac63c..312f427d4 100644
--- a/cmd/evm/internal/t8ntool/execution.go
+++ b/cmd/evm/internal/t8ntool/execution.go
@@ -59,7 +59,7 @@ type ExecutionResult struct {
BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
WithdrawalsRoot *common.Hash `json:"withdrawalsRoot,omitempty"`
CurrentExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"`
- CurrentBlobGasUsed *math.HexOrDecimal64 `json:"currentBlobGasUsed,omitempty"`
+ CurrentBlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed,omitempty"`
}
type ommer struct {
@@ -85,7 +85,7 @@ type stEnv struct {
Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
BaseFee *big.Int `json:"currentBaseFee,omitempty"`
ParentUncleHash common.Hash `json:"parentUncleHash"`
- ExcessBlobGas *uint64 `json:"excessBlobGas,omitempty"`
+ ExcessBlobGas *uint64 `json:"currentExcessBlobGas,omitempty"`
ParentExcessBlobGas *uint64 `json:"parentExcessBlobGas,omitempty"`
ParentBlobGasUsed *uint64 `json:"parentBlobGasUsed,omitempty"`
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"`
@@ -163,17 +163,19 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
rnd := common.BigToHash(pre.Env.Random)
vmContext.Random = &rnd
}
- // If excessBlobGas is defined, add it to the vmContext.
+ // Calculate the BlobBaseFee
+ var excessBlobGas uint64
if pre.Env.ExcessBlobGas != nil {
- vmContext.ExcessBlobGas = pre.Env.ExcessBlobGas
+ excessBlobGas = *pre.Env.ExcessBlobGas
+ vmContext.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas)
} else {
// If it is not explicitly defined, but we have the parent values, we try
// to calculate it ourselves.
parentExcessBlobGas := pre.Env.ParentExcessBlobGas
parentBlobGasUsed := pre.Env.ParentBlobGasUsed
if parentExcessBlobGas != nil && parentBlobGasUsed != nil {
- excessBlobGas := eip4844.CalcExcessBlobGas(*parentExcessBlobGas, *parentBlobGasUsed)
- vmContext.ExcessBlobGas = &excessBlobGas
+ excessBlobGas = eip4844.CalcExcessBlobGas(*parentExcessBlobGas, *parentBlobGasUsed)
+ vmContext.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas)
}
}
// If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's
@@ -189,12 +191,15 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
}
var blobGasUsed uint64
for i, tx := range txs {
- if tx.Type() == types.BlobTxType && vmContext.ExcessBlobGas == nil {
+ if tx.Type() == types.BlobTxType && vmContext.BlobBaseFee == nil {
errMsg := "blob tx used but field env.ExcessBlobGas missing"
log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", errMsg)
rejectedTxs = append(rejectedTxs, &rejectedTx{i, errMsg})
continue
}
+ if tx.Type() == types.BlobTxType {
+ blobGasUsed += uint64(params.BlobTxBlobGasPerBlob * len(tx.BlobHashes()))
+ }
msg, err := core.TransactionToMessage(tx, signer, pre.Env.BaseFee)
if err != nil {
log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", err)
@@ -224,9 +229,6 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
gaspool.SetGas(prevGas)
continue
}
- if tx.Type() == types.BlobTxType {
- blobGasUsed += params.BlobTxBlobGasPerBlob
- }
includedTxs = append(includedTxs, tx)
if hashError != nil {
return nil, nil, NewError(ErrorMissingBlockhash, hashError)
@@ -322,8 +324,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
h := types.DeriveSha(types.Withdrawals(pre.Env.Withdrawals), trie.NewStackTrie(nil))
execRs.WithdrawalsRoot = &h
}
- if vmContext.ExcessBlobGas != nil {
- execRs.CurrentExcessBlobGas = (*math.HexOrDecimal64)(vmContext.ExcessBlobGas)
+ if vmContext.BlobBaseFee != nil {
+ execRs.CurrentExcessBlobGas = (*math.HexOrDecimal64)(&excessBlobGas)
execRs.CurrentBlobGasUsed = (*math.HexOrDecimal64)(&blobGasUsed)
}
// Re-create statedb instance with new root upon the updated database
diff --git a/cmd/evm/internal/t8ntool/gen_header.go b/cmd/evm/internal/t8ntool/gen_header.go
index 76228394d..a8c866897 100644
--- a/cmd/evm/internal/t8ntool/gen_header.go
+++ b/cmd/evm/internal/t8ntool/gen_header.go
@@ -18,23 +18,26 @@ var _ = (*headerMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (h header) MarshalJSON() ([]byte, error) {
type header struct {
- ParentHash common.Hash `json:"parentHash"`
- OmmerHash *common.Hash `json:"sha3Uncles"`
- Coinbase *common.Address `json:"miner"`
- Root common.Hash `json:"stateRoot" gencodec:"required"`
- TxHash *common.Hash `json:"transactionsRoot"`
- ReceiptHash *common.Hash `json:"receiptsRoot"`
- Bloom types.Bloom `json:"logsBloom"`
- Difficulty *math.HexOrDecimal256 `json:"difficulty"`
- Number *math.HexOrDecimal256 `json:"number" gencodec:"required"`
- GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
- GasUsed math.HexOrDecimal64 `json:"gasUsed"`
- Time math.HexOrDecimal64 `json:"timestamp" gencodec:"required"`
- Extra hexutil.Bytes `json:"extraData"`
- MixDigest common.Hash `json:"mixHash"`
- Nonce *types.BlockNonce `json:"nonce"`
- BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"`
- WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
+ ParentHash common.Hash `json:"parentHash"`
+ OmmerHash *common.Hash `json:"sha3Uncles"`
+ Coinbase *common.Address `json:"miner"`
+ Root common.Hash `json:"stateRoot" gencodec:"required"`
+ TxHash *common.Hash `json:"transactionsRoot"`
+ ReceiptHash *common.Hash `json:"receiptsRoot"`
+ Bloom types.Bloom `json:"logsBloom"`
+ Difficulty *math.HexOrDecimal256 `json:"difficulty"`
+ Number *math.HexOrDecimal256 `json:"number" gencodec:"required"`
+ GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
+ GasUsed math.HexOrDecimal64 `json:"gasUsed"`
+ Time math.HexOrDecimal64 `json:"timestamp" gencodec:"required"`
+ Extra hexutil.Bytes `json:"extraData"`
+ MixDigest common.Hash `json:"mixHash"`
+ Nonce *types.BlockNonce `json:"nonce"`
+ BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"`
+ WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
+ BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed" rlp:"optional"`
+ ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas" rlp:"optional"`
+ ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
}
var enc header
enc.ParentHash = h.ParentHash
@@ -54,29 +57,35 @@ func (h header) MarshalJSON() ([]byte, error) {
enc.Nonce = h.Nonce
enc.BaseFee = (*math.HexOrDecimal256)(h.BaseFee)
enc.WithdrawalsHash = h.WithdrawalsHash
+ enc.BlobGasUsed = (*math.HexOrDecimal64)(h.BlobGasUsed)
+ enc.ExcessBlobGas = (*math.HexOrDecimal64)(h.ExcessBlobGas)
+ enc.ParentBeaconBlockRoot = h.ParentBeaconBlockRoot
return json.Marshal(&enc)
}
// UnmarshalJSON unmarshals from JSON.
func (h *header) UnmarshalJSON(input []byte) error {
type header struct {
- ParentHash *common.Hash `json:"parentHash"`
- OmmerHash *common.Hash `json:"sha3Uncles"`
- Coinbase *common.Address `json:"miner"`
- Root *common.Hash `json:"stateRoot" gencodec:"required"`
- TxHash *common.Hash `json:"transactionsRoot"`
- ReceiptHash *common.Hash `json:"receiptsRoot"`
- Bloom *types.Bloom `json:"logsBloom"`
- Difficulty *math.HexOrDecimal256 `json:"difficulty"`
- Number *math.HexOrDecimal256 `json:"number" gencodec:"required"`
- GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
- GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
- Time *math.HexOrDecimal64 `json:"timestamp" gencodec:"required"`
- Extra *hexutil.Bytes `json:"extraData"`
- MixDigest *common.Hash `json:"mixHash"`
- Nonce *types.BlockNonce `json:"nonce"`
- BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"`
- WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
+ ParentHash *common.Hash `json:"parentHash"`
+ OmmerHash *common.Hash `json:"sha3Uncles"`
+ Coinbase *common.Address `json:"miner"`
+ Root *common.Hash `json:"stateRoot" gencodec:"required"`
+ TxHash *common.Hash `json:"transactionsRoot"`
+ ReceiptHash *common.Hash `json:"receiptsRoot"`
+ Bloom *types.Bloom `json:"logsBloom"`
+ Difficulty *math.HexOrDecimal256 `json:"difficulty"`
+ Number *math.HexOrDecimal256 `json:"number" gencodec:"required"`
+ GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
+ GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
+ Time *math.HexOrDecimal64 `json:"timestamp" gencodec:"required"`
+ Extra *hexutil.Bytes `json:"extraData"`
+ MixDigest *common.Hash `json:"mixHash"`
+ Nonce *types.BlockNonce `json:"nonce"`
+ BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"`
+ WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
+ BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed" rlp:"optional"`
+ ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas" rlp:"optional"`
+ ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
}
var dec header
if err := json.Unmarshal(input, &dec); err != nil {
@@ -137,5 +146,14 @@ func (h *header) UnmarshalJSON(input []byte) error {
if dec.WithdrawalsHash != nil {
h.WithdrawalsHash = dec.WithdrawalsHash
}
+ if dec.BlobGasUsed != nil {
+ h.BlobGasUsed = (*uint64)(dec.BlobGasUsed)
+ }
+ if dec.ExcessBlobGas != nil {
+ h.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas)
+ }
+ if dec.ParentBeaconBlockRoot != nil {
+ h.ParentBeaconBlockRoot = dec.ParentBeaconBlockRoot
+ }
return nil
}
diff --git a/cmd/evm/internal/t8ntool/gen_stenv.go b/cmd/evm/internal/t8ntool/gen_stenv.go
index bb195ef64..d47db4a87 100644
--- a/cmd/evm/internal/t8ntool/gen_stenv.go
+++ b/cmd/evm/internal/t8ntool/gen_stenv.go
@@ -33,7 +33,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
ParentUncleHash common.Hash `json:"parentUncleHash"`
- ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas,omitempty"`
+ ExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"`
ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"`
ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"`
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"`
@@ -81,7 +81,7 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
ParentUncleHash *common.Hash `json:"parentUncleHash"`
- ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas,omitempty"`
+ ExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"`
ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"`
ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"`
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"`
diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go
index 396b341d2..600bc460f 100644
--- a/cmd/evm/internal/t8ntool/transition.go
+++ b/cmd/evm/internal/t8ntool/transition.go
@@ -334,7 +334,7 @@ func loadTransactions(txStr string, inputData *input, env stEnv, chainConfig *pa
txsWithKeys = inputData.Txs
}
// We may have to sign the transactions.
- signer := types.MakeSigner(chainConfig, big.NewInt(int64(env.Number)), env.Timestamp)
+ signer := types.LatestSignerForChainID(chainConfig.ChainID)
return signUnsignedTransactions(txsWithKeys, signer)
}
diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go
index 017388efb..45fc98535 100644
--- a/cmd/evm/runner.go
+++ b/cmd/evm/runner.go
@@ -123,7 +123,8 @@ func runCmd(ctx *cli.Context) error {
sender = common.BytesToAddress([]byte("sender"))
receiver = common.BytesToAddress([]byte("receiver"))
preimages = ctx.Bool(DumpFlag.Name)
- blobHashes []common.Hash // TODO (MariusVanDerWijden) implement blob hashes in state tests
+ blobHashes []common.Hash // TODO (MariusVanDerWijden) implement blob hashes in state tests
+ blobBaseFee = new(big.Int) // TODO (MariusVanDerWijden) implement blob fee in state tests
)
if ctx.Bool(MachineFlag.Name) {
tracer = logger.NewJSONLogger(logconfig, os.Stdout)
@@ -221,6 +222,7 @@ func runCmd(ctx *cli.Context) error {
Coinbase: genesisConfig.Coinbase,
BlockNumber: new(big.Int).SetUint64(genesisConfig.Number),
BlobHashes: blobHashes,
+ BlobBaseFee: blobBaseFee,
EVMConfig: vm.Config{
Tracer: tracer,
},
diff --git a/cmd/evm/testdata/28/env.json b/cmd/evm/testdata/28/env.json
index 5056fe29a..82f22ac62 100644
--- a/cmd/evm/testdata/28/env.json
+++ b/cmd/evm/testdata/28/env.json
@@ -9,8 +9,7 @@
"parentDifficulty" : "0x00",
"parentUncleHash" : "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"currentRandom" : "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
- "withdrawals" : [
- ],
+ "withdrawals" : [],
"parentBaseFee" : "0x0a",
"parentGasUsed" : "0x00",
"parentGasLimit" : "0x7fffffffffffffff",
@@ -20,4 +19,4 @@
"0" : "0x3a9b485972e7353edd9152712492f0c58d89ef80623686b6bf947a4a6dce6cb6"
},
"parentBeaconBlockRoot": "0x0000beac00beac00beac00beac00beac00beac00beac00beac00beac00beac00"
-}
\ No newline at end of file
+}
diff --git a/cmd/evm/testdata/28/exp.json b/cmd/evm/testdata/28/exp.json
index a55ce0aec..75c715e97 100644
--- a/cmd/evm/testdata/28/exp.json
+++ b/cmd/evm/testdata/28/exp.json
@@ -42,6 +42,6 @@
"currentBaseFee": "0x9",
"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"currentExcessBlobGas": "0x0",
- "currentBlobGasUsed": "0x20000"
+ "blobGasUsed": "0x20000"
}
-}
\ No newline at end of file
+}
diff --git a/cmd/evm/testdata/29/alloc.json b/cmd/evm/testdata/29/alloc.json
index 70d47862a..d2c879a45 100644
--- a/cmd/evm/testdata/29/alloc.json
+++ b/cmd/evm/testdata/29/alloc.json
@@ -6,7 +6,7 @@
"storage" : {
}
},
- "0xbEac00dDB15f3B6d645C48263dC93862413A222D" : {
+ "0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02" : {
"balance" : "0x1",
"code" : "0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500",
"nonce" : "0x00",
diff --git a/cmd/evm/testdata/29/exp.json b/cmd/evm/testdata/29/exp.json
index 16a881777..c4c001ec1 100644
--- a/cmd/evm/testdata/29/exp.json
+++ b/cmd/evm/testdata/29/exp.json
@@ -1,6 +1,6 @@
{
"alloc": {
- "0xbeac00ddb15f3b6d645c48263dc93862413a222d": {
+ "0x000f3df6d732807ef1319fb7b8bb8522d0beac02": {
"code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500",
"storage": {
"0x000000000000000000000000000000000000000000000000000000000000079e": "0x000000000000000000000000000000000000000000000000000000000000079e",
@@ -14,7 +14,7 @@
}
},
"result": {
- "stateRoot": "0x2db9f6bc233e8fd0af2d8023404493a19b37d9d69ace71f4e73158851fced574",
+ "stateRoot": "0x19a4f821a7c0a6f4c934f9acb0fe9ce5417b68086e12513ecbc3e3f57e01573c",
"txRoot": "0x248074fabe112f7d93917f292b64932394f835bb98da91f21501574d58ec92ab",
"receiptsRoot": "0xf78dfb743fbd92ade140711c8bbc542b5e307f0ab7984eff35d751969fe57efa",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
@@ -40,6 +40,6 @@
"currentBaseFee": "0x9",
"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"currentExcessBlobGas": "0x0",
- "currentBlobGasUsed": "0x0"
+ "blobGasUsed": "0x0"
}
-}
\ No newline at end of file
+}
diff --git a/cmd/evm/testdata/29/readme.md b/cmd/evm/testdata/29/readme.md
index 4383e328e..ab02ce9cf 100644
--- a/cmd/evm/testdata/29/readme.md
+++ b/cmd/evm/testdata/29/readme.md
@@ -1,29 +1,29 @@
## EIP 4788
This test contains testcases for EIP-4788. The 4788-contract is
-located at address `0xbeac00ddb15f3b6d645c48263dc93862413a222d`, and this test executes a simple transaction. It also
+located at address `0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02`, and this test executes a simple transaction. It also
implicitly invokes the system tx, which sets calls the contract and sets the
storage values
+
```
$ dir=./testdata/29/ && go run . t8n --state.fork=Cancun --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout
-INFO [08-15|20:07:56.335] Trie dumping started root=ecde45..2af8a7
-INFO [08-15|20:07:56.335] Trie dumping complete accounts=2 elapsed="225.848µs"
-INFO [08-15|20:07:56.335] Wrote file file=result.json
+INFO [09-27|15:34:53.049] Trie dumping started root=19a4f8..01573c
+INFO [09-27|15:34:53.049] Trie dumping complete accounts=2 elapsed="192.759µs"
+INFO [09-27|15:34:53.050] Wrote file file=result.json
{
"alloc": {
- "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
- "balance": "0x16345785d871db8",
- "nonce": "0x1"
- },
- "0xbeac00541d49391ed88abf392bfc1f4dea8c4143": {
+ "0x000f3df6d732807ef1319fb7b8bb8522d0beac02": {
"code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500",
"storage": {
"0x000000000000000000000000000000000000000000000000000000000000079e": "0x000000000000000000000000000000000000000000000000000000000000079e",
"0x000000000000000000000000000000000000000000000000000000000001879e": "0x0000beac00beac00beac00beac00beac00beac00beac00beac00beac00beac00"
},
- "balance": "0x
+ "balance": "0x1"
+ },
+ "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
+ "balance": "0x16345785d871db8",
+ "nonce": "0x1"
}
}
}
-
```
diff --git a/cmd/evm/testdata/9/readme.md b/cmd/evm/testdata/9/readme.md
index 539478028..357e20068 100644
--- a/cmd/evm/testdata/9/readme.md
+++ b/cmd/evm/testdata/9/readme.md
@@ -1,6 +1,6 @@
## EIP-1559 testing
-This test contains testcases for EIP-1559, which uses an new transaction type and has a new block parameter.
+This test contains testcases for EIP-1559, which uses a new transaction type and has a new block parameter.
### Prestate
diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go
index aebcc29eb..a6bb2c2d2 100644
--- a/cmd/geth/chaincmd.go
+++ b/cmd/geth/chaincmd.go
@@ -474,7 +474,7 @@ func dump(ctx *cli.Context) error {
if err != nil {
return err
}
- triedb := utils.MakeTrieDatabase(ctx, db, true, false) // always enable preimage lookup
+ triedb := utils.MakeTrieDatabase(ctx, db, true, true) // always enable preimage lookup
defer triedb.Close()
state, err := state.New(root, state.NewDatabaseWithNodeDB(db, triedb), nil)
diff --git a/cmd/geth/config.go b/cmd/geth/config.go
index a5d628d8a..027dac7bd 100644
--- a/cmd/geth/config.go
+++ b/cmd/geth/config.go
@@ -32,6 +32,8 @@ import (
"github.com/ethereum/go-ethereum/accounts/scwallet"
"github.com/ethereum/go-ethereum/accounts/usbwallet"
"github.com/ethereum/go-ethereum/cmd/utils"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/ethconfig"
@@ -199,17 +201,18 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
if ctx.IsSet(utils.GraphQLEnabledFlag.Name) {
utils.RegisterGraphQLService(stack, backend, filterSystem, &cfg.Node)
}
-
// Add the Ethereum Stats daemon if requested.
if cfg.Ethstats.URL != "" {
utils.RegisterEthStatsService(stack, backend, cfg.Ethstats.URL)
}
-
// Configure full-sync tester service if requested
- if ctx.IsSet(utils.SyncTargetFlag.Name) && cfg.Eth.SyncMode == downloader.FullSync {
- utils.RegisterFullSyncTester(stack, eth, ctx.Path(utils.SyncTargetFlag.Name))
+ if ctx.IsSet(utils.SyncTargetFlag.Name) {
+ hex := hexutil.MustDecode(ctx.String(utils.SyncTargetFlag.Name))
+ if len(hex) != common.HashLength {
+ utils.Fatalf("invalid sync target length: have %d, want %d", len(hex), common.HashLength)
+ }
+ utils.RegisterFullSyncTester(stack, eth, common.BytesToHash(hex))
}
-
// Start the dev mode if requested, or launch the engine API for
// interacting with external consensus client.
if ctx.IsSet(utils.DeveloperFlag.Name) {
diff --git a/cmd/geth/genesis_test.go b/cmd/geth/genesis_test.go
index 2506b42d1..ffe8176b0 100644
--- a/cmd/geth/genesis_test.go
+++ b/cmd/geth/genesis_test.go
@@ -176,12 +176,12 @@ func TestCustomBackend(t *testing.T) {
{ // Can't start pebble on top of leveldb
initArgs: []string{"--db.engine", "leveldb"},
execArgs: []string{"--db.engine", "pebble"},
- execExpect: `Fatal: Could not open database: db.engine choice was pebble but found pre-existing leveldb database in specified data directory`,
+ execExpect: `Fatal: Failed to register the Ethereum service: db.engine choice was pebble but found pre-existing leveldb database in specified data directory`,
},
{ // Can't start leveldb on top of pebble
initArgs: []string{"--db.engine", "pebble"},
execArgs: []string{"--db.engine", "leveldb"},
- execExpect: `Fatal: Could not open database: db.engine choice was leveldb but found pre-existing pebble database in specified data directory`,
+ execExpect: `Fatal: Failed to register the Ethereum service: db.engine choice was leveldb but found pre-existing pebble database in specified data directory`,
},
{ // Reject invalid backend choice
initArgs: []string{"--db.engine", "mssql"},
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 055c7a1c5..d9ac892e5 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -18,7 +18,6 @@
package utils
import (
- "bytes"
"context"
"crypto/ecdsa"
"encoding/hex"
@@ -39,11 +38,9 @@ import (
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/fdlimit"
- "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/txpool/legacypool"
- "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
@@ -72,7 +69,6 @@ import (
"github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/p2p/netutil"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/triedb/hashdb"
@@ -280,7 +276,6 @@ var (
StateSchemeFlag = &cli.StringFlag{
Name: "state.scheme",
Usage: "Scheme to use for storing ethereum state ('hash' or 'path')",
- Value: rawdb.HashScheme,
Category: flags.StateCategory,
}
StateHistoryFlag = &cli.Uint64Flag{
@@ -603,9 +598,9 @@ var (
}
// MISC settings
- SyncTargetFlag = &cli.PathFlag{
+ SyncTargetFlag = &cli.StringFlag{
Name: "synctarget",
- Usage: `File for containing the hex-encoded block-rlp as sync target(dev feature)`,
+ Usage: `Hash of the block to full sync to (dev testing feature)`,
TakesFile: true,
Category: flags.MiscCategory,
}
@@ -1699,7 +1694,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc))
godebug.SetGCPercent(int(gogc))
- if ctx.IsSet(SyncModeFlag.Name) {
+ if ctx.IsSet(SyncTargetFlag.Name) {
+ cfg.SyncMode = downloader.FullSync // dev sync target forces full sync
+ } else if ctx.IsSet(SyncModeFlag.Name) {
cfg.SyncMode = *flags.GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode)
}
if ctx.IsSet(NetworkIdFlag.Name) {
@@ -1731,15 +1728,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if ctx.IsSet(StateHistoryFlag.Name) {
cfg.StateHistory = ctx.Uint64(StateHistoryFlag.Name)
}
- // Parse state scheme, abort the process if it's not compatible.
- chaindb := tryMakeReadOnlyDatabase(ctx, stack)
- scheme, err := ParseStateScheme(ctx, chaindb)
- chaindb.Close()
- if err != nil {
- Fatalf("%v", err)
+ if ctx.IsSet(StateSchemeFlag.Name) {
+ cfg.StateScheme = ctx.String(StateSchemeFlag.Name)
}
- cfg.StateScheme = scheme
-
// Parse transaction history flag, if user is still using legacy config
// file with 'TxLookupLimit' configured, copy the value to 'TransactionHistory'.
if cfg.TransactionHistory == ethconfig.Defaults.TransactionHistory && cfg.TxLookupLimit != ethconfig.Defaults.TxLookupLimit {
@@ -1984,21 +1975,9 @@ func RegisterFilterAPI(stack *node.Node, backend ethapi.Backend, ethcfg *ethconf
}
// RegisterFullSyncTester adds the full-sync tester service into node.
-func RegisterFullSyncTester(stack *node.Node, eth *eth.Ethereum, path string) {
- blob, err := os.ReadFile(path)
- if err != nil {
- Fatalf("Failed to read block file: %v", err)
- }
- rlpBlob, err := hexutil.Decode(string(bytes.TrimRight(blob, "\r\n")))
- if err != nil {
- Fatalf("Failed to decode block blob: %v", err)
- }
- var block types.Block
- if err := rlp.DecodeBytes(rlpBlob, &block); err != nil {
- Fatalf("Failed to decode block: %v", err)
- }
- catalyst.RegisterFullSyncTester(stack, eth, &block)
- log.Info("Registered full-sync tester", "number", block.NumberU64(), "hash", block.Hash())
+func RegisterFullSyncTester(stack *node.Node, eth *eth.Ethereum, target common.Hash) {
+ catalyst.RegisterFullSyncTester(stack, eth, target)
+ log.Info("Registered full-sync tester", "hash", target)
}
func SetupMetrics(ctx *cli.Context) {
@@ -2187,7 +2166,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh
if gcmode := ctx.String(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {
Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
}
- scheme, err := ParseStateScheme(ctx, chainDb)
+ scheme, err := rawdb.ParseStateScheme(ctx.String(StateSchemeFlag.Name), chainDb)
if err != nil {
Fatalf("%v", err)
}
@@ -2246,47 +2225,12 @@ func MakeConsolePreloads(ctx *cli.Context) []string {
return preloads
}
-// ParseStateScheme resolves scheme identifier from CLI flag. If the provided
-// state scheme is not compatible with the one of persistent scheme, an error
-// will be returned.
-//
-// - none: use the scheme consistent with persistent state, or fallback
-// to hash-based scheme if state is empty.
-// - hash: use hash-based scheme or error out if not compatible with
-// persistent state scheme.
-// - path: use path-based scheme or error out if not compatible with
-// persistent state scheme.
-func ParseStateScheme(ctx *cli.Context, disk ethdb.Database) (string, error) {
- // If state scheme is not specified, use the scheme consistent
- // with persistent state, or fallback to hash mode if database
- // is empty.
- stored := rawdb.ReadStateScheme(disk)
- if !ctx.IsSet(StateSchemeFlag.Name) {
- if stored == "" {
- // use default scheme for empty database, flip it when
- // path mode is chosen as default
- log.Info("State schema set to default", "scheme", "hash")
- return rawdb.HashScheme, nil
- }
- log.Info("State scheme set to already existing", "scheme", stored)
- return stored, nil // reuse scheme of persistent scheme
- }
- // If state scheme is specified, ensure it's compatible with
- // persistent state.
- scheme := ctx.String(StateSchemeFlag.Name)
- if stored == "" || scheme == stored {
- log.Info("State scheme set by user", "scheme", scheme)
- return scheme, nil
- }
- return "", fmt.Errorf("incompatible state scheme, stored: %s, provided: %s", stored, scheme)
-}
-
// MakeTrieDatabase constructs a trie database based on the configured scheme.
func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, readOnly bool) *trie.Database {
config := &trie.Config{
Preimages: preimage,
}
- scheme, err := ParseStateScheme(ctx, disk)
+ scheme, err := rawdb.ParseStateScheme(ctx.String(StateSchemeFlag.Name), disk)
if err != nil {
Fatalf("%v", err)
}
diff --git a/common/types.go b/common/types.go
index bf74e4371..7184b2b11 100644
--- a/common/types.go
+++ b/common/types.go
@@ -239,9 +239,6 @@ func (a Address) Cmp(other Address) int {
// Bytes gets the string representation of the underlying address.
func (a Address) Bytes() []byte { return a[:] }
-// Hash converts an address to a hash by left-padding it with zeros.
-func (a Address) Hash() Hash { return BytesToHash(a[:]) }
-
// Big converts an address to a big integer.
func (a Address) Big() *big.Int { return new(big.Int).SetBytes(a[:]) }
diff --git a/core/asm/asm.go b/core/asm/asm.go
index 7c1e14ec0..294eb6ffa 100644
--- a/core/asm/asm.go
+++ b/core/asm/asm.go
@@ -34,7 +34,7 @@ type instructionIterator struct {
started bool
}
-// NewInstructionIterator create a new instruction iterator.
+// NewInstructionIterator creates a new instruction iterator.
func NewInstructionIterator(code []byte) *instructionIterator {
it := new(instructionIterator)
it.code = code
diff --git a/core/asm/compiler.go b/core/asm/compiler.go
index 75bf726c9..02c589b2c 100644
--- a/core/asm/compiler.go
+++ b/core/asm/compiler.go
@@ -49,7 +49,7 @@ func NewCompiler(debug bool) *Compiler {
}
}
-// Feed feeds tokens in to ch and are interpreted by
+// Feed feeds tokens into ch and are interpreted by
// the compiler.
//
// feed is the first pass in the compile stage as it collects the used labels in the
diff --git a/core/asm/lex_test.go b/core/asm/lex_test.go
index 173031521..1e62d776d 100644
--- a/core/asm/lex_test.go
+++ b/core/asm/lex_test.go
@@ -72,12 +72,12 @@ func TestLexer(t *testing.T) {
input: "@label123",
tokens: []token{{typ: lineStart}, {typ: label, text: "label123"}, {typ: eof}},
},
- // comment after label
+ // Comment after label
{
input: "@label123 ;; comment",
tokens: []token{{typ: lineStart}, {typ: label, text: "label123"}, {typ: eof}},
},
- // comment after instruction
+ // Comment after instruction
{
input: "push 3 ;; comment\nadd",
tokens: []token{{typ: lineStart}, {typ: element, text: "push"}, {typ: number, text: "3"}, {typ: lineEnd, text: "\n"}, {typ: lineStart, lineno: 1}, {typ: element, lineno: 1, text: "add"}, {typ: eof, lineno: 1}},
diff --git a/core/blockchain.go b/core/blockchain.go
index f28e4ca71..9b59b7da5 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -576,7 +576,7 @@ func (bc *BlockChain) SetHead(head uint64) error {
header := bc.CurrentBlock()
block := bc.GetBlock(header.Hash(), header.Number.Uint64())
if block == nil {
- // This should never happen. In practice, previsouly currentBlock
+ // This should never happen. In practice, previously currentBlock
// contained the entire block whereas now only a "marker", so there
// is an ever so slight chance for a race we should handle.
log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash())
@@ -598,7 +598,7 @@ func (bc *BlockChain) SetHeadWithTimestamp(timestamp uint64) error {
header := bc.CurrentBlock()
block := bc.GetBlock(header.Hash(), header.Number.Uint64())
if block == nil {
- // This should never happen. In practice, previsouly currentBlock
+ // This should never happen. In practice, previously currentBlock
// contained the entire block whereas now only a "marker", so there
// is an ever so slight chance for a race we should handle.
log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash())
@@ -982,7 +982,7 @@ func (bc *BlockChain) stopWithoutSaving() {
func (bc *BlockChain) Stop() {
bc.stopWithoutSaving()
- // Ensure that the entirety of the state snapshot is journalled to disk.
+ // Ensure that the entirety of the state snapshot is journaled to disk.
var snapBase common.Hash
if bc.snaps != nil {
var err error
@@ -1193,7 +1193,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
// a background routine to re-indexed all indices in [ancients - txlookupLimit, ancients)
// range. In this case, all tx indices of newly imported blocks should be
// generated.
- var batch = bc.db.NewBatch()
+ batch := bc.db.NewBatch()
for i, block := range blockChain {
if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit {
rawdb.WriteTxLookupEntriesByBlock(batch, block)
@@ -2618,7 +2618,7 @@ func (bc *BlockChain) SetTrieFlushInterval(interval time.Duration) {
bc.flushInterval.Store(int64(interval))
}
-// GetTrieFlushInterval gets the in-memroy tries flush interval
+// GetTrieFlushInterval gets the in-memory tries flush interval
func (bc *BlockChain) GetTrieFlushInterval() time.Duration {
return time.Duration(bc.flushInterval.Load())
}
diff --git a/core/bloombits/matcher.go b/core/bloombits/matcher.go
index cf799c832..6a4cfb23d 100644
--- a/core/bloombits/matcher.go
+++ b/core/bloombits/matcher.go
@@ -58,7 +58,7 @@ type partialMatches struct {
// bit with the given number of fetch elements, or a response for such a request.
// It can also have the actual results set to be used as a delivery data struct.
//
-// The contest and error fields are used by the light client to terminate matching
+// The context and error fields are used by the light client to terminate matching
// early if an error is encountered on some path of the pipeline.
type Retrieval struct {
Bit uint
@@ -389,7 +389,7 @@ func (m *Matcher) distributor(dist chan *request, session *MatcherSession) {
shutdown = session.quit // Shutdown request channel, will gracefully wait for pending requests
)
- // assign is a helper method fo try to assign a pending bit an actively
+ // assign is a helper method to try to assign a pending bit an actively
// listening servicer, or schedule it up for later when one arrives.
assign := func(bit uint) {
select {
diff --git a/core/bloombits/matcher_test.go b/core/bloombits/matcher_test.go
index 36764c3f1..7f3d5f279 100644
--- a/core/bloombits/matcher_test.go
+++ b/core/bloombits/matcher_test.go
@@ -85,7 +85,7 @@ func TestMatcherRandom(t *testing.T) {
}
// Tests that the matcher can properly find matches if the starting block is
-// shifter from a multiple of 8. This is needed to cover an optimisation with
+// shifted from a multiple of 8. This is needed to cover an optimisation with
// bitset matching https://github.com/ethereum/go-ethereum/issues/15309.
func TestMatcherShifted(t *testing.T) {
t.Parallel()
@@ -106,7 +106,7 @@ func TestWildcardMatcher(t *testing.T) {
testMatcherBothModes(t, nil, 0, 10000, 0)
}
-// makeRandomIndexes generates a random filter system, composed on multiple filter
+// makeRandomIndexes generates a random filter system, composed of multiple filter
// criteria, each having one bloom list component for the address and arbitrarily
// many topic bloom list components.
func makeRandomIndexes(lengths []int, max int) [][]bloomIndexes {
diff --git a/core/chain_makers.go b/core/chain_makers.go
index c9c880dd6..3608329a1 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -88,11 +88,6 @@ func (b *BlockGen) SetPoS() {
b.header.Difficulty = new(big.Int)
}
-// SetBlobGas sets the data gas used by the blob in the generated block.
-func (b *BlockGen) SetBlobGas(blobGasUsed uint64) {
- b.header.BlobGasUsed = &blobGasUsed
-}
-
// addTx adds a transaction to the generated block. If no coinbase has
// been set, the block's coinbase is set to the zero address.
//
@@ -111,6 +106,9 @@ func (b *BlockGen) addTx(bc *BlockChain, vmConfig vm.Config, tx *types.Transacti
}
b.txs = append(b.txs, tx)
b.receipts = append(b.receipts, receipt)
+ if b.header.BlobGasUsed != nil {
+ *b.header.BlobGasUsed += receipt.BlobGasUsed
+ }
}
// AddTx adds a transaction to the generated block. If no coinbase has
diff --git a/core/evm.go b/core/evm.go
index 104f2c09d..46dcb3146 100644
--- a/core/evm.go
+++ b/core/evm.go
@@ -21,6 +21,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
)
@@ -40,6 +41,7 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
var (
beneficiary common.Address
baseFee *big.Int
+ blobBaseFee *big.Int
random *common.Hash
)
@@ -52,21 +54,24 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
if header.BaseFee != nil {
baseFee = new(big.Int).Set(header.BaseFee)
}
+ if header.ExcessBlobGas != nil {
+ blobBaseFee = eip4844.CalcBlobFee(*header.ExcessBlobGas)
+ }
if header.Difficulty.Cmp(common.Big0) == 0 {
random = &header.MixDigest
}
return vm.BlockContext{
- CanTransfer: CanTransfer,
- Transfer: Transfer,
- GetHash: GetHashFn(header, chain),
- Coinbase: beneficiary,
- BlockNumber: new(big.Int).Set(header.Number),
- Time: header.Time,
- Difficulty: new(big.Int).Set(header.Difficulty),
- BaseFee: baseFee,
- GasLimit: header.GasLimit,
- Random: random,
- ExcessBlobGas: header.ExcessBlobGas,
+ CanTransfer: CanTransfer,
+ Transfer: Transfer,
+ GetHash: GetHashFn(header, chain),
+ Coinbase: beneficiary,
+ BlockNumber: new(big.Int).Set(header.Number),
+ Time: header.Time,
+ Difficulty: new(big.Int).Set(header.Difficulty),
+ BaseFee: baseFee,
+ BlobBaseFee: blobBaseFee,
+ GasLimit: header.GasLimit,
+ Random: random,
}
}
diff --git a/core/genesis.go b/core/genesis.go
index baace3f99..0f1e8baf4 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -120,8 +120,8 @@ func (ga *GenesisAlloc) UnmarshalJSON(data []byte) error {
return nil
}
-// deriveHash computes the state root according to the genesis specification.
-func (ga *GenesisAlloc) deriveHash() (common.Hash, error) {
+// hash computes the state root according to the genesis specification.
+func (ga *GenesisAlloc) hash() (common.Hash, error) {
// Create an ephemeral in-memory database for computing hash,
// all the derived states will be discarded to not pollute disk.
db := state.NewDatabase(rawdb.NewMemoryDatabase())
@@ -142,9 +142,9 @@ func (ga *GenesisAlloc) deriveHash() (common.Hash, error) {
return statedb.Commit(0, false)
}
-// flush is very similar with deriveHash, but the main difference is
-// all the generated states will be persisted into the given database.
-// Also, the genesis state specification will be flushed as well.
+// flush is very similar with hash, but the main difference is all the generated
+// states will be persisted into the given database. Also, the genesis state
+// specification will be flushed as well.
func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhash common.Hash) error {
statedb, err := state.New(types.EmptyRootHash, state.NewDatabaseWithNodeDB(db, triedb), nil)
if err != nil {
@@ -179,39 +179,6 @@ func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhas
return nil
}
-// CommitGenesisState loads the stored genesis state with the given block
-// hash and commits it into the provided trie database.
-func CommitGenesisState(db ethdb.Database, triedb *trie.Database, blockhash common.Hash) error {
- var alloc GenesisAlloc
- blob := rawdb.ReadGenesisStateSpec(db, blockhash)
- if len(blob) != 0 {
- if err := alloc.UnmarshalJSON(blob); err != nil {
- return err
- }
- } else {
- // Genesis allocation is missing and there are several possibilities:
- // the node is legacy which doesn't persist the genesis allocation or
- // the persisted allocation is just lost.
- // - supported networks(mainnet, testnets), recover with defined allocations
- // - private network, can't recover
- var genesis *Genesis
- switch blockhash {
- case params.MainnetGenesisHash:
- genesis = DefaultGenesisBlock()
- case params.GoerliGenesisHash:
- genesis = DefaultGoerliGenesisBlock()
- case params.SepoliaGenesisHash:
- genesis = DefaultSepoliaGenesisBlock()
- }
- if genesis != nil {
- alloc = genesis.Alloc
- } else {
- return errors.New("not found")
- }
- }
- return alloc.flush(db, triedb, blockhash)
-}
-
// GenesisAccount is an account in the state of the genesis block.
type GenesisAccount struct {
Code []byte `json:"code,omitempty"`
@@ -444,7 +411,7 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
// ToBlock returns the genesis block according to genesis specification.
func (g *Genesis) ToBlock() *types.Block {
- root, err := g.Alloc.deriveHash()
+ root, err := g.Alloc.hash()
if err != nil {
panic(err)
}
diff --git a/core/genesis_test.go b/core/genesis_test.go
index 6a0f2df08..fac88ff37 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -231,7 +231,7 @@ func TestReadWriteGenesisAlloc(t *testing.T) {
{1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}},
{2}: {Balance: big.NewInt(2), Storage: map[common.Hash]common.Hash{{2}: {2}}},
}
- hash, _ = alloc.deriveHash()
+ hash, _ = alloc.hash()
)
blob, _ := json.Marshal(alloc)
rawdb.WriteGenesisStateSpec(db, hash, blob)
diff --git a/core/rawdb/accessors_trie.go b/core/rawdb/accessors_trie.go
index ea437b811..78f1a70b1 100644
--- a/core/rawdb/accessors_trie.go
+++ b/core/rawdb/accessors_trie.go
@@ -305,3 +305,38 @@ func ReadStateScheme(db ethdb.Reader) string {
}
return HashScheme
}
+
+// ParseStateScheme checks if the specified state scheme is compatible with
+// the stored state.
+//
+// - If the provided scheme is none, use the scheme consistent with persistent
+// state, or fallback to hash-based scheme if state is empty.
+//
+// - If the provided scheme is hash, use hash-based scheme or error out if not
+// compatible with persistent state scheme.
+//
+// - If the provided scheme is path: use path-based scheme or error out if not
+// compatible with persistent state scheme.
+func ParseStateScheme(provided string, disk ethdb.Database) (string, error) {
+ // If state scheme is not specified, use the scheme consistent
+ // with persistent state, or fallback to hash mode if database
+ // is empty.
+ stored := ReadStateScheme(disk)
+ if provided == "" {
+ if stored == "" {
+ // use default scheme for empty database, flip it when
+ // path mode is chosen as default
+ log.Info("State schema set to default", "scheme", "hash")
+ return HashScheme, nil
+ }
+ log.Info("State scheme set to already existing", "scheme", stored)
+ return stored, nil // reuse scheme of persistent scheme
+ }
+ // If state scheme is specified, ensure it's compatible with
+ // persistent state.
+ if stored == "" || provided == stored {
+ log.Info("State scheme set by user", "scheme", provided)
+ return provided, nil
+ }
+ return "", fmt.Errorf("incompatible state scheme, stored: %s, provided: %s", stored, provided)
+}
diff --git a/core/rawdb/chain_freezer.go b/core/rawdb/chain_freezer.go
index 22dbda4a2..cbfaf5b9e 100644
--- a/core/rawdb/chain_freezer.go
+++ b/core/rawdb/chain_freezer.go
@@ -200,7 +200,7 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) {
}
batch.Reset()
- // Step into the future and delete and dangling side chains
+ // Step into the future and delete any dangling side chains
if frozen > 0 {
tip := frozen
for len(dangling) > 0 {
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index e97eeb2aa..0c7cf9f11 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -34,7 +34,7 @@ import (
"github.com/olekukonko/tablewriter"
)
-// freezerdb is a database wrapper that enabled freezer data retrievals.
+// freezerdb is a database wrapper that enables freezer data retrievals.
type freezerdb struct {
ancientRoot string
ethdb.KeyValueStore
@@ -141,7 +141,7 @@ func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error)
// Unlike other ancient-related methods, this method does not return
// errNotSupported when invoked.
// The reason for this is that the caller might want to do several things:
- // 1. Check if something is in freezer,
+ // 1. Check if something is in the freezer,
// 2. If not, check leveldb.
//
// This will work, since the ancient-checks inside 'fn' will return errors,
@@ -209,7 +209,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
// of the freezer and database. Ensure that we don't shoot ourselves in the foot
// by serving up conflicting data, leading to both datastores getting corrupted.
//
- // - If both the freezer and key-value store is empty (no genesis), we just
+ // - If both the freezer and key-value store are empty (no genesis), we just
// initialized a new empty freezer, so everything's fine.
// - If the key-value store is empty, but the freezer is not, we need to make
// sure the user's genesis matches the freezer. That will be checked in the
@@ -218,7 +218,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
// - If neither the key-value store nor the freezer is empty, cross validate
// the genesis hashes to make sure they are compatible. If they are, also
// ensure that there's no gap between the freezer and subsequently leveldb.
- // - If the key-value store is not empty, but the freezer is we might just be
+ // - If the key-value store is not empty, but the freezer is, we might just be
// upgrading to the freezer release, or we might have had a small chain and
// not frozen anything yet. Ensure that no blocks are missing yet from the
// key-value store, since that would mean we already had an old freezer.
@@ -634,7 +634,7 @@ func printChainMetadata(db ethdb.KeyValueStore) {
fmt.Fprintf(os.Stderr, "\n\n")
}
-// ReadChainMetadata returns a set of key/value pairs that contains informatin
+// ReadChainMetadata returns a set of key/value pairs that contains information
// about the database chain status. This can be used for diagnostic purposes
// when investigating the state of the node.
func ReadChainMetadata(db ethdb.KeyValueStore) [][]string {
diff --git a/core/rawdb/databases_64bit.go b/core/rawdb/databases_64bit.go
index 1593e89bf..e9f9332ad 100644
--- a/core/rawdb/databases_64bit.go
+++ b/core/rawdb/databases_64bit.go
@@ -23,7 +23,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb/pebble"
)
-// Pebble is unsuported on 32bit architecture
+// Pebble is unsupported on 32bit architecture
const PebbleEnabled = true
// NewPebbleDBDatabase creates a persistent key-value database without a freezer
diff --git a/core/rawdb/table.go b/core/rawdb/table.go
index 1895f61da..19e4ed5b5 100644
--- a/core/rawdb/table.go
+++ b/core/rawdb/table.go
@@ -219,7 +219,7 @@ func (b *tableBatch) Put(key, value []byte) error {
return b.batch.Put(append([]byte(b.prefix), key...), value)
}
-// Delete inserts the a key removal into the batch for later committing.
+// Delete inserts a key removal into the batch for later committing.
func (b *tableBatch) Delete(key []byte) error {
return b.batch.Delete(append([]byte(b.prefix), key...))
}
diff --git a/core/state/snapshot/conversion.go b/core/state/snapshot/conversion.go
index 1e683f76c..321bfbc6a 100644
--- a/core/state/snapshot/conversion.go
+++ b/core/state/snapshot/conversion.go
@@ -364,11 +364,11 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou
func stackTrieGenerate(db ethdb.KeyValueWriter, scheme string, owner common.Hash, in chan trieKV, out chan common.Hash) {
var nodeWriter trie.NodeWriteFunc
if db != nil {
- nodeWriter = func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
+ nodeWriter = func(path []byte, hash common.Hash, blob []byte) {
rawdb.WriteTrieNode(db, owner, path, hash, blob, scheme)
}
}
- t := trie.NewStackTrieWithOwner(nodeWriter, owner)
+ t := trie.NewStackTrie(nodeWriter)
for leaf := range in {
t.Update(leaf.key[:], leaf.value)
}
diff --git a/core/state/statedb.go b/core/state/statedb.go
index b99a9890f..52b09fed9 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -978,7 +978,7 @@ func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (boo
nodes = trienode.NewNodeSet(addrHash)
slots = make(map[common.Hash][]byte)
)
- stack := trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
+ stack := trie.NewStackTrie(func(path []byte, hash common.Hash, blob []byte) {
nodes.AddNode(path, trienode.NewDeleted())
size += common.StorageSize(len(path))
})
diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go
index 4e8fd1e10..772c698dd 100644
--- a/core/state/trie_prefetcher.go
+++ b/core/state/trie_prefetcher.go
@@ -37,7 +37,7 @@ var (
type triePrefetcher struct {
db Database // Database to fetch trie nodes through
root common.Hash // Root hash of the account trie for metrics
- fetches map[string]Trie // Partially or fully fetcher tries
+ fetches map[string]Trie // Partially or fully fetched tries. Only populated for inactive copies.
fetchers map[string]*subfetcher // Subfetchers for each trie
deliveryMissMeter metrics.Meter
@@ -197,7 +197,10 @@ func (p *triePrefetcher) used(owner common.Hash, root common.Hash, used [][]byte
// trieID returns an unique trie identifier consists the trie owner and root hash.
func (p *triePrefetcher) trieID(owner common.Hash, root common.Hash) string {
- return string(append(owner.Bytes(), root.Bytes()...))
+ trieID := make([]byte, common.HashLength*2)
+ copy(trieID, owner.Bytes())
+ copy(trieID[common.HashLength:], root.Bytes())
+ return string(trieID)
}
// subfetcher is a trie fetcher goroutine responsible for pulling entries for a
diff --git a/core/state_processor.go b/core/state_processor.go
index 97130c700..f2e505896 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -24,7 +24,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc"
- "github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -169,7 +168,7 @@ func applyTransaction(msg *Message, config *params.ChainConfig, gp *GasPool, sta
if tx.Type() == types.BlobTxType {
receipt.BlobGasUsed = uint64(len(tx.BlobHashes()) * params.BlobTxBlobGasPerBlob)
- receipt.BlobGasPrice = eip4844.CalcBlobFee(*evm.Context.ExcessBlobGas)
+ receipt.BlobGasPrice = evm.Context.BlobBaseFee
}
// If the transaction created a contract, store the creation address in the receipt.
diff --git a/core/state_transition.go b/core/state_transition.go
index cb9287a82..1576b4e82 100644
--- a/core/state_transition.go
+++ b/core/state_transition.go
@@ -24,7 +24,6 @@ import (
"github.com/ethereum/go-ethereum/common"
cmath "github.com/ethereum/go-ethereum/common/math"
- "github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params"
@@ -248,7 +247,7 @@ func (st *StateTransition) buyGas() error {
balanceCheck.Add(balanceCheck, blobBalanceCheck)
// Pay for blobGasUsed * actual blob fee
blobFee := new(big.Int).SetUint64(blobGas)
- blobFee.Mul(blobFee, eip4844.CalcBlobFee(*st.evm.Context.ExcessBlobGas))
+ blobFee.Mul(blobFee, st.evm.Context.BlobBaseFee)
mgval.Add(mgval, blobFee)
}
}
@@ -329,7 +328,7 @@ func (st *StateTransition) preCheck() error {
if st.evm.ChainConfig().IsCancun(st.evm.Context.BlockNumber, st.evm.Context.Time) {
if st.blobGasUsed() > 0 {
// Check that the user is paying at least the current blob fee
- blobFee := eip4844.CalcBlobFee(*st.evm.Context.ExcessBlobGas)
+ blobFee := st.evm.Context.BlobBaseFee
if st.msg.BlobGasFeeCap.Cmp(blobFee) < 0 {
return fmt.Errorf("%w: address %v have %v want %v", ErrBlobFeeCapTooLow, st.msg.From.Hex(), st.msg.BlobGasFeeCap, blobFee)
}
diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go
index 36916c3f0..32c6c0e8f 100644
--- a/core/txpool/blobpool/blobpool.go
+++ b/core/txpool/blobpool/blobpool.go
@@ -97,6 +97,8 @@ type blobTxMeta struct {
execTipCap *uint256.Int // Needed to prioritize inclusion order across accounts and validate replacement price bump
execFeeCap *uint256.Int // Needed to validate replacement price bump
blobFeeCap *uint256.Int // Needed to validate replacement price bump
+ execGas uint64 // Needed to check inclusion validity before reading the blob
+ blobGas uint64 // Needed to check inclusion validity before reading the blob
basefeeJumps float64 // Absolute number of 1559 fee adjustments needed to reach the tx's fee cap
blobfeeJumps float64 // Absolute number of 4844 fee adjustments needed to reach the tx's blob fee cap
@@ -118,6 +120,8 @@ func newBlobTxMeta(id uint64, size uint32, tx *types.Transaction) *blobTxMeta {
execTipCap: uint256.MustFromBig(tx.GasTipCap()),
execFeeCap: uint256.MustFromBig(tx.GasFeeCap()),
blobFeeCap: uint256.MustFromBig(tx.BlobGasFeeCap()),
+ execGas: tx.Gas(),
+ blobGas: tx.BlobGas(),
}
meta.basefeeJumps = dynamicFeeJumps(meta.execFeeCap)
meta.blobfeeJumps = dynamicFeeJumps(meta.blobFeeCap)
@@ -307,8 +311,8 @@ type BlobPool struct {
spent map[common.Address]*uint256.Int // Expenditure tracking for individual accounts
evict *evictHeap // Heap of cheapest accounts for eviction when full
- eventFeed event.Feed // Event feed to send out new tx events on pool inclusion
- eventScope event.SubscriptionScope // Event scope to track and mass unsubscribe on termination
+ discoverFeed event.Feed // Event feed to send out new tx events on pool discovery (reorg excluded)
+ insertFeed event.Feed // Event feed to send out new tx events on pool inclusion (reorg included)
lock sync.RWMutex // Mutex protecting the pool during reorg handling
}
@@ -436,8 +440,6 @@ func (p *BlobPool) Close() error {
if err := p.store.Close(); err != nil {
errs = append(errs, err)
}
- p.eventScope.Close()
-
switch {
case errs == nil:
return nil
@@ -758,15 +760,21 @@ func (p *BlobPool) Reset(oldHead, newHead *types.Header) {
// Run the reorg between the old and new head and figure out which accounts
// need to be rechecked and which transactions need to be readded
if reinject, inclusions := p.reorg(oldHead, newHead); reinject != nil {
+ var adds []*types.Transaction
for addr, txs := range reinject {
// Blindly push all the lost transactions back into the pool
for _, tx := range txs {
- p.reinject(addr, tx.Hash())
+ if err := p.reinject(addr, tx.Hash()); err == nil {
+ adds = append(adds, tx.WithoutBlobTxSidecar())
+ }
}
// Recheck the account's pooled transactions to drop included and
// invalidated one
p.recheck(addr, inclusions)
}
+ if len(adds) > 0 {
+ p.insertFeed.Send(core.NewTxsEvent{Txs: adds})
+ }
}
// Flush out any blobs from limbo that are older than the latest finality
if p.chain.Config().IsCancun(p.head.Number, p.head.Time) {
@@ -921,13 +929,13 @@ func (p *BlobPool) reorg(oldHead, newHead *types.Header) (map[common.Address][]*
// Note, the method will not initialize the eviction cache values as those will
// be done once for all transactions belonging to an account after all individual
// transactions are injected back into the pool.
-func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) {
+func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) error {
// Retrieve the associated blob from the limbo. Without the blobs, we cannot
// add the transaction back into the pool as it is not mineable.
tx, err := p.limbo.pull(txhash)
if err != nil {
log.Error("Blobs unavailable, dropping reorged tx", "err", err)
- return
+ return err
}
// TODO: seems like an easy optimization here would be getting the serialized tx
// from limbo instead of re-serializing it here.
@@ -936,12 +944,12 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) {
blob, err := rlp.EncodeToBytes(tx)
if err != nil {
log.Error("Failed to encode transaction for storage", "hash", tx.Hash(), "err", err)
- return
+ return err
}
id, err := p.store.Put(blob)
if err != nil {
log.Error("Failed to write transaction into storage", "hash", tx.Hash(), "err", err)
- return
+ return err
}
// Update the indixes and metrics
@@ -949,7 +957,7 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) {
if _, ok := p.index[addr]; !ok {
if err := p.reserve(addr, true); err != nil {
log.Warn("Failed to reserve account for blob pool", "tx", tx.Hash(), "from", addr, "err", err)
- return
+ return err
}
p.index[addr] = []*blobTxMeta{meta}
p.spent[addr] = meta.costCap
@@ -960,6 +968,7 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) {
}
p.lookup[meta.hash] = meta.id
p.stored += uint64(meta.size)
+ return nil
}
// SetGasTip implements txpool.SubPool, allowing the blob pool's gas requirements
@@ -1154,9 +1163,19 @@ func (p *BlobPool) Get(hash common.Hash) *types.Transaction {
// Add inserts a set of blob transactions into the pool if they pass validation (both
// consensus validity and pool restictions).
func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error {
- errs := make([]error, len(txs))
+ var (
+ adds = make([]*types.Transaction, 0, len(txs))
+ errs = make([]error, len(txs))
+ )
for i, tx := range txs {
errs[i] = p.add(tx)
+ if errs[i] == nil {
+ adds = append(adds, tx.WithoutBlobTxSidecar())
+ }
+ }
+ if len(adds) > 0 {
+ p.discoverFeed.Send(core.NewTxsEvent{Txs: adds})
+ p.insertFeed.Send(core.NewTxsEvent{Txs: adds})
}
return errs
}
@@ -1384,6 +1403,8 @@ func (p *BlobPool) Pending(enforceTips bool) map[common.Address][]*txpool.LazyTr
Time: time.Now(), // TODO(karalabe): Maybe save these and use that?
GasFeeCap: tx.execFeeCap.ToBig(),
GasTipCap: tx.execTipCap.ToBig(),
+ Gas: tx.execGas,
+ BlobGas: tx.blobGas,
})
}
if len(lazies) > 0 {
@@ -1468,10 +1489,14 @@ func (p *BlobPool) updateLimboMetrics() {
limboSlotusedGauge.Update(int64(slotused))
}
-// SubscribeTransactions registers a subscription of NewTxsEvent and
-// starts sending event to the given channel.
-func (p *BlobPool) SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription {
- return p.eventScope.Track(p.eventFeed.Subscribe(ch))
+// SubscribeTransactions registers a subscription for new transaction events,
+// supporting feeding only newly seen or also resurrected transactions.
+func (p *BlobPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription {
+ if reorgs {
+ return p.insertFeed.Subscribe(ch)
+ } else {
+ return p.discoverFeed.Subscribe(ch)
+ }
}
// Nonce returns the next nonce of an account, with all transactions executable
diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go
index 2430028f9..e71204185 100644
--- a/core/txpool/legacypool/legacypool.go
+++ b/core/txpool/legacypool/legacypool.go
@@ -208,7 +208,6 @@ type LegacyPool struct {
chain BlockChain
gasTip atomic.Pointer[big.Int]
txFeed event.Feed
- scope event.SubscriptionScope
signer types.Signer
mu sync.RWMutex
@@ -404,9 +403,6 @@ func (pool *LegacyPool) loop() {
// Close terminates the transaction pool.
func (pool *LegacyPool) Close() error {
- // Unsubscribe all subscriptions registered from txpool
- pool.scope.Close()
-
// Terminate the pool reorger and return
close(pool.reorgShutdownCh)
pool.wg.Wait()
@@ -425,10 +421,14 @@ func (pool *LegacyPool) Reset(oldHead, newHead *types.Header) {
<-wait
}
-// SubscribeTransactions registers a subscription of NewTxsEvent and
-// starts sending event to the given channel.
-func (pool *LegacyPool) SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription {
- return pool.scope.Track(pool.txFeed.Subscribe(ch))
+// SubscribeTransactions registers a subscription for new transaction events,
+// supporting feeding only newly seen or also resurrected transactions.
+func (pool *LegacyPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription {
+ // The legacy pool has a very messed up internal shuffling, so it's kind of
+ // hard to separate newly discovered transactions from resurrected ones. This
+ // is because the new txs are added to the queue, resurrected ones too and
+ // reorgs run lazily, so separating the two would need a marker.
+ return pool.txFeed.Subscribe(ch)
}
// SetGasTip updates the minimum gas tip required by the transaction pool for a
@@ -552,6 +552,8 @@ func (pool *LegacyPool) Pending(enforceTips bool) map[common.Address][]*txpool.L
Time: txs[i].Time(),
GasFeeCap: txs[i].GasFeeCap(),
GasTipCap: txs[i].GasTipCap(),
+ Gas: txs[i].Gas(),
+ BlobGas: txs[i].BlobGas(),
}
}
pending[addr] = lazies
diff --git a/core/txpool/legacypool/list.go b/core/txpool/legacypool/list.go
index 384fa7b61..05ae0b58c 100644
--- a/core/txpool/legacypool/list.go
+++ b/core/txpool/legacypool/list.go
@@ -205,7 +205,7 @@ func (m *sortedMap) Remove(nonce uint64) bool {
// removed from the list.
//
// Note, all transactions with nonces lower than start will also be returned to
-// prevent getting into and invalid state. This is not something that should ever
+// prevent getting into an invalid state. This is not something that should ever
// happen but better to be self correcting than failing!
func (m *sortedMap) Ready(start uint64) types.Transactions {
// Short circuit if no transactions are available
@@ -421,7 +421,7 @@ func (l *list) Remove(tx *types.Transaction) (bool, types.Transactions) {
// removed from the list.
//
// Note, all transactions with nonces lower than start will also be returned to
-// prevent getting into and invalid state. This is not something that should ever
+// prevent getting into an invalid state. This is not something that should ever
// happen but better to be self correcting than failing!
func (l *list) Ready(start uint64) types.Transactions {
txs := l.txs.Ready(start)
diff --git a/core/txpool/subpool.go b/core/txpool/subpool.go
index 85312c431..de05b38d4 100644
--- a/core/txpool/subpool.go
+++ b/core/txpool/subpool.go
@@ -30,13 +30,16 @@ import (
// enough for the miner and other APIs to handle large batches of transactions;
// and supports pulling up the entire transaction when really needed.
type LazyTransaction struct {
- Pool SubPool // Transaction subpool to pull the real transaction up
+ Pool LazyResolver // Transaction resolver to pull the real transaction up
Hash common.Hash // Transaction hash to pull up if needed
Tx *types.Transaction // Transaction if already resolved
Time time.Time // Time when the transaction was first seen
GasFeeCap *big.Int // Maximum fee per gas the transaction may consume
GasTipCap *big.Int // Maximum miner tip per gas the transaction can pay
+
+ Gas uint64 // Amount of gas required by the transaction
+ BlobGas uint64 // Amount of blob gas required by the transaction
}
// Resolve retrieves the full transaction belonging to a lazy handle if it is still
@@ -48,6 +51,14 @@ func (ltx *LazyTransaction) Resolve() *types.Transaction {
return ltx.Tx
}
+// LazyResolver is a minimal interface needed for a transaction pool to satisfy
+// resolving lazy transactions. It's mostly a helper to avoid the entire sub-
+// pool being injected into the lazy transaction.
+type LazyResolver interface {
+ // Get returns a transaction if it is contained in the pool, or nil otherwise.
+ Get(hash common.Hash) *types.Transaction
+}
+
// AddressReserver is passed by the main transaction pool to subpools, so they
// may request (and relinquish) exclusive access to certain addresses.
type AddressReserver func(addr common.Address, reserve bool) error
@@ -99,8 +110,10 @@ type SubPool interface {
// account and sorted by nonce.
Pending(enforceTips bool) map[common.Address][]*LazyTransaction
- // SubscribeTransactions subscribes to new transaction events.
- SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription
+ // SubscribeTransactions subscribes to new transaction events. The subscriber
+ // can decide whether to receive notifications only for newly seen transactions
+ // or also for reorged out ones.
+ SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription
// Nonce returns the next nonce of an account, with all transactions executable
// by the pool already applied on top.
diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go
index cacae7bc0..0d4e05da4 100644
--- a/core/txpool/txpool.go
+++ b/core/txpool/txpool.go
@@ -155,13 +155,15 @@ func (p *TxPool) Close() error {
if err := <-errc; err != nil {
errs = append(errs, err)
}
-
// Terminate each subpool
for _, subpool := range p.subpools {
if err := subpool.Close(); err != nil {
errs = append(errs, err)
}
}
+ // Unsubscribe anyone still listening for tx events
+ p.subs.Close()
+
if len(errs) > 0 {
return fmt.Errorf("subpool close errors: %v", errs)
}
@@ -316,12 +318,12 @@ func (p *TxPool) Pending(enforceTips bool) map[common.Address][]*LazyTransaction
return txs
}
-// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and starts sending
-// events to the given channel.
-func (p *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
+// SubscribeTransactions registers a subscription for new transaction events,
+// supporting feeding only newly seen or also resurrected transactions.
+func (p *TxPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription {
subs := make([]event.Subscription, len(p.subpools))
for i, subpool := range p.subpools {
- subs[i] = subpool.SubscribeTransactions(ch)
+ subs[i] = subpool.SubscribeTransactions(ch, reorgs)
}
return p.subs.Track(event.JoinSubscriptions(subs...))
}
diff --git a/core/types/hashing.go b/core/types/hashing.go
index 9a6a80ac5..224d7a87e 100644
--- a/core/types/hashing.go
+++ b/core/types/hashing.go
@@ -95,7 +95,7 @@ type DerivableList interface {
func encodeForDerive(list DerivableList, i int, buf *bytes.Buffer) []byte {
buf.Reset()
list.EncodeIndex(i, buf)
- // It's really unfortunate that we need to do perform this copy.
+ // It's really unfortunate that we need to perform this copy.
// StackTrie holds onto the values until Hash is called, so the values
// written to it must not alias.
return common.CopyBytes(buf.Bytes())
diff --git a/core/types/state_account.go b/core/types/state_account.go
index 314f4943e..ad07ca3f3 100644
--- a/core/types/state_account.go
+++ b/core/types/state_account.go
@@ -87,7 +87,7 @@ func SlimAccountRLP(account StateAccount) []byte {
return data
}
-// FullAccount decodes the data on the 'slim RLP' format and return
+// FullAccount decodes the data on the 'slim RLP' format and returns
// the consensus format account.
func FullAccount(data []byte) (*StateAccount, error) {
var slim SlimAccount
diff --git a/core/types/transaction.go b/core/types/transaction.go
index 78a1b9ba6..6f83c21d8 100644
--- a/core/types/transaction.go
+++ b/core/types/transaction.go
@@ -168,7 +168,7 @@ func (tx *Transaction) DecodeRLP(s *rlp.Stream) error {
}
// UnmarshalBinary decodes the canonical encoding of transactions.
-// It supports legacy RLP transactions and EIP2718 typed transactions.
+// It supports legacy RLP transactions and EIP-2718 typed transactions.
func (tx *Transaction) UnmarshalBinary(b []byte) error {
if len(b) > 0 && b[0] > 0x7f {
// It's a legacy transaction.
@@ -180,7 +180,7 @@ func (tx *Transaction) UnmarshalBinary(b []byte) error {
tx.setDecoded(&data, uint64(len(b)))
return nil
}
- // It's an EIP2718 typed transaction envelope.
+ // It's an EIP-2718 typed transaction envelope.
inner, err := tx.decodeTyped(b)
if err != nil {
return err
@@ -395,7 +395,7 @@ func (tx *Transaction) BlobGasFeeCap() *big.Int {
return nil
}
-// BlobHashes returns the hases of the blob commitments for blob transactions, nil otherwise.
+// BlobHashes returns the hashes of the blob commitments for blob transactions, nil otherwise.
func (tx *Transaction) BlobHashes() []common.Hash {
if blobtx, ok := tx.inner.(*BlobTx); ok {
return blobtx.BlobHashes
diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go
index cd57effcb..9e26642f7 100644
--- a/core/types/transaction_signing.go
+++ b/core/types/transaction_signing.go
@@ -57,7 +57,7 @@ func MakeSigner(config *params.ChainConfig, blockNumber *big.Int, blockTime uint
}
// LatestSigner returns the 'most permissive' Signer available for the given chain
-// configuration. Specifically, this enables support of all types of transacrions
+// configuration. Specifically, this enables support of all types of transactions
// when their respective forks are scheduled to occur at any block number (or time)
// in the chain config.
//
diff --git a/core/vm/contract.go b/core/vm/contract.go
index bb0902969..e4b03bd74 100644
--- a/core/vm/contract.go
+++ b/core/vm/contract.go
@@ -31,13 +31,13 @@ type ContractRef interface {
// AccountRef implements ContractRef.
//
// Account references are used during EVM initialisation and
-// it's primary use is to fetch addresses. Removing this object
+// its primary use is to fetch addresses. Removing this object
// proves difficult because of the cached jump destinations which
// are fetched from the parent contract (i.e. the caller), which
// is a ContractRef.
type AccountRef common.Address
-// Address casts AccountRef to a Address
+// Address casts AccountRef to an Address
func (ar AccountRef) Address() common.Address { return (common.Address)(ar) }
// Contract represents an ethereum contract in the state database. It contains
diff --git a/core/vm/eips.go b/core/vm/eips.go
index 704c1ce12..35f0a3f7c 100644
--- a/core/vm/eips.go
+++ b/core/vm/eips.go
@@ -282,9 +282,15 @@ func opBlobHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([
return nil, nil
}
-// enable4844 applies EIP-4844 (DATAHASH opcode)
+// opBlobBaseFee implements BLOBBASEFEE opcode
+func opBlobBaseFee(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+ blobBaseFee, _ := uint256.FromBig(interpreter.evm.Context.BlobBaseFee)
+ scope.Stack.push(blobBaseFee)
+ return nil, nil
+}
+
+// enable4844 applies EIP-4844 (BLOBHASH opcode)
func enable4844(jt *JumpTable) {
- // New opcode
jt[BLOBHASH] = &operation{
execute: opBlobHash,
constantGas: GasFastestStep,
@@ -293,6 +299,16 @@ func enable4844(jt *JumpTable) {
}
}
+// enable7516 applies EIP-7516 (BLOBBASEFEE opcode)
+func enable7516(jt *JumpTable) {
+ jt[BLOBBASEFEE] = &operation{
+ execute: opBlobBaseFee,
+ constantGas: GasQuickStep,
+ minStack: minStack(0, 1),
+ maxStack: maxStack(0, 1),
+ }
+}
+
// enable6780 applies EIP-6780 (deactivate SELFDESTRUCT)
func enable6780(jt *JumpTable) {
jt[SELFDESTRUCT] = &operation{
diff --git a/core/vm/evm.go b/core/vm/evm.go
index 40e2f3554..2c6cc7d48 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -67,14 +67,14 @@ type BlockContext struct {
GetHash GetHashFunc
// Block information
- Coinbase common.Address // Provides information for COINBASE
- GasLimit uint64 // Provides information for GASLIMIT
- BlockNumber *big.Int // Provides information for NUMBER
- Time uint64 // Provides information for TIME
- Difficulty *big.Int // Provides information for DIFFICULTY
- BaseFee *big.Int // Provides information for BASEFEE
- Random *common.Hash // Provides information for PREVRANDAO
- ExcessBlobGas *uint64 // ExcessBlobGas field in the header, needed to compute the data
+ Coinbase common.Address // Provides information for COINBASE
+ GasLimit uint64 // Provides information for GASLIMIT
+ BlockNumber *big.Int // Provides information for NUMBER
+ Time uint64 // Provides information for TIME
+ Difficulty *big.Int // Provides information for DIFFICULTY
+ BaseFee *big.Int // Provides information for BASEFEE
+ BlobBaseFee *big.Int // Provides information for BLOBBASEFEE
+ Random *common.Hash // Provides information for PREVRANDAO
}
// TxContext provides the EVM with information about a transaction.
diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go
index 5153c8b7a..4b141d8f9 100644
--- a/core/vm/gas_table.go
+++ b/core/vm/gas_table.go
@@ -104,7 +104,7 @@ func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySi
// Legacy rules should be applied if we are in Petersburg (removal of EIP-1283)
// OR Constantinople is not active
if evm.chainRules.IsPetersburg || !evm.chainRules.IsConstantinople {
- // This checks for 3 scenario's and calculates gas accordingly:
+ // This checks for 3 scenarios and calculates gas accordingly:
//
// 1. From a zero-value address to a non-zero value (NEW VALUE)
// 2. From a non-zero value address to a zero-value address (DELETE)
diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go
index 873337850..28da2e80e 100644
--- a/core/vm/interpreter.go
+++ b/core/vm/interpreter.go
@@ -45,7 +45,7 @@ type EVMInterpreter struct {
table *JumpTable
hasher crypto.KeccakState // Keccak256 hasher instance shared across opcodes
- hasherBuf common.Hash // Keccak256 hasher result array shared aross opcodes
+ hasherBuf common.Hash // Keccak256 hasher result array shared across opcodes
readOnly bool // Whether to throw on stateful modifications
returnData []byte // Last CALL's return data for subsequent reuse
diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go
index 702b18661..fb8725832 100644
--- a/core/vm/jump_table.go
+++ b/core/vm/jump_table.go
@@ -82,7 +82,8 @@ func validate(jt JumpTable) JumpTable {
func newCancunInstructionSet() JumpTable {
instructionSet := newShanghaiInstructionSet()
- enable4844(&instructionSet) // EIP-4844 (DATAHASH opcode)
+ enable4844(&instructionSet) // EIP-4844 (BLOBHASH opcode)
+ enable7516(&instructionSet) // EIP-7516 (BLOBBASEFEE opcode)
enable1153(&instructionSet) // EIP-1153 "Transient Storage"
enable5656(&instructionSet) // EIP-5656 (MCOPY opcode)
enable6780(&instructionSet) // EIP-6780 SELFDESTRUCT only in same transaction
diff --git a/core/vm/jump_table_export.go b/core/vm/jump_table_export.go
index 6ea47d63a..b74109da0 100644
--- a/core/vm/jump_table_export.go
+++ b/core/vm/jump_table_export.go
@@ -22,7 +22,7 @@ import (
"github.com/ethereum/go-ethereum/params"
)
-// LookupInstructionSet returns the instructionset for the fork configured by
+// LookupInstructionSet returns the instruction set for the fork configured by
// the rules.
func LookupInstructionSet(rules params.Rules) (JumpTable, error) {
switch {
@@ -56,7 +56,7 @@ func LookupInstructionSet(rules params.Rules) (JumpTable, error) {
return newFrontierInstructionSet(), nil
}
-// Stack returns the mininum and maximum stack requirements.
+// Stack returns the minimum and maximum stack requirements.
func (op *operation) Stack() (int, int) {
return op.minStack, op.maxStack
}
diff --git a/core/vm/opcodes.go b/core/vm/opcodes.go
index 2929b8ce9..a11cf05a1 100644
--- a/core/vm/opcodes.go
+++ b/core/vm/opcodes.go
@@ -101,6 +101,7 @@ const (
SELFBALANCE OpCode = 0x47
BASEFEE OpCode = 0x48
BLOBHASH OpCode = 0x49
+ BLOBBASEFEE OpCode = 0x4a
)
// 0x50 range - 'storage' and execution.
@@ -287,6 +288,7 @@ var opCodeToString = map[OpCode]string{
SELFBALANCE: "SELFBALANCE",
BASEFEE: "BASEFEE",
BLOBHASH: "BLOBHASH",
+ BLOBBASEFEE: "BLOBBASEFEE",
// 0x50 range - 'storage' and execution.
POP: "POP",
@@ -444,6 +446,7 @@ var stringToOp = map[string]OpCode{
"CHAINID": CHAINID,
"BASEFEE": BASEFEE,
"BLOBHASH": BLOBHASH,
+ "BLOBBASEFEE": BLOBBASEFEE,
"DELEGATECALL": DELEGATECALL,
"STATICCALL": STATICCALL,
"CODESIZE": CODESIZE,
diff --git a/core/vm/runtime/env.go b/core/vm/runtime/env.go
index 7e330e073..64aa550a2 100644
--- a/core/vm/runtime/env.go
+++ b/core/vm/runtime/env.go
@@ -37,6 +37,7 @@ func NewEnv(cfg *Config) *vm.EVM {
Difficulty: cfg.Difficulty,
GasLimit: cfg.GasLimit,
BaseFee: cfg.BaseFee,
+ BlobBaseFee: cfg.BlobBaseFee,
Random: cfg.Random,
}
diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go
index 480e5cec6..cfd7e4dbc 100644
--- a/core/vm/runtime/runtime.go
+++ b/core/vm/runtime/runtime.go
@@ -44,6 +44,7 @@ type Config struct {
Debug bool
EVMConfig vm.Config
BaseFee *big.Int
+ BlobBaseFee *big.Int
BlobHashes []common.Hash
Random *common.Hash
@@ -95,6 +96,9 @@ func setDefaults(cfg *Config) {
if cfg.BaseFee == nil {
cfg.BaseFee = big.NewInt(params.InitialBaseFee)
}
+ if cfg.BlobBaseFee == nil {
+ cfg.BlobBaseFee = new(big.Int)
+ }
}
// Execute executes the code using the input as call data during the execution.
diff --git a/eth/api_backend.go b/eth/api_backend.go
index a0c14f133..601e55515 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -334,7 +334,7 @@ func (b *EthAPIBackend) TxPool() *txpool.TxPool {
}
func (b *EthAPIBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
- return b.eth.txPool.SubscribeNewTxsEvent(ch)
+ return b.eth.txPool.SubscribeTransactions(ch, true)
}
func (b *EthAPIBackend) SyncProgress() ethereum.SyncProgress {
diff --git a/eth/backend.go b/eth/backend.go
index af0351779..c6787870c 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -133,8 +133,12 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
if err != nil {
return nil, err
}
+ scheme, err := rawdb.ParseStateScheme(config.StateScheme, chainDb)
+ if err != nil {
+ return nil, err
+ }
// Try to recover offline state pruning only in hash-based.
- if config.StateScheme == rawdb.HashScheme {
+ if scheme == rawdb.HashScheme {
if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb); err != nil {
log.Error("Failed to recover state", "error", err)
}
@@ -194,7 +198,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
SnapshotLimit: config.SnapshotCache,
Preimages: config.Preimages,
StateHistory: config.StateHistory,
- StateScheme: config.StateScheme,
+ StateScheme: scheme,
}
)
// Override the chain config with provided settings.
diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go
index 08cce0558..d1e199141 100644
--- a/eth/catalyst/api.go
+++ b/eth/catalyst/api.go
@@ -513,7 +513,7 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe
block, err := engine.ExecutableDataToBlock(params, versionedHashes, beaconRoot)
if err != nil {
log.Warn("Invalid NewPayload params", "params", params, "error", err)
- return engine.PayloadStatusV1{Status: engine.INVALID}, nil
+ return api.invalid(err, nil), nil
}
// Stash away the last update to warn the user if the beacon client goes offline
api.lastNewPayloadLock.Lock()
@@ -560,7 +560,7 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe
log.Warn("Invalid timestamp", "parent", block.Time(), "block", block.Time())
return api.invalid(errors.New("invalid timestamp"), parent.Header()), nil
}
- // Another cornercase: if the node is in snap sync mode, but the CL client
+ // Another corner case: if the node is in snap sync mode, but the CL client
// tries to make it import a block. That should be denied as pushing something
// into the database directly will conflict with the assumptions of snap sync
// that it has an empty db that it can fill itself.
@@ -694,20 +694,21 @@ func (api *ConsensusAPI) checkInvalidAncestor(check common.Hash, head common.Has
}
}
-// invalid returns a response "INVALID" with the latest valid hash supplied by latest or to the current head
-// if no latestValid block was provided.
+// invalid returns a response "INVALID" with the latest valid hash supplied by latest.
func (api *ConsensusAPI) invalid(err error, latestValid *types.Header) engine.PayloadStatusV1 {
- currentHash := api.eth.BlockChain().CurrentBlock().Hash()
+ var currentHash *common.Hash
if latestValid != nil {
- // Set latest valid hash to 0x0 if parent is PoW block
- currentHash = common.Hash{}
- if latestValid.Difficulty.BitLen() == 0 {
+ if latestValid.Difficulty.BitLen() != 0 {
+ // Set latest valid hash to 0x0 if parent is PoW block
+ currentHash = &common.Hash{}
+ } else {
// Otherwise set latest valid hash to parent hash
- currentHash = latestValid.Hash()
+ h := latestValid.Hash()
+ currentHash = &h
}
}
errorMsg := err.Error()
- return engine.PayloadStatusV1{Status: engine.INVALID, LatestValidHash: &currentHash, ValidationError: &errorMsg}
+ return engine.PayloadStatusV1{Status: engine.INVALID, LatestValidHash: currentHash, ValidationError: &errorMsg}
}
// heartbeat loops indefinitely, and checks if there have been beacon client updates
@@ -776,7 +777,7 @@ func (api *ConsensusAPI) ExchangeCapabilities([]string) []string {
// GetPayloadBodiesByHashV1 implements engine_getPayloadBodiesByHashV1 which allows for retrieval of a list
// of block bodies by the engine api.
func (api *ConsensusAPI) GetPayloadBodiesByHashV1(hashes []common.Hash) []*engine.ExecutionPayloadBodyV1 {
- var bodies = make([]*engine.ExecutionPayloadBodyV1, len(hashes))
+ bodies := make([]*engine.ExecutionPayloadBodyV1, len(hashes))
for i, hash := range hashes {
block := api.eth.BlockChain().GetBlockByHash(hash)
bodies[i] = getBody(block)
diff --git a/eth/catalyst/simulated_beacon.go b/eth/catalyst/simulated_beacon.go
index 1f7a3266c..a9a2bb4a9 100644
--- a/eth/catalyst/simulated_beacon.go
+++ b/eth/catalyst/simulated_beacon.go
@@ -199,7 +199,7 @@ func (c *SimulatedBeacon) sealBlock(withdrawals []*types.Withdrawal) error {
func (c *SimulatedBeacon) loopOnDemand() {
var (
newTxs = make(chan core.NewTxsEvent)
- sub = c.eth.TxPool().SubscribeNewTxsEvent(newTxs)
+ sub = c.eth.TxPool().SubscribeTransactions(newTxs, true)
)
defer sub.Unsubscribe()
diff --git a/eth/catalyst/tester.go b/eth/catalyst/tester.go
index 3e9159a17..0922ac0ba 100644
--- a/eth/catalyst/tester.go
+++ b/eth/catalyst/tester.go
@@ -20,7 +20,7 @@ import (
"sync"
"time"
- "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/log"
@@ -28,23 +28,27 @@ import (
)
// FullSyncTester is an auxiliary service that allows Geth to perform full sync
-// alone without consensus-layer attached. Users must specify a valid block as
-// the sync target. This tester can be applied to different networks, no matter
-// it's pre-merge or post-merge, but only for full-sync.
+// alone without consensus-layer attached. Users must specify a valid block hash
+// as the sync target.
+//
+// This tester can be applied to different networks, no matter it's pre-merge or
+// post-merge, but only for full-sync.
type FullSyncTester struct {
- api *ConsensusAPI
- block *types.Block
- closed chan struct{}
- wg sync.WaitGroup
+ stack *node.Node
+ backend *eth.Ethereum
+ target common.Hash
+ closed chan struct{}
+ wg sync.WaitGroup
}
// RegisterFullSyncTester registers the full-sync tester service into the node
// stack for launching and stopping the service controlled by node.
-func RegisterFullSyncTester(stack *node.Node, backend *eth.Ethereum, block *types.Block) (*FullSyncTester, error) {
+func RegisterFullSyncTester(stack *node.Node, backend *eth.Ethereum, target common.Hash) (*FullSyncTester, error) {
cl := &FullSyncTester{
- api: newConsensusAPIWithoutHeartbeat(backend),
- block: block,
- closed: make(chan struct{}),
+ stack: stack,
+ backend: backend,
+ target: target,
+ closed: make(chan struct{}),
}
stack.RegisterLifecycle(cl)
return cl, nil
@@ -56,29 +60,25 @@ func (tester *FullSyncTester) Start() error {
go func() {
defer tester.wg.Done()
+ // Trigger beacon sync with the provided block hash as trusted
+ // chain head.
+ err := tester.backend.Downloader().BeaconDevSync(downloader.FullSync, tester.target, tester.closed)
+ if err != nil {
+ log.Info("Failed to trigger beacon sync", "err", err)
+ }
+
ticker := time.NewTicker(time.Second * 5)
defer ticker.Stop()
for {
select {
case <-ticker.C:
- // Don't bother downloader in case it's already syncing.
- if tester.api.eth.Downloader().Synchronising() {
- continue
- }
- // Short circuit in case the target block is already stored
- // locally. TODO(somehow terminate the node stack if target
- // is reached).
- if tester.api.eth.BlockChain().HasBlock(tester.block.Hash(), tester.block.NumberU64()) {
- log.Info("Full-sync target reached", "number", tester.block.NumberU64(), "hash", tester.block.Hash())
+ // Stop in case the target block is already stored locally.
+ if block := tester.backend.BlockChain().GetBlockByHash(tester.target); block != nil {
+ log.Info("Full-sync target reached", "number", block.NumberU64(), "hash", block.Hash())
+ go tester.stack.Close() // async since we need to close ourselves
return
}
- // Trigger beacon sync with the provided block header as
- // trusted chain head.
- err := tester.api.eth.Downloader().BeaconSync(downloader.FullSync, tester.block.Header(), tester.block.Header())
- if err != nil {
- log.Info("Failed to beacon sync", "err", err)
- }
case <-tester.closed:
return
diff --git a/eth/downloader/beacondevsync.go b/eth/downloader/beacondevsync.go
new file mode 100644
index 000000000..9a38fedd4
--- /dev/null
+++ b/eth/downloader/beacondevsync.go
@@ -0,0 +1,81 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package downloader
+
+import (
+ "errors"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// BeaconDevSync is a development helper to test synchronization by providing
+// a block hash instead of header to run the beacon sync against.
+//
+// The method will reach out to the network to retrieve the header of the sync
+// target instead of receiving it from the consensus node.
+//
+// Note, this must not be used in live code. If the forkchoice endpoint were
+// to use this instead of giving us the payload first, then essentially nobody
+// in the network would have the block yet that we'd attempt to retrieve.
+func (d *Downloader) BeaconDevSync(mode SyncMode, hash common.Hash, stop chan struct{}) error {
+ // Be very loud that this code should not be used in a live node
+ log.Warn("----------------------------------")
+ log.Warn("Beacon syncing with hash as target", "hash", hash)
+ log.Warn("This is unhealthy for a live node!")
+ log.Warn("----------------------------------")
+
+ log.Info("Waiting for peers to retrieve sync target")
+ for {
+ // If the node is going down, unblock
+ select {
+ case <-stop:
+ return errors.New("stop requested")
+ default:
+ }
+ // Pick a random peer to sync from and keep retrying if none are yet
+ // available due to fresh startup
+ d.peers.lock.RLock()
+ var peer *peerConnection
+ for _, peer = range d.peers.peers {
+ break
+ }
+ d.peers.lock.RUnlock()
+
+ if peer == nil {
+ time.Sleep(time.Second)
+ continue
+ }
+ // Found a peer, attempt to retrieve the header whilst blocking and
+ // retry if it fails for whatever reason
+ log.Info("Attempting to retrieve sync target", "peer", peer.id)
+ headers, metas, err := d.fetchHeadersByHash(peer, hash, 1, 0, false)
+ if err != nil || len(headers) != 1 {
+ log.Warn("Failed to fetch sync target", "headers", len(headers), "err", err)
+ time.Sleep(time.Second)
+ continue
+ }
+ // Head header retrieved, if the hash matches, start the actual sync
+ if metas[0] != hash {
+ log.Error("Received invalid sync target", "want", hash, "have", metas[0])
+ time.Sleep(time.Second)
+ continue
+ }
+ return d.BeaconSync(mode, headers[0], headers[0])
+ }
+}
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 7fed48bdb..2ca7e328c 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -286,11 +286,6 @@ func (d *Downloader) Progress() ethereum.SyncProgress {
}
}
-// Synchronising returns whether the downloader is currently retrieving blocks.
-func (d *Downloader) Synchronising() bool {
- return d.synchronising.Load()
-}
-
// RegisterPeer injects a new download peer into the set of block source to be
// used for fetching hashes and blocks from.
func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error {
@@ -309,11 +304,6 @@ func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error {
return nil
}
-// RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer.
-func (d *Downloader) RegisterLightPeer(id string, version uint, peer LightPeer) error {
- return d.RegisterPeer(id, version, &lightPeerWrapper{peer})
-}
-
// UnregisterPeer remove a peer from the known list, preventing any action from
// the specified peer. An effort is also made to return any pending fetches into
// the queue.
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index ffe445ea8..e4875b959 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -177,7 +177,7 @@ func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header {
// function can be used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
// Service the header query via the live handler code
- rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersPacket{
+ rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{
Hash: origin,
},
@@ -205,7 +205,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount i
}
res := &eth.Response{
Req: req,
- Res: (*eth.BlockHeadersPacket)(&headers),
+ Res: (*eth.BlockHeadersRequest)(&headers),
Meta: hashes,
Time: 1,
Done: make(chan error, 1), // Ignore the returned status
@@ -221,7 +221,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount i
// function can be used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
// Service the header query via the live handler code
- rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersRequest{
+ rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{
Number: origin,
},
@@ -249,7 +249,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int,
}
res := &eth.Response{
Req: req,
- Res: (*eth.BlockHeadersPacket)(&headers),
+ Res: (*eth.BlockHeadersRequest)(&headers),
Meta: hashes,
Time: 1,
Done: make(chan error, 1), // Ignore the returned status
@@ -286,7 +286,7 @@ func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *et
}
res := &eth.Response{
Req: req,
- Res: (*eth.BlockBodiesPacket)(&bodies),
+ Res: (*eth.BlockBodiesResponse)(&bodies),
Meta: [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes},
Time: 1,
Done: make(chan error, 1), // Ignore the returned status
@@ -317,7 +317,7 @@ func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash, sink chan *
}
res := &eth.Response{
Req: req,
- Res: (*eth.ReceiptsPacket)(&receipts),
+ Res: (*eth.ReceiptsResponse)(&receipts),
Meta: hashes,
Time: 1,
Done: make(chan error, 1), // Ignore the returned status
@@ -437,9 +437,9 @@ func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
}
}
-func TestCanonicalSynchronisation66Full(t *testing.T) { testCanonSync(t, eth.ETH66, FullSync) }
-func TestCanonicalSynchronisation66Snap(t *testing.T) { testCanonSync(t, eth.ETH66, SnapSync) }
-func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) }
+func TestCanonicalSynchronisation68Full(t *testing.T) { testCanonSync(t, eth.ETH68, FullSync) }
+func TestCanonicalSynchronisation68Snap(t *testing.T) { testCanonSync(t, eth.ETH68, SnapSync) }
+func TestCanonicalSynchronisation68Light(t *testing.T) { testCanonSync(t, eth.ETH68, LightSync) }
func TestCanonicalSynchronisation67Full(t *testing.T) { testCanonSync(t, eth.ETH67, FullSync) }
func TestCanonicalSynchronisation67Snap(t *testing.T) { testCanonSync(t, eth.ETH67, SnapSync) }
func TestCanonicalSynchronisation67Light(t *testing.T) { testCanonSync(t, eth.ETH67, LightSync) }
@@ -461,8 +461,8 @@ func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
// Tests that if a large batch of blocks are being downloaded, it is throttled
// until the cached blocks are retrieved.
-func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) }
-func TestThrottling66Snap(t *testing.T) { testThrottling(t, eth.ETH66, SnapSync) }
+func TestThrottling68Full(t *testing.T) { testThrottling(t, eth.ETH68, FullSync) }
+func TestThrottling68Snap(t *testing.T) { testThrottling(t, eth.ETH68, SnapSync) }
func TestThrottling67Full(t *testing.T) { testThrottling(t, eth.ETH67, FullSync) }
func TestThrottling67Snap(t *testing.T) { testThrottling(t, eth.ETH67, SnapSync) }
@@ -543,9 +543,9 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
// Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
-func TestForkedSync66Full(t *testing.T) { testForkedSync(t, eth.ETH66, FullSync) }
-func TestForkedSync66Snap(t *testing.T) { testForkedSync(t, eth.ETH66, SnapSync) }
-func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) }
+func TestForkedSync68Full(t *testing.T) { testForkedSync(t, eth.ETH68, FullSync) }
+func TestForkedSync68Snap(t *testing.T) { testForkedSync(t, eth.ETH68, SnapSync) }
+func TestForkedSync68Light(t *testing.T) { testForkedSync(t, eth.ETH68, LightSync) }
func TestForkedSync67Full(t *testing.T) { testForkedSync(t, eth.ETH67, FullSync) }
func TestForkedSync67Snap(t *testing.T) { testForkedSync(t, eth.ETH67, SnapSync) }
func TestForkedSync67Light(t *testing.T) { testForkedSync(t, eth.ETH67, LightSync) }
@@ -573,9 +573,9 @@ func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
// Tests that synchronising against a much shorter but much heavier fork works
// currently and is not dropped.
-func TestHeavyForkedSync66Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, FullSync) }
-func TestHeavyForkedSync66Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, SnapSync) }
-func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) }
+func TestHeavyForkedSync68Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, FullSync) }
+func TestHeavyForkedSync68Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, SnapSync) }
+func TestHeavyForkedSync68Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, LightSync) }
func TestHeavyForkedSync67Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, FullSync) }
func TestHeavyForkedSync67Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, SnapSync) }
func TestHeavyForkedSync67Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, LightSync) }
@@ -605,9 +605,9 @@ func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
-func TestBoundedForkedSync66Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, FullSync) }
-func TestBoundedForkedSync66Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, SnapSync) }
-func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) }
+func TestBoundedForkedSync68Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, FullSync) }
+func TestBoundedForkedSync68Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, SnapSync) }
+func TestBoundedForkedSync68Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, LightSync) }
func TestBoundedForkedSync67Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, FullSync) }
func TestBoundedForkedSync67Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, SnapSync) }
func TestBoundedForkedSync67Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, LightSync) }
@@ -636,14 +636,14 @@ func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
-func TestBoundedHeavyForkedSync66Full(t *testing.T) {
- testBoundedHeavyForkedSync(t, eth.ETH66, FullSync)
+func TestBoundedHeavyForkedSync68Full(t *testing.T) {
+ testBoundedHeavyForkedSync(t, eth.ETH68, FullSync)
}
-func TestBoundedHeavyForkedSync66Snap(t *testing.T) {
- testBoundedHeavyForkedSync(t, eth.ETH66, SnapSync)
+func TestBoundedHeavyForkedSync68Snap(t *testing.T) {
+ testBoundedHeavyForkedSync(t, eth.ETH68, SnapSync)
}
-func TestBoundedHeavyForkedSync66Light(t *testing.T) {
- testBoundedHeavyForkedSync(t, eth.ETH66, LightSync)
+func TestBoundedHeavyForkedSync68Light(t *testing.T) {
+ testBoundedHeavyForkedSync(t, eth.ETH68, LightSync)
}
func TestBoundedHeavyForkedSync67Full(t *testing.T) {
testBoundedHeavyForkedSync(t, eth.ETH67, FullSync)
@@ -678,9 +678,9 @@ func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
}
// Tests that a canceled download wipes all previously accumulated state.
-func TestCancel66Full(t *testing.T) { testCancel(t, eth.ETH66, FullSync) }
-func TestCancel66Snap(t *testing.T) { testCancel(t, eth.ETH66, SnapSync) }
-func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) }
+func TestCancel68Full(t *testing.T) { testCancel(t, eth.ETH68, FullSync) }
+func TestCancel68Snap(t *testing.T) { testCancel(t, eth.ETH68, SnapSync) }
+func TestCancel68Light(t *testing.T) { testCancel(t, eth.ETH68, LightSync) }
func TestCancel67Full(t *testing.T) { testCancel(t, eth.ETH67, FullSync) }
func TestCancel67Snap(t *testing.T) { testCancel(t, eth.ETH67, SnapSync) }
func TestCancel67Light(t *testing.T) { testCancel(t, eth.ETH67, LightSync) }
@@ -708,9 +708,9 @@ func testCancel(t *testing.T, protocol uint, mode SyncMode) {
}
// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
-func TestMultiSynchronisation66Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, FullSync) }
-func TestMultiSynchronisation66Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, SnapSync) }
-func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) }
+func TestMultiSynchronisation68Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, FullSync) }
+func TestMultiSynchronisation68Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, SnapSync) }
+func TestMultiSynchronisation68Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, LightSync) }
func TestMultiSynchronisation67Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, FullSync) }
func TestMultiSynchronisation67Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, SnapSync) }
func TestMultiSynchronisation67Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, LightSync) }
@@ -735,9 +735,9 @@ func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
// Tests that synchronisations behave well in multi-version protocol environments
// and not wreak havoc on other nodes in the network.
-func TestMultiProtoSynchronisation66Full(t *testing.T) { testMultiProtoSync(t, eth.ETH66, FullSync) }
-func TestMultiProtoSynchronisation66Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH66, SnapSync) }
-func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) }
+func TestMultiProtoSynchronisation68Full(t *testing.T) { testMultiProtoSync(t, eth.ETH68, FullSync) }
+func TestMultiProtoSynchronisation68Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH68, SnapSync) }
+func TestMultiProtoSynchronisation68Light(t *testing.T) { testMultiProtoSync(t, eth.ETH68, LightSync) }
func TestMultiProtoSynchronisation67Full(t *testing.T) { testMultiProtoSync(t, eth.ETH67, FullSync) }
func TestMultiProtoSynchronisation67Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH67, SnapSync) }
func TestMultiProtoSynchronisation67Light(t *testing.T) { testMultiProtoSync(t, eth.ETH67, LightSync) }
@@ -750,7 +750,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
chain := testChainBase.shorten(blockCacheMaxItems - 15)
// Create peers of every type
- tester.newPeer("peer 66", eth.ETH66, chain.blocks[1:])
+ tester.newPeer("peer 68", eth.ETH68, chain.blocks[1:])
tester.newPeer("peer 67", eth.ETH67, chain.blocks[1:])
// Synchronise with the requested peer and make sure all blocks were retrieved
@@ -760,7 +760,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
assertOwnChain(t, tester, len(chain.blocks))
// Check that no peers have been dropped off
- for _, version := range []int{66, 67} {
+ for _, version := range []int{68, 67} {
peer := fmt.Sprintf("peer %d", version)
if _, ok := tester.peers[peer]; !ok {
t.Errorf("%s dropped", peer)
@@ -770,9 +770,9 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
-func TestEmptyShortCircuit66Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, FullSync) }
-func TestEmptyShortCircuit66Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, SnapSync) }
-func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) }
+func TestEmptyShortCircuit68Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, FullSync) }
+func TestEmptyShortCircuit68Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, SnapSync) }
+func TestEmptyShortCircuit68Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, LightSync) }
func TestEmptyShortCircuit67Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, FullSync) }
func TestEmptyShortCircuit67Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, SnapSync) }
func TestEmptyShortCircuit67Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, LightSync) }
@@ -821,9 +821,9 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
-func TestMissingHeaderAttack66Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, FullSync) }
-func TestMissingHeaderAttack66Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, SnapSync) }
-func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) }
+func TestMissingHeaderAttack68Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, FullSync) }
+func TestMissingHeaderAttack68Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, SnapSync) }
+func TestMissingHeaderAttack68Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, LightSync) }
func TestMissingHeaderAttack67Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, FullSync) }
func TestMissingHeaderAttack67Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, SnapSync) }
func TestMissingHeaderAttack67Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, LightSync) }
@@ -850,9 +850,9 @@ func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
// Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering.
-func TestShiftedHeaderAttack66Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, FullSync) }
-func TestShiftedHeaderAttack66Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, SnapSync) }
-func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) }
+func TestShiftedHeaderAttack68Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, FullSync) }
+func TestShiftedHeaderAttack68Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, SnapSync) }
+func TestShiftedHeaderAttack68Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, LightSync) }
func TestShiftedHeaderAttack67Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, FullSync) }
func TestShiftedHeaderAttack67Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, SnapSync) }
func TestShiftedHeaderAttack67Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, LightSync) }
@@ -880,14 +880,14 @@ func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
// Tests that a peer advertising a high TD doesn't get to stall the downloader
// afterwards by not sending any useful hashes.
-func TestHighTDStarvationAttack66Full(t *testing.T) {
- testHighTDStarvationAttack(t, eth.ETH66, FullSync)
+func TestHighTDStarvationAttack68Full(t *testing.T) {
+ testHighTDStarvationAttack(t, eth.ETH68, FullSync)
}
-func TestHighTDStarvationAttack66Snap(t *testing.T) {
- testHighTDStarvationAttack(t, eth.ETH66, SnapSync)
+func TestHighTDStarvationAttack68Snap(t *testing.T) {
+ testHighTDStarvationAttack(t, eth.ETH68, SnapSync)
}
-func TestHighTDStarvationAttack66Light(t *testing.T) {
- testHighTDStarvationAttack(t, eth.ETH66, LightSync)
+func TestHighTDStarvationAttack68Light(t *testing.T) {
+ testHighTDStarvationAttack(t, eth.ETH68, LightSync)
}
func TestHighTDStarvationAttack67Full(t *testing.T) {
testHighTDStarvationAttack(t, eth.ETH67, FullSync)
@@ -911,7 +911,7 @@ func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
}
// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
-func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) }
+func TestBlockHeaderAttackerDropping68(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH68) }
func TestBlockHeaderAttackerDropping67(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH67) }
func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
@@ -960,9 +960,9 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
// Tests that synchronisation progress (origin block number, current block number
// and highest block number) is tracked and updated correctly.
-func TestSyncProgress66Full(t *testing.T) { testSyncProgress(t, eth.ETH66, FullSync) }
-func TestSyncProgress66Snap(t *testing.T) { testSyncProgress(t, eth.ETH66, SnapSync) }
-func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) }
+func TestSyncProgress68Full(t *testing.T) { testSyncProgress(t, eth.ETH68, FullSync) }
+func TestSyncProgress68Snap(t *testing.T) { testSyncProgress(t, eth.ETH68, SnapSync) }
+func TestSyncProgress68Light(t *testing.T) { testSyncProgress(t, eth.ETH68, LightSync) }
func TestSyncProgress67Full(t *testing.T) { testSyncProgress(t, eth.ETH67, FullSync) }
func TestSyncProgress67Snap(t *testing.T) { testSyncProgress(t, eth.ETH67, SnapSync) }
func TestSyncProgress67Light(t *testing.T) { testSyncProgress(t, eth.ETH67, LightSync) }
@@ -1040,9 +1040,9 @@ func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.Sync
// Tests that synchronisation progress (origin block number and highest block
// number) is tracked and updated correctly in case of a fork (or manual head
// revertal).
-func TestForkedSyncProgress66Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, FullSync) }
-func TestForkedSyncProgress66Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, SnapSync) }
-func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) }
+func TestForkedSyncProgress68Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, FullSync) }
+func TestForkedSyncProgress68Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, SnapSync) }
+func TestForkedSyncProgress68Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, LightSync) }
func TestForkedSyncProgress67Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, FullSync) }
func TestForkedSyncProgress67Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, SnapSync) }
func TestForkedSyncProgress67Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, LightSync) }
@@ -1114,9 +1114,9 @@ func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
// Tests that if synchronisation is aborted due to some failure, then the progress
// origin is not updated in the next sync cycle, as it should be considered the
// continuation of the previous sync and not a new instance.
-func TestFailedSyncProgress66Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, FullSync) }
-func TestFailedSyncProgress66Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, SnapSync) }
-func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) }
+func TestFailedSyncProgress68Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, FullSync) }
+func TestFailedSyncProgress68Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, SnapSync) }
+func TestFailedSyncProgress68Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, LightSync) }
func TestFailedSyncProgress67Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, FullSync) }
func TestFailedSyncProgress67Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, SnapSync) }
func TestFailedSyncProgress67Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, LightSync) }
@@ -1183,9 +1183,9 @@ func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
// Tests that if an attacker fakes a chain height, after the attack is detected,
// the progress height is successfully reduced at the next sync invocation.
-func TestFakedSyncProgress66Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, FullSync) }
-func TestFakedSyncProgress66Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, SnapSync) }
-func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) }
+func TestFakedSyncProgress68Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, FullSync) }
+func TestFakedSyncProgress68Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, SnapSync) }
+func TestFakedSyncProgress68Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, LightSync) }
func TestFakedSyncProgress67Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, FullSync) }
func TestFakedSyncProgress67Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, SnapSync) }
func TestFakedSyncProgress67Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, LightSync) }
@@ -1330,8 +1330,10 @@ func TestRemoteHeaderRequestSpan(t *testing.T) {
// Tests that peers below a pre-configured checkpoint block are prevented from
// being fast-synced from, avoiding potential cheap eclipse attacks.
-func TestBeaconSync66Full(t *testing.T) { testBeaconSync(t, eth.ETH66, FullSync) }
-func TestBeaconSync66Snap(t *testing.T) { testBeaconSync(t, eth.ETH66, SnapSync) }
+func TestBeaconSync68Full(t *testing.T) { testBeaconSync(t, eth.ETH68, FullSync) }
+func TestBeaconSync68Snap(t *testing.T) { testBeaconSync(t, eth.ETH68, SnapSync) }
+func TestBeaconSync67Full(t *testing.T) { testBeaconSync(t, eth.ETH67, FullSync) }
+func TestBeaconSync67Snap(t *testing.T) { testBeaconSync(t, eth.ETH67, SnapSync) }
func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) {
//log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
diff --git a/eth/downloader/fetchers.go b/eth/downloader/fetchers.go
index 021e8c4f9..cc4279b0d 100644
--- a/eth/downloader/fetchers.go
+++ b/eth/downloader/fetchers.go
@@ -58,14 +58,14 @@ func (d *Downloader) fetchHeadersByHash(p *peerConnection, hash common.Hash, amo
case res := <-resCh:
// Headers successfully retrieved, update the metrics
headerReqTimer.Update(time.Since(start))
- headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersPacket))))
+ headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersRequest))))
// Don't reject the packet even if it turns out to be bad, downloader will
// disconnect the peer on its own terms. Simply delivery the headers to
// be processed by the caller
res.Done <- nil
- return *res.Res.(*eth.BlockHeadersPacket), res.Meta.([]common.Hash), nil
+ return *res.Res.(*eth.BlockHeadersRequest), res.Meta.([]common.Hash), nil
}
}
@@ -103,13 +103,13 @@ func (d *Downloader) fetchHeadersByNumber(p *peerConnection, number uint64, amou
case res := <-resCh:
// Headers successfully retrieved, update the metrics
headerReqTimer.Update(time.Since(start))
- headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersPacket))))
+ headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersRequest))))
// Don't reject the packet even if it turns out to be bad, downloader will
// disconnect the peer on its own terms. Simply delivery the headers to
// be processed by the caller
res.Done <- nil
- return *res.Res.(*eth.BlockHeadersPacket), res.Meta.([]common.Hash), nil
+ return *res.Res.(*eth.BlockHeadersRequest), res.Meta.([]common.Hash), nil
}
}
diff --git a/eth/downloader/fetchers_concurrent_bodies.go b/eth/downloader/fetchers_concurrent_bodies.go
index 9440972c6..5105fda66 100644
--- a/eth/downloader/fetchers_concurrent_bodies.go
+++ b/eth/downloader/fetchers_concurrent_bodies.go
@@ -89,7 +89,7 @@ func (q *bodyQueue) request(peer *peerConnection, req *fetchRequest, resCh chan
// deliver is responsible for taking a generic response packet from the concurrent
// fetcher, unpacking the body data and delivering it to the downloader's queue.
func (q *bodyQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) {
- txs, uncles, withdrawals := packet.Res.(*eth.BlockBodiesPacket).Unpack()
+ txs, uncles, withdrawals := packet.Res.(*eth.BlockBodiesResponse).Unpack()
hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes, withdrawal hashes}
accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], uncles, hashsets[1], withdrawals, hashsets[2])
diff --git a/eth/downloader/fetchers_concurrent_headers.go b/eth/downloader/fetchers_concurrent_headers.go
index 84c7f2098..8201f4ca7 100644
--- a/eth/downloader/fetchers_concurrent_headers.go
+++ b/eth/downloader/fetchers_concurrent_headers.go
@@ -81,7 +81,7 @@ func (q *headerQueue) request(peer *peerConnection, req *fetchRequest, resCh cha
// deliver is responsible for taking a generic response packet from the concurrent
// fetcher, unpacking the header data and delivering it to the downloader's queue.
func (q *headerQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) {
- headers := *packet.Res.(*eth.BlockHeadersPacket)
+ headers := *packet.Res.(*eth.BlockHeadersRequest)
hashes := packet.Meta.([]common.Hash)
accepted, err := q.queue.DeliverHeaders(peer.id, headers, hashes, q.headerProcCh)
diff --git a/eth/downloader/fetchers_concurrent_receipts.go b/eth/downloader/fetchers_concurrent_receipts.go
index 1c853c218..3169f030b 100644
--- a/eth/downloader/fetchers_concurrent_receipts.go
+++ b/eth/downloader/fetchers_concurrent_receipts.go
@@ -88,7 +88,7 @@ func (q *receiptQueue) request(peer *peerConnection, req *fetchRequest, resCh ch
// deliver is responsible for taking a generic response packet from the concurrent
// fetcher, unpacking the receipt data and delivering it to the downloader's queue.
func (q *receiptQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) {
- receipts := *packet.Res.(*eth.ReceiptsPacket)
+ receipts := *packet.Res.(*eth.ReceiptsResponse)
hashes := packet.Meta.([]common.Hash) // {receipt hashes}
accepted, err := q.queue.DeliverReceipts(peer.id, receipts, hashes)
diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go
index 6b8269495..4c43af527 100644
--- a/eth/downloader/peer.go
+++ b/eth/downloader/peer.go
@@ -55,39 +55,16 @@ type peerConnection struct {
lock sync.RWMutex
}
-// LightPeer encapsulates the methods required to synchronise with a remote light peer.
-type LightPeer interface {
+// Peer encapsulates the methods required to synchronise with a remote full peer.
+type Peer interface {
Head() (common.Hash, *big.Int)
RequestHeadersByHash(common.Hash, int, int, bool, chan *eth.Response) (*eth.Request, error)
RequestHeadersByNumber(uint64, int, int, bool, chan *eth.Response) (*eth.Request, error)
-}
-// Peer encapsulates the methods required to synchronise with a remote full peer.
-type Peer interface {
- LightPeer
RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error)
RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error)
}
-// lightPeerWrapper wraps a LightPeer struct, stubbing out the Peer-only methods.
-type lightPeerWrapper struct {
- peer LightPeer
-}
-
-func (w *lightPeerWrapper) Head() (common.Hash, *big.Int) { return w.peer.Head() }
-func (w *lightPeerWrapper) RequestHeadersByHash(h common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
- return w.peer.RequestHeadersByHash(h, amount, skip, reverse, sink)
-}
-func (w *lightPeerWrapper) RequestHeadersByNumber(i uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
- return w.peer.RequestHeadersByNumber(i, amount, skip, reverse, sink)
-}
-func (w *lightPeerWrapper) RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error) {
- panic("RequestBodies not supported in light client mode sync")
-}
-func (w *lightPeerWrapper) RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error) {
- panic("RequestReceipts not supported in light client mode sync")
-}
-
// newPeerConnection creates a new downloader peer.
func newPeerConnection(id string, version uint, peer Peer, logger log.Logger) *peerConnection {
return &peerConnection{
diff --git a/eth/downloader/skeleton.go b/eth/downloader/skeleton.go
index a07e1695f..4f1f46204 100644
--- a/eth/downloader/skeleton.go
+++ b/eth/downloader/skeleton.go
@@ -794,7 +794,7 @@ func (s *skeleton) executeTask(peer *peerConnection, req *headerRequest) {
case res := <-resCh:
// Headers successfully retrieved, update the metrics
- headers := *res.Res.(*eth.BlockHeadersPacket)
+ headers := *res.Res.(*eth.BlockHeadersRequest)
headerReqTimer.Update(time.Since(start))
s.peers.rates.Update(peer.id, eth.BlockHeadersMsg, res.Time, len(headers))
diff --git a/eth/downloader/skeleton_test.go b/eth/downloader/skeleton_test.go
index 6a76d78ac..c31007765 100644
--- a/eth/downloader/skeleton_test.go
+++ b/eth/downloader/skeleton_test.go
@@ -173,7 +173,7 @@ func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, ski
}
res := ð.Response{
Req: req,
- Res: (*eth.BlockHeadersPacket)(&headers),
+ Res: (*eth.BlockHeadersRequest)(&headers),
Meta: hashes,
Time: 1,
Done: make(chan error),
@@ -811,7 +811,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) {
// Create a peer set to feed headers through
peerset := newPeerSet()
for _, peer := range tt.peers {
- peerset.Register(newPeerConnection(peer.id, eth.ETH66, peer, log.New("id", peer.id)))
+ peerset.Register(newPeerConnection(peer.id, eth.ETH67, peer, log.New("id", peer.id)))
}
// Create a peer dropper to track malicious peers
dropped := make(map[string]int)
@@ -913,7 +913,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) {
skeleton.Sync(tt.newHead, nil, true)
}
if tt.newPeer != nil {
- if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH66, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil {
+ if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH67, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil {
t.Errorf("test %d: failed to register new peer: %v", i, err)
}
}
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index 5eddd7a19..1f0eb69cd 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -27,7 +27,6 @@ import (
"github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/txpool/blobpool"
"github.com/ethereum/go-ethereum/core/txpool/legacypool"
"github.com/ethereum/go-ethereum/eth/downloader"
@@ -65,7 +64,6 @@ var Defaults = Config{
TxLookupLimit: 2350000,
TransactionHistory: 2350000,
StateHistory: params.FullImmutabilityThreshold,
- StateScheme: rawdb.HashScheme,
LightPeers: 100,
DatabaseCache: 512,
TrieCleanCache: 154,
@@ -84,7 +82,7 @@ var Defaults = Config{
//go:generate go run github.com/fjl/gencodec -type Config -formats toml -out gen_config.go
-// Config contains configuration options for of the ETH and LES protocols.
+// Config contains configuration options for ETH and LES protocols.
type Config struct {
// The genesis block, which is inserted if the database is empty.
// If nil, the Ethereum main net block is used.
@@ -106,7 +104,11 @@ type Config struct {
TxLookupLimit uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved.
TransactionHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved.
StateHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose state histories are reserved.
- StateScheme string `toml:",omitempty"` // State scheme used to store ethereum state and merkle trie nodes on top
+
+ // State scheme represents the scheme used to store ethereum states and trie
+ // nodes on top. It can be 'hash', 'path', or none which means use the scheme
+ // consistent with persistent state.
+ StateScheme string `toml:",omitempty"`
// RequiredBlocks is a set of block number -> hash mappings which must be in the
// canonical chain of all remote peers. Setting the option makes geth verify the
@@ -184,7 +186,7 @@ func CreateConsensusEngine(config *params.ChainConfig, db ethdb.Database) (conse
return beacon.New(clique.New(config.Clique, db)), nil
}
// If defaulting to proof-of-work, enforce an already merged network since
- // we cannot run PoW algorithms and more, so we cannot even follow a chain
+ // we cannot run PoW algorithms anymore, so we cannot even follow a chain
// not coordinated by a beacon node.
if !config.TerminalTotalDifficultyPassed {
return nil, errors.New("ethash is only supported as a historical component of already merged networks")
diff --git a/eth/fetcher/block_fetcher.go b/eth/fetcher/block_fetcher.go
index 35608031d..8751c4e3e 100644
--- a/eth/fetcher/block_fetcher.go
+++ b/eth/fetcher/block_fetcher.go
@@ -483,7 +483,7 @@ func (f *BlockFetcher) loop() {
select {
case res := <-resCh:
res.Done <- nil
- f.FilterHeaders(peer, *res.Res.(*eth.BlockHeadersPacket), time.Now().Add(res.Time))
+ f.FilterHeaders(peer, *res.Res.(*eth.BlockHeadersRequest), time.Now().Add(res.Time))
case <-timeout.C:
// The peer didn't respond in time. The request
@@ -541,7 +541,7 @@ func (f *BlockFetcher) loop() {
case res := <-resCh:
res.Done <- nil
// Ignoring withdrawals here, since the block fetcher is not used post-merge.
- txs, uncles, _ := res.Res.(*eth.BlockBodiesPacket).Unpack()
+ txs, uncles, _ := res.Res.(*eth.BlockBodiesResponse).Unpack()
f.FilterBodies(peer, txs, uncles, time.Now())
case <-timeout.C:
diff --git a/eth/fetcher/block_fetcher_test.go b/eth/fetcher/block_fetcher_test.go
index 7c490df3f..6927300b1 100644
--- a/eth/fetcher/block_fetcher_test.go
+++ b/eth/fetcher/block_fetcher_test.go
@@ -213,7 +213,7 @@ func (f *fetcherTester) makeHeaderFetcher(peer string, blocks map[common.Hash]*t
}
res := ð.Response{
Req: req,
- Res: (*eth.BlockHeadersPacket)(&headers),
+ Res: (*eth.BlockHeadersRequest)(&headers),
Time: drift,
Done: make(chan error, 1), // Ignore the returned status
}
@@ -255,7 +255,7 @@ func (f *fetcherTester) makeBodyFetcher(peer string, blocks map[common.Hash]*typ
}
res := ð.Response{
Req: req,
- Res: (*eth.BlockBodiesPacket)(&bodies),
+ Res: (*eth.BlockBodiesResponse)(&bodies),
Time: drift,
Done: make(chan error, 1), // Ignore the returned status
}
diff --git a/eth/fetcher/tx_fetcher.go b/eth/fetcher/tx_fetcher.go
index a11b5e216..574762696 100644
--- a/eth/fetcher/tx_fetcher.go
+++ b/eth/fetcher/tx_fetcher.go
@@ -20,6 +20,7 @@ import (
"bytes"
"errors"
"fmt"
+ "math"
mrand "math/rand"
"sort"
"time"
@@ -38,16 +39,22 @@ const (
// can announce in a short time.
maxTxAnnounces = 4096
- // maxTxRetrievals is the maximum transaction number can be fetched in one
- // request. The rationale to pick 256 is:
- // - In eth protocol, the softResponseLimit is 2MB. Nowadays according to
- // Etherscan the average transaction size is around 200B, so in theory
- // we can include lots of transaction in a single protocol packet.
- // - However the maximum size of a single transaction is raised to 128KB,
- // so pick a middle value here to ensure we can maximize the efficiency
- // of the retrieval and response size overflow won't happen in most cases.
+ // maxTxRetrievals is the maximum number of transactions that can be fetched
+	// in one request. The rationale for picking 256 is to have a reasonable lower
+ // bound for the transferred data (don't waste RTTs, transfer more meaningful
+ // batch sizes), but also have an upper bound on the sequentiality to allow
+ // using our entire peerset for deliveries.
+ //
+ // This number also acts as a failsafe against malicious announces which might
+ // cause us to request more data than we'd expect.
maxTxRetrievals = 256
+ // maxTxRetrievalSize is the max number of bytes that delivered transactions
+ // should weigh according to the announcements. The 128KB was chosen to limit
+ // retrieving a maximum of one blob transaction at a time to minimize hogging
+ // a connection between two peers.
+ maxTxRetrievalSize = 128 * 1024
+
// maxTxUnderpricedSetSize is the size of the underpriced transaction set that
// is used to track recent transactions that have been dropped so we don't
// re-request them.
@@ -105,6 +112,14 @@ var (
type txAnnounce struct {
origin string // Identifier of the peer originating the notification
hashes []common.Hash // Batch of transaction hashes being announced
+ metas []*txMetadata // Batch of metadatas associated with the hashes (nil before eth/68)
+}
+
+// txMetadata is a set of extra data transmitted along the announcement for better
+// fetch scheduling.
+type txMetadata struct {
+ kind byte // Transaction consensus type
+ size uint32 // Transaction size in bytes
}
// txRequest represents an in-flight transaction retrieval request destined to
@@ -120,6 +135,7 @@ type txRequest struct {
type txDelivery struct {
origin string // Identifier of the peer originating the notification
hashes []common.Hash // Batch of transaction hashes having been delivered
+ metas []txMetadata // Batch of metadatas associated with the delivered hashes
direct bool // Whether this is a direct reply or a broadcast
}
@@ -155,14 +171,14 @@ type TxFetcher struct {
// Stage 1: Waiting lists for newly discovered transactions that might be
// broadcast without needing explicit request/reply round trips.
- waitlist map[common.Hash]map[string]struct{} // Transactions waiting for an potential broadcast
- waittime map[common.Hash]mclock.AbsTime // Timestamps when transactions were added to the waitlist
- waitslots map[string]map[common.Hash]struct{} // Waiting announcements grouped by peer (DoS protection)
+	waitlist  map[common.Hash]map[string]struct{}    // Transactions waiting for a potential broadcast
+ waittime map[common.Hash]mclock.AbsTime // Timestamps when transactions were added to the waitlist
+ waitslots map[string]map[common.Hash]*txMetadata // Waiting announcements grouped by peer (DoS protection)
// Stage 2: Queue of transactions that waiting to be allocated to some peer
// to be retrieved directly.
- announces map[string]map[common.Hash]struct{} // Set of announced transactions, grouped by origin peer
- announced map[common.Hash]map[string]struct{} // Set of download locations, grouped by transaction hash
+ announces map[string]map[common.Hash]*txMetadata // Set of announced transactions, grouped by origin peer
+ announced map[common.Hash]map[string]struct{} // Set of download locations, grouped by transaction hash
// Stage 3: Set of transactions currently being retrieved, some which may be
// fulfilled and some rescheduled. Note, this step shares 'announces' from the
@@ -175,6 +191,7 @@ type TxFetcher struct {
hasTx func(common.Hash) bool // Retrieves a tx from the local txpool
addTxs func([]*types.Transaction) []error // Insert a batch of transactions into local txpool
fetchTxs func(string, []common.Hash) error // Retrieves a set of txs from a remote peer
+ dropPeer func(string) // Drops a peer in case of announcement violation
step chan struct{} // Notification channel when the fetcher loop iterates
clock mclock.Clock // Time wrapper to simulate in tests
@@ -183,14 +200,14 @@ type TxFetcher struct {
// NewTxFetcher creates a transaction fetcher to retrieve transaction
// based on hash announcements.
-func NewTxFetcher(hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error) *TxFetcher {
- return NewTxFetcherForTests(hasTx, addTxs, fetchTxs, mclock.System{}, nil)
+func NewTxFetcher(hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, dropPeer func(string)) *TxFetcher {
+ return NewTxFetcherForTests(hasTx, addTxs, fetchTxs, dropPeer, mclock.System{}, nil)
}
// NewTxFetcherForTests is a testing method to mock out the realtime clock with
// a simulated version and the internal randomness with a deterministic one.
func NewTxFetcherForTests(
- hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error,
+ hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, dropPeer func(string),
clock mclock.Clock, rand *mrand.Rand) *TxFetcher {
return &TxFetcher{
notify: make(chan *txAnnounce),
@@ -199,8 +216,8 @@ func NewTxFetcherForTests(
quit: make(chan struct{}),
waitlist: make(map[common.Hash]map[string]struct{}),
waittime: make(map[common.Hash]mclock.AbsTime),
- waitslots: make(map[string]map[common.Hash]struct{}),
- announces: make(map[string]map[common.Hash]struct{}),
+ waitslots: make(map[string]map[common.Hash]*txMetadata),
+ announces: make(map[string]map[common.Hash]*txMetadata),
announced: make(map[common.Hash]map[string]struct{}),
fetching: make(map[common.Hash]string),
requests: make(map[string]*txRequest),
@@ -209,6 +226,7 @@ func NewTxFetcherForTests(
hasTx: hasTx,
addTxs: addTxs,
fetchTxs: fetchTxs,
+ dropPeer: dropPeer,
clock: clock,
rand: rand,
}
@@ -216,7 +234,7 @@ func NewTxFetcherForTests(
// Notify announces the fetcher of the potential availability of a new batch of
// transactions in the network.
-func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error {
+func (f *TxFetcher) Notify(peer string, types []byte, sizes []uint32, hashes []common.Hash) error {
// Keep track of all the announced transactions
txAnnounceInMeter.Mark(int64(len(hashes)))
@@ -226,28 +244,35 @@ func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error {
// still valuable to check here because it runs concurrent to the internal
// loop, so anything caught here is time saved internally.
var (
- unknowns = make([]common.Hash, 0, len(hashes))
+ unknownHashes = make([]common.Hash, 0, len(hashes))
+ unknownMetas = make([]*txMetadata, 0, len(hashes))
+
duplicate int64
underpriced int64
)
- for _, hash := range hashes {
+ for i, hash := range hashes {
switch {
case f.hasTx(hash):
duplicate++
case f.isKnownUnderpriced(hash):
underpriced++
default:
- unknowns = append(unknowns, hash)
+ unknownHashes = append(unknownHashes, hash)
+ if types == nil {
+ unknownMetas = append(unknownMetas, nil)
+ } else {
+ unknownMetas = append(unknownMetas, &txMetadata{kind: types[i], size: sizes[i]})
+ }
}
}
txAnnounceKnownMeter.Mark(duplicate)
txAnnounceUnderpricedMeter.Mark(underpriced)
// If anything's left to announce, push it into the internal loop
- if len(unknowns) == 0 {
+ if len(unknownHashes) == 0 {
return nil
}
- announce := &txAnnounce{origin: peer, hashes: unknowns}
+ announce := &txAnnounce{origin: peer, hashes: unknownHashes, metas: unknownMetas}
select {
case f.notify <- announce:
return nil
@@ -290,6 +315,7 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool)
// re-requesting them and dropping the peer in case of malicious transfers.
var (
added = make([]common.Hash, 0, len(txs))
+ metas = make([]txMetadata, 0, len(txs))
)
// proceed in batches
for i := 0; i < len(txs); i += 128 {
@@ -325,6 +351,10 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool)
otherreject++
}
added = append(added, batch[j].Hash())
+ metas = append(metas, txMetadata{
+ kind: batch[j].Type(),
+ size: uint32(batch[j].Size()),
+ })
}
knownMeter.Mark(duplicate)
underpricedMeter.Mark(underpriced)
@@ -337,7 +367,7 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool)
}
}
select {
- case f.cleanup <- &txDelivery{origin: peer, hashes: added, direct: direct}:
+ case f.cleanup <- &txDelivery{origin: peer, hashes: added, metas: metas, direct: direct}:
return nil
case <-f.quit:
return errTerminated
@@ -394,13 +424,15 @@ func (f *TxFetcher) loop() {
want := used + len(ann.hashes)
if want > maxTxAnnounces {
txAnnounceDOSMeter.Mark(int64(want - maxTxAnnounces))
+
ann.hashes = ann.hashes[:want-maxTxAnnounces]
+ ann.metas = ann.metas[:want-maxTxAnnounces]
}
// All is well, schedule the remainder of the transactions
idleWait := len(f.waittime) == 0
_, oldPeer := f.announces[ann.origin]
- for _, hash := range ann.hashes {
+ for i, hash := range ann.hashes {
// If the transaction is already downloading, add it to the list
// of possible alternates (in case the current retrieval fails) and
// also account it for the peer.
@@ -409,9 +441,9 @@ func (f *TxFetcher) loop() {
// Stage 2 and 3 share the set of origins per tx
if announces := f.announces[ann.origin]; announces != nil {
- announces[hash] = struct{}{}
+ announces[hash] = ann.metas[i]
} else {
- f.announces[ann.origin] = map[common.Hash]struct{}{hash: {}}
+ f.announces[ann.origin] = map[common.Hash]*txMetadata{hash: ann.metas[i]}
}
continue
}
@@ -422,9 +454,9 @@ func (f *TxFetcher) loop() {
// Stage 2 and 3 share the set of origins per tx
if announces := f.announces[ann.origin]; announces != nil {
- announces[hash] = struct{}{}
+ announces[hash] = ann.metas[i]
} else {
- f.announces[ann.origin] = map[common.Hash]struct{}{hash: {}}
+ f.announces[ann.origin] = map[common.Hash]*txMetadata{hash: ann.metas[i]}
}
continue
}
@@ -432,12 +464,18 @@ func (f *TxFetcher) loop() {
// yet downloading, add the peer as an alternate origin in the
// waiting list.
if f.waitlist[hash] != nil {
+ // Ignore double announcements from the same peer. This is
+ // especially important if metadata is also passed along to
+ // prevent malicious peers flip-flopping good/bad values.
+ if _, ok := f.waitlist[hash][ann.origin]; ok {
+ continue
+ }
f.waitlist[hash][ann.origin] = struct{}{}
if waitslots := f.waitslots[ann.origin]; waitslots != nil {
- waitslots[hash] = struct{}{}
+ waitslots[hash] = ann.metas[i]
} else {
- f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: {}}
+ f.waitslots[ann.origin] = map[common.Hash]*txMetadata{hash: ann.metas[i]}
}
continue
}
@@ -446,9 +484,9 @@ func (f *TxFetcher) loop() {
f.waittime[hash] = f.clock.Now()
if waitslots := f.waitslots[ann.origin]; waitslots != nil {
- waitslots[hash] = struct{}{}
+ waitslots[hash] = ann.metas[i]
} else {
- f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: {}}
+ f.waitslots[ann.origin] = map[common.Hash]*txMetadata{hash: ann.metas[i]}
}
}
// If a new item was added to the waitlist, schedule it into the fetcher
@@ -474,9 +512,9 @@ func (f *TxFetcher) loop() {
f.announced[hash] = f.waitlist[hash]
for peer := range f.waitlist[hash] {
if announces := f.announces[peer]; announces != nil {
- announces[hash] = struct{}{}
+ announces[hash] = f.waitslots[peer][hash]
} else {
- f.announces[peer] = map[common.Hash]struct{}{hash: {}}
+ f.announces[peer] = map[common.Hash]*txMetadata{hash: f.waitslots[peer][hash]}
}
delete(f.waitslots[peer], hash)
if len(f.waitslots[peer]) == 0 {
@@ -545,10 +583,27 @@ func (f *TxFetcher) loop() {
case delivery := <-f.cleanup:
// Independent if the delivery was direct or broadcast, remove all
- // traces of the hash from internal trackers
- for _, hash := range delivery.hashes {
+ // traces of the hash from internal trackers. That said, compare any
+ // advertised metadata with the real ones and drop bad peers.
+ for i, hash := range delivery.hashes {
if _, ok := f.waitlist[hash]; ok {
for peer, txset := range f.waitslots {
+ if meta := txset[hash]; meta != nil {
+ if delivery.metas[i].kind != meta.kind {
+ log.Warn("Announced transaction type mismatch", "peer", peer, "tx", hash, "type", delivery.metas[i].kind, "ann", meta.kind)
+ f.dropPeer(peer)
+ } else if delivery.metas[i].size != meta.size {
+ log.Warn("Announced transaction size mismatch", "peer", peer, "tx", hash, "size", delivery.metas[i].size, "ann", meta.size)
+ if math.Abs(float64(delivery.metas[i].size)-float64(meta.size)) > 8 {
+ // Normally we should drop a peer considering this is a protocol violation.
+							// However, due to the RLP vs consensus format messiness, allow a few bytes
+ // wiggle-room where we only warn, but don't drop.
+ //
+ // TODO(karalabe): Get rid of this relaxation when clients are proven stable.
+ f.dropPeer(peer)
+ }
+ }
+ }
delete(txset, hash)
if len(txset) == 0 {
delete(f.waitslots, peer)
@@ -558,6 +613,22 @@ func (f *TxFetcher) loop() {
delete(f.waittime, hash)
} else {
for peer, txset := range f.announces {
+ if meta := txset[hash]; meta != nil {
+ if delivery.metas[i].kind != meta.kind {
+ log.Warn("Announced transaction type mismatch", "peer", peer, "tx", hash, "type", delivery.metas[i].kind, "ann", meta.kind)
+ f.dropPeer(peer)
+ } else if delivery.metas[i].size != meta.size {
+ log.Warn("Announced transaction size mismatch", "peer", peer, "tx", hash, "size", delivery.metas[i].size, "ann", meta.size)
+ if math.Abs(float64(delivery.metas[i].size)-float64(meta.size)) > 8 {
+ // Normally we should drop a peer considering this is a protocol violation.
+							// However, due to the RLP vs consensus format messiness, allow a few bytes
+ // wiggle-room where we only warn, but don't drop.
+ //
+ // TODO(karalabe): Get rid of this relaxation when clients are proven stable.
+ f.dropPeer(peer)
+ }
+ }
+ }
delete(txset, hash)
if len(txset) == 0 {
delete(f.announces, peer)
@@ -794,25 +865,36 @@ func (f *TxFetcher) scheduleFetches(timer *mclock.Timer, timeout chan struct{},
if len(f.announces[peer]) == 0 {
return // continue in the for-each
}
- hashes := make([]common.Hash, 0, maxTxRetrievals)
- f.forEachHash(f.announces[peer], func(hash common.Hash) bool {
- if _, ok := f.fetching[hash]; !ok {
- // Mark the hash as fetching and stash away possible alternates
- f.fetching[hash] = peer
+ var (
+ hashes = make([]common.Hash, 0, maxTxRetrievals)
+ bytes uint64
+ )
+ f.forEachAnnounce(f.announces[peer], func(hash common.Hash, meta *txMetadata) bool {
+ // If the transaction is already fetching, skip to the next one
+ if _, ok := f.fetching[hash]; ok {
+ return true
+ }
+ // Mark the hash as fetching and stash away possible alternates
+ f.fetching[hash] = peer
- if _, ok := f.alternates[hash]; ok {
- panic(fmt.Sprintf("alternate tracker already contains fetching item: %v", f.alternates[hash]))
- }
- f.alternates[hash] = f.announced[hash]
- delete(f.announced, hash)
+ if _, ok := f.alternates[hash]; ok {
+ panic(fmt.Sprintf("alternate tracker already contains fetching item: %v", f.alternates[hash]))
+ }
+ f.alternates[hash] = f.announced[hash]
+ delete(f.announced, hash)
- // Accumulate the hash and stop if the limit was reached
- hashes = append(hashes, hash)
- if len(hashes) >= maxTxRetrievals {
- return false // break in the for-each
+ // Accumulate the hash and stop if the limit was reached
+ hashes = append(hashes, hash)
+ if len(hashes) >= maxTxRetrievals {
+ return false // break in the for-each
+ }
+ if meta != nil { // Only set eth/68 and upwards
+ bytes += uint64(meta.size)
+ if bytes >= maxTxRetrievalSize {
+ return false
}
}
- return true // continue in the for-each
+ return true // scheduled, try to add more
})
// If any hashes were allocated, request them from the peer
if len(hashes) > 0 {
@@ -857,27 +939,28 @@ func (f *TxFetcher) forEachPeer(peers map[string]struct{}, do func(peer string))
}
}
-// forEachHash does a range loop over a map of hashes in production, but during
-// testing it does a deterministic sorted random to allow reproducing issues.
-func (f *TxFetcher) forEachHash(hashes map[common.Hash]struct{}, do func(hash common.Hash) bool) {
+// forEachAnnounce does a range loop over a map of announcements in production,
+// but during testing it does a deterministic sorted random to allow reproducing
+// issues.
+func (f *TxFetcher) forEachAnnounce(announces map[common.Hash]*txMetadata, do func(hash common.Hash, meta *txMetadata) bool) {
// If we're running production, use whatever Go's map gives us
if f.rand == nil {
- for hash := range hashes {
- if !do(hash) {
+ for hash, meta := range announces {
+ if !do(hash, meta) {
return
}
}
return
}
// We're running the test suite, make iteration deterministic
- list := make([]common.Hash, 0, len(hashes))
- for hash := range hashes {
+ list := make([]common.Hash, 0, len(announces))
+ for hash := range announces {
list = append(list, hash)
}
sortHashes(list)
rotateHashes(list, f.rand.Intn(len(list)))
for _, hash := range list {
- if !do(hash) {
+ if !do(hash, announces[hash]) {
return
}
}
diff --git a/eth/fetcher/tx_fetcher_test.go b/eth/fetcher/tx_fetcher_test.go
index 980c1a6c2..fbb9ff9dc 100644
--- a/eth/fetcher/tx_fetcher_test.go
+++ b/eth/fetcher/tx_fetcher_test.go
@@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/params"
)
var (
@@ -41,9 +42,20 @@ var (
testTxsHashes = []common.Hash{testTxs[0].Hash(), testTxs[1].Hash(), testTxs[2].Hash(), testTxs[3].Hash()}
)
+type announce struct {
+ hash common.Hash
+ kind *byte
+ size *uint32
+}
+
+func typeptr(t byte) *byte { return &t }
+func sizeptr(n uint32) *uint32 { return &n }
+
type doTxNotify struct {
peer string
hashes []common.Hash
+ types []byte
+ sizes []uint32
}
type doTxEnqueue struct {
peer string
@@ -57,7 +69,14 @@ type doWait struct {
type doDrop string
type doFunc func()
+type isWaitingWithMeta map[string][]announce
type isWaiting map[string][]common.Hash
+
+type isScheduledWithMeta struct {
+ tracking map[string][]announce
+ fetching map[string][]common.Hash
+ dangling map[string][]common.Hash
+}
type isScheduled struct {
tracking map[string][]common.Hash
fetching map[string][]common.Hash
@@ -81,6 +100,7 @@ func TestTransactionFetcherWaiting(t *testing.T) {
func(common.Hash) bool { return false },
nil,
func(string, []common.Hash) error { return nil },
+ nil,
)
},
steps: []interface{}{
@@ -162,6 +182,212 @@ func TestTransactionFetcherWaiting(t *testing.T) {
})
}
+// Tests that transaction announcements with associated metadata are added to a
+// waitlist, and none of them are scheduled for retrieval until the wait expires.
+//
+// This test is an extended version of TestTransactionFetcherWaiting. It's mostly
+// to cover the metadata checks without bloating up the basic behavioral tests
+// with all the useless extra fields.
+func TestTransactionFetcherWaitingWithMeta(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ nil,
+ func(string, []common.Hash) error { return nil },
+ nil,
+ )
+ },
+ steps: []interface{}{
+ // Initial announcement to get something into the waitlist
+ doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{111, 222}},
+ isWaitingWithMeta(map[string][]announce{
+ "A": {
+ {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
+ {common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
+ },
+ }),
+ // Announce from a new peer to check that no overwrite happens
+ doTxNotify{peer: "B", hashes: []common.Hash{{0x03}, {0x04}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{333, 444}},
+ isWaitingWithMeta(map[string][]announce{
+ "A": {
+ {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
+ {common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
+ },
+ "B": {
+ {common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
+ {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
+ },
+ }),
+ // Announce clashing hashes but unique new peer
+ doTxNotify{peer: "C", hashes: []common.Hash{{0x01}, {0x04}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{111, 444}},
+ isWaitingWithMeta(map[string][]announce{
+ "A": {
+ {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
+ {common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
+ },
+ "B": {
+ {common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
+ {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
+ },
+ "C": {
+ {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
+ {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
+ },
+ }),
+ // Announce existing and clashing hashes from existing peer. Clashes
+ // should not overwrite previous announcements.
+ doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x03}, {0x05}}, types: []byte{types.LegacyTxType, types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{999, 333, 555}},
+ isWaitingWithMeta(map[string][]announce{
+ "A": {
+ {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
+ {common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
+ {common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
+ {common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(555)},
+ },
+ "B": {
+ {common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
+ {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
+ },
+ "C": {
+ {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
+ {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
+ },
+ }),
+ // Announce clashing hashes with conflicting metadata. Somebody will
+ // be in the wrong, but we don't know yet who.
+ doTxNotify{peer: "D", hashes: []common.Hash{{0x01}, {0x02}}, types: []byte{types.LegacyTxType, types.BlobTxType}, sizes: []uint32{999, 222}},
+ isWaitingWithMeta(map[string][]announce{
+ "A": {
+ {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
+ {common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
+ {common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
+ {common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(555)},
+ },
+ "B": {
+ {common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
+ {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
+ },
+ "C": {
+ {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
+ {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
+ },
+ "D": {
+ {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(999)},
+ {common.Hash{0x02}, typeptr(types.BlobTxType), sizeptr(222)},
+ },
+ }),
+ isScheduled{tracking: nil, fetching: nil},
+
+ // Wait for the arrival timeout which should move all expired items
+ // from the wait list to the scheduler
+ doWait{time: txArriveTimeout, step: true},
+ isWaiting(nil),
+ isScheduledWithMeta{
+ tracking: map[string][]announce{
+ "A": {
+ {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
+ {common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
+ {common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
+ {common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(555)},
+ },
+ "B": {
+ {common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
+ {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
+ },
+ "C": {
+ {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
+ {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
+ },
+ "D": {
+ {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(999)},
+ {common.Hash{0x02}, typeptr(types.BlobTxType), sizeptr(222)},
+ },
+ },
+ fetching: map[string][]common.Hash{ // Depends on deterministic test randomizer
+ "A": {{0x03}, {0x05}},
+ "C": {{0x01}, {0x04}},
+ "D": {{0x02}},
+ },
+ },
+ // Queue up a non-fetchable transaction and then trigger it with a new
+ // peer (weird case to test 1 line in the fetcher)
+ doTxNotify{peer: "C", hashes: []common.Hash{{0x06}, {0x07}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{666, 777}},
+ isWaitingWithMeta(map[string][]announce{
+ "C": {
+ {common.Hash{0x06}, typeptr(types.LegacyTxType), sizeptr(666)},
+ {common.Hash{0x07}, typeptr(types.LegacyTxType), sizeptr(777)},
+ },
+ }),
+ doWait{time: txArriveTimeout, step: true},
+ isScheduledWithMeta{
+ tracking: map[string][]announce{
+ "A": {
+ {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
+ {common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
+ {common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
+ {common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(555)},
+ },
+ "B": {
+ {common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
+ {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
+ },
+ "C": {
+ {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
+ {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
+ {common.Hash{0x06}, typeptr(types.LegacyTxType), sizeptr(666)},
+ {common.Hash{0x07}, typeptr(types.LegacyTxType), sizeptr(777)},
+ },
+ "D": {
+ {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(999)},
+ {common.Hash{0x02}, typeptr(types.BlobTxType), sizeptr(222)},
+ },
+ },
+ fetching: map[string][]common.Hash{
+ "A": {{0x03}, {0x05}},
+ "C": {{0x01}, {0x04}},
+ "D": {{0x02}},
+ },
+ },
+ doTxNotify{peer: "E", hashes: []common.Hash{{0x06}, {0x07}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{666, 777}},
+ isScheduledWithMeta{
+ tracking: map[string][]announce{
+ "A": {
+ {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
+ {common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
+ {common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
+ {common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(555)},
+ },
+ "B": {
+ {common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
+ {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
+ },
+ "C": {
+ {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
+ {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
+ {common.Hash{0x06}, typeptr(types.LegacyTxType), sizeptr(666)},
+ {common.Hash{0x07}, typeptr(types.LegacyTxType), sizeptr(777)},
+ },
+ "D": {
+ {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(999)},
+ {common.Hash{0x02}, typeptr(types.BlobTxType), sizeptr(222)},
+ },
+ "E": {
+ {common.Hash{0x06}, typeptr(types.LegacyTxType), sizeptr(666)},
+ {common.Hash{0x07}, typeptr(types.LegacyTxType), sizeptr(777)},
+ },
+ },
+ fetching: map[string][]common.Hash{
+ "A": {{0x03}, {0x05}},
+ "C": {{0x01}, {0x04}},
+ "D": {{0x02}},
+ "E": {{0x06}, {0x07}},
+ },
+ },
+ },
+ })
+}
+
// Tests that transaction announcements skip the waiting list if they are
// already scheduled.
func TestTransactionFetcherSkipWaiting(t *testing.T) {
@@ -171,6 +397,7 @@ func TestTransactionFetcherSkipWaiting(t *testing.T) {
func(common.Hash) bool { return false },
nil,
func(string, []common.Hash) error { return nil },
+ nil,
)
},
steps: []interface{}{
@@ -234,6 +461,7 @@ func TestTransactionFetcherSingletonRequesting(t *testing.T) {
func(common.Hash) bool { return false },
nil,
func(string, []common.Hash) error { return nil },
+ nil,
)
},
steps: []interface{}{
@@ -313,6 +541,7 @@ func TestTransactionFetcherFailedRescheduling(t *testing.T) {
<-proceed
return errors.New("peer disconnected")
},
+ nil,
)
},
steps: []interface{}{
@@ -382,6 +611,7 @@ func TestTransactionFetcherCleanup(t *testing.T) {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
+ nil,
)
},
steps: []interface{}{
@@ -421,6 +651,7 @@ func TestTransactionFetcherCleanupEmpty(t *testing.T) {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
+ nil,
)
},
steps: []interface{}{
@@ -459,6 +690,7 @@ func TestTransactionFetcherMissingRescheduling(t *testing.T) {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
+ nil,
)
},
steps: []interface{}{
@@ -505,6 +737,7 @@ func TestTransactionFetcherMissingCleanup(t *testing.T) {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
+ nil,
)
},
steps: []interface{}{
@@ -543,6 +776,7 @@ func TestTransactionFetcherBroadcasts(t *testing.T) {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
+ nil,
)
},
steps: []interface{}{
@@ -591,6 +825,7 @@ func TestTransactionFetcherWaitTimerResets(t *testing.T) {
func(common.Hash) bool { return false },
nil,
func(string, []common.Hash) error { return nil },
+ nil,
)
},
steps: []interface{}{
@@ -648,6 +883,7 @@ func TestTransactionFetcherTimeoutRescheduling(t *testing.T) {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
+ nil,
)
},
steps: []interface{}{
@@ -713,6 +949,7 @@ func TestTransactionFetcherTimeoutTimerResets(t *testing.T) {
func(common.Hash) bool { return false },
nil,
func(string, []common.Hash) error { return nil },
+ nil,
)
},
steps: []interface{}{
@@ -757,21 +994,21 @@ func TestTransactionFetcherTimeoutTimerResets(t *testing.T) {
})
}
-// Tests that if thousands of transactions are announces, only a small
+// Tests that if thousands of transactions are announced, only a small
// number of them will be requested at a time.
func TestTransactionFetcherRateLimiting(t *testing.T) {
- // Create a slew of transactions and to announce them
+ // Create a slew of transactions and announce them
var hashes []common.Hash
for i := 0; i < maxTxAnnounces; i++ {
hashes = append(hashes, common.Hash{byte(i / 256), byte(i % 256)})
}
-
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
nil,
func(string, []common.Hash) error { return nil },
+ nil,
)
},
steps: []interface{}{
@@ -792,6 +1029,68 @@ func TestTransactionFetcherRateLimiting(t *testing.T) {
})
}
+// Tests that if huge transactions are announced, only a small number of them will
+// be requested at a time, to keep the responses below a reasonable level.
+func TestTransactionFetcherBandwidthLimiting(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ nil,
+ func(string, []common.Hash) error { return nil },
+ nil,
+ )
+ },
+ steps: []interface{}{
+ // Announce mid size transactions from A to verify that multiple
+ // ones can be piled into a single request.
+ doTxNotify{peer: "A",
+ hashes: []common.Hash{{0x01}, {0x02}, {0x03}, {0x04}},
+ types: []byte{types.LegacyTxType, types.LegacyTxType, types.LegacyTxType, types.LegacyTxType},
+ sizes: []uint32{48 * 1024, 48 * 1024, 48 * 1024, 48 * 1024},
+ },
+ // Announce exactly on the limit transactions to see that only one
+ // gets requested
+ doTxNotify{peer: "B",
+ hashes: []common.Hash{{0x05}, {0x06}},
+ types: []byte{types.LegacyTxType, types.LegacyTxType},
+ sizes: []uint32{maxTxRetrievalSize, maxTxRetrievalSize},
+ },
+ // Announce oversized blob transactions to see that overflows are ok
+ doTxNotify{peer: "C",
+ hashes: []common.Hash{{0x07}, {0x08}},
+ types: []byte{types.BlobTxType, types.BlobTxType},
+ sizes: []uint32{params.MaxBlobGasPerBlock, params.MaxBlobGasPerBlock},
+ },
+ doWait{time: txArriveTimeout, step: true},
+ isWaiting(nil),
+ isScheduledWithMeta{
+ tracking: map[string][]announce{
+ "A": {
+ {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(48 * 1024)},
+ {common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(48 * 1024)},
+ {common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(48 * 1024)},
+ {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(48 * 1024)},
+ },
+ "B": {
+ {common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(maxTxRetrievalSize)},
+ {common.Hash{0x06}, typeptr(types.LegacyTxType), sizeptr(maxTxRetrievalSize)},
+ },
+ "C": {
+ {common.Hash{0x07}, typeptr(types.BlobTxType), sizeptr(params.MaxBlobGasPerBlock)},
+ {common.Hash{0x08}, typeptr(types.BlobTxType), sizeptr(params.MaxBlobGasPerBlock)},
+ },
+ },
+ fetching: map[string][]common.Hash{
+ "A": {{0x02}, {0x03}, {0x04}},
+ "B": {{0x06}},
+ "C": {{0x08}},
+ },
+ },
+ },
+ })
+}
+
// Tests that then number of transactions a peer is allowed to announce and/or
// request at the same time is hard capped.
func TestTransactionFetcherDoSProtection(t *testing.T) {
@@ -810,6 +1109,7 @@ func TestTransactionFetcherDoSProtection(t *testing.T) {
func(common.Hash) bool { return false },
nil,
func(string, []common.Hash) error { return nil },
+ nil,
)
},
steps: []interface{}{
@@ -877,6 +1177,7 @@ func TestTransactionFetcherUnderpricedDedup(t *testing.T) {
return errs
},
func(string, []common.Hash) error { return nil },
+ nil,
)
},
steps: []interface{}{
@@ -946,6 +1247,7 @@ func TestTransactionFetcherUnderpricedDoSProtection(t *testing.T) {
return errs
},
func(string, []common.Hash) error { return nil },
+ nil,
)
},
steps: append(steps, []interface{}{
@@ -968,6 +1270,7 @@ func TestTransactionFetcherOutOfBoundDeliveries(t *testing.T) {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
+ nil,
)
},
steps: []interface{}{
@@ -1021,6 +1324,7 @@ func TestTransactionFetcherDrop(t *testing.T) {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
+ nil,
)
},
steps: []interface{}{
@@ -1087,6 +1391,7 @@ func TestTransactionFetcherDropRescheduling(t *testing.T) {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
+ nil,
)
},
steps: []interface{}{
@@ -1120,6 +1425,74 @@ func TestTransactionFetcherDropRescheduling(t *testing.T) {
})
}
+// Tests that announced transactions with the wrong transaction type or size will
+// result in a dropped peer.
+func TestInvalidAnnounceMetadata(t *testing.T) {
+ drop := make(chan string, 2)
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ return make([]error, len(txs))
+ },
+ func(string, []common.Hash) error { return nil },
+ func(peer string) { drop <- peer },
+ )
+ },
+ steps: []interface{}{
+ // Initial announcement to get something into the waitlist
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}, types: []byte{testTxs[0].Type(), testTxs[1].Type()}, sizes: []uint32{uint32(testTxs[0].Size()), uint32(testTxs[1].Size())}},
+ isWaitingWithMeta(map[string][]announce{
+ "A": {
+ {testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(uint32(testTxs[0].Size()))},
+ {testTxsHashes[1], typeptr(testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))},
+ },
+ }),
+ // Announce from new peers conflicting transactions
+ doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{1024 + uint32(testTxs[0].Size())}},
+ doTxNotify{peer: "C", hashes: []common.Hash{testTxsHashes[1]}, types: []byte{1 + testTxs[1].Type()}, sizes: []uint32{uint32(testTxs[1].Size())}},
+ isWaitingWithMeta(map[string][]announce{
+ "A": {
+ {testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(uint32(testTxs[0].Size()))},
+ {testTxsHashes[1], typeptr(testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))},
+ },
+ "B": {
+ {testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(1024 + uint32(testTxs[0].Size()))},
+ },
+ "C": {
+ {testTxsHashes[1], typeptr(1 + testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))},
+ },
+ }),
+ // Schedule all the transactions for retrieval
+ doWait{time: txArriveTimeout, step: true},
+ isWaitingWithMeta(nil),
+ isScheduledWithMeta{
+ tracking: map[string][]announce{
+ "A": {
+ {testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(uint32(testTxs[0].Size()))},
+ {testTxsHashes[1], typeptr(testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))},
+ },
+ "B": {
+ {testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(1024 + uint32(testTxs[0].Size()))},
+ },
+ "C": {
+ {testTxsHashes[1], typeptr(1 + testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))},
+ },
+ },
+ fetching: map[string][]common.Hash{
+ "A": {testTxsHashes[0]},
+ "C": {testTxsHashes[1]},
+ },
+ },
+ // Deliver the transactions and wait for B to be dropped
+ doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1]}},
+ doFunc(func() { <-drop }),
+ doFunc(func() { <-drop }),
+ },
+ })
+}
+
// This test reproduces a crash caught by the fuzzer. The root cause was a
// dangling transaction timing out and clashing on re-add with a concurrently
// announced one.
@@ -1132,6 +1505,7 @@ func TestTransactionFetcherFuzzCrash01(t *testing.T) {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
+ nil,
)
},
steps: []interface{}{
@@ -1159,6 +1533,7 @@ func TestTransactionFetcherFuzzCrash02(t *testing.T) {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
+ nil,
)
},
steps: []interface{}{
@@ -1188,6 +1563,7 @@ func TestTransactionFetcherFuzzCrash03(t *testing.T) {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
+ nil,
)
},
steps: []interface{}{
@@ -1224,6 +1600,7 @@ func TestTransactionFetcherFuzzCrash04(t *testing.T) {
<-proceed
return errors.New("peer disconnected")
},
+ nil,
)
},
steps: []interface{}{
@@ -1274,9 +1651,34 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
// Crunch through all the test steps and execute them
for i, step := range tt.steps {
+ // Auto-expand certain steps to ones with metadata
+ switch old := step.(type) {
+ case isWaiting:
+ new := make(isWaitingWithMeta)
+ for peer, hashes := range old {
+ for _, hash := range hashes {
+ new[peer] = append(new[peer], announce{hash, nil, nil})
+ }
+ }
+ step = new
+
+ case isScheduled:
+ new := isScheduledWithMeta{
+ tracking: make(map[string][]announce),
+ fetching: old.fetching,
+ dangling: old.dangling,
+ }
+ for peer, hashes := range old.tracking {
+ for _, hash := range hashes {
+ new.tracking[peer] = append(new.tracking[peer], announce{hash, nil, nil})
+ }
+ }
+ step = new
+ }
+ // Process the original or expanded steps
switch step := step.(type) {
case doTxNotify:
- if err := fetcher.Notify(step.peer, step.hashes); err != nil {
+ if err := fetcher.Notify(step.peer, step.types, step.sizes, step.hashes); err != nil {
t.Errorf("step %d: %v", i, err)
}
<-wait // Fetcher needs to process this, wait until it's done
@@ -1307,24 +1709,34 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
case doFunc:
step()
- case isWaiting:
+ case isWaitingWithMeta:
// We need to check that the waiting list (stage 1) internals
// match with the expected set. Check the peer->hash mappings
// first.
- for peer, hashes := range step {
+ for peer, announces := range step {
waiting := fetcher.waitslots[peer]
if waiting == nil {
t.Errorf("step %d: peer %s missing from waitslots", i, peer)
continue
}
- for _, hash := range hashes {
- if _, ok := waiting[hash]; !ok {
- t.Errorf("step %d, peer %s: hash %x missing from waitslots", i, peer, hash)
+ for _, ann := range announces {
+ if meta, ok := waiting[ann.hash]; !ok {
+ t.Errorf("step %d, peer %s: hash %x missing from waitslots", i, peer, ann.hash)
+ } else {
+ if (meta == nil && (ann.kind != nil || ann.size != nil)) ||
+ (meta != nil && (ann.kind == nil || ann.size == nil)) ||
+ (meta != nil && (meta.kind != *ann.kind || meta.size != *ann.size)) {
+ t.Errorf("step %d, peer %s, hash %x: waitslot metadata mismatch: want %v, have %v/%v", i, peer, ann.hash, meta, *ann.kind, *ann.size)
+ }
}
}
- for hash := range waiting {
- if !containsHash(hashes, hash) {
- t.Errorf("step %d, peer %s: hash %x extra in waitslots", i, peer, hash)
+ for hash, meta := range waiting {
+ ann := announce{hash: hash}
+ if meta != nil {
+ ann.kind, ann.size = &meta.kind, &meta.size
+ }
+ if !containsAnnounce(announces, ann) {
+ t.Errorf("step %d, peer %s: announce %v extra in waitslots", i, peer, ann)
}
}
}
@@ -1334,13 +1746,13 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
}
}
// Peer->hash sets correct, check the hash->peer and timeout sets
- for peer, hashes := range step {
- for _, hash := range hashes {
- if _, ok := fetcher.waitlist[hash][peer]; !ok {
- t.Errorf("step %d, hash %x: peer %s missing from waitlist", i, hash, peer)
+ for peer, announces := range step {
+ for _, ann := range announces {
+ if _, ok := fetcher.waitlist[ann.hash][peer]; !ok {
+ t.Errorf("step %d, hash %x: peer %s missing from waitlist", i, ann.hash, peer)
}
- if _, ok := fetcher.waittime[hash]; !ok {
- t.Errorf("step %d: hash %x missing from waittime", i, hash)
+ if _, ok := fetcher.waittime[ann.hash]; !ok {
+ t.Errorf("step %d: hash %x missing from waittime", i, ann.hash)
}
}
}
@@ -1349,15 +1761,15 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
t.Errorf("step %d, hash %x: empty peerset in waitlist", i, hash)
}
for peer := range peers {
- if !containsHash(step[peer], hash) {
+ if !containsHashInAnnounces(step[peer], hash) {
t.Errorf("step %d, hash %x: peer %s extra in waitlist", i, hash, peer)
}
}
}
for hash := range fetcher.waittime {
var found bool
- for _, hashes := range step {
- if containsHash(hashes, hash) {
+ for _, announces := range step {
+ if containsHashInAnnounces(announces, hash) {
found = true
break
}
@@ -1367,23 +1779,33 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
}
}
- case isScheduled:
+ case isScheduledWithMeta:
// Check that all scheduled announces are accounted for and no
// extra ones are present.
- for peer, hashes := range step.tracking {
+ for peer, announces := range step.tracking {
scheduled := fetcher.announces[peer]
if scheduled == nil {
t.Errorf("step %d: peer %s missing from announces", i, peer)
continue
}
- for _, hash := range hashes {
- if _, ok := scheduled[hash]; !ok {
- t.Errorf("step %d, peer %s: hash %x missing from announces", i, peer, hash)
+ for _, ann := range announces {
+ if meta, ok := scheduled[ann.hash]; !ok {
+ t.Errorf("step %d, peer %s: hash %x missing from announces", i, peer, ann.hash)
+ } else {
+ if (meta == nil && (ann.kind != nil || ann.size != nil)) ||
+ (meta != nil && (ann.kind == nil || ann.size == nil)) ||
+ (meta != nil && (meta.kind != *ann.kind || meta.size != *ann.size)) {
+ t.Errorf("step %d, peer %s, hash %x: announce metadata mismatch: want %v, have %v/%v", i, peer, ann.hash, meta, *ann.kind, *ann.size)
+ }
}
}
- for hash := range scheduled {
- if !containsHash(hashes, hash) {
- t.Errorf("step %d, peer %s: hash %x extra in announces", i, peer, hash)
+ for hash, meta := range scheduled {
+ ann := announce{hash: hash}
+ if meta != nil {
+ ann.kind, ann.size = &meta.kind, &meta.size
+ }
+ if !containsAnnounce(announces, ann) {
+ t.Errorf("step %d, peer %s: announce %x extra in announces", i, peer, hash)
}
}
}
@@ -1483,17 +1905,17 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
// retrieval but not actively being downloaded are tracked only
// in the stage 2 `announced` map.
var queued []common.Hash
- for _, hashes := range step.tracking {
- for _, hash := range hashes {
+ for _, announces := range step.tracking {
+ for _, ann := range announces {
var found bool
for _, hs := range step.fetching {
- if containsHash(hs, hash) {
+ if containsHash(hs, ann.hash) {
found = true
break
}
}
if !found {
- queued = append(queued, hash)
+ queued = append(queued, ann.hash)
}
}
}
@@ -1526,6 +1948,42 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
}
}
+// containsAnnounce returns whether an announcement is contained within a slice
+// of announcements.
+func containsAnnounce(slice []announce, ann announce) bool {
+ for _, have := range slice {
+ if have.hash == ann.hash {
+ if have.kind == nil || ann.kind == nil {
+ if have.kind != ann.kind {
+ return false
+ }
+ } else if *have.kind != *ann.kind {
+ return false
+ }
+ if have.size == nil || ann.size == nil {
+ if have.size != ann.size {
+ return false
+ }
+ } else if *have.size != *ann.size {
+ return false
+ }
+ return true
+ }
+ }
+ return false
+}
+
+// containsHashInAnnounces returns whether a hash is contained within a slice
+// of announcements.
+func containsHashInAnnounces(slice []announce, hash common.Hash) bool {
+ for _, have := range slice {
+ if have.hash == hash {
+ return true
+ }
+ }
+ return false
+}
+
// containsHash returns whether a hash is contained within a hash slice.
func containsHash(slice []common.Hash, hash common.Hash) bool {
for _, have := range slice {
diff --git a/eth/handler.go b/eth/handler.go
index 59040442e..f0021e564 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -55,9 +55,7 @@ const (
txMaxBroadcastSize = 4096
)
-var (
- syncChallengeTimeout = 15 * time.Second // Time allowance for a node to reply to the sync progress challenge
-)
+var syncChallengeTimeout = 15 * time.Second // Time allowance for a node to reply to the sync progress challenge
// txPool defines the methods needed from a transaction pool implementation to
// support all the operations needed by the Ethereum chain protocols.
@@ -77,9 +75,10 @@ type txPool interface {
// The slice should be modifiable by the caller.
Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction
- // SubscribeNewTxsEvent should return an event subscription of
- // NewTxsEvent and send events to the given channel.
- SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription
+ // SubscribeTransactions subscribes to new transaction events. The subscriber
+ // can decide whether to receive notifications only for newly seen transactions
+ // or also for reorged out ones.
+ SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription
}
// handlerConfig is the collection of initialization parameters to create a full
@@ -89,7 +88,7 @@ type handlerConfig struct {
Chain *core.BlockChain // Blockchain to serve data from
TxPool txPool // Transaction pool to propagate from
Merger *consensus.Merger // The manager for eth1/2 transition
- Network uint64 // Network identifier to adfvertise
+ Network uint64 // Network identifier to advertise
Sync downloader.SyncMode // Whether to snap or full sync
BloomCache uint64 // Megabytes to alloc for snap sync bloom
EventMux *event.TypeMux // Legacy event mux, deprecate for `feed`
@@ -255,7 +254,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
}
td := new(big.Int).Add(ptd, block.Difficulty())
if !h.chain.Config().IsTerminalPoWBlock(ptd, td) {
- log.Info("Filtered out non-termimal pow block", "number", block.NumberU64(), "hash", block.Hash())
+ log.Info("Filtered out non-terminal pow block", "number", block.NumberU64(), "hash", block.Hash())
return 0, nil
}
if err := h.chain.InsertBlockWithoutSetHead(block); err != nil {
@@ -278,7 +277,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
addTxs := func(txs []*types.Transaction) []error {
return h.txpool.Add(txs, false, false)
}
- h.txFetcher = fetcher.NewTxFetcher(h.txpool.Has, addTxs, fetchTx)
+ h.txFetcher = fetcher.NewTxFetcher(h.txpool.Has, addTxs, fetchTx, h.removePeer)
h.chainSync = newChainSyncer(h)
return h, nil
}
@@ -416,7 +415,7 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {
select {
case res := <-resCh:
- headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersPacket))
+ headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersRequest))
if len(headers) == 0 {
// Required blocks are allowed to be missing if the remote
// node is not yet synced
@@ -463,7 +462,7 @@ func (h *handler) runSnapExtension(peer *snap.Peer, handler snap.Handler) error
snap.EgressRegistrationErrorMeter.Mark(1)
}
}
- peer.Log().Warn("Snapshot extension registration failed", "err", err)
+ peer.Log().Debug("Snapshot extension registration failed", "err", err)
return err
}
return handler(peer)
@@ -511,10 +510,10 @@ func (h *handler) unregisterPeer(id string) {
func (h *handler) Start(maxPeers int) {
h.maxPeers = maxPeers
- // broadcast transactions
+ // broadcast and announce transactions (only new ones, not resurrected ones)
h.wg.Add(1)
h.txsCh = make(chan core.NewTxsEvent, txChanSize)
- h.txsSub = h.txpool.SubscribeNewTxsEvent(h.txsCh)
+ h.txsSub = h.txpool.SubscribeTransactions(h.txsCh, false)
go h.txBroadcastLoop()
// broadcast mined blocks
@@ -594,26 +593,33 @@ func (h *handler) BroadcastBlock(block *types.Block, propagate bool) {
}
// BroadcastTransactions will propagate a batch of transactions
-// - To a square root of all peers
+// - To a square root of all peers for non-blob transactions
// - And, separately, as announcements to all peers which are not known to
// already have the given transaction.
func (h *handler) BroadcastTransactions(txs types.Transactions) {
var (
- annoCount int // Count of announcements made
- annoPeers int
- directCount int // Count of the txs sent directly to peers
- directPeers int // Count of the peers that were sent transactions directly
+ blobTxs int // Number of blob transactions to announce only
+ largeTxs int // Number of large transactions to announce only
+
+ directCount int // Number of transactions sent directly to peers (duplicates included)
+ directPeers int // Number of peers that were sent transactions directly
+ annCount int // Number of transactions announced across all peers (duplicates included)
+ annPeers int // Number of peers announced about transactions
txset = make(map[*ethPeer][]common.Hash) // Set peer->hash to transfer directly
annos = make(map[*ethPeer][]common.Hash) // Set peer->hash to announce
-
)
// Broadcast transactions to a batch of peers not knowing about it
for _, tx := range txs {
peers := h.peers.peersWithoutTransaction(tx.Hash())
var numDirect int
- if tx.Size() <= txMaxBroadcastSize {
+ switch {
+ case tx.Type() == types.BlobTxType:
+ blobTxs++
+ case tx.Size() > txMaxBroadcastSize:
+ largeTxs++
+ default:
numDirect = int(math.Sqrt(float64(len(peers))))
}
// Send the tx unconditionally to a subset of our peers
@@ -631,13 +637,12 @@ func (h *handler) BroadcastTransactions(txs types.Transactions) {
peer.AsyncSendTransactions(hashes)
}
for peer, hashes := range annos {
- annoPeers++
- annoCount += len(hashes)
+ annPeers++
+ annCount += len(hashes)
peer.AsyncSendPooledTransactionHashes(hashes)
}
- log.Debug("Transaction broadcast", "txs", len(txs),
- "announce packs", annoPeers, "announced hashes", annoCount,
- "tx packs", directPeers, "broadcast txs", directCount)
+ log.Debug("Distributed transactions", "plaintxs", len(txs)-blobTxs-largeTxs, "blobtxs", blobTxs, "largetxs", largeTxs,
+ "bcastpeers", directPeers, "bcastcount", directCount, "annpeers", annPeers, "anncount", annCount)
}
// minedBroadcastLoop sends mined blocks to connected peers.
diff --git a/eth/handler_eth.go b/eth/handler_eth.go
index 2aba16f92..2a839f615 100644
--- a/eth/handler_eth.go
+++ b/eth/handler_eth.go
@@ -17,6 +17,7 @@
package eth
import (
+ "errors"
"fmt"
"math/big"
"time"
@@ -66,16 +67,21 @@ func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
case *eth.NewBlockPacket:
return h.handleBlockBroadcast(peer, packet.Block, packet.TD)
- case *eth.NewPooledTransactionHashesPacket66:
- return h.txFetcher.Notify(peer.ID(), *packet)
+ case *eth.NewPooledTransactionHashesPacket67:
+ return h.txFetcher.Notify(peer.ID(), nil, nil, *packet)
case *eth.NewPooledTransactionHashesPacket68:
- return h.txFetcher.Notify(peer.ID(), packet.Hashes)
+ return h.txFetcher.Notify(peer.ID(), packet.Types, packet.Sizes, packet.Hashes)
case *eth.TransactionsPacket:
+ for _, tx := range *packet {
+ if tx.Type() == types.BlobTxType {
+ return errors.New("disallowed broadcast blob transaction")
+ }
+ }
return h.txFetcher.Enqueue(peer.ID(), *packet, false)
- case *eth.PooledTransactionsPacket:
+ case *eth.PooledTransactionsResponse:
return h.txFetcher.Enqueue(peer.ID(), *packet, true)
default:
@@ -90,9 +96,7 @@ func (h *ethHandler) handleBlockAnnounces(peer *eth.Peer, hashes []common.Hash,
// the chain already entered the pos stage and disconnect the
// remote peer.
if h.merger.PoSFinalized() {
- // TODO (MariusVanDerWijden) drop non-updated peers after the merge
- return nil
- // return errors.New("unexpected block announces")
+ return errors.New("disallowed block announcement")
}
// Schedule all the unknown hashes for retrieval
var (
@@ -118,9 +122,7 @@ func (h *ethHandler) handleBlockBroadcast(peer *eth.Peer, block *types.Block, td
// the chain already entered the pos stage and disconnect the
// remote peer.
if h.merger.PoSFinalized() {
- // TODO (MariusVanDerWijden) drop non-updated peers after the merge
- return nil
- // return errors.New("unexpected block announces")
+ return errors.New("disallowed block broadcast")
}
// Schedule the block for import
h.blockFetcher.Enqueue(peer.ID(), block)
diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go
index a16abc5ed..bb342acc1 100644
--- a/eth/handler_eth_test.go
+++ b/eth/handler_eth_test.go
@@ -58,7 +58,7 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
h.blockBroadcasts.Send(packet.Block)
return nil
- case *eth.NewPooledTransactionHashesPacket66:
+ case *eth.NewPooledTransactionHashesPacket67:
h.txAnnounces.Send(([]common.Hash)(*packet))
return nil
@@ -70,7 +70,7 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
h.txBroadcasts.Send(([]*types.Transaction)(*packet))
return nil
- case *eth.PooledTransactionsPacket:
+ case *eth.PooledTransactionsResponse:
h.txBroadcasts.Send(([]*types.Transaction)(*packet))
return nil
@@ -81,7 +81,6 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
// Tests that peers are correctly accepted (or rejected) based on the advertised
// fork IDs in the protocol handshake.
-func TestForkIDSplit66(t *testing.T) { testForkIDSplit(t, eth.ETH66) }
func TestForkIDSplit67(t *testing.T) { testForkIDSplit(t, eth.ETH67) }
func TestForkIDSplit68(t *testing.T) { testForkIDSplit(t, eth.ETH68) }
@@ -237,7 +236,6 @@ func testForkIDSplit(t *testing.T, protocol uint) {
}
// Tests that received transactions are added to the local pool.
-func TestRecvTransactions66(t *testing.T) { testRecvTransactions(t, eth.ETH66) }
func TestRecvTransactions67(t *testing.T) { testRecvTransactions(t, eth.ETH67) }
func TestRecvTransactions68(t *testing.T) { testRecvTransactions(t, eth.ETH68) }
@@ -251,7 +249,7 @@ func testRecvTransactions(t *testing.T, protocol uint) {
handler.handler.synced.Store(true) // mark synced to accept transactions
txs := make(chan core.NewTxsEvent)
- sub := handler.txpool.SubscribeNewTxsEvent(txs)
+ sub := handler.txpool.SubscribeTransactions(txs, false)
defer sub.Unsubscribe()
// Create a source peer to send messages through and a sink handler to receive them
@@ -296,7 +294,6 @@ func testRecvTransactions(t *testing.T, protocol uint) {
}
// This test checks that pending transactions are sent.
-func TestSendTransactions66(t *testing.T) { testSendTransactions(t, eth.ETH66) }
func TestSendTransactions67(t *testing.T) { testSendTransactions(t, eth.ETH67) }
func TestSendTransactions68(t *testing.T) { testSendTransactions(t, eth.ETH68) }
@@ -356,7 +353,7 @@ func testSendTransactions(t *testing.T, protocol uint) {
seen := make(map[common.Hash]struct{})
for len(seen) < len(insert) {
switch protocol {
- case 66, 67, 68:
+ case 67, 68:
select {
case hashes := <-anns:
for _, hash := range hashes {
@@ -382,7 +379,6 @@ func testSendTransactions(t *testing.T, protocol uint) {
// Tests that transactions get propagated to all attached peers, either via direct
// broadcasts or via announcements/retrievals.
-func TestTransactionPropagation66(t *testing.T) { testTransactionPropagation(t, eth.ETH66) }
func TestTransactionPropagation67(t *testing.T) { testTransactionPropagation(t, eth.ETH67) }
func TestTransactionPropagation68(t *testing.T) { testTransactionPropagation(t, eth.ETH68) }
@@ -428,7 +424,7 @@ func testTransactionPropagation(t *testing.T, protocol uint) {
for i := 0; i < len(sinks); i++ {
txChs[i] = make(chan core.NewTxsEvent, 1024)
- sub := sinks[i].txpool.SubscribeNewTxsEvent(txChs[i])
+ sub := sinks[i].txpool.SubscribeTransactions(txChs[i], false)
defer sub.Unsubscribe()
}
// Fill the source pool with transactions and wait for them at the sinks
@@ -490,8 +486,8 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) {
defer sourcePipe.Close()
defer sinkPipe.Close()
- sourcePeer := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil)
- sinkPeer := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil)
+ sourcePeer := eth.NewPeer(eth.ETH67, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil)
+ sinkPeer := eth.NewPeer(eth.ETH67, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil)
defer sourcePeer.Close()
defer sinkPeer.Close()
@@ -543,7 +539,6 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) {
// Tests that a propagated malformed block (uncles or transactions don't match
// with the hashes in the header) gets discarded and not broadcast forward.
-func TestBroadcastMalformedBlock66(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH66) }
func TestBroadcastMalformedBlock67(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH67) }
func TestBroadcastMalformedBlock68(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH68) }
diff --git a/eth/handler_test.go b/eth/handler_test.go
index 2e0a98845..6d6132ee4 100644
--- a/eth/handler_test.go
+++ b/eth/handler_test.go
@@ -113,15 +113,17 @@ func (p *testTxPool) Pending(enforceTips bool) map[common.Address][]*txpool.Lazy
Time: tx.Time(),
GasFeeCap: tx.GasFeeCap(),
GasTipCap: tx.GasTipCap(),
+ Gas: tx.Gas(),
+ BlobGas: tx.BlobGas(),
})
}
}
return pending
}
-// SubscribeNewTxsEvent should return an event subscription of NewTxsEvent and
+// SubscribeTransactions should return an event subscription of NewTxsEvent and
// send events to the given channel.
-func (p *testTxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
+func (p *testTxPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription {
return p.txFeed.Subscribe(ch)
}
diff --git a/eth/peerset.go b/eth/peerset.go
index b9cc1e03a..b27d3964a 100644
--- a/eth/peerset.go
+++ b/eth/peerset.go
@@ -18,6 +18,7 @@ package eth
import (
"errors"
+ "fmt"
"math/big"
"sync"
@@ -74,7 +75,7 @@ func (ps *peerSet) registerSnapExtension(peer *snap.Peer) error {
// Reject the peer if it advertises `snap` without `eth` as `snap` is only a
// satellite protocol meaningful with the chain selection of `eth`
if !peer.RunningCap(eth.ProtocolName, eth.ProtocolVersions) {
- return errSnapWithoutEth
+ return fmt.Errorf("%w: have %v", errSnapWithoutEth, peer.Caps())
}
// Ensure nobody can double connect
ps.lock.Lock()
diff --git a/eth/protocols/eth/handler.go b/eth/protocols/eth/handler.go
index b2ce883bc..42d0412a1 100644
--- a/eth/protocols/eth/handler.go
+++ b/eth/protocols/eth/handler.go
@@ -23,7 +23,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/p2p"
@@ -45,10 +44,6 @@ const (
// nowadays, the practical limit will always be softResponseLimit.
maxBodiesServe = 1024
- // maxNodeDataServe is the maximum number of state trie nodes to serve. This
- // number is there to limit the number of disk lookups.
- maxNodeDataServe = 1024
-
// maxReceiptsServe is the maximum number of block receipts to serve. This
// number is mostly there to limit the number of disk lookups. With block
// containing 200+ transactions nowadays, the practical limit will always
@@ -98,12 +93,12 @@ type TxPool interface {
func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2p.Protocol {
protocols := make([]p2p.Protocol, 0, len(ProtocolVersions))
for _, version := range ProtocolVersions {
- version := version // Closure
-
- // Path scheme does not support GetNodeData, don't advertise eth66 on it
- if version <= ETH66 && backend.Chain().TrieDB().Scheme() == rawdb.PathScheme {
+ // Blob transactions require eth/68 announcements, disable everything else
+ if version <= ETH67 && backend.Chain().Config().CancunTime != nil {
continue
}
+ version := version // Closure
+
protocols = append(protocols, p2p.Protocol{
Name: ProtocolName,
Version: version,
@@ -171,36 +166,19 @@ type Decoder interface {
Time() time.Time
}
-var eth66 = map[uint64]msgHandler{
- NewBlockHashesMsg: handleNewBlockhashes,
- NewBlockMsg: handleNewBlock,
- TransactionsMsg: handleTransactions,
- NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes66,
- GetBlockHeadersMsg: handleGetBlockHeaders66,
- BlockHeadersMsg: handleBlockHeaders66,
- GetBlockBodiesMsg: handleGetBlockBodies66,
- BlockBodiesMsg: handleBlockBodies66,
- GetNodeDataMsg: handleGetNodeData66,
- NodeDataMsg: handleNodeData66,
- GetReceiptsMsg: handleGetReceipts66,
- ReceiptsMsg: handleReceipts66,
- GetPooledTransactionsMsg: handleGetPooledTransactions66,
- PooledTransactionsMsg: handlePooledTransactions66,
-}
-
var eth67 = map[uint64]msgHandler{
NewBlockHashesMsg: handleNewBlockhashes,
NewBlockMsg: handleNewBlock,
TransactionsMsg: handleTransactions,
- NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes66,
- GetBlockHeadersMsg: handleGetBlockHeaders66,
- BlockHeadersMsg: handleBlockHeaders66,
- GetBlockBodiesMsg: handleGetBlockBodies66,
- BlockBodiesMsg: handleBlockBodies66,
- GetReceiptsMsg: handleGetReceipts66,
- ReceiptsMsg: handleReceipts66,
- GetPooledTransactionsMsg: handleGetPooledTransactions66,
- PooledTransactionsMsg: handlePooledTransactions66,
+ NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes67,
+ GetBlockHeadersMsg: handleGetBlockHeaders,
+ BlockHeadersMsg: handleBlockHeaders,
+ GetBlockBodiesMsg: handleGetBlockBodies,
+ BlockBodiesMsg: handleBlockBodies,
+ GetReceiptsMsg: handleGetReceipts,
+ ReceiptsMsg: handleReceipts,
+ GetPooledTransactionsMsg: handleGetPooledTransactions,
+ PooledTransactionsMsg: handlePooledTransactions,
}
var eth68 = map[uint64]msgHandler{
@@ -208,14 +186,14 @@ var eth68 = map[uint64]msgHandler{
NewBlockMsg: handleNewBlock,
TransactionsMsg: handleTransactions,
NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes68,
- GetBlockHeadersMsg: handleGetBlockHeaders66,
- BlockHeadersMsg: handleBlockHeaders66,
- GetBlockBodiesMsg: handleGetBlockBodies66,
- BlockBodiesMsg: handleBlockBodies66,
- GetReceiptsMsg: handleGetReceipts66,
- ReceiptsMsg: handleReceipts66,
- GetPooledTransactionsMsg: handleGetPooledTransactions66,
- PooledTransactionsMsg: handlePooledTransactions66,
+ GetBlockHeadersMsg: handleGetBlockHeaders,
+ BlockHeadersMsg: handleBlockHeaders,
+ GetBlockBodiesMsg: handleGetBlockBodies,
+ BlockBodiesMsg: handleBlockBodies,
+ GetReceiptsMsg: handleGetReceipts,
+ ReceiptsMsg: handleReceipts,
+ GetPooledTransactionsMsg: handleGetPooledTransactions,
+ PooledTransactionsMsg: handlePooledTransactions,
}
// handleMessage is invoked whenever an inbound message is received from a remote
@@ -231,14 +209,10 @@ func handleMessage(backend Backend, peer *Peer) error {
}
defer msg.Discard()
- var handlers = eth66
- if peer.Version() == ETH67 {
- handlers = eth67
- }
+ var handlers = eth67
if peer.Version() >= ETH68 {
handlers = eth68
}
-
// Track the amount of time it takes to serve the request and run the handler
if metrics.Enabled {
h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code)
diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go
index bf2874721..41e18bfb3 100644
--- a/eth/protocols/eth/handler_test.go
+++ b/eth/protocols/eth/handler_test.go
@@ -28,7 +28,6 @@ import (
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/txpool/legacypool"
"github.com/ethereum/go-ethereum/core/types"
@@ -151,7 +150,6 @@ func (b *testBackend) Handle(*Peer, Packet) error {
}
// Tests that block headers can be retrieved from a remote chain based on user queries.
-func TestGetBlockHeaders66(t *testing.T) { testGetBlockHeaders(t, ETH66) }
func TestGetBlockHeaders67(t *testing.T) { testGetBlockHeaders(t, ETH67) }
func TestGetBlockHeaders68(t *testing.T) { testGetBlockHeaders(t, ETH68) }
@@ -178,29 +176,29 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
// Create a batch of tests for various scenarios
limit := uint64(maxHeadersServe)
tests := []struct {
- query *GetBlockHeadersPacket // The query to execute for header retrieval
- expect []common.Hash // The hashes of the block whose headers are expected
+ query *GetBlockHeadersRequest // The query to execute for header retrieval
+ expect []common.Hash // The hashes of the block whose headers are expected
}{
// A single random block should be retrievable by hash
{
- &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1},
[]common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()},
},
// A single random block should be retrievable by number
{
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 1},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 1},
[]common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()},
},
// Multiple headers should be retrievable in both directions
{
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 3},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 3},
[]common.Hash{
backend.chain.GetBlockByNumber(limit / 2).Hash(),
backend.chain.GetBlockByNumber(limit/2 + 1).Hash(),
backend.chain.GetBlockByNumber(limit/2 + 2).Hash(),
},
}, {
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true},
[]common.Hash{
backend.chain.GetBlockByNumber(limit / 2).Hash(),
backend.chain.GetBlockByNumber(limit/2 - 1).Hash(),
@@ -209,14 +207,14 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
},
// Multiple headers with skip lists should be retrievable
{
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3},
[]common.Hash{
backend.chain.GetBlockByNumber(limit / 2).Hash(),
backend.chain.GetBlockByNumber(limit/2 + 4).Hash(),
backend.chain.GetBlockByNumber(limit/2 + 8).Hash(),
},
}, {
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true},
[]common.Hash{
backend.chain.GetBlockByNumber(limit / 2).Hash(),
backend.chain.GetBlockByNumber(limit/2 - 4).Hash(),
@@ -225,31 +223,31 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
},
// The chain endpoints should be retrievable
{
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 0}, Amount: 1},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 0}, Amount: 1},
[]common.Hash{backend.chain.GetBlockByNumber(0).Hash()},
},
{
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 1},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 1},
[]common.Hash{backend.chain.CurrentBlock().Hash()},
},
{ // If the peer requests a bit into the future, we deliver what we have
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 10},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 10},
[]common.Hash{backend.chain.CurrentBlock().Hash()},
},
// Ensure protocol limits are honored
{
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 1}, Amount: limit + 10, Reverse: true},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 1}, Amount: limit + 10, Reverse: true},
getHashes(backend.chain.CurrentBlock().Number.Uint64(), limit),
},
// Check that requesting more than available is handled gracefully
{
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 3, Amount: 3},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 3, Amount: 3},
[]common.Hash{
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(),
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64()).Hash(),
},
}, {
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true},
[]common.Hash{
backend.chain.GetBlockByNumber(4).Hash(),
backend.chain.GetBlockByNumber(0).Hash(),
@@ -257,13 +255,13 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
},
// Check that requesting more than available is handled gracefully, even if mid skip
{
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 2, Amount: 3},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 2, Amount: 3},
[]common.Hash{
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(),
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 1).Hash(),
},
}, {
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true},
[]common.Hash{
backend.chain.GetBlockByNumber(4).Hash(),
backend.chain.GetBlockByNumber(1).Hash(),
@@ -271,7 +269,7 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
},
// Check a corner case where requesting more can iterate past the endpoints
{
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 2}, Amount: 5, Reverse: true},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 2}, Amount: 5, Reverse: true},
[]common.Hash{
backend.chain.GetBlockByNumber(2).Hash(),
backend.chain.GetBlockByNumber(1).Hash(),
@@ -280,24 +278,24 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
},
// Check a corner case where skipping overflow loops back into the chain start
{
- &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1},
[]common.Hash{
backend.chain.GetBlockByNumber(3).Hash(),
},
},
// Check a corner case where skipping overflow loops back to the same header
{
- &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64},
[]common.Hash{
backend.chain.GetBlockByNumber(1).Hash(),
},
},
// Check that non existing headers aren't returned
{
- &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: unknown}, Amount: 1},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: unknown}, Amount: 1},
[]common.Hash{},
}, {
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() + 1}, Amount: 1},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() + 1}, Amount: 1},
[]common.Hash{},
},
}
@@ -309,13 +307,13 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
headers = append(headers, backend.chain.GetBlockByHash(hash).Header())
}
// Send the hash request and verify the response
- p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
- RequestId: 123,
- GetBlockHeadersPacket: tt.query,
+ p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket{
+ RequestId: 123,
+ GetBlockHeadersRequest: tt.query,
})
- if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, &BlockHeadersPacket66{
- RequestId: 123,
- BlockHeadersPacket: headers,
+ if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, &BlockHeadersPacket{
+ RequestId: 123,
+ BlockHeadersRequest: headers,
}); err != nil {
t.Errorf("test %d: headers mismatch: %v", i, err)
}
@@ -324,11 +322,11 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
if origin := backend.chain.GetBlockByNumber(tt.query.Origin.Number); origin != nil {
tt.query.Origin.Hash, tt.query.Origin.Number = origin.Hash(), 0
- p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
- RequestId: 456,
- GetBlockHeadersPacket: tt.query,
+ p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket{
+ RequestId: 456,
+ GetBlockHeadersRequest: tt.query,
})
- expected := &BlockHeadersPacket66{RequestId: 456, BlockHeadersPacket: headers}
+ expected := &BlockHeadersPacket{RequestId: 456, BlockHeadersRequest: headers}
if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, expected); err != nil {
t.Errorf("test %d by hash: headers mismatch: %v", i, err)
}
@@ -338,7 +336,6 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
}
// Tests that block contents can be retrieved from a remote chain based on their hashes.
-func TestGetBlockBodies66(t *testing.T) { testGetBlockBodies(t, ETH66) }
func TestGetBlockBodies67(t *testing.T) { testGetBlockBodies(t, ETH67) }
func TestGetBlockBodies68(t *testing.T) { testGetBlockBodies(t, ETH68) }
@@ -420,139 +417,20 @@ func testGetBlockBodies(t *testing.T, protocol uint) {
}
// Send the hash request and verify the response
- p2p.Send(peer.app, GetBlockBodiesMsg, &GetBlockBodiesPacket66{
- RequestId: 123,
- GetBlockBodiesPacket: hashes,
+ p2p.Send(peer.app, GetBlockBodiesMsg, &GetBlockBodiesPacket{
+ RequestId: 123,
+ GetBlockBodiesRequest: hashes,
})
- if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, &BlockBodiesPacket66{
- RequestId: 123,
- BlockBodiesPacket: bodies,
+ if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, &BlockBodiesPacket{
+ RequestId: 123,
+ BlockBodiesResponse: bodies,
}); err != nil {
t.Fatalf("test %d: bodies mismatch: %v", i, err)
}
}
}
-// Tests that the state trie nodes can be retrieved based on hashes.
-func TestGetNodeData66(t *testing.T) { testGetNodeData(t, ETH66, false) }
-func TestGetNodeData67(t *testing.T) { testGetNodeData(t, ETH67, true) }
-func TestGetNodeData68(t *testing.T) { testGetNodeData(t, ETH68, true) }
-
-func testGetNodeData(t *testing.T, protocol uint, drop bool) {
- t.Parallel()
-
- // Define three accounts to simulate transactions with
- acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
- acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
- acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey)
- acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey)
-
- signer := types.HomesteadSigner{}
- // Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_makers_test)
- generator := func(i int, block *core.BlockGen) {
- switch i {
- case 0:
- // In block 1, the test bank sends account #1 some ether.
- tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(10_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testKey)
- block.AddTx(tx)
- case 1:
- // In block 2, the test bank sends some more ether to account #1.
- // acc1Addr passes it on to account #2.
- tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testKey)
- tx2, _ := types.SignTx(types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, acc1Key)
- block.AddTx(tx1)
- block.AddTx(tx2)
- case 2:
- // Block 3 is empty but was mined by account #2.
- block.SetCoinbase(acc2Addr)
- block.SetExtra([]byte("yeehaw"))
- case 3:
- // Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data).
- b2 := block.PrevBlock(1).Header()
- b2.Extra = []byte("foo")
- block.AddUncle(b2)
- b3 := block.PrevBlock(2).Header()
- b3.Extra = []byte("foo")
- block.AddUncle(b3)
- }
- }
- // Assemble the test environment
- backend := newTestBackendWithGenerator(4, false, generator)
- defer backend.close()
-
- peer, _ := newTestPeer("peer", protocol, backend)
- defer peer.close()
-
- // Collect all state tree hashes.
- var hashes []common.Hash
- it := backend.db.NewIterator(nil, nil)
- for it.Next() {
- if key := it.Key(); len(key) == common.HashLength {
- hashes = append(hashes, common.BytesToHash(key))
- }
- }
- it.Release()
-
- // Request all hashes.
- p2p.Send(peer.app, GetNodeDataMsg, &GetNodeDataPacket66{
- RequestId: 123,
- GetNodeDataPacket: hashes,
- })
- msg, err := peer.app.ReadMsg()
- if !drop {
- if err != nil {
- t.Fatalf("failed to read node data response: %v", err)
- }
- } else {
- if err != nil {
- return
- }
- t.Fatalf("succeeded to read node data response on non-supporting protocol: %v", msg)
- }
- if msg.Code != NodeDataMsg {
- t.Fatalf("response packet code mismatch: have %x, want %x", msg.Code, NodeDataMsg)
- }
- var res NodeDataPacket66
- if err := msg.Decode(&res); err != nil {
- t.Fatalf("failed to decode response node data: %v", err)
- }
-
- // Verify that all hashes correspond to the requested data.
- data := res.NodeDataPacket
- for i, want := range hashes {
- if hash := crypto.Keccak256Hash(data[i]); hash != want {
- t.Errorf("data hash mismatch: have %x, want %x", hash, want)
- }
- }
-
- // Reconstruct state tree from the received data.
- reconstructDB := rawdb.NewMemoryDatabase()
- for i := 0; i < len(data); i++ {
- rawdb.WriteLegacyTrieNode(reconstructDB, hashes[i], data[i])
- }
-
- // Sanity check whether all state matches.
- accounts := []common.Address{testAddr, acc1Addr, acc2Addr}
- for i := uint64(0); i <= backend.chain.CurrentBlock().Number.Uint64(); i++ {
- root := backend.chain.GetBlockByNumber(i).Root()
- reconstructed, _ := state.New(root, state.NewDatabase(reconstructDB), nil)
- for j, acc := range accounts {
- state, _ := backend.chain.StateAt(root)
- bw := state.GetBalance(acc)
- bh := reconstructed.GetBalance(acc)
-
- if (bw == nil) != (bh == nil) {
- t.Errorf("block %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw)
- }
- if bw != nil && bh != nil && bw.Cmp(bh) != 0 {
- t.Errorf("block %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw)
- }
- }
- }
-}
-
// Tests that the transaction receipts can be retrieved based on hashes.
-func TestGetBlockReceipts66(t *testing.T) { testGetBlockReceipts(t, ETH66) }
func TestGetBlockReceipts67(t *testing.T) { testGetBlockReceipts(t, ETH67) }
func TestGetBlockReceipts68(t *testing.T) { testGetBlockReceipts(t, ETH68) }
@@ -613,13 +491,13 @@ func testGetBlockReceipts(t *testing.T, protocol uint) {
receipts = append(receipts, backend.chain.GetReceiptsByHash(block.Hash()))
}
// Send the hash request and verify the response
- p2p.Send(peer.app, GetReceiptsMsg, &GetReceiptsPacket66{
- RequestId: 123,
- GetReceiptsPacket: hashes,
+ p2p.Send(peer.app, GetReceiptsMsg, &GetReceiptsPacket{
+ RequestId: 123,
+ GetReceiptsRequest: hashes,
})
- if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, &ReceiptsPacket66{
- RequestId: 123,
- ReceiptsPacket: receipts,
+ if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, &ReceiptsPacket{
+ RequestId: 123,
+ ReceiptsResponse: receipts,
}); err != nil {
t.Errorf("receipts mismatch: %v", err)
}
diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go
index da741791b..069e92dad 100644
--- a/eth/protocols/eth/handlers.go
+++ b/eth/protocols/eth/handlers.go
@@ -22,27 +22,25 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
)
-// handleGetBlockHeaders66 is the eth/66 version of handleGetBlockHeaders
-func handleGetBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error {
+func handleGetBlockHeaders(backend Backend, msg Decoder, peer *Peer) error {
// Decode the complex header query
- var query GetBlockHeadersPacket66
+ var query GetBlockHeadersPacket
if err := msg.Decode(&query); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
}
- response := ServiceGetBlockHeadersQuery(backend.Chain(), query.GetBlockHeadersPacket, peer)
+ response := ServiceGetBlockHeadersQuery(backend.Chain(), query.GetBlockHeadersRequest, peer)
return peer.ReplyBlockHeadersRLP(query.RequestId, response)
}
// ServiceGetBlockHeadersQuery assembles the response to a header query. It is
// exposed to allow external packages to test protocol behavior.
-func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersPacket, peer *Peer) []rlp.RawValue {
+func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersRequest, peer *Peer) []rlp.RawValue {
if query.Skip == 0 {
// The fast path: when the request is for a contiguous segment of headers.
return serviceContiguousBlockHeaderQuery(chain, query)
@@ -51,7 +49,7 @@ func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersP
}
}
-func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersPacket, peer *Peer) []rlp.RawValue {
+func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersRequest, peer *Peer) []rlp.RawValue {
hashMode := query.Origin.Hash != (common.Hash{})
first := true
maxNonCanonical := uint64(100)
@@ -140,7 +138,7 @@ func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBloc
return headers
}
-func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersPacket) []rlp.RawValue {
+func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersRequest) []rlp.RawValue {
count := query.Amount
if count > maxHeadersServe {
count = maxHeadersServe
@@ -203,19 +201,19 @@ func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHe
}
}
-func handleGetBlockBodies66(backend Backend, msg Decoder, peer *Peer) error {
+func handleGetBlockBodies(backend Backend, msg Decoder, peer *Peer) error {
// Decode the block body retrieval message
- var query GetBlockBodiesPacket66
+ var query GetBlockBodiesPacket
if err := msg.Decode(&query); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
}
- response := ServiceGetBlockBodiesQuery(backend.Chain(), query.GetBlockBodiesPacket)
+ response := ServiceGetBlockBodiesQuery(backend.Chain(), query.GetBlockBodiesRequest)
return peer.ReplyBlockBodiesRLP(query.RequestId, response)
}
// ServiceGetBlockBodiesQuery assembles the response to a body query. It is
// exposed to allow external packages to test protocol behavior.
-func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesPacket) []rlp.RawValue {
+func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesRequest) []rlp.RawValue {
// Gather blocks until the fetch or network limits is reached
var (
bytes int
@@ -234,60 +232,19 @@ func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesPack
return bodies
}
-func handleGetNodeData66(backend Backend, msg Decoder, peer *Peer) error {
- // Decode the trie node data retrieval message
- var query GetNodeDataPacket66
- if err := msg.Decode(&query); err != nil {
- return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
- }
- response := ServiceGetNodeDataQuery(backend.Chain(), query.GetNodeDataPacket)
- return peer.ReplyNodeData(query.RequestId, response)
-}
-
-// ServiceGetNodeDataQuery assembles the response to a node data query. It is
-// exposed to allow external packages to test protocol behavior.
-func ServiceGetNodeDataQuery(chain *core.BlockChain, query GetNodeDataPacket) [][]byte {
- // Request nodes by hash is not supported in path-based scheme.
- if chain.TrieDB().Scheme() == rawdb.PathScheme {
- return nil
- }
- // Gather state data until the fetch or network limits is reached
- var (
- bytes int
- nodes [][]byte
- )
- for lookups, hash := range query {
- if bytes >= softResponseLimit || len(nodes) >= maxNodeDataServe ||
- lookups >= 2*maxNodeDataServe {
- break
- }
- // Retrieve the requested state entry
- entry, err := chain.TrieDB().Node(hash)
- if len(entry) == 0 || err != nil {
- // Read the contract code with prefix only to save unnecessary lookups.
- entry, err = chain.ContractCodeWithPrefix(hash)
- }
- if err == nil && len(entry) > 0 {
- nodes = append(nodes, entry)
- bytes += len(entry)
- }
- }
- return nodes
-}
-
-func handleGetReceipts66(backend Backend, msg Decoder, peer *Peer) error {
+func handleGetReceipts(backend Backend, msg Decoder, peer *Peer) error {
// Decode the block receipts retrieval message
- var query GetReceiptsPacket66
+ var query GetReceiptsPacket
if err := msg.Decode(&query); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
}
- response := ServiceGetReceiptsQuery(backend.Chain(), query.GetReceiptsPacket)
+ response := ServiceGetReceiptsQuery(backend.Chain(), query.GetReceiptsRequest)
return peer.ReplyReceiptsRLP(query.RequestId, response)
}
// ServiceGetReceiptsQuery assembles the response to a receipt query. It is
// exposed to allow external packages to test protocol behavior.
-func ServiceGetReceiptsQuery(chain *core.BlockChain, query GetReceiptsPacket) []rlp.RawValue {
+func ServiceGetReceiptsQuery(chain *core.BlockChain, query GetReceiptsRequest) []rlp.RawValue {
// Gather state data until the fetch or network limits is reached
var (
bytes int
@@ -356,15 +313,15 @@ func handleNewBlock(backend Backend, msg Decoder, peer *Peer) error {
return backend.Handle(peer, ann)
}
-func handleBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error {
+func handleBlockHeaders(backend Backend, msg Decoder, peer *Peer) error {
// A batch of headers arrived to one of our previous requests
- res := new(BlockHeadersPacket66)
+ res := new(BlockHeadersPacket)
if err := msg.Decode(res); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
}
metadata := func() interface{} {
- hashes := make([]common.Hash, len(res.BlockHeadersPacket))
- for i, header := range res.BlockHeadersPacket {
+ hashes := make([]common.Hash, len(res.BlockHeadersRequest))
+ for i, header := range res.BlockHeadersRequest {
hashes[i] = header.Hash()
}
return hashes
@@ -372,24 +329,24 @@ func handleBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error {
return peer.dispatchResponse(&Response{
id: res.RequestId,
code: BlockHeadersMsg,
- Res: &res.BlockHeadersPacket,
+ Res: &res.BlockHeadersRequest,
}, metadata)
}
-func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error {
+func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error {
// A batch of block bodies arrived to one of our previous requests
- res := new(BlockBodiesPacket66)
+ res := new(BlockBodiesPacket)
if err := msg.Decode(res); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
}
metadata := func() interface{} {
var (
- txsHashes = make([]common.Hash, len(res.BlockBodiesPacket))
- uncleHashes = make([]common.Hash, len(res.BlockBodiesPacket))
- withdrawalHashes = make([]common.Hash, len(res.BlockBodiesPacket))
+ txsHashes = make([]common.Hash, len(res.BlockBodiesResponse))
+ uncleHashes = make([]common.Hash, len(res.BlockBodiesResponse))
+ withdrawalHashes = make([]common.Hash, len(res.BlockBodiesResponse))
)
hasher := trie.NewStackTrie(nil)
- for i, body := range res.BlockBodiesPacket {
+ for i, body := range res.BlockBodiesResponse {
txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher)
uncleHashes[i] = types.CalcUncleHash(body.Uncles)
if body.Withdrawals != nil {
@@ -401,33 +358,20 @@ func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error {
return peer.dispatchResponse(&Response{
id: res.RequestId,
code: BlockBodiesMsg,
- Res: &res.BlockBodiesPacket,
+ Res: &res.BlockBodiesResponse,
}, metadata)
}
-func handleNodeData66(backend Backend, msg Decoder, peer *Peer) error {
- // A batch of node state data arrived to one of our previous requests
- res := new(NodeDataPacket66)
- if err := msg.Decode(res); err != nil {
- return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
- }
- return peer.dispatchResponse(&Response{
- id: res.RequestId,
- code: NodeDataMsg,
- Res: &res.NodeDataPacket,
- }, nil) // No post-processing, we're not using this packet anymore
-}
-
-func handleReceipts66(backend Backend, msg Decoder, peer *Peer) error {
+func handleReceipts(backend Backend, msg Decoder, peer *Peer) error {
// A batch of receipts arrived to one of our previous requests
- res := new(ReceiptsPacket66)
+ res := new(ReceiptsPacket)
if err := msg.Decode(res); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
}
metadata := func() interface{} {
hasher := trie.NewStackTrie(nil)
- hashes := make([]common.Hash, len(res.ReceiptsPacket))
- for i, receipt := range res.ReceiptsPacket {
+ hashes := make([]common.Hash, len(res.ReceiptsResponse))
+ for i, receipt := range res.ReceiptsResponse {
hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher)
}
return hashes
@@ -435,17 +379,17 @@ func handleReceipts66(backend Backend, msg Decoder, peer *Peer) error {
return peer.dispatchResponse(&Response{
id: res.RequestId,
code: ReceiptsMsg,
- Res: &res.ReceiptsPacket,
+ Res: &res.ReceiptsResponse,
}, metadata)
}
-func handleNewPooledTransactionHashes66(backend Backend, msg Decoder, peer *Peer) error {
+func handleNewPooledTransactionHashes67(backend Backend, msg Decoder, peer *Peer) error {
// New transaction announcement arrived, make sure we have
// a valid and fresh chain to handle them
if !backend.AcceptTxs() {
return nil
}
- ann := new(NewPooledTransactionHashesPacket66)
+ ann := new(NewPooledTransactionHashesPacket67)
if err := msg.Decode(ann); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
}
@@ -476,17 +420,17 @@ func handleNewPooledTransactionHashes68(backend Backend, msg Decoder, peer *Peer
return backend.Handle(peer, ann)
}
-func handleGetPooledTransactions66(backend Backend, msg Decoder, peer *Peer) error {
+func handleGetPooledTransactions(backend Backend, msg Decoder, peer *Peer) error {
// Decode the pooled transactions retrieval message
- var query GetPooledTransactionsPacket66
+ var query GetPooledTransactionsPacket
if err := msg.Decode(&query); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
}
- hashes, txs := answerGetPooledTransactions(backend, query.GetPooledTransactionsPacket, peer)
+ hashes, txs := answerGetPooledTransactions(backend, query.GetPooledTransactionsRequest)
return peer.ReplyPooledTransactionsRLP(query.RequestId, hashes, txs)
}
-func answerGetPooledTransactions(backend Backend, query GetPooledTransactionsPacket, peer *Peer) ([]common.Hash, []rlp.RawValue) {
+func answerGetPooledTransactions(backend Backend, query GetPooledTransactionsRequest) ([]common.Hash, []rlp.RawValue) {
// Gather transactions until the fetch or network limits is reached
var (
bytes int
@@ -534,17 +478,17 @@ func handleTransactions(backend Backend, msg Decoder, peer *Peer) error {
return backend.Handle(peer, &txs)
}
-func handlePooledTransactions66(backend Backend, msg Decoder, peer *Peer) error {
+func handlePooledTransactions(backend Backend, msg Decoder, peer *Peer) error {
// Transactions arrived, make sure we have a valid and fresh chain to handle them
if !backend.AcceptTxs() {
return nil
}
// Transactions can be processed, parse all of them and deliver to the pool
- var txs PooledTransactionsPacket66
+ var txs PooledTransactionsPacket
if err := msg.Decode(&txs); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
}
- for i, tx := range txs.PooledTransactionsPacket {
+ for i, tx := range txs.PooledTransactionsResponse {
// Validate and mark the remote transaction
if tx == nil {
return fmt.Errorf("%w: transaction %d is nil", errDecode, i)
@@ -553,5 +497,5 @@ func handlePooledTransactions66(backend Backend, msg Decoder, peer *Peer) error
}
requestTracker.Fulfil(peer.id, peer.version, PooledTransactionsMsg, txs.RequestId)
- return backend.Handle(peer, &txs.PooledTransactionsPacket)
+ return backend.Handle(peer, &txs.PooledTransactionsResponse)
}
diff --git a/eth/protocols/eth/handshake_test.go b/eth/protocols/eth/handshake_test.go
index dca66e0c5..d96cfc816 100644
--- a/eth/protocols/eth/handshake_test.go
+++ b/eth/protocols/eth/handshake_test.go
@@ -27,7 +27,8 @@ import (
)
// Tests that handshake failures are detected and reported correctly.
-func TestHandshake66(t *testing.T) { testHandshake(t, ETH66) }
+func TestHandshake67(t *testing.T) { testHandshake(t, ETH67) }
+func TestHandshake68(t *testing.T) { testHandshake(t, ETH68) }
func testHandshake(t *testing.T, protocol uint) {
t.Parallel()
diff --git a/eth/protocols/eth/peer.go b/eth/protocols/eth/peer.go
index 219f486c8..938af0cab 100644
--- a/eth/protocols/eth/peer.go
+++ b/eth/protocols/eth/peer.go
@@ -219,7 +219,7 @@ func (p *Peer) AsyncSendTransactions(hashes []common.Hash) {
func (p *Peer) sendPooledTransactionHashes66(hashes []common.Hash) error {
// Mark all the transactions as known, but ensure we don't overflow our limits
p.knownTxs.Add(hashes...)
- return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket66(hashes))
+ return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket67(hashes))
}
// sendPooledTransactionHashes68 sends transaction hashes (tagged with their type
@@ -248,15 +248,15 @@ func (p *Peer) AsyncSendPooledTransactionHashes(hashes []common.Hash) {
}
}
-// ReplyPooledTransactionsRLP is the eth/66 version of SendPooledTransactionsRLP.
+// ReplyPooledTransactionsRLP is the response to RequestTxs.
func (p *Peer) ReplyPooledTransactionsRLP(id uint64, hashes []common.Hash, txs []rlp.RawValue) error {
// Mark all the transactions as known, but ensure we don't overflow our limits
p.knownTxs.Add(hashes...)
- // Not packed into PooledTransactionsPacket to avoid RLP decoding
- return p2p.Send(p.rw, PooledTransactionsMsg, &PooledTransactionsRLPPacket66{
- RequestId: id,
- PooledTransactionsRLPPacket: txs,
+ // Not packed into PooledTransactionsResponse to avoid RLP decoding
+ return p2p.Send(p.rw, PooledTransactionsMsg, &PooledTransactionsRLPPacket{
+ RequestId: id,
+ PooledTransactionsRLPResponse: txs,
})
}
@@ -309,36 +309,28 @@ func (p *Peer) AsyncSendNewBlock(block *types.Block, td *big.Int) {
}
}
-// ReplyBlockHeadersRLP is the eth/66 response to GetBlockHeaders.
+// ReplyBlockHeadersRLP is the response to GetBlockHeaders.
func (p *Peer) ReplyBlockHeadersRLP(id uint64, headers []rlp.RawValue) error {
- return p2p.Send(p.rw, BlockHeadersMsg, &BlockHeadersRLPPacket66{
- RequestId: id,
- BlockHeadersRLPPacket: headers,
+ return p2p.Send(p.rw, BlockHeadersMsg, &BlockHeadersRLPPacket{
+ RequestId: id,
+ BlockHeadersRLPResponse: headers,
})
}
-// ReplyBlockBodiesRLP is the eth/66 response to GetBlockBodies.
+// ReplyBlockBodiesRLP is the response to GetBlockBodies.
func (p *Peer) ReplyBlockBodiesRLP(id uint64, bodies []rlp.RawValue) error {
- // Not packed into BlockBodiesPacket to avoid RLP decoding
- return p2p.Send(p.rw, BlockBodiesMsg, &BlockBodiesRLPPacket66{
- RequestId: id,
- BlockBodiesRLPPacket: bodies,
+ // Not packed into BlockBodiesResponse to avoid RLP decoding
+ return p2p.Send(p.rw, BlockBodiesMsg, &BlockBodiesRLPPacket{
+ RequestId: id,
+ BlockBodiesRLPResponse: bodies,
})
}
-// ReplyNodeData is the eth/66 response to GetNodeData.
-func (p *Peer) ReplyNodeData(id uint64, data [][]byte) error {
- return p2p.Send(p.rw, NodeDataMsg, &NodeDataPacket66{
- RequestId: id,
- NodeDataPacket: data,
- })
-}
-
-// ReplyReceiptsRLP is the eth/66 response to GetReceipts.
+// ReplyReceiptsRLP is the response to GetReceipts.
func (p *Peer) ReplyReceiptsRLP(id uint64, receipts []rlp.RawValue) error {
- return p2p.Send(p.rw, ReceiptsMsg, &ReceiptsRLPPacket66{
- RequestId: id,
- ReceiptsRLPPacket: receipts,
+ return p2p.Send(p.rw, ReceiptsMsg, &ReceiptsRLPPacket{
+ RequestId: id,
+ ReceiptsRLPResponse: receipts,
})
}
@@ -353,9 +345,9 @@ func (p *Peer) RequestOneHeader(hash common.Hash, sink chan *Response) (*Request
sink: sink,
code: GetBlockHeadersMsg,
want: BlockHeadersMsg,
- data: &GetBlockHeadersPacket66{
+ data: &GetBlockHeadersPacket{
RequestId: id,
- GetBlockHeadersPacket: &GetBlockHeadersPacket{
+ GetBlockHeadersRequest: &GetBlockHeadersRequest{
Origin: HashOrNumber{Hash: hash},
Amount: uint64(1),
Skip: uint64(0),
@@ -380,9 +372,9 @@ func (p *Peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, re
sink: sink,
code: GetBlockHeadersMsg,
want: BlockHeadersMsg,
- data: &GetBlockHeadersPacket66{
+ data: &GetBlockHeadersPacket{
RequestId: id,
- GetBlockHeadersPacket: &GetBlockHeadersPacket{
+ GetBlockHeadersRequest: &GetBlockHeadersRequest{
Origin: HashOrNumber{Hash: origin},
Amount: uint64(amount),
Skip: uint64(skip),
@@ -407,9 +399,9 @@ func (p *Peer) RequestHeadersByNumber(origin uint64, amount int, skip int, rever
sink: sink,
code: GetBlockHeadersMsg,
want: BlockHeadersMsg,
- data: &GetBlockHeadersPacket66{
+ data: &GetBlockHeadersPacket{
RequestId: id,
- GetBlockHeadersPacket: &GetBlockHeadersPacket{
+ GetBlockHeadersRequest: &GetBlockHeadersRequest{
Origin: HashOrNumber{Number: origin},
Amount: uint64(amount),
Skip: uint64(skip),
@@ -434,31 +426,9 @@ func (p *Peer) RequestBodies(hashes []common.Hash, sink chan *Response) (*Reques
sink: sink,
code: GetBlockBodiesMsg,
want: BlockBodiesMsg,
- data: &GetBlockBodiesPacket66{
- RequestId: id,
- GetBlockBodiesPacket: hashes,
- },
- }
- if err := p.dispatchRequest(req); err != nil {
- return nil, err
- }
- return req, nil
-}
-
-// RequestNodeData fetches a batch of arbitrary data from a node's known state
-// data, corresponding to the specified hashes.
-func (p *Peer) RequestNodeData(hashes []common.Hash, sink chan *Response) (*Request, error) {
- p.Log().Debug("Fetching batch of state data", "count", len(hashes))
- id := rand.Uint64()
-
- req := &Request{
- id: id,
- sink: sink,
- code: GetNodeDataMsg,
- want: NodeDataMsg,
- data: &GetNodeDataPacket66{
- RequestId: id,
- GetNodeDataPacket: hashes,
+ data: &GetBlockBodiesPacket{
+ RequestId: id,
+ GetBlockBodiesRequest: hashes,
},
}
if err := p.dispatchRequest(req); err != nil {
@@ -477,9 +447,9 @@ func (p *Peer) RequestReceipts(hashes []common.Hash, sink chan *Response) (*Requ
sink: sink,
code: GetReceiptsMsg,
want: ReceiptsMsg,
- data: &GetReceiptsPacket66{
- RequestId: id,
- GetReceiptsPacket: hashes,
+ data: &GetReceiptsPacket{
+ RequestId: id,
+ GetReceiptsRequest: hashes,
},
}
if err := p.dispatchRequest(req); err != nil {
@@ -494,9 +464,9 @@ func (p *Peer) RequestTxs(hashes []common.Hash) error {
id := rand.Uint64()
requestTracker.Track(p.id, p.version, GetPooledTransactionsMsg, PooledTransactionsMsg, id)
- return p2p.Send(p.rw, GetPooledTransactionsMsg, &GetPooledTransactionsPacket66{
- RequestId: id,
- GetPooledTransactionsPacket: hashes,
+ return p2p.Send(p.rw, GetPooledTransactionsMsg, &GetPooledTransactionsPacket{
+ RequestId: id,
+ GetPooledTransactionsRequest: hashes,
})
}
diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go
index 4b9f5ad6b..0f44f83de 100644
--- a/eth/protocols/eth/protocol.go
+++ b/eth/protocols/eth/protocol.go
@@ -30,7 +30,6 @@ import (
// Constants to match up protocol versions and messages
const (
- ETH66 = 66
ETH67 = 67
ETH68 = 68
)
@@ -41,11 +40,11 @@ const ProtocolName = "eth"
// ProtocolVersions are the supported versions of the `eth` protocol (first
// is primary).
-var ProtocolVersions = []uint{ETH68, ETH67, ETH66}
+var ProtocolVersions = []uint{ETH68, ETH67}
// protocolLengths are the number of implemented message corresponding to
// different protocol versions.
-var protocolLengths = map[uint]uint64{ETH68: 17, ETH67: 17, ETH66: 17}
+var protocolLengths = map[uint]uint64{ETH68: 17, ETH67: 17}
// maxMessageSize is the maximum cap on the size of a protocol message.
const maxMessageSize = 10 * 1024 * 1024
@@ -62,8 +61,6 @@ const (
NewPooledTransactionHashesMsg = 0x08
GetPooledTransactionsMsg = 0x09
PooledTransactionsMsg = 0x0a
- GetNodeDataMsg = 0x0d
- NodeDataMsg = 0x0e
GetReceiptsMsg = 0x0f
ReceiptsMsg = 0x10
)
@@ -85,7 +82,7 @@ type Packet interface {
Kind() byte // Kind returns the message type.
}
-// StatusPacket is the network packet for the status message for eth/64 and later.
+// StatusPacket is the network packet for the status message.
type StatusPacket struct {
ProtocolVersion uint32
NetworkID uint64
@@ -118,18 +115,18 @@ func (p *NewBlockHashesPacket) Unpack() ([]common.Hash, []uint64) {
// TransactionsPacket is the network packet for broadcasting new transactions.
type TransactionsPacket []*types.Transaction
-// GetBlockHeadersPacket represents a block header query.
-type GetBlockHeadersPacket struct {
+// GetBlockHeadersRequest represents a block header query.
+type GetBlockHeadersRequest struct {
Origin HashOrNumber // Block from which to retrieve headers
Amount uint64 // Maximum number of headers to retrieve
Skip uint64 // Blocks to skip between consecutive headers
Reverse bool // Query direction (false = rising towards latest, true = falling towards genesis)
}
-// GetBlockHeadersPacket66 represents a block header query over eth/66
-type GetBlockHeadersPacket66 struct {
+// GetBlockHeadersPacket represents a block header query with request ID wrapping.
+type GetBlockHeadersPacket struct {
RequestId uint64
- *GetBlockHeadersPacket
+ *GetBlockHeadersRequest
}
// HashOrNumber is a combined field for specifying an origin block.
@@ -168,23 +165,23 @@ func (hn *HashOrNumber) DecodeRLP(s *rlp.Stream) error {
}
}
-// BlockHeadersPacket represents a block header response.
-type BlockHeadersPacket []*types.Header
+// BlockHeadersRequest represents a block header response.
+type BlockHeadersRequest []*types.Header
-// BlockHeadersPacket66 represents a block header response over eth/66.
-type BlockHeadersPacket66 struct {
+// BlockHeadersPacket represents a block header response with request ID wrapping.
+type BlockHeadersPacket struct {
RequestId uint64
- BlockHeadersPacket
+ BlockHeadersRequest
}
-// BlockHeadersRLPPacket represents a block header response, to use when we already
+// BlockHeadersRLPResponse represents a block header response, to use when we already
// have the headers rlp encoded.
-type BlockHeadersRLPPacket []rlp.RawValue
+type BlockHeadersRLPResponse []rlp.RawValue
-// BlockHeadersRLPPacket66 represents a block header response over eth/66.
-type BlockHeadersRLPPacket66 struct {
+// BlockHeadersRLPPacket represents a block header response with request ID wrapping.
+type BlockHeadersRLPPacket struct {
RequestId uint64
- BlockHeadersRLPPacket
+ BlockHeadersRLPResponse
}
// NewBlockPacket is the network packet for the block propagation message.
@@ -206,33 +203,34 @@ func (request *NewBlockPacket) sanityCheck() error {
return nil
}
-// GetBlockBodiesPacket represents a block body query.
-type GetBlockBodiesPacket []common.Hash
+// GetBlockBodiesRequest represents a block body query.
+type GetBlockBodiesRequest []common.Hash
-// GetBlockBodiesPacket66 represents a block body query over eth/66.
-type GetBlockBodiesPacket66 struct {
+// GetBlockBodiesPacket represents a block body query with request ID wrapping.
+type GetBlockBodiesPacket struct {
RequestId uint64
- GetBlockBodiesPacket
+ GetBlockBodiesRequest
}
-// BlockBodiesPacket is the network packet for block content distribution.
-type BlockBodiesPacket []*BlockBody
+// BlockBodiesResponse is the network packet for block content distribution.
+type BlockBodiesResponse []*BlockBody
-// BlockBodiesPacket66 is the network packet for block content distribution over eth/66.
-type BlockBodiesPacket66 struct {
+// BlockBodiesPacket is the network packet for block content distribution with
+// request ID wrapping.
+type BlockBodiesPacket struct {
RequestId uint64
- BlockBodiesPacket
+ BlockBodiesResponse
}
-// BlockBodiesRLPPacket is used for replying to block body requests, in cases
+// BlockBodiesRLPResponse is used for replying to block body requests, in cases
// where we already have them RLP-encoded, and thus can avoid the decode-encode
// roundtrip.
-type BlockBodiesRLPPacket []rlp.RawValue
+type BlockBodiesRLPResponse []rlp.RawValue
-// BlockBodiesRLPPacket66 is the BlockBodiesRLPPacket over eth/66
-type BlockBodiesRLPPacket66 struct {
+// BlockBodiesRLPPacket is the BlockBodiesRLPResponse with request ID wrapping.
+type BlockBodiesRLPPacket struct {
RequestId uint64
- BlockBodiesRLPPacket
+ BlockBodiesRLPResponse
}
// BlockBody represents the data content of a single block.
@@ -244,7 +242,7 @@ type BlockBody struct {
// Unpack retrieves the transactions and uncles from the range packet and returns
// them in a split flat format that's more consistent with the internal data structures.
-func (p *BlockBodiesPacket) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal) {
+func (p *BlockBodiesResponse) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal) {
// TODO(matt): add support for withdrawals to fetchers
var (
txset = make([][]*types.Transaction, len(*p))
@@ -257,53 +255,36 @@ func (p *BlockBodiesPacket) Unpack() ([][]*types.Transaction, [][]*types.Header,
return txset, uncleset, withdrawalset
}
-// GetNodeDataPacket represents a trie node data query.
-type GetNodeDataPacket []common.Hash
+// GetReceiptsRequest represents a block receipts query.
+type GetReceiptsRequest []common.Hash
-// GetNodeDataPacket66 represents a trie node data query over eth/66.
-type GetNodeDataPacket66 struct {
+// GetReceiptsPacket represents a block receipts query with request ID wrapping.
+type GetReceiptsPacket struct {
RequestId uint64
- GetNodeDataPacket
+ GetReceiptsRequest
}
-// NodeDataPacket is the network packet for trie node data distribution.
-type NodeDataPacket [][]byte
+// ReceiptsResponse is the network packet for block receipts distribution.
+type ReceiptsResponse [][]*types.Receipt
-// NodeDataPacket66 is the network packet for trie node data distribution over eth/66.
-type NodeDataPacket66 struct {
+// ReceiptsPacket is the network packet for block receipts distribution with
+// request ID wrapping.
+type ReceiptsPacket struct {
RequestId uint64
- NodeDataPacket
+ ReceiptsResponse
}
-// GetReceiptsPacket represents a block receipts query.
-type GetReceiptsPacket []common.Hash
+// ReceiptsRLPResponse is used for receipts when we already have them RLP-encoded
+type ReceiptsRLPResponse []rlp.RawValue
-// GetReceiptsPacket66 represents a block receipts query over eth/66.
-type GetReceiptsPacket66 struct {
+// ReceiptsRLPPacket is ReceiptsRLPResponse with request ID wrapping.
+type ReceiptsRLPPacket struct {
RequestId uint64
- GetReceiptsPacket
+ ReceiptsRLPResponse
}
-// ReceiptsPacket is the network packet for block receipts distribution.
-type ReceiptsPacket [][]*types.Receipt
-
-// ReceiptsPacket66 is the network packet for block receipts distribution over eth/66.
-type ReceiptsPacket66 struct {
- RequestId uint64
- ReceiptsPacket
-}
-
-// ReceiptsRLPPacket is used for receipts, when we already have it encoded
-type ReceiptsRLPPacket []rlp.RawValue
-
-// ReceiptsRLPPacket66 is the eth-66 version of ReceiptsRLPPacket
-type ReceiptsRLPPacket66 struct {
- RequestId uint64
- ReceiptsRLPPacket
-}
-
-// NewPooledTransactionHashesPacket66 represents a transaction announcement packet on eth/66 and eth/67.
-type NewPooledTransactionHashesPacket66 []common.Hash
+// NewPooledTransactionHashesPacket67 represents a transaction announcement packet on eth/67.
+type NewPooledTransactionHashesPacket67 []common.Hash
// NewPooledTransactionHashesPacket68 represents a transaction announcement packet on eth/68 and newer.
type NewPooledTransactionHashesPacket68 struct {
@@ -312,31 +293,33 @@ type NewPooledTransactionHashesPacket68 struct {
Hashes []common.Hash
}
-// GetPooledTransactionsPacket represents a transaction query.
-type GetPooledTransactionsPacket []common.Hash
+// GetPooledTransactionsRequest represents a transaction query.
+type GetPooledTransactionsRequest []common.Hash
-type GetPooledTransactionsPacket66 struct {
+// GetPooledTransactionsPacket represents a transaction query with request ID wrapping.
+type GetPooledTransactionsPacket struct {
RequestId uint64
- GetPooledTransactionsPacket
+ GetPooledTransactionsRequest
}
-// PooledTransactionsPacket is the network packet for transaction distribution.
-type PooledTransactionsPacket []*types.Transaction
+// PooledTransactionsResponse is the network packet for transaction distribution.
+type PooledTransactionsResponse []*types.Transaction
-// PooledTransactionsPacket66 is the network packet for transaction distribution over eth/66.
-type PooledTransactionsPacket66 struct {
+// PooledTransactionsPacket is the network packet for transaction distribution
+// with request ID wrapping.
+type PooledTransactionsPacket struct {
RequestId uint64
- PooledTransactionsPacket
+ PooledTransactionsResponse
}
-// PooledTransactionsRLPPacket is the network packet for transaction distribution, used
+// PooledTransactionsRLPResponse is the network packet for transaction distribution, used
// in the cases we already have them in rlp-encoded form
-type PooledTransactionsRLPPacket []rlp.RawValue
+type PooledTransactionsRLPResponse []rlp.RawValue
-// PooledTransactionsRLPPacket66 is the eth/66 form of PooledTransactionsRLPPacket
-type PooledTransactionsRLPPacket66 struct {
+// PooledTransactionsRLPPacket is PooledTransactionsRLPResponse with request ID wrapping.
+type PooledTransactionsRLPPacket struct {
RequestId uint64
- PooledTransactionsRLPPacket
+ PooledTransactionsRLPResponse
}
func (*StatusPacket) Name() string { return "Status" }
@@ -348,40 +331,34 @@ func (*NewBlockHashesPacket) Kind() byte { return NewBlockHashesMsg }
func (*TransactionsPacket) Name() string { return "Transactions" }
func (*TransactionsPacket) Kind() byte { return TransactionsMsg }
-func (*GetBlockHeadersPacket) Name() string { return "GetBlockHeaders" }
-func (*GetBlockHeadersPacket) Kind() byte { return GetBlockHeadersMsg }
+func (*GetBlockHeadersRequest) Name() string { return "GetBlockHeaders" }
+func (*GetBlockHeadersRequest) Kind() byte { return GetBlockHeadersMsg }
-func (*BlockHeadersPacket) Name() string { return "BlockHeaders" }
-func (*BlockHeadersPacket) Kind() byte { return BlockHeadersMsg }
+func (*BlockHeadersRequest) Name() string { return "BlockHeaders" }
+func (*BlockHeadersRequest) Kind() byte { return BlockHeadersMsg }
-func (*GetBlockBodiesPacket) Name() string { return "GetBlockBodies" }
-func (*GetBlockBodiesPacket) Kind() byte { return GetBlockBodiesMsg }
+func (*GetBlockBodiesRequest) Name() string { return "GetBlockBodies" }
+func (*GetBlockBodiesRequest) Kind() byte { return GetBlockBodiesMsg }
-func (*BlockBodiesPacket) Name() string { return "BlockBodies" }
-func (*BlockBodiesPacket) Kind() byte { return BlockBodiesMsg }
+func (*BlockBodiesResponse) Name() string { return "BlockBodies" }
+func (*BlockBodiesResponse) Kind() byte { return BlockBodiesMsg }
func (*NewBlockPacket) Name() string { return "NewBlock" }
func (*NewBlockPacket) Kind() byte { return NewBlockMsg }
-func (*NewPooledTransactionHashesPacket66) Name() string { return "NewPooledTransactionHashes" }
-func (*NewPooledTransactionHashesPacket66) Kind() byte { return NewPooledTransactionHashesMsg }
+func (*NewPooledTransactionHashesPacket67) Name() string { return "NewPooledTransactionHashes" }
+func (*NewPooledTransactionHashesPacket67) Kind() byte { return NewPooledTransactionHashesMsg }
func (*NewPooledTransactionHashesPacket68) Name() string { return "NewPooledTransactionHashes" }
func (*NewPooledTransactionHashesPacket68) Kind() byte { return NewPooledTransactionHashesMsg }
-func (*GetPooledTransactionsPacket) Name() string { return "GetPooledTransactions" }
-func (*GetPooledTransactionsPacket) Kind() byte { return GetPooledTransactionsMsg }
+func (*GetPooledTransactionsRequest) Name() string { return "GetPooledTransactions" }
+func (*GetPooledTransactionsRequest) Kind() byte { return GetPooledTransactionsMsg }
-func (*PooledTransactionsPacket) Name() string { return "PooledTransactions" }
-func (*PooledTransactionsPacket) Kind() byte { return PooledTransactionsMsg }
+func (*PooledTransactionsResponse) Name() string { return "PooledTransactions" }
+func (*PooledTransactionsResponse) Kind() byte { return PooledTransactionsMsg }
-func (*GetNodeDataPacket) Name() string { return "GetNodeData" }
-func (*GetNodeDataPacket) Kind() byte { return GetNodeDataMsg }
+func (*GetReceiptsRequest) Name() string { return "GetReceipts" }
+func (*GetReceiptsRequest) Kind() byte { return GetReceiptsMsg }
-func (*NodeDataPacket) Name() string { return "NodeData" }
-func (*NodeDataPacket) Kind() byte { return NodeDataMsg }
-
-func (*GetReceiptsPacket) Name() string { return "GetReceipts" }
-func (*GetReceiptsPacket) Kind() byte { return GetReceiptsMsg }
-
-func (*ReceiptsPacket) Name() string { return "Receipts" }
-func (*ReceiptsPacket) Kind() byte { return ReceiptsMsg }
+func (*ReceiptsResponse) Name() string { return "Receipts" }
+func (*ReceiptsResponse) Kind() byte { return ReceiptsMsg }
diff --git a/eth/protocols/eth/protocol_test.go b/eth/protocols/eth/protocol_test.go
index a86fbb0a6..bc2545dea 100644
--- a/eth/protocols/eth/protocol_test.go
+++ b/eth/protocols/eth/protocol_test.go
@@ -35,19 +35,19 @@ func TestGetBlockHeadersDataEncodeDecode(t *testing.T) {
}
// Assemble some table driven tests
tests := []struct {
- packet *GetBlockHeadersPacket
+ packet *GetBlockHeadersRequest
fail bool
}{
// Providing the origin as either a hash or a number should both work
- {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 314}}},
- {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash}}},
+ {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 314}}},
+ {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash}}},
// Providing arbitrary query field should also work
- {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}},
- {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}},
+ {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}},
+ {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}},
// Providing both the origin hash and origin number must fail
- {fail: true, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash, Number: 314}}},
+ {fail: true, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash, Number: 314}}},
}
// Iterate over each of the tests and try to encode and then decode
for i, tt := range tests {
@@ -58,7 +58,7 @@ func TestGetBlockHeadersDataEncodeDecode(t *testing.T) {
t.Fatalf("test %d: encode should have failed", i)
}
if !tt.fail {
- packet := new(GetBlockHeadersPacket)
+ packet := new(GetBlockHeadersRequest)
if err := rlp.DecodeBytes(bytes, packet); err != nil {
t.Fatalf("test %d: failed to decode packet: %v", i, err)
}
@@ -70,46 +70,40 @@ func TestGetBlockHeadersDataEncodeDecode(t *testing.T) {
}
}
-// TestEth66EmptyMessages tests encoding of empty eth66 messages
-func TestEth66EmptyMessages(t *testing.T) {
+// TestEmptyMessages tests encoding of empty messages.
+func TestEmptyMessages(t *testing.T) {
// All empty messages encodes to the same format
want := common.FromHex("c4820457c0")
for i, msg := range []interface{}{
// Headers
- GetBlockHeadersPacket66{1111, nil},
- BlockHeadersPacket66{1111, nil},
+ GetBlockHeadersPacket{1111, nil},
+ BlockHeadersPacket{1111, nil},
// Bodies
- GetBlockBodiesPacket66{1111, nil},
- BlockBodiesPacket66{1111, nil},
- BlockBodiesRLPPacket66{1111, nil},
- // Node data
- GetNodeDataPacket66{1111, nil},
- NodeDataPacket66{1111, nil},
+ GetBlockBodiesPacket{1111, nil},
+ BlockBodiesPacket{1111, nil},
+ BlockBodiesRLPPacket{1111, nil},
// Receipts
- GetReceiptsPacket66{1111, nil},
- ReceiptsPacket66{1111, nil},
+ GetReceiptsPacket{1111, nil},
+ ReceiptsPacket{1111, nil},
// Transactions
- GetPooledTransactionsPacket66{1111, nil},
- PooledTransactionsPacket66{1111, nil},
- PooledTransactionsRLPPacket66{1111, nil},
+ GetPooledTransactionsPacket{1111, nil},
+ PooledTransactionsPacket{1111, nil},
+ PooledTransactionsRLPPacket{1111, nil},
// Headers
- BlockHeadersPacket66{1111, BlockHeadersPacket([]*types.Header{})},
+ BlockHeadersPacket{1111, BlockHeadersRequest([]*types.Header{})},
// Bodies
- GetBlockBodiesPacket66{1111, GetBlockBodiesPacket([]common.Hash{})},
- BlockBodiesPacket66{1111, BlockBodiesPacket([]*BlockBody{})},
- BlockBodiesRLPPacket66{1111, BlockBodiesRLPPacket([]rlp.RawValue{})},
- // Node data
- GetNodeDataPacket66{1111, GetNodeDataPacket([]common.Hash{})},
- NodeDataPacket66{1111, NodeDataPacket([][]byte{})},
+ GetBlockBodiesPacket{1111, GetBlockBodiesRequest([]common.Hash{})},
+ BlockBodiesPacket{1111, BlockBodiesResponse([]*BlockBody{})},
+ BlockBodiesRLPPacket{1111, BlockBodiesRLPResponse([]rlp.RawValue{})},
// Receipts
- GetReceiptsPacket66{1111, GetReceiptsPacket([]common.Hash{})},
- ReceiptsPacket66{1111, ReceiptsPacket([][]*types.Receipt{})},
+ GetReceiptsPacket{1111, GetReceiptsRequest([]common.Hash{})},
+ ReceiptsPacket{1111, ReceiptsResponse([][]*types.Receipt{})},
// Transactions
- GetPooledTransactionsPacket66{1111, GetPooledTransactionsPacket([]common.Hash{})},
- PooledTransactionsPacket66{1111, PooledTransactionsPacket([]*types.Transaction{})},
- PooledTransactionsRLPPacket66{1111, PooledTransactionsRLPPacket([]rlp.RawValue{})},
+ GetPooledTransactionsPacket{1111, GetPooledTransactionsRequest([]common.Hash{})},
+ PooledTransactionsPacket{1111, PooledTransactionsResponse([]*types.Transaction{})},
+ PooledTransactionsRLPPacket{1111, PooledTransactionsRLPResponse([]rlp.RawValue{})},
} {
if have, _ := rlp.EncodeToBytes(msg); !bytes.Equal(have, want) {
t.Errorf("test %d, type %T, have\n\t%x\nwant\n\t%x", i, msg, have, want)
@@ -117,8 +111,8 @@ func TestEth66EmptyMessages(t *testing.T) {
}
}
-// TestEth66Messages tests the encoding of all redefined eth66 messages
-func TestEth66Messages(t *testing.T) {
+// TestMessages tests the encoding of all messages.
+func TestMessages(t *testing.T) {
// Some basic structs used during testing
var (
header *types.Header
@@ -169,10 +163,6 @@ func TestEth66Messages(t *testing.T) {
common.HexToHash("deadc0de"),
common.HexToHash("feedbeef"),
}
- byteSlices := [][]byte{
- common.FromHex("deadc0de"),
- common.FromHex("feedbeef"),
- }
// init the receipts
{
receipts = []*types.Receipt{
@@ -203,59 +193,51 @@ func TestEth66Messages(t *testing.T) {
want []byte
}{
{
- GetBlockHeadersPacket66{1111, &GetBlockHeadersPacket{HashOrNumber{hashes[0], 0}, 5, 5, false}},
+ GetBlockHeadersPacket{1111, &GetBlockHeadersRequest{HashOrNumber{hashes[0], 0}, 5, 5, false}},
common.FromHex("e8820457e4a000000000000000000000000000000000000000000000000000000000deadc0de050580"),
},
{
- GetBlockHeadersPacket66{1111, &GetBlockHeadersPacket{HashOrNumber{common.Hash{}, 9999}, 5, 5, false}},
+ GetBlockHeadersPacket{1111, &GetBlockHeadersRequest{HashOrNumber{common.Hash{}, 9999}, 5, 5, false}},
common.FromHex("ca820457c682270f050580"),
},
{
- BlockHeadersPacket66{1111, BlockHeadersPacket{header}},
+ BlockHeadersPacket{1111, BlockHeadersRequest{header}},
common.FromHex("f90202820457f901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"),
},
{
- GetBlockBodiesPacket66{1111, GetBlockBodiesPacket(hashes)},
+ GetBlockBodiesPacket{1111, GetBlockBodiesRequest(hashes)},
common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"),
},
{
- BlockBodiesPacket66{1111, BlockBodiesPacket([]*BlockBody{blockBody})},
+ BlockBodiesPacket{1111, BlockBodiesResponse([]*BlockBody{blockBody})},
common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"),
},
{ // Identical to non-rlp-shortcut version
- BlockBodiesRLPPacket66{1111, BlockBodiesRLPPacket([]rlp.RawValue{blockBodyRlp})},
+ BlockBodiesRLPPacket{1111, BlockBodiesRLPResponse([]rlp.RawValue{blockBodyRlp})},
common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"),
},
{
- GetNodeDataPacket66{1111, GetNodeDataPacket(hashes)},
+ GetReceiptsPacket{1111, GetReceiptsRequest(hashes)},
common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"),
},
{
- NodeDataPacket66{1111, NodeDataPacket(byteSlices)},
- common.FromHex("ce820457ca84deadc0de84feedbeef"),
- },
- {
- GetReceiptsPacket66{1111, GetReceiptsPacket(hashes)},
- common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"),
- },
- {
- ReceiptsPacket66{1111, ReceiptsPacket([][]*types.Receipt{receipts})},
+ ReceiptsPacket{1111, ReceiptsResponse([][]*types.Receipt{receipts})},
common.FromHex("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"),
},
{
- ReceiptsRLPPacket66{1111, ReceiptsRLPPacket([]rlp.RawValue{receiptsRlp})},
+ ReceiptsRLPPacket{1111, ReceiptsRLPResponse([]rlp.RawValue{receiptsRlp})},
common.FromHex("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"),
},
{
- GetPooledTransactionsPacket66{1111, GetPooledTransactionsPacket(hashes)},
+ GetPooledTransactionsPacket{1111, GetPooledTransactionsRequest(hashes)},
common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"),
},
{
- PooledTransactionsPacket66{1111, PooledTransactionsPacket(txs)},
+ PooledTransactionsPacket{1111, PooledTransactionsResponse(txs)},
common.FromHex("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"),
},
{
- PooledTransactionsRLPPacket66{1111, PooledTransactionsRLPPacket(txRlps)},
+ PooledTransactionsRLPPacket{1111, PooledTransactionsRLPResponse(txRlps)},
common.FromHex("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"),
},
} {
diff --git a/eth/protocols/snap/handler.go b/eth/protocols/snap/handler.go
index b2fd03766..ce23ca514 100644
--- a/eth/protocols/snap/handler.go
+++ b/eth/protocols/snap/handler.go
@@ -24,13 +24,13 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
const (
@@ -321,7 +321,7 @@ func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePac
it.Release()
// Generate the Merkle proofs for the first and last account
- proof := light.NewNodeSet()
+ proof := trienode.NewProofSet()
if err := tr.Prove(req.Origin[:], proof); err != nil {
log.Warn("Failed to prove account range", "origin", req.Origin, "err", err)
return nil, nil
@@ -333,7 +333,7 @@ func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePac
}
}
var proofs [][]byte
- for _, blob := range proof.NodeList() {
+ for _, blob := range proof.List() {
proofs = append(proofs, blob)
}
return accounts, proofs
@@ -427,7 +427,7 @@ func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesP
if err != nil {
return nil, nil
}
- proof := light.NewNodeSet()
+ proof := trienode.NewProofSet()
if err := stTrie.Prove(origin[:], proof); err != nil {
log.Warn("Failed to prove storage range", "origin", req.Origin, "err", err)
return nil, nil
@@ -438,7 +438,7 @@ func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesP
return nil, nil
}
}
- for _, blob := range proof.NodeList() {
+ for _, blob := range proof.List() {
proofs = append(proofs, blob)
}
// Proof terminates the reply as proofs are only added if a node
diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go
index 0f5f2ccdf..df1473e99 100644
--- a/eth/protocols/snap/sync.go
+++ b/eth/protocols/snap/sync.go
@@ -37,11 +37,11 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
- "github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/msgrate"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
"golang.org/x/crypto/sha3"
)
@@ -738,8 +738,8 @@ func (s *Syncer) loadSyncStatus() {
s.accountBytes += common.StorageSize(len(key) + len(value))
},
}
- task.genTrie = trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
- rawdb.WriteTrieNode(task.genBatch, owner, path, hash, val, s.scheme)
+ task.genTrie = trie.NewStackTrie(func(path []byte, hash common.Hash, val []byte) {
+ rawdb.WriteTrieNode(task.genBatch, common.Hash{}, path, hash, val, s.scheme)
})
for accountHash, subtasks := range task.SubTasks {
for _, subtask := range subtasks {
@@ -751,9 +751,10 @@ func (s *Syncer) loadSyncStatus() {
s.storageBytes += common.StorageSize(len(key) + len(value))
},
}
- subtask.genTrie = trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
+ owner := accountHash // local assignment for stacktrie writer closure
+ subtask.genTrie = trie.NewStackTrie(func(path []byte, hash common.Hash, val []byte) {
rawdb.WriteTrieNode(subtask.genBatch, owner, path, hash, val, s.scheme)
- }, accountHash)
+ })
}
}
}
@@ -810,8 +811,8 @@ func (s *Syncer) loadSyncStatus() {
Last: last,
SubTasks: make(map[common.Hash][]*storageTask),
genBatch: batch,
- genTrie: trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
- rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme)
+ genTrie: trie.NewStackTrie(func(path []byte, hash common.Hash, val []byte) {
+ rawdb.WriteTrieNode(batch, common.Hash{}, path, hash, val, s.scheme)
}),
})
log.Debug("Created account sync task", "from", next, "last", last)
@@ -2004,14 +2005,15 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
s.storageBytes += common.StorageSize(len(key) + len(value))
},
}
+ owner := account // local assignment for stacktrie writer closure
tasks = append(tasks, &storageTask{
Next: common.Hash{},
Last: r.End(),
root: acc.Root,
genBatch: batch,
- genTrie: trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
+ genTrie: trie.NewStackTrie(func(path []byte, hash common.Hash, val []byte) {
rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme)
- }, account),
+ }),
})
for r.Next() {
batch := ethdb.HookedBatch{
@@ -2025,9 +2027,9 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
Last: r.End(),
root: acc.Root,
genBatch: batch,
- genTrie: trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
+ genTrie: trie.NewStackTrie(func(path []byte, hash common.Hash, val []byte) {
rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme)
- }, account),
+ }),
})
}
for _, task := range tasks {
@@ -2072,9 +2074,10 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
slots += len(res.hashes[i])
if i < len(res.hashes)-1 || res.subTask == nil {
- tr := trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
- rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme)
- }, account)
+ // no need to make local reassignment of account: this closure does not outlive the loop
+ tr := trie.NewStackTrie(func(path []byte, hash common.Hash, val []byte) {
+ rawdb.WriteTrieNode(batch, account, path, hash, val, s.scheme)
+ })
for j := 0; j < len(res.hashes[i]); j++ {
tr.Update(res.hashes[i][j][:], res.slots[i][j])
}
@@ -2394,11 +2397,11 @@ func (s *Syncer) OnAccounts(peer SyncPeer, id uint64, hashes []common.Hash, acco
for i, key := range hashes {
keys[i] = common.CopyBytes(key[:])
}
- nodes := make(light.NodeList, len(proof))
+ nodes := make(trienode.ProofList, len(proof))
for i, node := range proof {
nodes[i] = node
}
- proofdb := nodes.NodeSet()
+ proofdb := nodes.Set()
var end []byte
if len(keys) > 0 {
@@ -2639,7 +2642,7 @@ func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slo
for j, key := range hashes[i] {
keys[j] = common.CopyBytes(key[:])
}
- nodes := make(light.NodeList, 0, len(proof))
+ nodes := make(trienode.ProofList, 0, len(proof))
if i == len(hashes)-1 {
for _, node := range proof {
nodes = append(nodes, node)
@@ -2658,7 +2661,7 @@ func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slo
} else {
// A proof was attached, the response is only partial, check that the
// returned data is indeed part of the storage trie
- proofdb := nodes.NodeSet()
+ proofdb := nodes.Set()
var end []byte
if len(keys) > 0 {
diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go
index 1514ad4e1..1ee381a66 100644
--- a/eth/protocols/snap/sync_test.go
+++ b/eth/protocols/snap/sync_test.go
@@ -31,7 +31,6 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
@@ -273,7 +272,7 @@ func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.H
// Unless we send the entire trie, we need to supply proofs
// Actually, we need to supply proofs either way! This seems to be an implementation
// quirk in go-ethereum
- proof := light.NewNodeSet()
+ proof := trienode.NewProofSet()
if err := t.accountTrie.Prove(origin[:], proof); err != nil {
t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err)
}
@@ -283,7 +282,7 @@ func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.H
t.logger.Error("Could not prove last item", "error", err)
}
}
- for _, blob := range proof.NodeList() {
+ for _, blob := range proof.List() {
proofs = append(proofs, blob)
}
return keys, vals, proofs
@@ -353,7 +352,7 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm
if originHash != (common.Hash{}) || (abort && len(keys) > 0) {
// If we're aborting, we need to prove the first and last item
// This terminates the response (and thus the loop)
- proof := light.NewNodeSet()
+ proof := trienode.NewProofSet()
stTrie := t.storageTries[account]
// Here's a potential gotcha: when constructing the proof, we cannot
@@ -368,7 +367,7 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm
t.logger.Error("Could not prove last item", "error", err)
}
}
- for _, blob := range proof.NodeList() {
+ for _, blob := range proof.List() {
proofs = append(proofs, blob)
}
break
@@ -411,7 +410,7 @@ func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, acco
if exit {
// If we're aborting, we need to prove the first and last item
// This terminates the response (and thus the loop)
- proof := light.NewNodeSet()
+ proof := trienode.NewProofSet()
stTrie := t.storageTries[account]
// Here's a potential gotcha: when constructing the proof, we cannot
@@ -427,7 +426,7 @@ func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, acco
t.logger.Error("Could not prove last item", "error", err)
}
}
- for _, blob := range proof.NodeList() {
+ for _, blob := range proof.List() {
proofs = append(proofs, blob)
}
break
@@ -599,9 +598,9 @@ func testSyncBloatedProof(t *testing.T, scheme string) {
vals = append(vals, entry.v)
}
// The proofs
- proof := light.NewNodeSet()
+ proof := trienode.NewProofSet()
if err := t.accountTrie.Prove(origin[:], proof); err != nil {
t.logger.Error("Could not prove origin", "origin", origin, "error", err)
}
// The bloat: add proof of every single element
for _, entry := range t.accountValues {
@@ -614,7 +614,7 @@ func testSyncBloatedProof(t *testing.T, scheme string) {
keys = append(keys[:1], keys[2:]...)
vals = append(vals[:1], vals[2:]...)
}
- for _, blob := range proof.NodeList() {
+ for _, blob := range proof.List() {
proofs = append(proofs, blob)
}
if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {
diff --git a/eth/sync_test.go b/eth/sync_test.go
index b5e00298b..d26cbb66e 100644
--- a/eth/sync_test.go
+++ b/eth/sync_test.go
@@ -28,8 +28,8 @@ import (
)
// Tests that snap sync is disabled after a successful sync cycle.
-func TestSnapSyncDisabling66(t *testing.T) { testSnapSyncDisabling(t, eth.ETH66, snap.SNAP1) }
func TestSnapSyncDisabling67(t *testing.T) { testSnapSyncDisabling(t, eth.ETH67, snap.SNAP1) }
+func TestSnapSyncDisabling68(t *testing.T) { testSnapSyncDisabling(t, eth.ETH68, snap.SNAP1) }
// Tests that snap sync gets disabled as soon as a real block is successfully
// imported into the blockchain.
diff --git a/ethdb/leveldb/leveldb.go b/ethdb/leveldb/leveldb.go
index c0e0eb250..e58efbddb 100644
--- a/ethdb/leveldb/leveldb.go
+++ b/ethdb/leveldb/leveldb.go
@@ -22,6 +22,7 @@ package leveldb
import (
"fmt"
+ "strings"
"sync"
"time"
@@ -245,6 +246,11 @@ func (db *Database) NewSnapshot() (ethdb.Snapshot, error) {
// Stat returns a particular internal stat of the database.
func (db *Database) Stat(property string) (string, error) {
+ if property == "" {
+ property = "leveldb.stats"
+ } else if !strings.HasPrefix(property, "leveldb.") {
+ property = "leveldb." + property
+ }
return db.db.GetProperty(property)
}
diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go
index 12a84cc91..07dcf5933 100644
--- a/ethdb/pebble/pebble.go
+++ b/ethdb/pebble/pebble.go
@@ -27,6 +27,7 @@ import (
"sync/atomic"
"time"
+ "github.com/cockroachdb/errors"
"github.com/cockroachdb/pebble"
"github.com/cockroachdb/pebble/bloom"
"github.com/ethereum/go-ethereum/common"
@@ -70,6 +71,8 @@ type Database struct {
seekCompGauge metrics.Gauge // Gauge for tracking the number of table compaction caused by read opt
manualMemAllocGauge metrics.Gauge // Gauge for tracking amount of non-managed memory currently allocated
+ levelsGauge []metrics.Gauge // Gauge for tracking the number of tables in levels
+
quitLock sync.RWMutex // Mutex protecting the quit channel and the closed flag
quitChan chan chan error // Quit channel to stop the metrics collection before closing the database
closed bool // keep track of whether we're Closed
@@ -118,6 +121,18 @@ func (d *Database) onWriteStallEnd() {
d.writeDelayTime.Add(int64(time.Since(d.writeDelayStartTime)))
}
+// panicLogger is just a noop logger to disable Pebble's internal logger.
+//
+// TODO(karalabe): Remove when Pebble sets this as the default.
+type panicLogger struct{}
+
+func (l panicLogger) Infof(format string, args ...interface{}) {
+}
+
+func (l panicLogger) Fatalf(format string, args ...interface{}) {
+ panic(errors.Errorf("fatal: "+format, args...))
+}
+
// New returns a wrapped pebble DB object. The namespace is the prefix that the
// metrics reporting should use for surfacing internal stats.
func New(file string, cache int, handles int, namespace string, readonly bool, ephemeral bool) (*Database, error) {
@@ -158,7 +173,7 @@ func New(file string, cache int, handles int, namespace string, readonly bool, e
// The size of memory table(as well as the write buffer).
// Note, there may have more than two memory tables in the system.
- MemTableSize: memTableSize,
+ MemTableSize: uint64(memTableSize),
// MemTableStopWritesThreshold places a hard limit on the size
// of the existent MemTables(including the frozen one).
@@ -189,6 +204,7 @@ func New(file string, cache int, handles int, namespace string, readonly bool, e
WriteStallBegin: db.onWriteStallBegin,
WriteStallEnd: db.onWriteStallEnd,
},
+ Logger: panicLogger{}, // TODO(karalabe): Delete when this is upstreamed in Pebble
}
// Disable seek compaction explicitly. Check https://github.com/ethereum/go-ethereum/pull/20130
// for more details.
@@ -216,7 +232,7 @@ func New(file string, cache int, handles int, namespace string, readonly bool, e
db.manualMemAllocGauge = metrics.NewRegisteredGauge(namespace+"memory/manualalloc", nil)
// Start up the metrics gathering and return
- go db.meter(metricsGatheringInterval)
+ go db.meter(metricsGatheringInterval, namespace)
return db, nil
}
@@ -305,12 +321,9 @@ func (d *Database) NewBatch() ethdb.Batch {
}
// NewBatchWithSize creates a write-only database batch with pre-allocated buffer.
-// It's not supported by pebble, but pebble has better memory allocation strategy
-// which turns out a lot faster than leveldb. It's performant enough to construct
-// batch object without any pre-allocated space.
-func (d *Database) NewBatchWithSize(_ int) ethdb.Batch {
+func (d *Database) NewBatchWithSize(size int) ethdb.Batch {
return &batch{
- b: d.db.NewBatch(),
+ b: d.db.NewBatchWithSize(size),
db: d,
}
}
@@ -379,9 +392,12 @@ func upperBound(prefix []byte) (limit []byte) {
return limit
}
-// Stat returns a particular internal stat of the database.
+// Stat returns the internal metrics of Pebble in a text format. It's a developer
+// method to read everything there is to read independent of Pebble version.
+//
+// The property is unused in Pebble as there's only one thing to retrieve.
func (d *Database) Stat(property string) (string, error) {
- return "", nil
+ return d.db.Metrics().String(), nil
}
// Compact flattens the underlying data store for the given key range. In essence,
@@ -413,7 +429,7 @@ func (d *Database) Path() string {
// meter periodically retrieves internal pebble counters and reports them to
// the metrics subsystem.
-func (d *Database) meter(refresh time.Duration) {
+func (d *Database) meter(refresh time.Duration, namespace string) {
var errc chan error
timer := time.NewTimer(refresh)
defer timer.Stop()
@@ -436,7 +452,7 @@ func (d *Database) meter(refresh time.Duration) {
compRead int64
nWrite int64
- metrics = d.db.Metrics()
+ stats = d.db.Metrics()
compTime = d.compTime.Load()
writeDelayCount = d.writeDelayCount.Load()
writeDelayTime = d.writeDelayTime.Load()
@@ -447,14 +463,14 @@ func (d *Database) meter(refresh time.Duration) {
writeDelayCounts[i%2] = writeDelayCount
compTimes[i%2] = compTime
- for _, levelMetrics := range metrics.Levels {
+ for _, levelMetrics := range stats.Levels {
nWrite += int64(levelMetrics.BytesCompacted)
nWrite += int64(levelMetrics.BytesFlushed)
compWrite += int64(levelMetrics.BytesCompacted)
compRead += int64(levelMetrics.BytesRead)
}
- nWrite += int64(metrics.WAL.BytesWritten)
+ nWrite += int64(stats.WAL.BytesWritten)
compWrites[i%2] = compWrite
compReads[i%2] = compRead
@@ -476,7 +492,7 @@ func (d *Database) meter(refresh time.Duration) {
d.compWriteMeter.Mark(compWrites[i%2] - compWrites[(i-1)%2])
}
if d.diskSizeGauge != nil {
- d.diskSizeGauge.Update(int64(metrics.DiskSpaceUsage()))
+ d.diskSizeGauge.Update(int64(stats.DiskSpaceUsage()))
}
if d.diskReadMeter != nil {
d.diskReadMeter.Mark(0) // pebble doesn't track non-compaction reads
@@ -485,12 +501,20 @@ func (d *Database) meter(refresh time.Duration) {
d.diskWriteMeter.Mark(nWrites[i%2] - nWrites[(i-1)%2])
}
// See https://github.com/cockroachdb/pebble/pull/1628#pullrequestreview-1026664054
- manuallyAllocated := metrics.BlockCache.Size + int64(metrics.MemTable.Size) + int64(metrics.MemTable.ZombieSize)
+ manuallyAllocated := stats.BlockCache.Size + int64(stats.MemTable.Size) + int64(stats.MemTable.ZombieSize)
d.manualMemAllocGauge.Update(manuallyAllocated)
- d.memCompGauge.Update(metrics.Flush.Count)
+ d.memCompGauge.Update(stats.Flush.Count)
d.nonlevel0CompGauge.Update(nonLevel0CompCount)
d.level0CompGauge.Update(level0CompCount)
- d.seekCompGauge.Update(metrics.Compact.ReadCount)
+ d.seekCompGauge.Update(stats.Compact.ReadCount)
+
+ for i, level := range stats.Levels {
+ // Append metrics for additional layers
+ if i >= len(d.levelsGauge) {
+ d.levelsGauge = append(d.levelsGauge, metrics.NewRegisteredGauge(namespace+fmt.Sprintf("tables/level%v", i), nil))
+ }
+ d.levelsGauge[i].Update(level.NumFiles)
+ }
// Sleep a bit, then repeat the stats collection
select {
@@ -579,7 +603,7 @@ type pebbleIterator struct {
// of database content with a particular key prefix, starting at a particular
// initial key (or after, if it does not exist).
func (d *Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
- iter := d.db.NewIter(&pebble.IterOptions{
+ iter, _ := d.db.NewIter(&pebble.IterOptions{
LowerBound: append(prefix, start...),
UpperBound: upperBound(prefix),
})
diff --git a/go.mod b/go.mod
index 2490cd6e3..d9be2c3e5 100644
--- a/go.mod
+++ b/go.mod
@@ -13,7 +13,8 @@ require (
github.com/btcsuite/btcd/btcec/v2 v2.2.0
github.com/cespare/cp v0.1.0
github.com/cloudflare/cloudflare-go v0.14.0
- github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06
+ github.com/cockroachdb/errors v1.8.1
+ github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593
github.com/consensys/gnark-crypto v0.10.0
github.com/crate-crypto/go-kzg-4844 v0.3.0
github.com/davecgh/go-spew v1.1.1
@@ -30,7 +31,7 @@ require (
github.com/go-stack/stack v1.8.1
github.com/gofrs/flock v0.8.1
github.com/golang-jwt/jwt/v4 v4.3.0
- github.com/golang/protobuf v1.5.2
+ github.com/golang/protobuf v1.5.3
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa
github.com/google/uuid v1.3.0
@@ -49,7 +50,7 @@ require (
github.com/karalabe/usb v0.0.2
github.com/kylelemons/godebug v1.1.0
github.com/mattn/go-colorable v0.1.13
- github.com/mattn/go-isatty v0.0.16
+ github.com/mattn/go-isatty v0.0.17
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416
github.com/olekukonko/tablewriter v0.0.5
github.com/openrelayxyz/plugeth-utils v1.3.0
@@ -58,19 +59,19 @@ require (
github.com/rs/cors v1.7.0
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible
github.com/status-im/keycard-go v0.2.0
- github.com/stretchr/testify v1.8.1
+ github.com/stretchr/testify v1.8.2
github.com/supranational/blst v0.3.11
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
github.com/tyler-smith/go-bip39 v1.1.0
github.com/urfave/cli/v2 v2.25.7
go.uber.org/automaxprocs v1.5.2
- golang.org/x/crypto v0.12.0
- golang.org/x/exp v0.0.0-20230810033253-352e893a4cad
+ golang.org/x/crypto v0.13.0
+ golang.org/x/exp v0.0.0-20230905200255-921286631fa9
golang.org/x/sync v0.3.0
- golang.org/x/sys v0.11.0
- golang.org/x/text v0.12.0
+ golang.org/x/sys v0.12.0
+ golang.org/x/text v0.13.0
golang.org/x/time v0.3.0
- golang.org/x/tools v0.9.1
+ golang.org/x/tools v0.13.0
gopkg.in/natefinch/lumberjack.v2 v2.0.0
gopkg.in/yaml.v3 v3.0.1
)
@@ -88,10 +89,10 @@ require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.5.0 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
- github.com/cockroachdb/errors v1.8.1 // indirect
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f // indirect
github.com/cockroachdb/redact v1.0.8 // indirect
github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 // indirect
+ github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
github.com/consensys/bavard v0.1.13 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80 // indirect
@@ -127,10 +128,10 @@ require (
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
- golang.org/x/mod v0.11.0 // indirect
- golang.org/x/net v0.10.0 // indirect
+ golang.org/x/mod v0.12.0 // indirect
+ golang.org/x/net v0.15.0 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
- gotest.tools/v3 v3.5.0 // indirect
+ gotest.tools/v3 v3.5.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
)
diff --git a/go.sum b/go.sum
index 57596642f..1981ee312 100644
--- a/go.sum
+++ b/go.sum
@@ -109,18 +109,20 @@ github.com/cloudflare/cloudflare-go v0.14.0 h1:gFqGlGl/5f9UGXAaKapCGUfaTCgRKKnzu
github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cockroachdb/datadriven v1.0.0/go.mod h1:5Ib8Meh+jk1RlHIXej6Pzevx/NLlNvQB9pmSBZErGA4=
-github.com/cockroachdb/datadriven v1.0.3-0.20230801171734-e384cf455877 h1:1MLK4YpFtIEo3ZtMA5C795Wtv5VuUnrXX7mQG+aHg6o=
+github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4=
github.com/cockroachdb/errors v1.6.1/go.mod h1:tm6FTP5G81vwJ5lC0SizQo374JNCOPrHyXGitRJoDqM=
github.com/cockroachdb/errors v1.8.1 h1:A5+txlVZfOqFBDa4mGz2bUWSp0aHElvHX2bKkdbQu+Y=
github.com/cockroachdb/errors v1.8.1/go.mod h1:qGwQn6JmZ+oMjuLwjWzUNqblqk0xl4CVV3SQbGwK7Ac=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
-github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 h1:T+Np/xtzIjYM/P5NAw0e2Rf1FGvzDau1h54MKvx8G7w=
-github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06/go.mod h1:bynZ3gvVyhlvjLI7PT6dmZ7g76xzJ7HpxfjgkzCGz6s=
+github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 h1:aPEJyR4rPBvDmeyi+l/FS/VtA00IWvjeFvjen1m1l1A=
+github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593/go.mod h1:6hk1eMY/u5t+Cf18q5lFMUA1Rc+Sm5I6Ra1QuPyxXCo=
github.com/cockroachdb/redact v1.0.8 h1:8QG/764wK+vmEYoOlfobpe12EQcS81ukx/a4hdVMxNw=
github.com/cockroachdb/redact v1.0.8/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 h1:IKgmqgMQlVJIZj19CdocBeSfSaiCbEBZGKODaixqtHM=
github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ=
+github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
+github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
@@ -262,8 +264,9 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
@@ -409,8 +412,9 @@ github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
+github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
@@ -538,8 +542,8 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
@@ -594,8 +598,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
-golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
+golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck=
+golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -606,8 +610,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20230810033253-352e893a4cad h1:g0bG7Z4uG+OgH2QDODnjp6ggkk1bJDsINcuWmJN1iJU=
-golang.org/x/exp v0.0.0-20230810033253-352e893a4cad/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
+golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
+golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -629,8 +633,8 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU=
-golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -674,8 +678,8 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
-golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8=
+golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -758,8 +762,9 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
+golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -773,8 +778,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
-golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
-golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -829,8 +834,8 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo=
-golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
+golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
+golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -943,8 +948,8 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY=
-gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
+gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
+gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index e2911c6b1..cf1960fcf 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -991,13 +991,14 @@ func (diff *StateOverride) Apply(state *state.StateDB) error {
// BlockOverrides is a set of header fields to override.
type BlockOverrides struct {
- Number *hexutil.Big
- Difficulty *hexutil.Big
- Time *hexutil.Uint64
- GasLimit *hexutil.Uint64
- Coinbase *common.Address
- Random *common.Hash
- BaseFee *hexutil.Big
+ Number *hexutil.Big
+ Difficulty *hexutil.Big
+ Time *hexutil.Uint64
+ GasLimit *hexutil.Uint64
+ Coinbase *common.Address
+ Random *common.Hash
+ BaseFee *hexutil.Big
+ BlobBaseFee *hexutil.Big
}
// Apply overrides the given header fields into the given block context.
@@ -1026,6 +1027,9 @@ func (diff *BlockOverrides) Apply(blockCtx *vm.BlockContext) {
if diff.BaseFee != nil {
blockCtx.BaseFee = diff.BaseFee.ToInt()
}
+ if diff.BlobBaseFee != nil {
+ blockCtx.BlobBaseFee = diff.BlobBaseFee.ToInt()
+ }
}
// ChainContextBackend provides methods required to implement ChainContext.
@@ -2191,20 +2195,23 @@ func (api *DebugAPI) PrintBlock(ctx context.Context, number uint64) (string, err
// ChaindbProperty returns leveldb properties of the key-value database.
func (api *DebugAPI) ChaindbProperty(property string) (string, error) {
- if property == "" {
- property = "leveldb.stats"
- } else if !strings.HasPrefix(property, "leveldb.") {
- property = "leveldb." + property
- }
return api.b.ChainDb().Stat(property)
}
// ChaindbCompact flattens the entire key-value database into a single level,
// removing all unused slots and merging all keys.
func (api *DebugAPI) ChaindbCompact() error {
- for b := byte(0); b < 255; b++ {
- log.Info("Compacting chain database", "range", fmt.Sprintf("0x%0.2X-0x%0.2X", b, b+1))
- if err := api.b.ChainDb().Compact([]byte{b}, []byte{b + 1}); err != nil {
+ cstart := time.Now()
+ for b := 0; b <= 255; b++ {
+ var (
+ start = []byte{byte(b)}
+ end = []byte{byte(b + 1)}
+ )
+ if b == 255 {
+ end = nil
+ }
+ log.Info("Compacting database", "range", fmt.Sprintf("%#X-%#X", start, end), "elapsed", common.PrettyDuration(time.Since(cstart)))
+ if err := api.b.ChainDb().Compact(start, end); err != nil {
log.Error("Database compaction failed", "err", err)
return err
}
diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go
index 846a4347a..59882cd6b 100644
--- a/internal/ethapi/api_test.go
+++ b/internal/ethapi/api_test.go
@@ -1448,9 +1448,6 @@ func setupReceiptBackend(t *testing.T, genBlocks int) (*testBackend, []common.Ha
b.AddTx(tx)
txHashes[i] = tx.Hash()
}
- if i == 5 {
- b.SetBlobGas(params.BlobTxBlobGasPerBlob)
- }
b.SetPoS()
})
return backend, txHashes
diff --git a/les/client_handler.go b/les/client_handler.go
index 4cfeba08f..50f6dce87 100644
--- a/les/client_handler.go
+++ b/les/client_handler.go
@@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/p2p"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
// clientHandler is responsible for receiving and processing all incoming server
@@ -236,7 +237,7 @@ func (h *clientHandler) handleMsg(p *serverPeer) error {
p.Log().Trace("Received les/2 proofs response")
var resp struct {
ReqID, BV uint64
- Data light.NodeList
+ Data trienode.ProofList
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
diff --git a/les/handler_test.go b/les/handler_test.go
index 26a083f47..c803a5ddb 100644
--- a/les/handler_test.go
+++ b/les/handler_test.go
@@ -37,6 +37,7 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
func expectResponse(r p2p.MsgReader, msgcode, reqID, bv uint64, data interface{}) error {
@@ -401,7 +402,7 @@ func testGetProofs(t *testing.T, protocol int) {
bc := server.handler.blockchain
var proofreqs []ProofReq
- proofsV2 := light.NewNodeSet()
+ proofsV2 := trienode.NewProofSet()
accounts := []common.Address{bankAddr, userAddr1, userAddr2, signerAddr, {}}
for i := uint64(0); i <= bc.CurrentBlock().Number.Uint64(); i++ {
@@ -419,7 +420,7 @@ func testGetProofs(t *testing.T, protocol int) {
}
// Send the proof request and verify the response
sendRequest(rawPeer.app, GetProofsV2Msg, 42, proofreqs)
- if err := expectResponse(rawPeer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.NodeList()); err != nil {
+ if err := expectResponse(rawPeer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.List()); err != nil {
t.Errorf("proofs mismatch: %v", err)
}
}
@@ -456,10 +457,10 @@ func testGetStaleProof(t *testing.T, protocol int) {
var expected []rlp.RawValue
if wantOK {
- proofsV2 := light.NewNodeSet()
+ proofsV2 := trienode.NewProofSet()
t, _ := trie.New(trie.StateTrieID(header.Root), server.backend.Blockchain().TrieDB())
t.Prove(account, proofsV2)
- expected = proofsV2.NodeList()
+ expected = proofsV2.List()
}
if err := expectResponse(rawPeer.app, ProofsV2Msg, 42, testBufLimit, expected); err != nil {
t.Errorf("codes mismatch: %v", err)
diff --git a/les/odr_requests.go b/les/odr_requests.go
index 2b23e0540..c90701859 100644
--- a/les/odr_requests.go
+++ b/les/odr_requests.go
@@ -30,6 +30,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
var (
@@ -222,9 +223,9 @@ func (r *TrieRequest) Validate(db ethdb.Database, msg *Msg) error {
if msg.MsgType != MsgProofsV2 {
return errInvalidMessageType
}
- proofs := msg.Obj.(light.NodeList)
+ proofs := msg.Obj.(trienode.ProofList)
// Verify the proof and store if checks out
- nodeSet := proofs.NodeSet()
+ nodeSet := proofs.Set()
reads := &readTraceDB{db: nodeSet}
if _, err := trie.VerifyProof(r.Id.Root, r.Key, reads); err != nil {
return fmt.Errorf("merkle proof verification failed: %v", err)
@@ -308,7 +309,7 @@ type HelperTrieReq struct {
}
type HelperTrieResps struct { // describes all responses, not just a single one
- Proofs light.NodeList
+ Proofs trienode.ProofList
AuxData [][]byte
}
@@ -356,7 +357,7 @@ func (r *ChtRequest) Validate(db ethdb.Database, msg *Msg) error {
if len(resp.AuxData) != 1 {
return errInvalidEntryCount
}
- nodeSet := resp.Proofs.NodeSet()
+ nodeSet := resp.Proofs.Set()
headerEnc := resp.AuxData[0]
if len(headerEnc) == 0 {
return errHeaderUnavailable
@@ -451,7 +452,7 @@ func (r *BloomRequest) Validate(db ethdb.Database, msg *Msg) error {
}
resps := msg.Obj.(HelperTrieResps)
proofs := resps.Proofs
- nodeSet := proofs.NodeSet()
+ nodeSet := proofs.Set()
reads := &readTraceDB{db: nodeSet}
r.BloomBits = make([][]byte, len(r.SectionIndexList))
diff --git a/les/peer.go b/les/peer.go
index 48381689e..58cb92870 100644
--- a/les/peer.go
+++ b/les/peer.go
@@ -40,6 +40,7 @@ import (
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
var (
@@ -899,7 +900,7 @@ func (p *clientPeer) replyReceiptsRLP(reqID uint64, receipts []rlp.RawValue) *re
}
// replyProofsV2 creates a reply with a batch of merkle proofs, corresponding to the ones requested.
-func (p *clientPeer) replyProofsV2(reqID uint64, proofs light.NodeList) *reply {
+func (p *clientPeer) replyProofsV2(reqID uint64, proofs trienode.ProofList) *reply {
data, _ := rlp.EncodeToBytes(proofs)
return &reply{p.rw, ProofsV2Msg, reqID, data}
}
diff --git a/les/server_requests.go b/les/server_requests.go
index 485be6d9e..9a249f04c 100644
--- a/les/server_requests.go
+++ b/les/server_requests.go
@@ -30,6 +30,7 @@ import (
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
// serverBackend defines the backend functions needed for serving LES requests
@@ -378,7 +379,7 @@ func handleGetProofs(msg Decoder) (serveRequestFn, uint64, uint64, error) {
err error
)
bc := backend.BlockChain()
- nodes := light.NewNodeSet()
+ nodes := trienode.NewProofSet()
for i, request := range r.Reqs {
if i != 0 && !waitOrStop() {
@@ -444,7 +445,7 @@ func handleGetProofs(msg Decoder) (serveRequestFn, uint64, uint64, error) {
break
}
}
- return p.replyProofsV2(r.ReqID, nodes.NodeList())
+ return p.replyProofsV2(r.ReqID, nodes.List())
}, r.ReqID, uint64(len(r.Reqs)), nil
}
@@ -463,7 +464,7 @@ func handleGetHelperTrieProofs(msg Decoder) (serveRequestFn, uint64, uint64, err
auxData [][]byte
)
bc := backend.BlockChain()
- nodes := light.NewNodeSet()
+ nodes := trienode.NewProofSet()
for i, request := range r.Reqs {
if i != 0 && !waitOrStop() {
return nil
@@ -498,7 +499,7 @@ func handleGetHelperTrieProofs(msg Decoder) (serveRequestFn, uint64, uint64, err
break
}
}
- return p.replyHelperTrieProofs(r.ReqID, HelperTrieResps{Proofs: nodes.NodeList(), AuxData: auxData})
+ return p.replyHelperTrieProofs(r.ReqID, HelperTrieResps{Proofs: nodes.List(), AuxData: auxData})
}, r.ReqID, uint64(len(r.Reqs)), nil
}
diff --git a/light/odr.go b/light/odr.go
index 259702743..39f626ee2 100644
--- a/light/odr.go
+++ b/light/odr.go
@@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
// NoOdr is the default context passed to an ODR capable function when the ODR
@@ -90,7 +91,7 @@ func StorageTrieID(state *TrieID, address common.Address, root common.Hash) *Tri
type TrieRequest struct {
Id *TrieID
Key []byte
- Proof *NodeSet
+ Proof *trienode.ProofSet
}
// StoreResult stores the retrieved data in local database
@@ -143,7 +144,7 @@ type ChtRequest struct {
ChtRoot common.Hash
Header *types.Header
Td *big.Int
- Proof *NodeSet
+ Proof *trienode.ProofSet
}
// StoreResult stores the retrieved data in local database
@@ -163,7 +164,7 @@ type BloomRequest struct {
SectionIndexList []uint64
BloomTrieRoot common.Hash
BloomBits [][]byte
- Proofs *NodeSet
+ Proofs *trienode.ProofSet
}
// StoreResult stores the retrieved data in local database
diff --git a/light/odr_test.go b/light/odr_test.go
index d8a7f1067..c415d73e7 100644
--- a/light/odr_test.go
+++ b/light/odr_test.go
@@ -37,6 +37,7 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
var (
@@ -95,7 +96,7 @@ func (odr *testOdr) Retrieve(ctx context.Context, req OdrRequest) error {
if err != nil {
panic(err)
}
- nodes := NewNodeSet()
+ nodes := trienode.NewProofSet()
t.Prove(req.Key, nodes)
req.Proof = nodes
case *CodeRequest:
diff --git a/light/postprocess.go b/light/postprocess.go
index 13d75f861..a317e30b9 100644
--- a/light/postprocess.go
+++ b/light/postprocess.go
@@ -363,7 +363,7 @@ func NewBloomTrieIndexer(db ethdb.Database, odr OdrBackend, parentSize, size uin
func (b *BloomTrieIndexerBackend) fetchMissingNodes(ctx context.Context, section uint64, root common.Hash) error {
indexCh := make(chan uint, types.BloomBitLength)
type res struct {
- nodes *NodeSet
+ nodes *trienode.ProofSet
err error
}
resCh := make(chan res, types.BloomBitLength)
diff --git a/miner/ordering_test.go b/miner/ordering_test.go
index bdbdc3214..59d478274 100644
--- a/miner/ordering_test.go
+++ b/miner/ordering_test.go
@@ -92,6 +92,8 @@ func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) {
Time: tx.Time(),
GasFeeCap: tx.GasFeeCap(),
GasTipCap: tx.GasTipCap(),
+ Gas: tx.Gas(),
+ BlobGas: tx.BlobGas(),
})
}
expectedCount += count
@@ -157,6 +159,8 @@ func TestTransactionTimeSort(t *testing.T) {
Time: tx.Time(),
GasFeeCap: tx.GasFeeCap(),
GasTipCap: tx.GasTipCap(),
+ Gas: tx.Gas(),
+ BlobGas: tx.BlobGas(),
})
}
// Sort the transactions and cross check the nonce ordering
diff --git a/miner/worker.go b/miner/worker.go
index 711149232..f68070281 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -263,8 +263,8 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus
resubmitIntervalCh: make(chan time.Duration),
resubmitAdjustCh: make(chan *intervalAdjust, resubmitAdjustChanSize),
}
- // Subscribe NewTxsEvent for tx pool
- worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh)
+ // Subscribe for transaction insertion events (whether from network or resurrects)
+ worker.txsSub = eth.TxPool().SubscribeTransactions(worker.txsCh, true)
// Subscribe events for blockchain
worker.chainHeadSub = eth.BlockChain().SubscribeChainHeadEvent(worker.chainHeadCh)
@@ -542,11 +542,14 @@ func (w *worker) mainLoop() {
for _, tx := range ev.Txs {
acc, _ := types.Sender(w.current.signer, tx)
txs[acc] = append(txs[acc], &txpool.LazyTransaction{
+ Pool: w.eth.TxPool(), // We don't know where this came from, yolo resolve from everywhere
Hash: tx.Hash(),
- Tx: tx.WithoutBlobTxSidecar(),
+ Tx: nil, // Do *not* set this! We need to resolve it later to pull blobs in
Time: tx.Time(),
GasFeeCap: tx.GasFeeCap(),
GasTipCap: tx.GasTipCap(),
+ Gas: tx.Gas(),
+ BlobGas: tx.BlobGas(),
})
}
txset := newTransactionsByPriceAndNonce(w.current.signer, txs, w.current.header.BaseFee)
@@ -742,7 +745,6 @@ func (w *worker) commitTransaction(env *environment, tx *types.Transaction) ([]*
if tx.Type() == types.BlobTxType {
return w.commitBlobTransaction(env, tx)
}
-
receipt, err := w.applyTransaction(env, tx)
if err != nil {
return nil, err
@@ -764,7 +766,6 @@ func (w *worker) commitBlobTransaction(env *environment, tx *types.Transaction)
if (env.blobs+len(sc.Blobs))*params.BlobTxBlobGasPerBlob > params.MaxBlobGasPerBlock {
return nil, errors.New("max data blobs reached")
}
-
receipt, err := w.applyTransaction(env, tx)
if err != nil {
return nil, err
@@ -815,13 +816,24 @@ func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAn
if ltx == nil {
break
}
- tx := ltx.Resolve()
- if tx == nil {
- log.Warn("Ignoring evicted transaction")
+ // If we don't have enough space for the next transaction, skip the account.
+ if env.gasPool.Gas() < ltx.Gas {
+ log.Trace("Not enough gas left for transaction", "hash", ltx.Hash, "left", env.gasPool.Gas(), "needed", ltx.Gas)
+ txs.Pop()
+ continue
+ }
+ if left := uint64(params.MaxBlobGasPerBlock - env.blobs*params.BlobTxBlobGasPerBlob); left < ltx.BlobGas {
+ log.Trace("Not enough blob gas left for transaction", "hash", ltx.Hash, "left", left, "needed", ltx.BlobGas)
+ txs.Pop()
+ continue
+ }
+ // Transaction seems to fit, pull it up from the pool
+ tx := ltx.Resolve()
+ if tx == nil {
+ log.Trace("Ignoring evicted transaction", "hash", ltx.Hash)
txs.Pop()
continue
}
-
// Error may be ignored here. The error has already been checked
// during transaction acceptance is the transaction pool.
from, _ := types.Sender(env.signer, tx)
@@ -829,11 +841,10 @@ func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAn
// Check whether the tx is replay protected. If we're not in the EIP155 hf
// phase, start ignoring the sender until we do.
if tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) {
- log.Trace("Ignoring replay protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block)
+ log.Trace("Ignoring replay protected transaction", "hash", ltx.Hash, "eip155", w.chainConfig.EIP155Block)
txs.Pop()
continue
}
-
// Start executing the transaction
env.state.SetTxContext(tx.Hash(), env.tcount)
@@ -841,7 +852,7 @@ func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAn
switch {
case errors.Is(err, core.ErrNonceTooLow):
// New head notification data race between the transaction pool and miner, shift
- log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
+ log.Trace("Skipping transaction with low nonce", "hash", ltx.Hash, "sender", from, "nonce", tx.Nonce())
txs.Shift()
case errors.Is(err, nil):
@@ -853,7 +864,7 @@ func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAn
default:
// Transaction is regarded as invalid, drop all consecutive transactions from
// the same sender because of `nonce-too-high` clause.
- log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
+ log.Debug("Transaction failed, account skipped", "hash", ltx.Hash, "err", err)
txs.Pop()
}
}
diff --git a/params/config.go b/params/config.go
index ac55d3771..88ff772a1 100644
--- a/params/config.go
+++ b/params/config.go
@@ -214,7 +214,7 @@ var (
}
// TestChainConfig contains every protocol change (EIPs) introduced
- // and accepted by the Ethereum core developers for testing proposes.
+ // and accepted by the Ethereum core developers for testing purposes.
TestChainConfig = &ChainConfig{
ChainID: big.NewInt(1),
HomesteadBlock: big.NewInt(0),
diff --git a/params/protocol_params.go b/params/protocol_params.go
index 353ad1e03..8a5c01184 100644
--- a/params/protocol_params.go
+++ b/params/protocol_params.go
@@ -186,7 +186,7 @@ var (
DurationLimit = big.NewInt(13) // The decision boundary on the blocktime duration used to determine whether difficulty should go up or not.
// BeaconRootsStorageAddress is the address where historical beacon roots are stored as per EIP-4788
- BeaconRootsStorageAddress = common.HexToAddress("0xbEac00dDB15f3B6d645C48263dC93862413A222D")
+ BeaconRootsStorageAddress = common.HexToAddress("0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02")
// SystemAddress is where the system-transaction is sent from as per EIP-4788
SystemAddress common.Address = common.HexToAddress("0xfffffffffffffffffffffffffffffffffffffffe")
)
diff --git a/params/version.go b/params/version.go
index 5941701b6..130acceef 100644
--- a/params/version.go
+++ b/params/version.go
@@ -23,7 +23,7 @@ import (
const (
VersionMajor = 1 // Major version component of the current release
VersionMinor = 13 // Minor version component of the current release
- VersionPatch = 2 // Patch version component of the current release
+ VersionPatch = 3 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string
)
diff --git a/rpc/client_opt.go b/rpc/client_opt.go
index 5bef08cca..3fa045a9b 100644
--- a/rpc/client_opt.go
+++ b/rpc/client_opt.go
@@ -34,7 +34,8 @@ type clientConfig struct {
httpAuth HTTPAuth
// WebSocket options
- wsDialer *websocket.Dialer
+ wsDialer *websocket.Dialer
+ wsMessageSizeLimit *int64 // wsMessageSizeLimit nil = default, 0 = no limit
// RPC handler options
idgen func() ID
@@ -66,6 +67,14 @@ func WithWebsocketDialer(dialer websocket.Dialer) ClientOption {
})
}
+// WithWebsocketMessageSizeLimit configures the websocket message size limit used by the RPC
+// client. Passing a limit of 0 means no limit.
+func WithWebsocketMessageSizeLimit(messageSizeLimit int64) ClientOption {
+ return optionFunc(func(cfg *clientConfig) {
+ cfg.wsMessageSizeLimit = &messageSizeLimit
+ })
+}
+
// WithHeader configures HTTP headers set by the RPC client. Headers set using this option
// will be used for both HTTP and WebSocket connections.
func WithHeader(key, value string) ClientOption {
diff --git a/rpc/server_test.go b/rpc/server_test.go
index 5d3929dfd..9d1c7fb5f 100644
--- a/rpc/server_test.go
+++ b/rpc/server_test.go
@@ -32,7 +32,8 @@ func TestServerRegisterName(t *testing.T) {
server := NewServer()
service := new(testService)
- if err := server.RegisterName("test", service); err != nil {
+ svcName := "test"
+ if err := server.RegisterName(svcName, service); err != nil {
t.Fatalf("%v", err)
}
@@ -40,12 +41,12 @@ func TestServerRegisterName(t *testing.T) {
t.Fatalf("Expected 2 service entries, got %d", len(server.services.services))
}
- svc, ok := server.services.services["test"]
+ svc, ok := server.services.services[svcName]
if !ok {
- t.Fatalf("Expected service calc to be registered")
+ t.Fatalf("Expected service %s to be registered", svcName)
}
- wantCallbacks := 13
+ wantCallbacks := 14
if len(svc.callbacks) != wantCallbacks {
t.Errorf("Expected %d callbacks for service 'service', got %d", wantCallbacks, len(svc.callbacks))
}
diff --git a/rpc/testservice_test.go b/rpc/testservice_test.go
index eab67f1dd..7d873af66 100644
--- a/rpc/testservice_test.go
+++ b/rpc/testservice_test.go
@@ -90,6 +90,10 @@ func (s *testService) EchoWithCtx(ctx context.Context, str string, i int, args *
return echoResult{str, i, args}
}
+func (s *testService) Repeat(msg string, i int) string {
+ return strings.Repeat(msg, i)
+}
+
func (s *testService) PeerInfo(ctx context.Context) PeerInfo {
return PeerInfoFromContext(ctx)
}
diff --git a/rpc/websocket.go b/rpc/websocket.go
index 86cf50594..538e53a31 100644
--- a/rpc/websocket.go
+++ b/rpc/websocket.go
@@ -38,7 +38,7 @@ const (
wsPingInterval = 30 * time.Second
wsPingWriteTimeout = 5 * time.Second
wsPongTimeout = 30 * time.Second
- wsMessageSizeLimit = 32 * 1024 * 1024
+ wsDefaultReadLimit = 32 * 1024 * 1024
)
var wsBufferPool = new(sync.Pool)
@@ -60,7 +60,7 @@ func (s *Server) WebsocketHandler(allowedOrigins []string) http.Handler {
log.Debug("WebSocket upgrade failed", "err", err)
return
}
- codec := newWebsocketCodec(conn, r.Host, r.Header)
+ codec := newWebsocketCodec(conn, r.Host, r.Header, wsDefaultReadLimit)
s.ServeCodec(codec, 0)
})
}
@@ -251,7 +251,11 @@ func newClientTransportWS(endpoint string, cfg *clientConfig) (reconnectFunc, er
}
return nil, hErr
}
- return newWebsocketCodec(conn, dialURL, header), nil
+ messageSizeLimit := int64(wsDefaultReadLimit)
+ if cfg.wsMessageSizeLimit != nil && *cfg.wsMessageSizeLimit >= 0 {
+ messageSizeLimit = *cfg.wsMessageSizeLimit
+ }
+ return newWebsocketCodec(conn, dialURL, header, messageSizeLimit), nil
}
return connect, nil
}
@@ -283,8 +287,8 @@ type websocketCodec struct {
pongReceived chan struct{}
}
-func newWebsocketCodec(conn *websocket.Conn, host string, req http.Header) ServerCodec {
- conn.SetReadLimit(wsMessageSizeLimit)
+func newWebsocketCodec(conn *websocket.Conn, host string, req http.Header, readLimit int64) ServerCodec {
+ conn.SetReadLimit(readLimit)
encode := func(v interface{}, isErrorResponse bool) error {
return conn.WriteJSON(v)
}
diff --git a/rpc/websocket_test.go b/rpc/websocket_test.go
index fb9357605..e4ac5c3fa 100644
--- a/rpc/websocket_test.go
+++ b/rpc/websocket_test.go
@@ -113,6 +113,66 @@ func TestWebsocketLargeCall(t *testing.T) {
}
}
+// This test checks whether the wsMessageSizeLimit option is obeyed.
+func TestWebsocketLargeRead(t *testing.T) {
+ t.Parallel()
+
+ var (
+ srv = newTestServer()
+ httpsrv = httptest.NewServer(srv.WebsocketHandler([]string{"*"}))
+ wsURL = "ws:" + strings.TrimPrefix(httpsrv.URL, "http:")
+ )
+ defer srv.Stop()
+ defer httpsrv.Close()
+
+ testLimit := func(limit *int64) {
+ opts := []ClientOption{}
+ expLimit := int64(wsDefaultReadLimit)
+ if limit != nil && *limit >= 0 {
+ opts = append(opts, WithWebsocketMessageSizeLimit(*limit))
+ if *limit > 0 {
+ expLimit = *limit // 0 means infinite
+ }
+ }
+ client, err := DialOptions(context.Background(), wsURL, opts...)
+ if err != nil {
+ t.Fatalf("can't dial: %v", err)
+ }
+ defer client.Close()
+ // Remove some bytes for json encoding overhead.
+ underLimit := int(expLimit - 128)
+ overLimit := expLimit + 1
+ if expLimit == wsDefaultReadLimit {
+ // No point trying the full 32MB in tests. Just sanity-check that
+ // it's not obviously limited.
+ underLimit = 1024
+ overLimit = -1
+ }
+ var res string
+ // Check under limit
+ if err = client.Call(&res, "test_repeat", "A", underLimit); err != nil {
+ t.Fatalf("unexpected error with limit %d: %v", expLimit, err)
+ }
+ if len(res) != underLimit || strings.Count(res, "A") != underLimit {
+ t.Fatal("incorrect data")
+ }
+ // Check over limit
+ if overLimit > 0 {
+ err = client.Call(&res, "test_repeat", "A", expLimit+1)
+ if err == nil || err != websocket.ErrReadLimit {
+ t.Fatalf("wrong error with limit %d: %v expecting %v", expLimit, err, websocket.ErrReadLimit)
+ }
+ }
+ }
+ ptr := func(v int64) *int64 { return &v }
+
+ testLimit(ptr(-1)) // Should be ignored (use default)
+ testLimit(ptr(0)) // Should be ignored (use default)
+ testLimit(nil) // Should be ignored (use default)
+ testLimit(ptr(200))
+ testLimit(ptr(wsDefaultReadLimit * 2))
+}
+
func TestWebsocketPeerInfo(t *testing.T) {
var (
s = newTestServer()
@@ -206,7 +266,7 @@ func TestClientWebsocketLargeMessage(t *testing.T) {
defer srv.Stop()
defer httpsrv.Close()
- respLength := wsMessageSizeLimit - 50
+ respLength := wsDefaultReadLimit - 50
srv.RegisterName("test", largeRespService{respLength})
c, err := DialWebsocket(context.Background(), wsURL, "")
diff --git a/tests/fuzzers/stacktrie/trie_fuzzer.go b/tests/fuzzers/stacktrie/trie_fuzzer.go
index 3d6552409..20b8ca24b 100644
--- a/tests/fuzzers/stacktrie/trie_fuzzer.go
+++ b/tests/fuzzers/stacktrie/trie_fuzzer.go
@@ -140,8 +140,8 @@ func (f *fuzzer) fuzz() int {
trieA = trie.NewEmpty(dbA)
spongeB = &spongeDb{sponge: sha3.NewLegacyKeccak256()}
dbB = trie.NewDatabase(rawdb.NewDatabase(spongeB), nil)
- trieB = trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
- rawdb.WriteTrieNode(spongeB, owner, path, hash, blob, dbB.Scheme())
+ trieB = trie.NewStackTrie(func(path []byte, hash common.Hash, blob []byte) {
+ rawdb.WriteTrieNode(spongeB, common.Hash{}, path, hash, blob, dbB.Scheme())
})
vals []kv
useful bool
@@ -205,13 +205,10 @@ func (f *fuzzer) fuzz() int {
// Ensure all the nodes are persisted correctly
var (
nodeset = make(map[string][]byte) // path -> blob
- trieC = trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
+ trieC = trie.NewStackTrie(func(path []byte, hash common.Hash, blob []byte) {
if crypto.Keccak256Hash(blob) != hash {
panic("invalid node blob")
}
- if owner != (common.Hash{}) {
- panic("invalid node owner")
- }
nodeset[string(path)] = common.CopyBytes(blob)
})
checked int
diff --git a/tests/fuzzers/txfetcher/txfetcher_fuzzer.go b/tests/fuzzers/txfetcher/txfetcher_fuzzer.go
index d1d6fdc66..8b501645b 100644
--- a/tests/fuzzers/txfetcher/txfetcher_fuzzer.go
+++ b/tests/fuzzers/txfetcher/txfetcher_fuzzer.go
@@ -83,6 +83,7 @@ func Fuzz(input []byte) int {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
+ nil,
clock, rand,
)
f.Start()
@@ -116,6 +117,8 @@ func Fuzz(input []byte) int {
var (
announceIdxs = make([]int, announce)
announces = make([]common.Hash, announce)
+ types = make([]byte, announce)
+ sizes = make([]uint32, announce)
)
for i := 0; i < len(announces); i++ {
annBuf := make([]byte, 2)
@@ -124,11 +127,13 @@ func Fuzz(input []byte) int {
}
announceIdxs[i] = (int(annBuf[0])*256 + int(annBuf[1])) % len(txs)
announces[i] = txs[announceIdxs[i]].Hash()
+ types[i] = txs[announceIdxs[i]].Type()
+ sizes[i] = uint32(txs[announceIdxs[i]].Size())
}
if verbose {
fmt.Println("Notify", peer, announceIdxs)
}
- if err := f.Notify(peer, announces); err != nil {
+ if err := f.Notify(peer, types, sizes, announces); err != nil {
panic(err)
}
diff --git a/trie/stacktrie.go b/trie/stacktrie.go
index 0d65ee75e..35208e1cb 100644
--- a/trie/stacktrie.go
+++ b/trie/stacktrie.go
@@ -17,11 +17,7 @@
package trie
import (
- "bufio"
- "bytes"
- "encoding/gob"
"errors"
- "io"
"sync"
"github.com/ethereum/go-ethereum/common"
@@ -29,171 +25,87 @@ import (
"github.com/ethereum/go-ethereum/log"
)
-var ErrCommitDisabled = errors.New("no database for committing")
-
-var stPool = sync.Pool{
- New: func() interface{} {
- return NewStackTrie(nil)
- },
-}
+var (
+ ErrCommitDisabled = errors.New("no database for committing")
+ stPool = sync.Pool{New: func() any { return new(stNode) }}
+ _ = types.TrieHasher((*StackTrie)(nil))
+)
// NodeWriteFunc is used to provide all information of a dirty node for committing
// so that callers can flush nodes into database with desired scheme.
-type NodeWriteFunc = func(owner common.Hash, path []byte, hash common.Hash, blob []byte)
-
-func stackTrieFromPool(writeFn NodeWriteFunc, owner common.Hash) *StackTrie {
- st := stPool.Get().(*StackTrie)
- st.owner = owner
- st.writeFn = writeFn
- return st
-}
-
-func returnToPool(st *StackTrie) {
- st.Reset()
- stPool.Put(st)
-}
+type NodeWriteFunc = func(path []byte, hash common.Hash, blob []byte)
// StackTrie is a trie implementation that expects keys to be inserted
// in order. Once it determines that a subtree will no longer be inserted
// into, it will hash it and free up the memory it uses.
type StackTrie struct {
- owner common.Hash // the owner of the trie
- nodeType uint8 // node type (as in branch, ext, leaf)
- val []byte // value contained by this node if it's a leaf
- key []byte // key chunk covered by this (leaf|ext) node
- children [16]*StackTrie // list of children (for branch and exts)
- writeFn NodeWriteFunc // function for committing nodes, can be nil
+ writeFn NodeWriteFunc // function for committing nodes, can be nil
+ root *stNode
+ h *hasher
}
// NewStackTrie allocates and initializes an empty trie.
func NewStackTrie(writeFn NodeWriteFunc) *StackTrie {
return &StackTrie{
- nodeType: emptyNode,
- writeFn: writeFn,
+ writeFn: writeFn,
+ root: stPool.Get().(*stNode),
+ h: newHasher(false),
}
}
-// NewStackTrieWithOwner allocates and initializes an empty trie, but with
-// the additional owner field.
-func NewStackTrieWithOwner(writeFn NodeWriteFunc, owner common.Hash) *StackTrie {
- return &StackTrie{
- owner: owner,
- nodeType: emptyNode,
- writeFn: writeFn,
- }
-}
-
-// NewFromBinary initialises a serialized stacktrie with the given db.
-func NewFromBinary(data []byte, writeFn NodeWriteFunc) (*StackTrie, error) {
- var st StackTrie
- if err := st.UnmarshalBinary(data); err != nil {
- return nil, err
- }
- // If a database is used, we need to recursively add it to every child
- if writeFn != nil {
- st.setWriter(writeFn)
- }
- return &st, nil
-}
-
-// MarshalBinary implements encoding.BinaryMarshaler
-func (st *StackTrie) MarshalBinary() (data []byte, err error) {
- var (
- b bytes.Buffer
- w = bufio.NewWriter(&b)
- )
- if err := gob.NewEncoder(w).Encode(struct {
- Owner common.Hash
- NodeType uint8
- Val []byte
- Key []byte
- }{
- st.owner,
- st.nodeType,
- st.val,
- st.key,
- }); err != nil {
- return nil, err
- }
- for _, child := range st.children {
- if child == nil {
- w.WriteByte(0)
- continue
- }
- w.WriteByte(1)
- if childData, err := child.MarshalBinary(); err != nil {
- return nil, err
- } else {
- w.Write(childData)
- }
- }
- w.Flush()
- return b.Bytes(), nil
-}
-
-// UnmarshalBinary implements encoding.BinaryUnmarshaler
-func (st *StackTrie) UnmarshalBinary(data []byte) error {
- r := bytes.NewReader(data)
- return st.unmarshalBinary(r)
-}
-
-func (st *StackTrie) unmarshalBinary(r io.Reader) error {
- var dec struct {
- Owner common.Hash
- NodeType uint8
- Val []byte
- Key []byte
- }
- if err := gob.NewDecoder(r).Decode(&dec); err != nil {
- return err
- }
- st.owner = dec.Owner
- st.nodeType = dec.NodeType
- st.val = dec.Val
- st.key = dec.Key
-
- var hasChild = make([]byte, 1)
- for i := range st.children {
- if _, err := r.Read(hasChild); err != nil {
- return err
- } else if hasChild[0] == 0 {
- continue
- }
- var child StackTrie
- if err := child.unmarshalBinary(r); err != nil {
- return err
- }
- st.children[i] = &child
+// Update inserts a (key, value) pair into the stack trie.
+func (t *StackTrie) Update(key, value []byte) error {
+ k := keybytesToHex(key)
+ if len(value) == 0 {
+ panic("deletion not supported")
}
+ t.insert(t.root, k[:len(k)-1], value, nil)
return nil
}
-func (st *StackTrie) setWriter(writeFn NodeWriteFunc) {
- st.writeFn = writeFn
- for _, child := range st.children {
- if child != nil {
- child.setWriter(writeFn)
- }
+// MustUpdate is a wrapper of Update; it ignores any encountered error and
+// just prints out an error message instead.
+func (t *StackTrie) MustUpdate(key, value []byte) {
+ if err := t.Update(key, value); err != nil {
+ log.Error("Unhandled trie error in StackTrie.Update", "err", err)
}
}
-func newLeaf(owner common.Hash, key, val []byte, writeFn NodeWriteFunc) *StackTrie {
- st := stackTrieFromPool(writeFn, owner)
- st.nodeType = leafNode
+func (t *StackTrie) Reset() {
+ t.writeFn = nil
+ t.root = stPool.Get().(*stNode)
+}
+
+// stNode represents a node within a StackTrie
+type stNode struct {
+ typ uint8 // node type (as in branch, ext, leaf)
+ key []byte // key chunk covered by this (leaf|ext) node
+ val []byte // value contained by this node if it's a leaf
+ children [16]*stNode // list of children (for branch and exts)
+}
+
+// newLeaf constructs a leaf node with provided node key and value. The key
+// will be deep-copied in the function and safe to modify afterwards, but
+// value is not.
+func newLeaf(key, val []byte) *stNode {
+ st := stPool.Get().(*stNode)
+ st.typ = leafNode
st.key = append(st.key, key...)
st.val = val
return st
}
-func newExt(owner common.Hash, key []byte, child *StackTrie, writeFn NodeWriteFunc) *StackTrie {
- st := stackTrieFromPool(writeFn, owner)
- st.nodeType = extNode
+// newExt constructs an extension node with provided node key and child. The
+// key will be deep-copied in the function and safe to modify afterwards.
+func newExt(key []byte, child *stNode) *stNode {
+ st := stPool.Get().(*stNode)
+ st.typ = extNode
st.key = append(st.key, key...)
st.children[0] = child
return st
}
-// List all values that StackTrie#nodeType can hold
+// List all values that stNode#typ can hold
const (
emptyNode = iota
branchNode
@@ -202,59 +114,40 @@ const (
hashedNode
)
-// Update inserts a (key, value) pair into the stack trie.
-func (st *StackTrie) Update(key, value []byte) error {
- k := keybytesToHex(key)
- if len(value) == 0 {
- panic("deletion not supported")
+func (n *stNode) reset() *stNode {
+ n.key = n.key[:0]
+ n.val = nil
+ for i := range n.children {
+ n.children[i] = nil
}
- st.insert(k[:len(k)-1], value, nil)
- return nil
-}
-
-// MustUpdate is a wrapper of Update and will omit any encountered error but
-// just print out an error message.
-func (st *StackTrie) MustUpdate(key, value []byte) {
- if err := st.Update(key, value); err != nil {
- log.Error("Unhandled trie error in StackTrie.Update", "err", err)
- }
-}
-
-func (st *StackTrie) Reset() {
- st.owner = common.Hash{}
- st.writeFn = nil
- st.key = st.key[:0]
- st.val = nil
- for i := range st.children {
- st.children[i] = nil
- }
- st.nodeType = emptyNode
+ n.typ = emptyNode
+ return n
}
// Helper function that, given a full key, determines the index
// at which the chunk pointed by st.keyOffset is different from
// the same chunk in the full key.
-func (st *StackTrie) getDiffIndex(key []byte) int {
- for idx, nibble := range st.key {
+func (n *stNode) getDiffIndex(key []byte) int {
+ for idx, nibble := range n.key {
if nibble != key[idx] {
return idx
}
}
- return len(st.key)
+ return len(n.key)
}
// Helper function to that inserts a (key, value) pair into
// the trie.
-func (st *StackTrie) insert(key, value []byte, prefix []byte) {
- switch st.nodeType {
+func (t *StackTrie) insert(st *stNode, key, value []byte, prefix []byte) {
+ switch st.typ {
case branchNode: /* Branch */
idx := int(key[0])
// Unresolve elder siblings
for i := idx - 1; i >= 0; i-- {
if st.children[i] != nil {
- if st.children[i].nodeType != hashedNode {
- st.children[i].hash(append(prefix, byte(i)))
+ if st.children[i].typ != hashedNode {
+ t.hash(st.children[i], append(prefix, byte(i)))
}
break
}
@@ -262,9 +155,9 @@ func (st *StackTrie) insert(key, value []byte, prefix []byte) {
// Add new child
if st.children[idx] == nil {
- st.children[idx] = newLeaf(st.owner, key[1:], value, st.writeFn)
+ st.children[idx] = newLeaf(key[1:], value)
} else {
- st.children[idx].insert(key[1:], value, append(prefix, key[0]))
+ t.insert(st.children[idx], key[1:], value, append(prefix, key[0]))
}
case extNode: /* Ext */
@@ -279,46 +172,46 @@ func (st *StackTrie) insert(key, value []byte, prefix []byte) {
if diffidx == len(st.key) {
// Ext key and key segment are identical, recurse into
// the child node.
- st.children[0].insert(key[diffidx:], value, append(prefix, key[:diffidx]...))
+ t.insert(st.children[0], key[diffidx:], value, append(prefix, key[:diffidx]...))
return
}
// Save the original part. Depending if the break is
// at the extension's last byte or not, create an
// intermediate extension or use the extension's child
// node directly.
- var n *StackTrie
+ var n *stNode
if diffidx < len(st.key)-1 {
// Break on the non-last byte, insert an intermediate
// extension. The path prefix of the newly-inserted
// extension should also contain the different byte.
- n = newExt(st.owner, st.key[diffidx+1:], st.children[0], st.writeFn)
- n.hash(append(prefix, st.key[:diffidx+1]...))
+ n = newExt(st.key[diffidx+1:], st.children[0])
+ t.hash(n, append(prefix, st.key[:diffidx+1]...))
} else {
// Break on the last byte, no need to insert
// an extension node: reuse the current node.
// The path prefix of the original part should
// still be same.
n = st.children[0]
- n.hash(append(prefix, st.key...))
+ t.hash(n, append(prefix, st.key...))
}
- var p *StackTrie
+ var p *stNode
if diffidx == 0 {
// the break is on the first byte, so
// the current node is converted into
// a branch node.
st.children[0] = nil
p = st
- st.nodeType = branchNode
+ st.typ = branchNode
} else {
// the common prefix is at least one byte
// long, insert a new intermediate branch
// node.
- st.children[0] = stackTrieFromPool(st.writeFn, st.owner)
- st.children[0].nodeType = branchNode
+ st.children[0] = stPool.Get().(*stNode)
+ st.children[0].typ = branchNode
p = st.children[0]
}
// Create a leaf for the inserted part
- o := newLeaf(st.owner, key[diffidx+1:], value, st.writeFn)
+ o := newLeaf(key[diffidx+1:], value)
// Insert both child leaves where they belong:
origIdx := st.key[diffidx]
@@ -344,18 +237,18 @@ func (st *StackTrie) insert(key, value []byte, prefix []byte) {
// Check if the split occurs at the first nibble of the
// chunk. In that case, no prefix extnode is necessary.
// Otherwise, create that
- var p *StackTrie
+ var p *stNode
if diffidx == 0 {
// Convert current leaf into a branch
- st.nodeType = branchNode
+ st.typ = branchNode
p = st
st.children[0] = nil
} else {
// Convert current node into an ext,
// and insert a child branch node.
- st.nodeType = extNode
- st.children[0] = NewStackTrieWithOwner(st.writeFn, st.owner)
- st.children[0].nodeType = branchNode
+ st.typ = extNode
+ st.children[0] = stPool.Get().(*stNode)
+ st.children[0].typ = branchNode
p = st.children[0]
}
@@ -363,11 +256,11 @@ func (st *StackTrie) insert(key, value []byte, prefix []byte) {
// value and another containing the new value. The child leaf
// is hashed directly in order to free up some memory.
origIdx := st.key[diffidx]
- p.children[origIdx] = newLeaf(st.owner, st.key[diffidx+1:], st.val, st.writeFn)
- p.children[origIdx].hash(append(prefix, st.key[:diffidx+1]...))
+ p.children[origIdx] = newLeaf(st.key[diffidx+1:], st.val)
+ t.hash(p.children[origIdx], append(prefix, st.key[:diffidx+1]...))
newIdx := key[diffidx]
- p.children[newIdx] = newLeaf(st.owner, key[diffidx+1:], value, st.writeFn)
+ p.children[newIdx] = newLeaf(key[diffidx+1:], value)
// Finally, cut off the key part that has been passed
// over to the children.
@@ -375,7 +268,7 @@ func (st *StackTrie) insert(key, value []byte, prefix []byte) {
st.val = nil
case emptyNode: /* Empty */
- st.nodeType = leafNode
+ st.typ = leafNode
st.key = key
st.val = value
@@ -398,25 +291,18 @@ func (st *StackTrie) insert(key, value []byte, prefix []byte) {
// - And the 'st.type' will be 'hashedNode' AGAIN
//
// This method also sets 'st.type' to hashedNode, and clears 'st.key'.
-func (st *StackTrie) hash(path []byte) {
- h := newHasher(false)
- defer returnHasherToPool(h)
-
- st.hashRec(h, path)
-}
-
-func (st *StackTrie) hashRec(hasher *hasher, path []byte) {
+func (t *StackTrie) hash(st *stNode, path []byte) {
// The switch below sets this to the RLP-encoding of this node.
var encodedNode []byte
- switch st.nodeType {
+ switch st.typ {
case hashedNode:
return
case emptyNode:
st.val = types.EmptyRootHash.Bytes()
st.key = st.key[:0]
- st.nodeType = hashedNode
+ st.typ = hashedNode
return
case branchNode:
@@ -426,23 +312,21 @@ func (st *StackTrie) hashRec(hasher *hasher, path []byte) {
nodes.Children[i] = nilValueNode
continue
}
- child.hashRec(hasher, append(path, byte(i)))
+ t.hash(child, append(path, byte(i)))
+
if len(child.val) < 32 {
nodes.Children[i] = rawNode(child.val)
} else {
nodes.Children[i] = hashNode(child.val)
}
-
- // Release child back to pool.
st.children[i] = nil
- returnToPool(child)
+ stPool.Put(child.reset()) // Release child back to pool.
}
-
- nodes.encode(hasher.encbuf)
- encodedNode = hasher.encodedBytes()
+ nodes.encode(t.h.encbuf)
+ encodedNode = t.h.encodedBytes()
case extNode:
- st.children[0].hashRec(hasher, append(path, st.key...))
+ t.hash(st.children[0], append(path, st.key...))
n := shortNode{Key: hexToCompactInPlace(st.key)}
if len(st.children[0].val) < 32 {
@@ -450,26 +334,24 @@ func (st *StackTrie) hashRec(hasher *hasher, path []byte) {
} else {
n.Val = hashNode(st.children[0].val)
}
+ n.encode(t.h.encbuf)
+ encodedNode = t.h.encodedBytes()
- n.encode(hasher.encbuf)
- encodedNode = hasher.encodedBytes()
-
- // Release child back to pool.
- returnToPool(st.children[0])
+ stPool.Put(st.children[0].reset()) // Release child back to pool.
st.children[0] = nil
case leafNode:
st.key = append(st.key, byte(16))
n := shortNode{Key: hexToCompactInPlace(st.key), Val: valueNode(st.val)}
- n.encode(hasher.encbuf)
- encodedNode = hasher.encodedBytes()
+ n.encode(t.h.encbuf)
+ encodedNode = t.h.encodedBytes()
default:
panic("invalid node type")
}
- st.nodeType = hashedNode
+ st.typ = hashedNode
st.key = st.key[:0]
if len(encodedNode) < 32 {
st.val = common.CopyBytes(encodedNode)
@@ -478,18 +360,16 @@ func (st *StackTrie) hashRec(hasher *hasher, path []byte) {
// Write the hash to the 'val'. We allocate a new val here to not mutate
// input values
- st.val = hasher.hashData(encodedNode)
- if st.writeFn != nil {
- st.writeFn(st.owner, path, common.BytesToHash(st.val), encodedNode)
+ st.val = t.h.hashData(encodedNode)
+ if t.writeFn != nil {
+ t.writeFn(path, common.BytesToHash(st.val), encodedNode)
}
}
// Hash returns the hash of the current node.
-func (st *StackTrie) Hash() (h common.Hash) {
- hasher := newHasher(false)
- defer returnHasherToPool(hasher)
-
- st.hashRec(hasher, nil)
+func (t *StackTrie) Hash() (h common.Hash) {
+ st := t.root
+ t.hash(st, nil)
if len(st.val) == 32 {
copy(h[:], st.val)
return h
@@ -497,9 +377,9 @@ func (st *StackTrie) Hash() (h common.Hash) {
// If the node's RLP isn't 32 bytes long, the node will not
// be hashed, and instead contain the rlp-encoding of the
// node. For the top level node, we need to force the hashing.
- hasher.sha.Reset()
- hasher.sha.Write(st.val)
- hasher.sha.Read(h[:])
+ t.h.sha.Reset()
+ t.h.sha.Write(st.val)
+ t.h.sha.Read(h[:])
return h
}
@@ -510,14 +390,12 @@ func (st *StackTrie) Hash() (h common.Hash) {
//
// The associated database is expected, otherwise the whole commit
// functionality should be disabled.
-func (st *StackTrie) Commit() (h common.Hash, err error) {
- if st.writeFn == nil {
+func (t *StackTrie) Commit() (h common.Hash, err error) {
+ if t.writeFn == nil {
return common.Hash{}, ErrCommitDisabled
}
- hasher := newHasher(false)
- defer returnHasherToPool(hasher)
-
- st.hashRec(hasher, nil)
+ st := t.root
+ t.hash(st, nil)
if len(st.val) == 32 {
copy(h[:], st.val)
return h, nil
@@ -525,10 +403,10 @@ func (st *StackTrie) Commit() (h common.Hash, err error) {
// If the node's RLP isn't 32 bytes long, the node will not
// be hashed (and committed), and instead contain the rlp-encoding of the
// node. For the top level node, we need to force the hashing+commit.
- hasher.sha.Reset()
- hasher.sha.Write(st.val)
- hasher.sha.Read(h[:])
+ t.h.sha.Reset()
+ t.h.sha.Write(st.val)
+ t.h.sha.Read(h[:])
- st.writeFn(st.owner, nil, h, st.val)
+ t.writeFn(nil, h, st.val)
return h, nil
}
diff --git a/trie/stacktrie_test.go b/trie/stacktrie_test.go
index 6bd0b83e3..0e52781c6 100644
--- a/trie/stacktrie_test.go
+++ b/trie/stacktrie_test.go
@@ -198,12 +198,11 @@ func TestStackTrieInsertAndHash(t *testing.T) {
{"000003", "XXXXXXXXXXXXXXXXXXXXXXXXXXXX", "962c0fffdeef7612a4f7bff1950d67e3e81c878e48b9ae45b3b374253b050bd8"},
},
}
- st := NewStackTrie(nil)
for i, test := range tests {
// The StackTrie does not allow Insert(), Hash(), Insert(), ...
// so we will create new trie for every sequence length of inserts.
for l := 1; l <= len(test); l++ {
- st.Reset()
+ st := NewStackTrie(nil)
for j := 0; j < l; j++ {
kv := &test[j]
if err := st.Update(common.FromHex(kv.K), []byte(kv.V)); err != nil {
@@ -377,48 +376,3 @@ func TestStacktrieNotModifyValues(t *testing.T) {
}
}
}
-
-// TestStacktrieSerialization tests that the stacktrie works well if we
-// serialize/unserialize it a lot
-func TestStacktrieSerialization(t *testing.T) {
- var (
- st = NewStackTrie(nil)
- nt = NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
- keyB = big.NewInt(1)
- keyDelta = big.NewInt(1)
- vals [][]byte
- keys [][]byte
- )
- getValue := func(i int) []byte {
- if i%2 == 0 { // large
- return crypto.Keccak256(big.NewInt(int64(i)).Bytes())
- } else { //small
- return big.NewInt(int64(i)).Bytes()
- }
- }
- for i := 0; i < 10; i++ {
- vals = append(vals, getValue(i))
- keys = append(keys, common.BigToHash(keyB).Bytes())
- keyB = keyB.Add(keyB, keyDelta)
- keyDelta.Add(keyDelta, common.Big1)
- }
- for i, k := range keys {
- nt.Update(k, common.CopyBytes(vals[i]))
- }
-
- for i, k := range keys {
- blob, err := st.MarshalBinary()
- if err != nil {
- t.Fatal(err)
- }
- newSt, err := NewFromBinary(blob, nil)
- if err != nil {
- t.Fatal(err)
- }
- st = newSt
- st.Update(k, common.CopyBytes(vals[i]))
- }
- if have, want := st.Hash(), nt.Hash(); have != want {
- t.Fatalf("have %#x want %#x", have, want)
- }
-}
diff --git a/trie/sync.go b/trie/sync.go
index 9da070607..6939aed76 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -302,7 +302,7 @@ func (s *Sync) Missing(max int) ([]string, []common.Hash, []common.Hash) {
}
// ProcessCode injects the received data for requested item. Note it can
-// happpen that the single response commits two pending requests(e.g.
+// happen that the single response commits two pending requests (e.g.
// there are two requests one for code and one for node but the hash
// is same). In this case the second response for the same hash will
// be treated as "non-requested" item or "already-processed" item but
@@ -391,7 +391,7 @@ func (s *Sync) Pending() int {
return len(s.nodeReqs) + len(s.codeReqs)
}
-// schedule inserts a new state retrieval request into the fetch queue. If there
+// scheduleNodeRequest inserts a new state retrieval request into the fetch queue. If there
// is already a pending request for this node, the new request will be discarded
// and only a parent reference added to the old one.
func (s *Sync) scheduleNodeRequest(req *nodeRequest) {
@@ -406,7 +406,7 @@ func (s *Sync) scheduleNodeRequest(req *nodeRequest) {
s.queue.Push(string(req.path), prio)
}
-// schedule inserts a new state retrieval request into the fetch queue. If there
+// scheduleCodeRequest inserts a new state retrieval request into the fetch queue. If there
// is already a pending request for this node, the new request will be discarded
// and only a parent reference added to the old one.
func (s *Sync) scheduleCodeRequest(req *codeRequest) {
@@ -556,7 +556,7 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) {
return requests, nil
}
-// commit finalizes a retrieval request and stores it into the membatch. If any
+// commitNodeRequest finalizes a retrieval request and stores it into the membatch. If any
// of the referencing parent requests complete due to this commit, they are also
// committed themselves.
func (s *Sync) commitNodeRequest(req *nodeRequest) error {
@@ -591,7 +591,7 @@ func (s *Sync) commitNodeRequest(req *nodeRequest) error {
return nil
}
-// commit finalizes a retrieval request and stores it into the membatch. If any
+// commitCodeRequest finalizes a retrieval request and stores it into the membatch. If any
// of the referencing parent requests complete due to this commit, they are also
// committed themselves.
func (s *Sync) commitCodeRequest(req *codeRequest) error {
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 35ccc7720..2dfe81ef8 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -614,7 +614,9 @@ func benchGet(b *testing.B) {
k := make([]byte, 32)
for i := 0; i < benchElemCount; i++ {
binary.LittleEndian.PutUint64(k, uint64(i))
- trie.MustUpdate(k, k)
+ v := make([]byte, 32)
+ binary.LittleEndian.PutUint64(v, uint64(i))
+ trie.MustUpdate(k, v)
}
binary.LittleEndian.PutUint64(k, benchElemCount/2)
@@ -630,8 +632,10 @@ func benchUpdate(b *testing.B, e binary.ByteOrder) *Trie {
k := make([]byte, 32)
b.ReportAllocs()
for i := 0; i < b.N; i++ {
+ v := make([]byte, 32)
e.PutUint64(k, uint64(i))
- trie.MustUpdate(k, k)
+ e.PutUint64(v, uint64(i))
+ trie.MustUpdate(k, v)
}
return trie
}
@@ -908,8 +912,8 @@ func TestCommitSequenceStackTrie(t *testing.T) {
trie := NewEmpty(db)
// Another sponge is used for the stacktrie commits
stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"}
- stTrie := NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
- rawdb.WriteTrieNode(stackTrieSponge, owner, path, hash, blob, db.Scheme())
+ stTrie := NewStackTrie(func(path []byte, hash common.Hash, blob []byte) {
+ rawdb.WriteTrieNode(stackTrieSponge, common.Hash{}, path, hash, blob, db.Scheme())
})
// Fill the trie with elements
for i := 0; i < count; i++ {
@@ -967,8 +971,8 @@ func TestCommitSequenceSmallRoot(t *testing.T) {
trie := NewEmpty(db)
// Another sponge is used for the stacktrie commits
stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"}
- stTrie := NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
- rawdb.WriteTrieNode(stackTrieSponge, owner, path, hash, blob, db.Scheme())
+ stTrie := NewStackTrie(func(path []byte, hash common.Hash, blob []byte) {
+ rawdb.WriteTrieNode(stackTrieSponge, common.Hash{}, path, hash, blob, db.Scheme())
})
// Add a single small-element to the trie(s)
key := make([]byte, 5)
diff --git a/light/nodeset.go b/trie/trienode/proof.go
similarity index 73%
rename from light/nodeset.go
rename to trie/trienode/proof.go
index 366259678..012f0087d 100644
--- a/light/nodeset.go
+++ b/trie/trienode/proof.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
-package light
+package trienode
import (
"errors"
@@ -26,9 +26,9 @@ import (
"github.com/ethereum/go-ethereum/rlp"
)
-// NodeSet stores a set of trie nodes. It implements trie.Database and can also
+// ProofSet stores a set of trie nodes. It implements trie.Database and can also
// act as a cache for another trie.Database.
-type NodeSet struct {
+type ProofSet struct {
nodes map[string][]byte
order []string
@@ -36,15 +36,15 @@ type NodeSet struct {
lock sync.RWMutex
}
-// NewNodeSet creates an empty node set
-func NewNodeSet() *NodeSet {
- return &NodeSet{
+// NewProofSet creates an empty node set
+func NewProofSet() *ProofSet {
+ return &ProofSet{
nodes: make(map[string][]byte),
}
}
// Put stores a new node in the set
-func (db *NodeSet) Put(key []byte, value []byte) error {
+func (db *ProofSet) Put(key []byte, value []byte) error {
db.lock.Lock()
defer db.lock.Unlock()
@@ -61,7 +61,7 @@ func (db *NodeSet) Put(key []byte, value []byte) error {
}
// Delete removes a node from the set
-func (db *NodeSet) Delete(key []byte) error {
+func (db *ProofSet) Delete(key []byte) error {
db.lock.Lock()
defer db.lock.Unlock()
@@ -70,7 +70,7 @@ func (db *NodeSet) Delete(key []byte) error {
}
// Get returns a stored node
-func (db *NodeSet) Get(key []byte) ([]byte, error) {
+func (db *ProofSet) Get(key []byte) ([]byte, error) {
db.lock.RLock()
defer db.lock.RUnlock()
@@ -81,13 +81,13 @@ func (db *NodeSet) Get(key []byte) ([]byte, error) {
}
// Has returns true if the node set contains the given key
-func (db *NodeSet) Has(key []byte) (bool, error) {
+func (db *ProofSet) Has(key []byte) (bool, error) {
_, err := db.Get(key)
return err == nil, nil
}
// KeyCount returns the number of nodes in the set
-func (db *NodeSet) KeyCount() int {
+func (db *ProofSet) KeyCount() int {
db.lock.RLock()
defer db.lock.RUnlock()
@@ -95,19 +95,19 @@ func (db *NodeSet) KeyCount() int {
}
// DataSize returns the aggregated data size of nodes in the set
-func (db *NodeSet) DataSize() int {
+func (db *ProofSet) DataSize() int {
db.lock.RLock()
defer db.lock.RUnlock()
return db.dataSize
}
-// NodeList converts the node set to a NodeList
-func (db *NodeSet) NodeList() NodeList {
+// List converts the node set to a ProofList
+func (db *ProofSet) List() ProofList {
db.lock.RLock()
defer db.lock.RUnlock()
- var values NodeList
+ var values ProofList
for _, key := range db.order {
values = append(values, db.nodes[key])
}
@@ -115,7 +115,7 @@ func (db *NodeSet) NodeList() NodeList {
}
// Store writes the contents of the set to the given database
-func (db *NodeSet) Store(target ethdb.KeyValueWriter) {
+func (db *ProofSet) Store(target ethdb.KeyValueWriter) {
db.lock.RLock()
defer db.lock.RUnlock()
@@ -124,36 +124,36 @@ func (db *NodeSet) Store(target ethdb.KeyValueWriter) {
}
}
-// NodeList stores an ordered list of trie nodes. It implements ethdb.KeyValueWriter.
-type NodeList []rlp.RawValue
+// ProofList stores an ordered list of trie nodes. It implements ethdb.KeyValueWriter.
+type ProofList []rlp.RawValue
// Store writes the contents of the list to the given database
-func (n NodeList) Store(db ethdb.KeyValueWriter) {
+func (n ProofList) Store(db ethdb.KeyValueWriter) {
for _, node := range n {
db.Put(crypto.Keccak256(node), node)
}
}
-// NodeSet converts the node list to a NodeSet
-func (n NodeList) NodeSet() *NodeSet {
- db := NewNodeSet()
+// Set converts the node list to a ProofSet
+func (n ProofList) Set() *ProofSet {
+ db := NewProofSet()
n.Store(db)
return db
}
// Put stores a new node at the end of the list
-func (n *NodeList) Put(key []byte, value []byte) error {
+func (n *ProofList) Put(key []byte, value []byte) error {
*n = append(*n, value)
return nil
}
// Delete panics as there's no reason to remove a node from the list.
-func (n *NodeList) Delete(key []byte) error {
+func (n *ProofList) Delete(key []byte) error {
panic("not supported")
}
// DataSize returns the aggregated data size of nodes in the list
-func (n NodeList) DataSize() int {
+func (n ProofList) DataSize() int {
var size int
for _, node := range n {
size += len(node)