Merge commit '0d45d72d7' into merge/geth-v1.13.3

This commit is contained in:
philip-morlier 2023-10-12 07:16:53 -07:00
commit f912a2192d
131 changed files with 2073 additions and 1717 deletions

2
.gitignore vendored
View File

@ -48,3 +48,5 @@ profile.cov
**/yarn-error.log **/yarn-error.log
logs/ logs/
tests/spec-tests/

View File

@ -606,8 +606,7 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
if call.GasPrice != nil && (call.GasFeeCap != nil || call.GasTipCap != nil) { if call.GasPrice != nil && (call.GasFeeCap != nil || call.GasTipCap != nil) {
return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
} }
head := b.blockchain.CurrentHeader() if !b.blockchain.Config().IsLondon(header.Number) {
if !b.blockchain.Config().IsLondon(head.Number) {
// If there's no basefee, then it must be a non-1559 execution // If there's no basefee, then it must be a non-1559 execution
if call.GasPrice == nil { if call.GasPrice == nil {
call.GasPrice = new(big.Int) call.GasPrice = new(big.Int)
@ -629,13 +628,13 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
// Backfill the legacy gasPrice for EVM execution, unless we're all zeroes // Backfill the legacy gasPrice for EVM execution, unless we're all zeroes
call.GasPrice = new(big.Int) call.GasPrice = new(big.Int)
if call.GasFeeCap.BitLen() > 0 || call.GasTipCap.BitLen() > 0 { if call.GasFeeCap.BitLen() > 0 || call.GasTipCap.BitLen() > 0 {
call.GasPrice = math.BigMin(new(big.Int).Add(call.GasTipCap, head.BaseFee), call.GasFeeCap) call.GasPrice = math.BigMin(new(big.Int).Add(call.GasTipCap, header.BaseFee), call.GasFeeCap)
} }
} }
} }
// Ensure message is initialized properly. // Ensure message is initialized properly.
if call.Gas == 0 { if call.Gas == 0 {
call.Gas = 50000000 call.Gas = 10 * header.GasLimit
} }
if call.Value == nil { if call.Value == nil {
call.Value = new(big.Int) call.Value = new(big.Int)

View File

@ -8,7 +8,7 @@
## Preparing the smartcard ## Preparing the smartcard
**WARNING: FOILLOWING THESE INSTRUCTIONS WILL DESTROY THE MASTER KEY ON YOUR CARD. ONLY PROCEED IF NO FUNDS ARE ASSOCIATED WITH THESE ACCOUNTS** **WARNING: FOLLOWING THESE INSTRUCTIONS WILL DESTROY THE MASTER KEY ON YOUR CARD. ONLY PROCEED IF NO FUNDS ARE ASSOCIATED WITH THESE ACCOUNTS**
You can use status' [keycard-cli](https://github.com/status-im/keycard-cli) and you should get _at least_ version 2.1.1 of their [smartcard application](https://github.com/status-im/status-keycard/releases/download/2.2.1/keycard_v2.2.1.cap) You can use status' [keycard-cli](https://github.com/status-im/keycard-cli) and you should get _at least_ version 2.1.1 of their [smartcard application](https://github.com/status-im/status-keycard/releases/download/2.2.1/keycard_v2.2.1.cap)

View File

@ -145,7 +145,7 @@ func TestChain_GetHeaders(t *testing.T) {
}{ }{
{ {
req: GetBlockHeaders{ req: GetBlockHeaders{
GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{ GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{Number: uint64(2)}, Origin: eth.HashOrNumber{Number: uint64(2)},
Amount: uint64(5), Amount: uint64(5),
Skip: 1, Skip: 1,
@ -162,7 +162,7 @@ func TestChain_GetHeaders(t *testing.T) {
}, },
{ {
req: GetBlockHeaders{ req: GetBlockHeaders{
GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{ GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{Number: uint64(chain.Len() - 1)}, Origin: eth.HashOrNumber{Number: uint64(chain.Len() - 1)},
Amount: uint64(3), Amount: uint64(3),
Skip: 0, Skip: 0,
@ -177,7 +177,7 @@ func TestChain_GetHeaders(t *testing.T) {
}, },
{ {
req: GetBlockHeaders{ req: GetBlockHeaders{
GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{ GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{Hash: chain.Head().Hash()}, Origin: eth.HashOrNumber{Hash: chain.Head().Hash()},
Amount: uint64(1), Amount: uint64(1),
Skip: 0, Skip: 0,

View File

@ -62,7 +62,6 @@ func (s *Suite) dial() (*Conn, error) {
} }
// set default p2p capabilities // set default p2p capabilities
conn.caps = []p2p.Cap{ conn.caps = []p2p.Cap{
{Name: "eth", Version: 66},
{Name: "eth", Version: 67}, {Name: "eth", Version: 67},
{Name: "eth", Version: 68}, {Name: "eth", Version: 68},
} }
@ -238,7 +237,7 @@ func (c *Conn) readAndServe(chain *Chain, timeout time.Duration) Message {
} }
resp := &BlockHeaders{ resp := &BlockHeaders{
RequestId: msg.ReqID(), RequestId: msg.ReqID(),
BlockHeadersPacket: eth.BlockHeadersPacket(headers), BlockHeadersRequest: eth.BlockHeadersRequest(headers),
} }
if err := c.Write(resp); err != nil { if err := c.Write(resp); err != nil {
return errorf("could not write to connection: %v", err) return errorf("could not write to connection: %v", err)
@ -267,7 +266,7 @@ func (c *Conn) headersRequest(request *GetBlockHeaders, chain *Chain, reqID uint
if !ok { if !ok {
return nil, fmt.Errorf("unexpected message received: %s", pretty.Sdump(msg)) return nil, fmt.Errorf("unexpected message received: %s", pretty.Sdump(msg))
} }
headers := []*types.Header(resp.BlockHeadersPacket) headers := []*types.Header(resp.BlockHeadersRequest)
return headers, nil return headers, nil
} }
@ -379,7 +378,7 @@ func (s *Suite) waitForBlockImport(conn *Conn, block *types.Block) error {
conn.SetReadDeadline(time.Now().Add(20 * time.Second)) conn.SetReadDeadline(time.Now().Add(20 * time.Second))
// create request // create request
req := &GetBlockHeaders{ req := &GetBlockHeaders{
GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{ GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{Hash: block.Hash()}, Origin: eth.HashOrNumber{Hash: block.Hash()},
Amount: 1, Amount: 1,
}, },
@ -605,7 +604,7 @@ func (s *Suite) hashAnnounce() error {
} }
err = sendConn.Write(&BlockHeaders{ err = sendConn.Write(&BlockHeaders{
RequestId: blockHeaderReq.ReqID(), RequestId: blockHeaderReq.ReqID(),
BlockHeadersPacket: eth.BlockHeadersPacket{nextBlock.Header()}, BlockHeadersRequest: eth.BlockHeadersRequest{nextBlock.Header()},
}) })
if err != nil { if err != nil {
return fmt.Errorf("failed to write to connection: %v", err) return fmt.Errorf("failed to write to connection: %v", err)

View File

@ -27,8 +27,8 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/protocols/snap" "github.com/ethereum/go-ethereum/eth/protocols/snap"
"github.com/ethereum/go-ethereum/internal/utesting" "github.com/ethereum/go-ethereum/internal/utesting"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/trienode"
"golang.org/x/crypto/sha3" "golang.org/x/crypto/sha3"
) )
@ -530,11 +530,11 @@ func (s *Suite) snapGetAccountRange(t *utesting.T, tc *accRangeTest) error {
for i, key := range hashes { for i, key := range hashes {
keys[i] = common.CopyBytes(key[:]) keys[i] = common.CopyBytes(key[:])
} }
nodes := make(light.NodeList, len(proof)) nodes := make(trienode.ProofList, len(proof))
for i, node := range proof { for i, node := range proof {
nodes[i] = node nodes[i] = node
} }
proofdb := nodes.NodeSet() proofdb := nodes.Set()
var end []byte var end []byte
if len(keys) > 0 { if len(keys) > 0 {

View File

@ -112,7 +112,7 @@ func (s *Suite) TestGetBlockHeaders(t *utesting.T) {
} }
// write request // write request
req := &GetBlockHeaders{ req := &GetBlockHeaders{
GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{ GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{Hash: s.chain.blocks[1].Hash()}, Origin: eth.HashOrNumber{Hash: s.chain.blocks[1].Hash()},
Amount: 2, Amount: 2,
Skip: 1, Skip: 1,
@ -150,7 +150,7 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) {
// create two requests // create two requests
req1 := &GetBlockHeaders{ req1 := &GetBlockHeaders{
RequestId: uint64(111), RequestId: uint64(111),
GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{ GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{ Origin: eth.HashOrNumber{
Hash: s.chain.blocks[1].Hash(), Hash: s.chain.blocks[1].Hash(),
}, },
@ -161,7 +161,7 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) {
} }
req2 := &GetBlockHeaders{ req2 := &GetBlockHeaders{
RequestId: uint64(222), RequestId: uint64(222),
GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{ GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{ Origin: eth.HashOrNumber{
Hash: s.chain.blocks[1].Hash(), Hash: s.chain.blocks[1].Hash(),
}, },
@ -201,10 +201,10 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) {
if err != nil { if err != nil {
t.Fatalf("failed to get expected headers for request 2: %v", err) t.Fatalf("failed to get expected headers for request 2: %v", err)
} }
if !headersMatch(expected1, headers1.BlockHeadersPacket) { if !headersMatch(expected1, headers1.BlockHeadersRequest) {
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1) t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1)
} }
if !headersMatch(expected2, headers2.BlockHeadersPacket) { if !headersMatch(expected2, headers2.BlockHeadersRequest) {
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2) t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2)
} }
} }
@ -224,7 +224,7 @@ func (s *Suite) TestSameRequestID(t *utesting.T) {
reqID := uint64(1234) reqID := uint64(1234)
request1 := &GetBlockHeaders{ request1 := &GetBlockHeaders{
RequestId: reqID, RequestId: reqID,
GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{ GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{ Origin: eth.HashOrNumber{
Number: 1, Number: 1,
}, },
@ -233,7 +233,7 @@ func (s *Suite) TestSameRequestID(t *utesting.T) {
} }
request2 := &GetBlockHeaders{ request2 := &GetBlockHeaders{
RequestId: reqID, RequestId: reqID,
GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{ GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{ Origin: eth.HashOrNumber{
Number: 33, Number: 33,
}, },
@ -270,10 +270,10 @@ func (s *Suite) TestSameRequestID(t *utesting.T) {
if err != nil { if err != nil {
t.Fatalf("failed to get expected block headers: %v", err) t.Fatalf("failed to get expected block headers: %v", err)
} }
if !headersMatch(expected1, headers1.BlockHeadersPacket) { if !headersMatch(expected1, headers1.BlockHeadersRequest) {
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1) t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1)
} }
if !headersMatch(expected2, headers2.BlockHeadersPacket) { if !headersMatch(expected2, headers2.BlockHeadersRequest) {
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2) t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2)
} }
} }
@ -290,7 +290,7 @@ func (s *Suite) TestZeroRequestID(t *utesting.T) {
t.Fatalf("peering failed: %v", err) t.Fatalf("peering failed: %v", err)
} }
req := &GetBlockHeaders{ req := &GetBlockHeaders{
GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{ GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{Number: 0}, Origin: eth.HashOrNumber{Number: 0},
Amount: 2, Amount: 2,
}, },
@ -322,7 +322,7 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) {
// create block bodies request // create block bodies request
req := &GetBlockBodies{ req := &GetBlockBodies{
RequestId: uint64(55), RequestId: uint64(55),
GetBlockBodiesPacket: eth.GetBlockBodiesPacket{ GetBlockBodiesRequest: eth.GetBlockBodiesRequest{
s.chain.blocks[54].Hash(), s.chain.blocks[54].Hash(),
s.chain.blocks[75].Hash(), s.chain.blocks[75].Hash(),
}, },
@ -336,11 +336,11 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) {
if !ok { if !ok {
t.Fatalf("unexpected: %s", pretty.Sdump(msg)) t.Fatalf("unexpected: %s", pretty.Sdump(msg))
} }
bodies := resp.BlockBodiesPacket bodies := resp.BlockBodiesResponse
t.Logf("received %d block bodies", len(bodies)) t.Logf("received %d block bodies", len(bodies))
if len(bodies) != len(req.GetBlockBodiesPacket) { if len(bodies) != len(req.GetBlockBodiesRequest) {
t.Fatalf("wrong bodies in response: expected %d bodies, "+ t.Fatalf("wrong bodies in response: expected %d bodies, "+
"got %d", len(req.GetBlockBodiesPacket), len(bodies)) "got %d", len(req.GetBlockBodiesRequest), len(bodies))
} }
} }
@ -482,7 +482,7 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) {
} }
getTxReq := &GetPooledTransactions{ getTxReq := &GetPooledTransactions{
RequestId: 1234, RequestId: 1234,
GetPooledTransactionsPacket: hashes, GetPooledTransactionsRequest: hashes,
} }
if err = conn.Write(getTxReq); err != nil { if err = conn.Write(getTxReq); err != nil {
t.Fatalf("could not write to conn: %v", err) t.Fatalf("could not write to conn: %v", err)
@ -490,7 +490,7 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) {
// check that all received transactions match those that were sent to node // check that all received transactions match those that were sent to node
switch msg := conn.waitForResponse(s.chain, timeout, getTxReq.RequestId).(type) { switch msg := conn.waitForResponse(s.chain, timeout, getTxReq.RequestId).(type) {
case *PooledTransactions: case *PooledTransactions:
for _, gotTx := range msg.PooledTransactionsPacket { for _, gotTx := range msg.PooledTransactionsResponse {
if _, exists := hashMap[gotTx.Hash()]; !exists { if _, exists := hashMap[gotTx.Hash()]; !exists {
t.Fatalf("unexpected tx received: %v", gotTx.Hash()) t.Fatalf("unexpected tx received: %v", gotTx.Hash())
} }
@ -547,8 +547,8 @@ func (s *Suite) TestNewPooledTxs(t *utesting.T) {
msg := conn.readAndServe(s.chain, timeout) msg := conn.readAndServe(s.chain, timeout)
switch msg := msg.(type) { switch msg := msg.(type) {
case *GetPooledTransactions: case *GetPooledTransactions:
if len(msg.GetPooledTransactionsPacket) != len(hashes) { if len(msg.GetPooledTransactionsRequest) != len(hashes) {
t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsPacket)) t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsRequest))
} }
return return

View File

@ -99,24 +99,24 @@ func (msg Transactions) Code() int { return 18 }
func (msg Transactions) ReqID() uint64 { return 18 } func (msg Transactions) ReqID() uint64 { return 18 }
// GetBlockHeaders represents a block header query. // GetBlockHeaders represents a block header query.
type GetBlockHeaders eth.GetBlockHeadersPacket66 type GetBlockHeaders eth.GetBlockHeadersPacket
func (msg GetBlockHeaders) Code() int { return 19 } func (msg GetBlockHeaders) Code() int { return 19 }
func (msg GetBlockHeaders) ReqID() uint64 { return msg.RequestId } func (msg GetBlockHeaders) ReqID() uint64 { return msg.RequestId }
type BlockHeaders eth.BlockHeadersPacket66 type BlockHeaders eth.BlockHeadersPacket
func (msg BlockHeaders) Code() int { return 20 } func (msg BlockHeaders) Code() int { return 20 }
func (msg BlockHeaders) ReqID() uint64 { return msg.RequestId } func (msg BlockHeaders) ReqID() uint64 { return msg.RequestId }
// GetBlockBodies represents a GetBlockBodies request // GetBlockBodies represents a GetBlockBodies request
type GetBlockBodies eth.GetBlockBodiesPacket66 type GetBlockBodies eth.GetBlockBodiesPacket
func (msg GetBlockBodies) Code() int { return 21 } func (msg GetBlockBodies) Code() int { return 21 }
func (msg GetBlockBodies) ReqID() uint64 { return msg.RequestId } func (msg GetBlockBodies) ReqID() uint64 { return msg.RequestId }
// BlockBodies is the network packet for block content distribution. // BlockBodies is the network packet for block content distribution.
type BlockBodies eth.BlockBodiesPacket66 type BlockBodies eth.BlockBodiesPacket
func (msg BlockBodies) Code() int { return 22 } func (msg BlockBodies) Code() int { return 22 }
func (msg BlockBodies) ReqID() uint64 { return msg.RequestId } func (msg BlockBodies) ReqID() uint64 { return msg.RequestId }
@ -128,7 +128,7 @@ func (msg NewBlock) Code() int { return 23 }
func (msg NewBlock) ReqID() uint64 { return 0 } func (msg NewBlock) ReqID() uint64 { return 0 }
// NewPooledTransactionHashes66 is the network packet for the tx hash propagation message. // NewPooledTransactionHashes66 is the network packet for the tx hash propagation message.
type NewPooledTransactionHashes66 eth.NewPooledTransactionHashesPacket66 type NewPooledTransactionHashes66 eth.NewPooledTransactionHashesPacket67
func (msg NewPooledTransactionHashes66) Code() int { return 24 } func (msg NewPooledTransactionHashes66) Code() int { return 24 }
func (msg NewPooledTransactionHashes66) ReqID() uint64 { return 0 } func (msg NewPooledTransactionHashes66) ReqID() uint64 { return 0 }
@ -139,12 +139,12 @@ type NewPooledTransactionHashes eth.NewPooledTransactionHashesPacket68
func (msg NewPooledTransactionHashes) Code() int { return 24 } func (msg NewPooledTransactionHashes) Code() int { return 24 }
func (msg NewPooledTransactionHashes) ReqID() uint64 { return 0 } func (msg NewPooledTransactionHashes) ReqID() uint64 { return 0 }
type GetPooledTransactions eth.GetPooledTransactionsPacket66 type GetPooledTransactions eth.GetPooledTransactionsPacket
func (msg GetPooledTransactions) Code() int { return 25 } func (msg GetPooledTransactions) Code() int { return 25 }
func (msg GetPooledTransactions) ReqID() uint64 { return msg.RequestId } func (msg GetPooledTransactions) ReqID() uint64 { return msg.RequestId }
type PooledTransactions eth.PooledTransactionsPacket66 type PooledTransactions eth.PooledTransactionsPacket
func (msg PooledTransactions) Code() int { return 26 } func (msg PooledTransactions) Code() int { return 26 }
func (msg PooledTransactions) ReqID() uint64 { return msg.RequestId } func (msg PooledTransactions) ReqID() uint64 { return msg.RequestId }
@ -180,25 +180,25 @@ func (c *Conn) Read() Message {
case (Status{}).Code(): case (Status{}).Code():
msg = new(Status) msg = new(Status)
case (GetBlockHeaders{}).Code(): case (GetBlockHeaders{}).Code():
ethMsg := new(eth.GetBlockHeadersPacket66) ethMsg := new(eth.GetBlockHeadersPacket)
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
return errorf("could not rlp decode message: %v", err) return errorf("could not rlp decode message: %v", err)
} }
return (*GetBlockHeaders)(ethMsg) return (*GetBlockHeaders)(ethMsg)
case (BlockHeaders{}).Code(): case (BlockHeaders{}).Code():
ethMsg := new(eth.BlockHeadersPacket66) ethMsg := new(eth.BlockHeadersPacket)
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
return errorf("could not rlp decode message: %v", err) return errorf("could not rlp decode message: %v", err)
} }
return (*BlockHeaders)(ethMsg) return (*BlockHeaders)(ethMsg)
case (GetBlockBodies{}).Code(): case (GetBlockBodies{}).Code():
ethMsg := new(eth.GetBlockBodiesPacket66) ethMsg := new(eth.GetBlockBodiesPacket)
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
return errorf("could not rlp decode message: %v", err) return errorf("could not rlp decode message: %v", err)
} }
return (*GetBlockBodies)(ethMsg) return (*GetBlockBodies)(ethMsg)
case (BlockBodies{}).Code(): case (BlockBodies{}).Code():
ethMsg := new(eth.BlockBodiesPacket66) ethMsg := new(eth.BlockBodiesPacket)
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
return errorf("could not rlp decode message: %v", err) return errorf("could not rlp decode message: %v", err)
} }
@ -217,13 +217,13 @@ func (c *Conn) Read() Message {
} }
msg = new(NewPooledTransactionHashes66) msg = new(NewPooledTransactionHashes66)
case (GetPooledTransactions{}.Code()): case (GetPooledTransactions{}.Code()):
ethMsg := new(eth.GetPooledTransactionsPacket66) ethMsg := new(eth.GetPooledTransactionsPacket)
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
return errorf("could not rlp decode message: %v", err) return errorf("could not rlp decode message: %v", err)
} }
return (*GetPooledTransactions)(ethMsg) return (*GetPooledTransactions)(ethMsg)
case (PooledTransactions{}.Code()): case (PooledTransactions{}.Code()):
ethMsg := new(eth.PooledTransactionsPacket66) ethMsg := new(eth.PooledTransactionsPacket)
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
return errorf("could not rlp decode message: %v", err) return errorf("could not rlp decode message: %v", err)
} }

View File

@ -54,6 +54,9 @@ type header struct {
Nonce *types.BlockNonce `json:"nonce"` Nonce *types.BlockNonce `json:"nonce"`
BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"` BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"`
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
BlobGasUsed *uint64 `json:"blobGasUsed" rlp:"optional"`
ExcessBlobGas *uint64 `json:"excessBlobGas" rlp:"optional"`
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
} }
type headerMarshaling struct { type headerMarshaling struct {
@ -64,6 +67,8 @@ type headerMarshaling struct {
Time math.HexOrDecimal64 Time math.HexOrDecimal64
Extra hexutil.Bytes Extra hexutil.Bytes
BaseFee *math.HexOrDecimal256 BaseFee *math.HexOrDecimal256
BlobGasUsed *math.HexOrDecimal64
ExcessBlobGas *math.HexOrDecimal64
} }
type bbInput struct { type bbInput struct {
@ -129,6 +134,9 @@ func (i *bbInput) ToBlock() *types.Block {
MixDigest: i.Header.MixDigest, MixDigest: i.Header.MixDigest,
BaseFee: i.Header.BaseFee, BaseFee: i.Header.BaseFee,
WithdrawalsHash: i.Header.WithdrawalsHash, WithdrawalsHash: i.Header.WithdrawalsHash,
BlobGasUsed: i.Header.BlobGasUsed,
ExcessBlobGas: i.Header.ExcessBlobGas,
ParentBeaconRoot: i.Header.ParentBeaconBlockRoot,
} }
// Fill optional values. // Fill optional values.
@ -150,7 +158,7 @@ func (i *bbInput) ToBlock() *types.Block {
if i.Header.Nonce != nil { if i.Header.Nonce != nil {
header.Nonce = *i.Header.Nonce header.Nonce = *i.Header.Nonce
} }
if header.Difficulty != nil { if i.Header.Difficulty != nil {
header.Difficulty = i.Header.Difficulty header.Difficulty = i.Header.Difficulty
} }
return types.NewBlockWithHeader(header).WithBody(i.Txs, i.Ommers).WithWithdrawals(i.Withdrawals) return types.NewBlockWithHeader(header).WithBody(i.Txs, i.Ommers).WithWithdrawals(i.Withdrawals)

View File

@ -59,7 +59,7 @@ type ExecutionResult struct {
BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
WithdrawalsRoot *common.Hash `json:"withdrawalsRoot,omitempty"` WithdrawalsRoot *common.Hash `json:"withdrawalsRoot,omitempty"`
CurrentExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"` CurrentExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"`
CurrentBlobGasUsed *math.HexOrDecimal64 `json:"currentBlobGasUsed,omitempty"` CurrentBlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed,omitempty"`
} }
type ommer struct { type ommer struct {
@ -85,7 +85,7 @@ type stEnv struct {
Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"` Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
BaseFee *big.Int `json:"currentBaseFee,omitempty"` BaseFee *big.Int `json:"currentBaseFee,omitempty"`
ParentUncleHash common.Hash `json:"parentUncleHash"` ParentUncleHash common.Hash `json:"parentUncleHash"`
ExcessBlobGas *uint64 `json:"excessBlobGas,omitempty"` ExcessBlobGas *uint64 `json:"currentExcessBlobGas,omitempty"`
ParentExcessBlobGas *uint64 `json:"parentExcessBlobGas,omitempty"` ParentExcessBlobGas *uint64 `json:"parentExcessBlobGas,omitempty"`
ParentBlobGasUsed *uint64 `json:"parentBlobGasUsed,omitempty"` ParentBlobGasUsed *uint64 `json:"parentBlobGasUsed,omitempty"`
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"` ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"`
@ -163,17 +163,19 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
rnd := common.BigToHash(pre.Env.Random) rnd := common.BigToHash(pre.Env.Random)
vmContext.Random = &rnd vmContext.Random = &rnd
} }
// If excessBlobGas is defined, add it to the vmContext. // Calculate the BlobBaseFee
var excessBlobGas uint64
if pre.Env.ExcessBlobGas != nil { if pre.Env.ExcessBlobGas != nil {
vmContext.ExcessBlobGas = pre.Env.ExcessBlobGas excessBlobGas := *pre.Env.ExcessBlobGas
vmContext.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas)
} else { } else {
// If it is not explicitly defined, but we have the parent values, we try // If it is not explicitly defined, but we have the parent values, we try
// to calculate it ourselves. // to calculate it ourselves.
parentExcessBlobGas := pre.Env.ParentExcessBlobGas parentExcessBlobGas := pre.Env.ParentExcessBlobGas
parentBlobGasUsed := pre.Env.ParentBlobGasUsed parentBlobGasUsed := pre.Env.ParentBlobGasUsed
if parentExcessBlobGas != nil && parentBlobGasUsed != nil { if parentExcessBlobGas != nil && parentBlobGasUsed != nil {
excessBlobGas := eip4844.CalcExcessBlobGas(*parentExcessBlobGas, *parentBlobGasUsed) excessBlobGas = eip4844.CalcExcessBlobGas(*parentExcessBlobGas, *parentBlobGasUsed)
vmContext.ExcessBlobGas = &excessBlobGas vmContext.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas)
} }
} }
// If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's // If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's
@ -189,12 +191,15 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
} }
var blobGasUsed uint64 var blobGasUsed uint64
for i, tx := range txs { for i, tx := range txs {
if tx.Type() == types.BlobTxType && vmContext.ExcessBlobGas == nil { if tx.Type() == types.BlobTxType && vmContext.BlobBaseFee == nil {
errMsg := "blob tx used but field env.ExcessBlobGas missing" errMsg := "blob tx used but field env.ExcessBlobGas missing"
log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", errMsg) log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", errMsg)
rejectedTxs = append(rejectedTxs, &rejectedTx{i, errMsg}) rejectedTxs = append(rejectedTxs, &rejectedTx{i, errMsg})
continue continue
} }
if tx.Type() == types.BlobTxType {
blobGasUsed += uint64(params.BlobTxBlobGasPerBlob * len(tx.BlobHashes()))
}
msg, err := core.TransactionToMessage(tx, signer, pre.Env.BaseFee) msg, err := core.TransactionToMessage(tx, signer, pre.Env.BaseFee)
if err != nil { if err != nil {
log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", err) log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", err)
@ -224,9 +229,6 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
gaspool.SetGas(prevGas) gaspool.SetGas(prevGas)
continue continue
} }
if tx.Type() == types.BlobTxType {
blobGasUsed += params.BlobTxBlobGasPerBlob
}
includedTxs = append(includedTxs, tx) includedTxs = append(includedTxs, tx)
if hashError != nil { if hashError != nil {
return nil, nil, NewError(ErrorMissingBlockhash, hashError) return nil, nil, NewError(ErrorMissingBlockhash, hashError)
@ -322,8 +324,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
h := types.DeriveSha(types.Withdrawals(pre.Env.Withdrawals), trie.NewStackTrie(nil)) h := types.DeriveSha(types.Withdrawals(pre.Env.Withdrawals), trie.NewStackTrie(nil))
execRs.WithdrawalsRoot = &h execRs.WithdrawalsRoot = &h
} }
if vmContext.ExcessBlobGas != nil { if vmContext.BlobBaseFee != nil {
execRs.CurrentExcessBlobGas = (*math.HexOrDecimal64)(vmContext.ExcessBlobGas) execRs.CurrentExcessBlobGas = (*math.HexOrDecimal64)(&excessBlobGas)
execRs.CurrentBlobGasUsed = (*math.HexOrDecimal64)(&blobGasUsed) execRs.CurrentBlobGasUsed = (*math.HexOrDecimal64)(&blobGasUsed)
} }
// Re-create statedb instance with new root upon the updated database // Re-create statedb instance with new root upon the updated database

View File

@ -35,6 +35,9 @@ func (h header) MarshalJSON() ([]byte, error) {
Nonce *types.BlockNonce `json:"nonce"` Nonce *types.BlockNonce `json:"nonce"`
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"` BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"`
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed" rlp:"optional"`
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas" rlp:"optional"`
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
} }
var enc header var enc header
enc.ParentHash = h.ParentHash enc.ParentHash = h.ParentHash
@ -54,6 +57,9 @@ func (h header) MarshalJSON() ([]byte, error) {
enc.Nonce = h.Nonce enc.Nonce = h.Nonce
enc.BaseFee = (*math.HexOrDecimal256)(h.BaseFee) enc.BaseFee = (*math.HexOrDecimal256)(h.BaseFee)
enc.WithdrawalsHash = h.WithdrawalsHash enc.WithdrawalsHash = h.WithdrawalsHash
enc.BlobGasUsed = (*math.HexOrDecimal64)(h.BlobGasUsed)
enc.ExcessBlobGas = (*math.HexOrDecimal64)(h.ExcessBlobGas)
enc.ParentBeaconBlockRoot = h.ParentBeaconBlockRoot
return json.Marshal(&enc) return json.Marshal(&enc)
} }
@ -77,6 +83,9 @@ func (h *header) UnmarshalJSON(input []byte) error {
Nonce *types.BlockNonce `json:"nonce"` Nonce *types.BlockNonce `json:"nonce"`
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"` BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"`
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed" rlp:"optional"`
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas" rlp:"optional"`
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
} }
var dec header var dec header
if err := json.Unmarshal(input, &dec); err != nil { if err := json.Unmarshal(input, &dec); err != nil {
@ -137,5 +146,14 @@ func (h *header) UnmarshalJSON(input []byte) error {
if dec.WithdrawalsHash != nil { if dec.WithdrawalsHash != nil {
h.WithdrawalsHash = dec.WithdrawalsHash h.WithdrawalsHash = dec.WithdrawalsHash
} }
if dec.BlobGasUsed != nil {
h.BlobGasUsed = (*uint64)(dec.BlobGasUsed)
}
if dec.ExcessBlobGas != nil {
h.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas)
}
if dec.ParentBeaconBlockRoot != nil {
h.ParentBeaconBlockRoot = dec.ParentBeaconBlockRoot
}
return nil return nil
} }

View File

@ -33,7 +33,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"` Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
ParentUncleHash common.Hash `json:"parentUncleHash"` ParentUncleHash common.Hash `json:"parentUncleHash"`
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas,omitempty"` ExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"`
ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"` ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"`
ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"` ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"`
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"` ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"`
@ -81,7 +81,7 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"` Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
ParentUncleHash *common.Hash `json:"parentUncleHash"` ParentUncleHash *common.Hash `json:"parentUncleHash"`
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas,omitempty"` ExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"`
ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"` ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"`
ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"` ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"`
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"` ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"`

View File

@ -334,7 +334,7 @@ func loadTransactions(txStr string, inputData *input, env stEnv, chainConfig *pa
txsWithKeys = inputData.Txs txsWithKeys = inputData.Txs
} }
// We may have to sign the transactions. // We may have to sign the transactions.
signer := types.MakeSigner(chainConfig, big.NewInt(int64(env.Number)), env.Timestamp) signer := types.LatestSignerForChainID(chainConfig.ChainID)
return signUnsignedTransactions(txsWithKeys, signer) return signUnsignedTransactions(txsWithKeys, signer)
} }

View File

@ -124,6 +124,7 @@ func runCmd(ctx *cli.Context) error {
receiver = common.BytesToAddress([]byte("receiver")) receiver = common.BytesToAddress([]byte("receiver"))
preimages = ctx.Bool(DumpFlag.Name) preimages = ctx.Bool(DumpFlag.Name)
blobHashes []common.Hash // TODO (MariusVanDerWijden) implement blob hashes in state tests blobHashes []common.Hash // TODO (MariusVanDerWijden) implement blob hashes in state tests
blobBaseFee = new(big.Int) // TODO (MariusVanDerWijden) implement blob fee in state tests
) )
if ctx.Bool(MachineFlag.Name) { if ctx.Bool(MachineFlag.Name) {
tracer = logger.NewJSONLogger(logconfig, os.Stdout) tracer = logger.NewJSONLogger(logconfig, os.Stdout)
@ -221,6 +222,7 @@ func runCmd(ctx *cli.Context) error {
Coinbase: genesisConfig.Coinbase, Coinbase: genesisConfig.Coinbase,
BlockNumber: new(big.Int).SetUint64(genesisConfig.Number), BlockNumber: new(big.Int).SetUint64(genesisConfig.Number),
BlobHashes: blobHashes, BlobHashes: blobHashes,
BlobBaseFee: blobBaseFee,
EVMConfig: vm.Config{ EVMConfig: vm.Config{
Tracer: tracer, Tracer: tracer,
}, },

View File

@ -9,8 +9,7 @@
"parentDifficulty" : "0x00", "parentDifficulty" : "0x00",
"parentUncleHash" : "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", "parentUncleHash" : "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"currentRandom" : "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", "currentRandom" : "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"withdrawals" : [ "withdrawals" : [],
],
"parentBaseFee" : "0x0a", "parentBaseFee" : "0x0a",
"parentGasUsed" : "0x00", "parentGasUsed" : "0x00",
"parentGasLimit" : "0x7fffffffffffffff", "parentGasLimit" : "0x7fffffffffffffff",

View File

@ -42,6 +42,6 @@
"currentBaseFee": "0x9", "currentBaseFee": "0x9",
"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", "withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"currentExcessBlobGas": "0x0", "currentExcessBlobGas": "0x0",
"currentBlobGasUsed": "0x20000" "blobGasUsed": "0x20000"
} }
} }

View File

@ -6,7 +6,7 @@
"storage" : { "storage" : {
} }
}, },
"0xbEac00dDB15f3B6d645C48263dC93862413A222D" : { "0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02" : {
"balance" : "0x1", "balance" : "0x1",
"code" : "0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500", "code" : "0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500",
"nonce" : "0x00", "nonce" : "0x00",

View File

@ -1,6 +1,6 @@
{ {
"alloc": { "alloc": {
"0xbeac00ddb15f3b6d645c48263dc93862413a222d": { "0x000f3df6d732807ef1319fb7b8bb8522d0beac02": {
"code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500", "code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500",
"storage": { "storage": {
"0x000000000000000000000000000000000000000000000000000000000000079e": "0x000000000000000000000000000000000000000000000000000000000000079e", "0x000000000000000000000000000000000000000000000000000000000000079e": "0x000000000000000000000000000000000000000000000000000000000000079e",
@ -14,7 +14,7 @@
} }
}, },
"result": { "result": {
"stateRoot": "0x2db9f6bc233e8fd0af2d8023404493a19b37d9d69ace71f4e73158851fced574", "stateRoot": "0x19a4f821a7c0a6f4c934f9acb0fe9ce5417b68086e12513ecbc3e3f57e01573c",
"txRoot": "0x248074fabe112f7d93917f292b64932394f835bb98da91f21501574d58ec92ab", "txRoot": "0x248074fabe112f7d93917f292b64932394f835bb98da91f21501574d58ec92ab",
"receiptsRoot": "0xf78dfb743fbd92ade140711c8bbc542b5e307f0ab7984eff35d751969fe57efa", "receiptsRoot": "0xf78dfb743fbd92ade140711c8bbc542b5e307f0ab7984eff35d751969fe57efa",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
@ -40,6 +40,6 @@
"currentBaseFee": "0x9", "currentBaseFee": "0x9",
"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", "withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"currentExcessBlobGas": "0x0", "currentExcessBlobGas": "0x0",
"currentBlobGasUsed": "0x0" "blobGasUsed": "0x0"
} }
} }

View File

@ -1,29 +1,29 @@
## EIP 4788 ## EIP 4788
This test contains testcases for EIP-4788. The 4788-contract is This test contains testcases for EIP-4788. The 4788-contract is
located at address `0xbeac00ddb15f3b6d645c48263dc93862413a222d`, and this test executes a simple transaction. It also located at address `0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02`, and this test executes a simple transaction. It also
implicitly invokes the system tx, which sets calls the contract and sets the implicitly invokes the system tx, which sets calls the contract and sets the
storage values storage values
``` ```
$ dir=./testdata/29/ && go run . t8n --state.fork=Cancun --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout $ dir=./testdata/29/ && go run . t8n --state.fork=Cancun --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout
INFO [08-15|20:07:56.335] Trie dumping started root=ecde45..2af8a7 INFO [09-27|15:34:53.049] Trie dumping started root=19a4f8..01573c
INFO [08-15|20:07:56.335] Trie dumping complete accounts=2 elapsed="225.848µs" INFO [09-27|15:34:53.049] Trie dumping complete accounts=2 elapsed="192.759µs"
INFO [08-15|20:07:56.335] Wrote file file=result.json INFO [09-27|15:34:53.050] Wrote file file=result.json
{ {
"alloc": { "alloc": {
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { "0x000f3df6d732807ef1319fb7b8bb8522d0beac02": {
"balance": "0x16345785d871db8",
"nonce": "0x1"
},
"0xbeac00541d49391ed88abf392bfc1f4dea8c4143": {
"code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500", "code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500",
"storage": { "storage": {
"0x000000000000000000000000000000000000000000000000000000000000079e": "0x000000000000000000000000000000000000000000000000000000000000079e", "0x000000000000000000000000000000000000000000000000000000000000079e": "0x000000000000000000000000000000000000000000000000000000000000079e",
"0x000000000000000000000000000000000000000000000000000000000001879e": "0x0000beac00beac00beac00beac00beac00beac00beac00beac00beac00beac00" "0x000000000000000000000000000000000000000000000000000000000001879e": "0x0000beac00beac00beac00beac00beac00beac00beac00beac00beac00beac00"
}, },
"balance": "0x "balance": "0x1"
},
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
"balance": "0x16345785d871db8",
"nonce": "0x1"
} }
} }
} }
``` ```

View File

@ -1,6 +1,6 @@
## EIP-1559 testing ## EIP-1559 testing
This test contains testcases for EIP-1559, which uses an new transaction type and has a new block parameter. This test contains testcases for EIP-1559, which uses a new transaction type and has a new block parameter.
### Prestate ### Prestate

View File

@ -474,7 +474,7 @@ func dump(ctx *cli.Context) error {
if err != nil { if err != nil {
return err return err
} }
triedb := utils.MakeTrieDatabase(ctx, db, true, false) // always enable preimage lookup triedb := utils.MakeTrieDatabase(ctx, db, true, true) // always enable preimage lookup
defer triedb.Close() defer triedb.Close()
state, err := state.New(root, state.NewDatabaseWithNodeDB(db, triedb), nil) state, err := state.New(root, state.NewDatabaseWithNodeDB(db, triedb), nil)

View File

@ -32,6 +32,8 @@ import (
"github.com/ethereum/go-ethereum/accounts/scwallet" "github.com/ethereum/go-ethereum/accounts/scwallet"
"github.com/ethereum/go-ethereum/accounts/usbwallet" "github.com/ethereum/go-ethereum/accounts/usbwallet"
"github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/eth/catalyst" "github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/ethconfig"
@ -199,17 +201,18 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
if ctx.IsSet(utils.GraphQLEnabledFlag.Name) { if ctx.IsSet(utils.GraphQLEnabledFlag.Name) {
utils.RegisterGraphQLService(stack, backend, filterSystem, &cfg.Node) utils.RegisterGraphQLService(stack, backend, filterSystem, &cfg.Node)
} }
// Add the Ethereum Stats daemon if requested. // Add the Ethereum Stats daemon if requested.
if cfg.Ethstats.URL != "" { if cfg.Ethstats.URL != "" {
utils.RegisterEthStatsService(stack, backend, cfg.Ethstats.URL) utils.RegisterEthStatsService(stack, backend, cfg.Ethstats.URL)
} }
// Configure full-sync tester service if requested // Configure full-sync tester service if requested
if ctx.IsSet(utils.SyncTargetFlag.Name) && cfg.Eth.SyncMode == downloader.FullSync { if ctx.IsSet(utils.SyncTargetFlag.Name) {
utils.RegisterFullSyncTester(stack, eth, ctx.Path(utils.SyncTargetFlag.Name)) hex := hexutil.MustDecode(ctx.String(utils.SyncTargetFlag.Name))
if len(hex) != common.HashLength {
utils.Fatalf("invalid sync target length: have %d, want %d", len(hex), common.HashLength)
}
utils.RegisterFullSyncTester(stack, eth, common.BytesToHash(hex))
} }
// Start the dev mode if requested, or launch the engine API for // Start the dev mode if requested, or launch the engine API for
// interacting with external consensus client. // interacting with external consensus client.
if ctx.IsSet(utils.DeveloperFlag.Name) { if ctx.IsSet(utils.DeveloperFlag.Name) {

View File

@ -176,12 +176,12 @@ func TestCustomBackend(t *testing.T) {
{ // Can't start pebble on top of leveldb { // Can't start pebble on top of leveldb
initArgs: []string{"--db.engine", "leveldb"}, initArgs: []string{"--db.engine", "leveldb"},
execArgs: []string{"--db.engine", "pebble"}, execArgs: []string{"--db.engine", "pebble"},
execExpect: `Fatal: Could not open database: db.engine choice was pebble but found pre-existing leveldb database in specified data directory`, execExpect: `Fatal: Failed to register the Ethereum service: db.engine choice was pebble but found pre-existing leveldb database in specified data directory`,
}, },
{ // Can't start leveldb on top of pebble { // Can't start leveldb on top of pebble
initArgs: []string{"--db.engine", "pebble"}, initArgs: []string{"--db.engine", "pebble"},
execArgs: []string{"--db.engine", "leveldb"}, execArgs: []string{"--db.engine", "leveldb"},
execExpect: `Fatal: Could not open database: db.engine choice was leveldb but found pre-existing pebble database in specified data directory`, execExpect: `Fatal: Failed to register the Ethereum service: db.engine choice was leveldb but found pre-existing pebble database in specified data directory`,
}, },
{ // Reject invalid backend choice { // Reject invalid backend choice
initArgs: []string{"--db.engine", "mssql"}, initArgs: []string{"--db.engine", "mssql"},

View File

@ -18,7 +18,6 @@
package utils package utils
import ( import (
"bytes"
"context" "context"
"crypto/ecdsa" "crypto/ecdsa"
"encoding/hex" "encoding/hex"
@ -39,11 +38,9 @@ import (
"github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/fdlimit" "github.com/ethereum/go-ethereum/common/fdlimit"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/txpool/legacypool" "github.com/ethereum/go-ethereum/core/txpool/legacypool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/crypto/kzg4844"
@ -72,7 +69,6 @@ import (
"github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/p2p/netutil" "github.com/ethereum/go-ethereum/p2p/netutil"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/triedb/hashdb" "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
@ -280,7 +276,6 @@ var (
StateSchemeFlag = &cli.StringFlag{ StateSchemeFlag = &cli.StringFlag{
Name: "state.scheme", Name: "state.scheme",
Usage: "Scheme to use for storing ethereum state ('hash' or 'path')", Usage: "Scheme to use for storing ethereum state ('hash' or 'path')",
Value: rawdb.HashScheme,
Category: flags.StateCategory, Category: flags.StateCategory,
} }
StateHistoryFlag = &cli.Uint64Flag{ StateHistoryFlag = &cli.Uint64Flag{
@ -603,9 +598,9 @@ var (
} }
// MISC settings // MISC settings
SyncTargetFlag = &cli.PathFlag{ SyncTargetFlag = &cli.StringFlag{
Name: "synctarget", Name: "synctarget",
Usage: `File for containing the hex-encoded block-rlp as sync target(dev feature)`, Usage: `Hash of the block to full sync to (dev testing feature)`,
TakesFile: true, TakesFile: true,
Category: flags.MiscCategory, Category: flags.MiscCategory,
} }
@ -1699,7 +1694,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc)) log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc))
godebug.SetGCPercent(int(gogc)) godebug.SetGCPercent(int(gogc))
if ctx.IsSet(SyncModeFlag.Name) { if ctx.IsSet(SyncTargetFlag.Name) {
cfg.SyncMode = downloader.FullSync // dev sync target forces full sync
} else if ctx.IsSet(SyncModeFlag.Name) {
cfg.SyncMode = *flags.GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode) cfg.SyncMode = *flags.GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode)
} }
if ctx.IsSet(NetworkIdFlag.Name) { if ctx.IsSet(NetworkIdFlag.Name) {
@ -1731,15 +1728,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if ctx.IsSet(StateHistoryFlag.Name) { if ctx.IsSet(StateHistoryFlag.Name) {
cfg.StateHistory = ctx.Uint64(StateHistoryFlag.Name) cfg.StateHistory = ctx.Uint64(StateHistoryFlag.Name)
} }
// Parse state scheme, abort the process if it's not compatible. if ctx.IsSet(StateSchemeFlag.Name) {
chaindb := tryMakeReadOnlyDatabase(ctx, stack) cfg.StateScheme = ctx.String(StateSchemeFlag.Name)
scheme, err := ParseStateScheme(ctx, chaindb)
chaindb.Close()
if err != nil {
Fatalf("%v", err)
} }
cfg.StateScheme = scheme
// Parse transaction history flag, if user is still using legacy config // Parse transaction history flag, if user is still using legacy config
// file with 'TxLookupLimit' configured, copy the value to 'TransactionHistory'. // file with 'TxLookupLimit' configured, copy the value to 'TransactionHistory'.
if cfg.TransactionHistory == ethconfig.Defaults.TransactionHistory && cfg.TxLookupLimit != ethconfig.Defaults.TxLookupLimit { if cfg.TransactionHistory == ethconfig.Defaults.TransactionHistory && cfg.TxLookupLimit != ethconfig.Defaults.TxLookupLimit {
@ -1984,21 +1975,9 @@ func RegisterFilterAPI(stack *node.Node, backend ethapi.Backend, ethcfg *ethconf
} }
// RegisterFullSyncTester adds the full-sync tester service into node. // RegisterFullSyncTester adds the full-sync tester service into node.
func RegisterFullSyncTester(stack *node.Node, eth *eth.Ethereum, path string) { func RegisterFullSyncTester(stack *node.Node, eth *eth.Ethereum, target common.Hash) {
blob, err := os.ReadFile(path) catalyst.RegisterFullSyncTester(stack, eth, target)
if err != nil { log.Info("Registered full-sync tester", "hash", target)
Fatalf("Failed to read block file: %v", err)
}
rlpBlob, err := hexutil.Decode(string(bytes.TrimRight(blob, "\r\n")))
if err != nil {
Fatalf("Failed to decode block blob: %v", err)
}
var block types.Block
if err := rlp.DecodeBytes(rlpBlob, &block); err != nil {
Fatalf("Failed to decode block: %v", err)
}
catalyst.RegisterFullSyncTester(stack, eth, &block)
log.Info("Registered full-sync tester", "number", block.NumberU64(), "hash", block.Hash())
} }
func SetupMetrics(ctx *cli.Context) { func SetupMetrics(ctx *cli.Context) {
@ -2187,7 +2166,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh
if gcmode := ctx.String(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" { if gcmode := ctx.String(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {
Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name) Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
} }
scheme, err := ParseStateScheme(ctx, chainDb) scheme, err := rawdb.ParseStateScheme(ctx.String(StateSchemeFlag.Name), chainDb)
if err != nil { if err != nil {
Fatalf("%v", err) Fatalf("%v", err)
} }
@ -2246,47 +2225,12 @@ func MakeConsolePreloads(ctx *cli.Context) []string {
return preloads return preloads
} }
// ParseStateScheme resolves scheme identifier from CLI flag. If the provided
// state scheme is not compatible with the one of persistent scheme, an error
// will be returned.
//
// - none: use the scheme consistent with persistent state, or fallback
// to hash-based scheme if state is empty.
// - hash: use hash-based scheme or error out if not compatible with
// persistent state scheme.
// - path: use path-based scheme or error out if not compatible with
// persistent state scheme.
func ParseStateScheme(ctx *cli.Context, disk ethdb.Database) (string, error) {
// If state scheme is not specified, use the scheme consistent
// with persistent state, or fallback to hash mode if database
// is empty.
stored := rawdb.ReadStateScheme(disk)
if !ctx.IsSet(StateSchemeFlag.Name) {
if stored == "" {
// use default scheme for empty database, flip it when
// path mode is chosen as default
log.Info("State schema set to default", "scheme", "hash")
return rawdb.HashScheme, nil
}
log.Info("State scheme set to already existing", "scheme", stored)
return stored, nil // reuse scheme of persistent scheme
}
// If state scheme is specified, ensure it's compatible with
// persistent state.
scheme := ctx.String(StateSchemeFlag.Name)
if stored == "" || scheme == stored {
log.Info("State scheme set by user", "scheme", scheme)
return scheme, nil
}
return "", fmt.Errorf("incompatible state scheme, stored: %s, provided: %s", stored, scheme)
}
// MakeTrieDatabase constructs a trie database based on the configured scheme. // MakeTrieDatabase constructs a trie database based on the configured scheme.
func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, readOnly bool) *trie.Database { func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, readOnly bool) *trie.Database {
config := &trie.Config{ config := &trie.Config{
Preimages: preimage, Preimages: preimage,
} }
scheme, err := ParseStateScheme(ctx, disk) scheme, err := rawdb.ParseStateScheme(ctx.String(StateSchemeFlag.Name), disk)
if err != nil { if err != nil {
Fatalf("%v", err) Fatalf("%v", err)
} }

View File

@ -239,9 +239,6 @@ func (a Address) Cmp(other Address) int {
// Bytes gets the string representation of the underlying address. // Bytes gets the string representation of the underlying address.
func (a Address) Bytes() []byte { return a[:] } func (a Address) Bytes() []byte { return a[:] }
// Hash converts an address to a hash by left-padding it with zeros.
func (a Address) Hash() Hash { return BytesToHash(a[:]) }
// Big converts an address to a big integer. // Big converts an address to a big integer.
func (a Address) Big() *big.Int { return new(big.Int).SetBytes(a[:]) } func (a Address) Big() *big.Int { return new(big.Int).SetBytes(a[:]) }

View File

@ -34,7 +34,7 @@ type instructionIterator struct {
started bool started bool
} }
// NewInstructionIterator create a new instruction iterator. // NewInstructionIterator creates a new instruction iterator.
func NewInstructionIterator(code []byte) *instructionIterator { func NewInstructionIterator(code []byte) *instructionIterator {
it := new(instructionIterator) it := new(instructionIterator)
it.code = code it.code = code

View File

@ -72,12 +72,12 @@ func TestLexer(t *testing.T) {
input: "@label123", input: "@label123",
tokens: []token{{typ: lineStart}, {typ: label, text: "label123"}, {typ: eof}}, tokens: []token{{typ: lineStart}, {typ: label, text: "label123"}, {typ: eof}},
}, },
// comment after label // Comment after label
{ {
input: "@label123 ;; comment", input: "@label123 ;; comment",
tokens: []token{{typ: lineStart}, {typ: label, text: "label123"}, {typ: eof}}, tokens: []token{{typ: lineStart}, {typ: label, text: "label123"}, {typ: eof}},
}, },
// comment after instruction // Comment after instruction
{ {
input: "push 3 ;; comment\nadd", input: "push 3 ;; comment\nadd",
tokens: []token{{typ: lineStart}, {typ: element, text: "push"}, {typ: number, text: "3"}, {typ: lineEnd, text: "\n"}, {typ: lineStart, lineno: 1}, {typ: element, lineno: 1, text: "add"}, {typ: eof, lineno: 1}}, tokens: []token{{typ: lineStart}, {typ: element, text: "push"}, {typ: number, text: "3"}, {typ: lineEnd, text: "\n"}, {typ: lineStart, lineno: 1}, {typ: element, lineno: 1, text: "add"}, {typ: eof, lineno: 1}},

View File

@ -576,7 +576,7 @@ func (bc *BlockChain) SetHead(head uint64) error {
header := bc.CurrentBlock() header := bc.CurrentBlock()
block := bc.GetBlock(header.Hash(), header.Number.Uint64()) block := bc.GetBlock(header.Hash(), header.Number.Uint64())
if block == nil { if block == nil {
// This should never happen. In practice, previsouly currentBlock // This should never happen. In practice, previously currentBlock
// contained the entire block whereas now only a "marker", so there // contained the entire block whereas now only a "marker", so there
// is an ever so slight chance for a race we should handle. // is an ever so slight chance for a race we should handle.
log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash()) log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash())
@ -598,7 +598,7 @@ func (bc *BlockChain) SetHeadWithTimestamp(timestamp uint64) error {
header := bc.CurrentBlock() header := bc.CurrentBlock()
block := bc.GetBlock(header.Hash(), header.Number.Uint64()) block := bc.GetBlock(header.Hash(), header.Number.Uint64())
if block == nil { if block == nil {
// This should never happen. In practice, previsouly currentBlock // This should never happen. In practice, previously currentBlock
// contained the entire block whereas now only a "marker", so there // contained the entire block whereas now only a "marker", so there
// is an ever so slight chance for a race we should handle. // is an ever so slight chance for a race we should handle.
log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash()) log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash())
@ -982,7 +982,7 @@ func (bc *BlockChain) stopWithoutSaving() {
func (bc *BlockChain) Stop() { func (bc *BlockChain) Stop() {
bc.stopWithoutSaving() bc.stopWithoutSaving()
// Ensure that the entirety of the state snapshot is journalled to disk. // Ensure that the entirety of the state snapshot is journaled to disk.
var snapBase common.Hash var snapBase common.Hash
if bc.snaps != nil { if bc.snaps != nil {
var err error var err error
@ -1193,7 +1193,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
// a background routine to re-indexed all indices in [ancients - txlookupLimit, ancients) // a background routine to re-indexed all indices in [ancients - txlookupLimit, ancients)
// range. In this case, all tx indices of newly imported blocks should be // range. In this case, all tx indices of newly imported blocks should be
// generated. // generated.
var batch = bc.db.NewBatch() batch := bc.db.NewBatch()
for i, block := range blockChain { for i, block := range blockChain {
if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit { if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit {
rawdb.WriteTxLookupEntriesByBlock(batch, block) rawdb.WriteTxLookupEntriesByBlock(batch, block)
@ -2618,7 +2618,7 @@ func (bc *BlockChain) SetTrieFlushInterval(interval time.Duration) {
bc.flushInterval.Store(int64(interval)) bc.flushInterval.Store(int64(interval))
} }
// GetTrieFlushInterval gets the in-memroy tries flush interval // GetTrieFlushInterval gets the in-memory tries flush interval
func (bc *BlockChain) GetTrieFlushInterval() time.Duration { func (bc *BlockChain) GetTrieFlushInterval() time.Duration {
return time.Duration(bc.flushInterval.Load()) return time.Duration(bc.flushInterval.Load())
} }

View File

@ -58,7 +58,7 @@ type partialMatches struct {
// bit with the given number of fetch elements, or a response for such a request. // bit with the given number of fetch elements, or a response for such a request.
// It can also have the actual results set to be used as a delivery data struct. // It can also have the actual results set to be used as a delivery data struct.
// //
// The contest and error fields are used by the light client to terminate matching // The context and error fields are used by the light client to terminate matching
// early if an error is encountered on some path of the pipeline. // early if an error is encountered on some path of the pipeline.
type Retrieval struct { type Retrieval struct {
Bit uint Bit uint
@ -389,7 +389,7 @@ func (m *Matcher) distributor(dist chan *request, session *MatcherSession) {
shutdown = session.quit // Shutdown request channel, will gracefully wait for pending requests shutdown = session.quit // Shutdown request channel, will gracefully wait for pending requests
) )
// assign is a helper method fo try to assign a pending bit an actively // assign is a helper method to try to assign a pending bit an actively
// listening servicer, or schedule it up for later when one arrives. // listening servicer, or schedule it up for later when one arrives.
assign := func(bit uint) { assign := func(bit uint) {
select { select {

View File

@ -85,7 +85,7 @@ func TestMatcherRandom(t *testing.T) {
} }
// Tests that the matcher can properly find matches if the starting block is // Tests that the matcher can properly find matches if the starting block is
// shifter from a multiple of 8. This is needed to cover an optimisation with // shifted from a multiple of 8. This is needed to cover an optimisation with
// bitset matching https://github.com/ethereum/go-ethereum/issues/15309. // bitset matching https://github.com/ethereum/go-ethereum/issues/15309.
func TestMatcherShifted(t *testing.T) { func TestMatcherShifted(t *testing.T) {
t.Parallel() t.Parallel()
@ -106,7 +106,7 @@ func TestWildcardMatcher(t *testing.T) {
testMatcherBothModes(t, nil, 0, 10000, 0) testMatcherBothModes(t, nil, 0, 10000, 0)
} }
// makeRandomIndexes generates a random filter system, composed on multiple filter // makeRandomIndexes generates a random filter system, composed of multiple filter
// criteria, each having one bloom list component for the address and arbitrarily // criteria, each having one bloom list component for the address and arbitrarily
// many topic bloom list components. // many topic bloom list components.
func makeRandomIndexes(lengths []int, max int) [][]bloomIndexes { func makeRandomIndexes(lengths []int, max int) [][]bloomIndexes {

View File

@ -88,11 +88,6 @@ func (b *BlockGen) SetPoS() {
b.header.Difficulty = new(big.Int) b.header.Difficulty = new(big.Int)
} }
// SetBlobGas sets the data gas used by the blob in the generated block.
func (b *BlockGen) SetBlobGas(blobGasUsed uint64) {
b.header.BlobGasUsed = &blobGasUsed
}
// addTx adds a transaction to the generated block. If no coinbase has // addTx adds a transaction to the generated block. If no coinbase has
// been set, the block's coinbase is set to the zero address. // been set, the block's coinbase is set to the zero address.
// //
@ -111,6 +106,9 @@ func (b *BlockGen) addTx(bc *BlockChain, vmConfig vm.Config, tx *types.Transacti
} }
b.txs = append(b.txs, tx) b.txs = append(b.txs, tx)
b.receipts = append(b.receipts, receipt) b.receipts = append(b.receipts, receipt)
if b.header.BlobGasUsed != nil {
*b.header.BlobGasUsed += receipt.BlobGasUsed
}
} }
// AddTx adds a transaction to the generated block. If no coinbase has // AddTx adds a transaction to the generated block. If no coinbase has

View File

@ -21,6 +21,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
) )
@ -40,6 +41,7 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
var ( var (
beneficiary common.Address beneficiary common.Address
baseFee *big.Int baseFee *big.Int
blobBaseFee *big.Int
random *common.Hash random *common.Hash
) )
@ -52,6 +54,9 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
if header.BaseFee != nil { if header.BaseFee != nil {
baseFee = new(big.Int).Set(header.BaseFee) baseFee = new(big.Int).Set(header.BaseFee)
} }
if header.ExcessBlobGas != nil {
blobBaseFee = eip4844.CalcBlobFee(*header.ExcessBlobGas)
}
if header.Difficulty.Cmp(common.Big0) == 0 { if header.Difficulty.Cmp(common.Big0) == 0 {
random = &header.MixDigest random = &header.MixDigest
} }
@ -64,9 +69,9 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
Time: header.Time, Time: header.Time,
Difficulty: new(big.Int).Set(header.Difficulty), Difficulty: new(big.Int).Set(header.Difficulty),
BaseFee: baseFee, BaseFee: baseFee,
BlobBaseFee: blobBaseFee,
GasLimit: header.GasLimit, GasLimit: header.GasLimit,
Random: random, Random: random,
ExcessBlobGas: header.ExcessBlobGas,
} }
} }

View File

@ -120,8 +120,8 @@ func (ga *GenesisAlloc) UnmarshalJSON(data []byte) error {
return nil return nil
} }
// deriveHash computes the state root according to the genesis specification. // hash computes the state root according to the genesis specification.
func (ga *GenesisAlloc) deriveHash() (common.Hash, error) { func (ga *GenesisAlloc) hash() (common.Hash, error) {
// Create an ephemeral in-memory database for computing hash, // Create an ephemeral in-memory database for computing hash,
// all the derived states will be discarded to not pollute disk. // all the derived states will be discarded to not pollute disk.
db := state.NewDatabase(rawdb.NewMemoryDatabase()) db := state.NewDatabase(rawdb.NewMemoryDatabase())
@ -142,9 +142,9 @@ func (ga *GenesisAlloc) deriveHash() (common.Hash, error) {
return statedb.Commit(0, false) return statedb.Commit(0, false)
} }
// flush is very similar with deriveHash, but the main difference is // flush is very similar with hash, but the main difference is all the generated
// all the generated states will be persisted into the given database. // states will be persisted into the given database. Also, the genesis state
// Also, the genesis state specification will be flushed as well. // specification will be flushed as well.
func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhash common.Hash) error { func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhash common.Hash) error {
statedb, err := state.New(types.EmptyRootHash, state.NewDatabaseWithNodeDB(db, triedb), nil) statedb, err := state.New(types.EmptyRootHash, state.NewDatabaseWithNodeDB(db, triedb), nil)
if err != nil { if err != nil {
@ -179,39 +179,6 @@ func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhas
return nil return nil
} }
// CommitGenesisState loads the stored genesis state with the given block
// hash and commits it into the provided trie database.
func CommitGenesisState(db ethdb.Database, triedb *trie.Database, blockhash common.Hash) error {
var alloc GenesisAlloc
blob := rawdb.ReadGenesisStateSpec(db, blockhash)
if len(blob) != 0 {
if err := alloc.UnmarshalJSON(blob); err != nil {
return err
}
} else {
// Genesis allocation is missing and there are several possibilities:
// the node is legacy which doesn't persist the genesis allocation or
// the persisted allocation is just lost.
// - supported networks(mainnet, testnets), recover with defined allocations
// - private network, can't recover
var genesis *Genesis
switch blockhash {
case params.MainnetGenesisHash:
genesis = DefaultGenesisBlock()
case params.GoerliGenesisHash:
genesis = DefaultGoerliGenesisBlock()
case params.SepoliaGenesisHash:
genesis = DefaultSepoliaGenesisBlock()
}
if genesis != nil {
alloc = genesis.Alloc
} else {
return errors.New("not found")
}
}
return alloc.flush(db, triedb, blockhash)
}
// GenesisAccount is an account in the state of the genesis block. // GenesisAccount is an account in the state of the genesis block.
type GenesisAccount struct { type GenesisAccount struct {
Code []byte `json:"code,omitempty"` Code []byte `json:"code,omitempty"`
@ -444,7 +411,7 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
// ToBlock returns the genesis block according to genesis specification. // ToBlock returns the genesis block according to genesis specification.
func (g *Genesis) ToBlock() *types.Block { func (g *Genesis) ToBlock() *types.Block {
root, err := g.Alloc.deriveHash() root, err := g.Alloc.hash()
if err != nil { if err != nil {
panic(err) panic(err)
} }

View File

@ -231,7 +231,7 @@ func TestReadWriteGenesisAlloc(t *testing.T) {
{1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}}, {1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}},
{2}: {Balance: big.NewInt(2), Storage: map[common.Hash]common.Hash{{2}: {2}}}, {2}: {Balance: big.NewInt(2), Storage: map[common.Hash]common.Hash{{2}: {2}}},
} }
hash, _ = alloc.deriveHash() hash, _ = alloc.hash()
) )
blob, _ := json.Marshal(alloc) blob, _ := json.Marshal(alloc)
rawdb.WriteGenesisStateSpec(db, hash, blob) rawdb.WriteGenesisStateSpec(db, hash, blob)

View File

@ -305,3 +305,38 @@ func ReadStateScheme(db ethdb.Reader) string {
} }
return HashScheme return HashScheme
} }
// ParseStateScheme checks if the specified state scheme is compatible with
// the stored state.
//
// - If the provided scheme is none, use the scheme consistent with persistent
// state, or fallback to hash-based scheme if state is empty.
//
// - If the provided scheme is hash, use hash-based scheme or error out if not
// compatible with persistent state scheme.
//
// - If the provided scheme is path: use path-based scheme or error out if not
// compatible with persistent state scheme.
func ParseStateScheme(provided string, disk ethdb.Database) (string, error) {
// If state scheme is not specified, use the scheme consistent
// with persistent state, or fallback to hash mode if database
// is empty.
stored := ReadStateScheme(disk)
if provided == "" {
if stored == "" {
// use default scheme for empty database, flip it when
// path mode is chosen as default
log.Info("State schema set to default", "scheme", "hash")
return HashScheme, nil
}
log.Info("State scheme set to already existing", "scheme", stored)
return stored, nil // reuse scheme of persistent scheme
}
// If state scheme is specified, ensure it's compatible with
// persistent state.
if stored == "" || provided == stored {
log.Info("State scheme set by user", "scheme", provided)
return provided, nil
}
return "", fmt.Errorf("incompatible state scheme, stored: %s, provided: %s", stored, provided)
}

View File

@ -200,7 +200,7 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) {
} }
batch.Reset() batch.Reset()
// Step into the future and delete and dangling side chains // Step into the future and delete any dangling side chains
if frozen > 0 { if frozen > 0 {
tip := frozen tip := frozen
for len(dangling) > 0 { for len(dangling) > 0 {

View File

@ -34,7 +34,7 @@ import (
"github.com/olekukonko/tablewriter" "github.com/olekukonko/tablewriter"
) )
// freezerdb is a database wrapper that enabled freezer data retrievals. // freezerdb is a database wrapper that enables freezer data retrievals.
type freezerdb struct { type freezerdb struct {
ancientRoot string ancientRoot string
ethdb.KeyValueStore ethdb.KeyValueStore
@ -141,7 +141,7 @@ func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error)
// Unlike other ancient-related methods, this method does not return // Unlike other ancient-related methods, this method does not return
// errNotSupported when invoked. // errNotSupported when invoked.
// The reason for this is that the caller might want to do several things: // The reason for this is that the caller might want to do several things:
// 1. Check if something is in freezer, // 1. Check if something is in the freezer,
// 2. If not, check leveldb. // 2. If not, check leveldb.
// //
// This will work, since the ancient-checks inside 'fn' will return errors, // This will work, since the ancient-checks inside 'fn' will return errors,
@ -209,7 +209,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
// of the freezer and database. Ensure that we don't shoot ourselves in the foot // of the freezer and database. Ensure that we don't shoot ourselves in the foot
// by serving up conflicting data, leading to both datastores getting corrupted. // by serving up conflicting data, leading to both datastores getting corrupted.
// //
// - If both the freezer and key-value store is empty (no genesis), we just // - If both the freezer and key-value store are empty (no genesis), we just
// initialized a new empty freezer, so everything's fine. // initialized a new empty freezer, so everything's fine.
// - If the key-value store is empty, but the freezer is not, we need to make // - If the key-value store is empty, but the freezer is not, we need to make
// sure the user's genesis matches the freezer. That will be checked in the // sure the user's genesis matches the freezer. That will be checked in the
@ -218,7 +218,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
// - If neither the key-value store nor the freezer is empty, cross validate // - If neither the key-value store nor the freezer is empty, cross validate
// the genesis hashes to make sure they are compatible. If they are, also // the genesis hashes to make sure they are compatible. If they are, also
// ensure that there's no gap between the freezer and subsequently leveldb. // ensure that there's no gap between the freezer and subsequently leveldb.
// - If the key-value store is not empty, but the freezer is we might just be // - If the key-value store is not empty, but the freezer is, we might just be
// upgrading to the freezer release, or we might have had a small chain and // upgrading to the freezer release, or we might have had a small chain and
// not frozen anything yet. Ensure that no blocks are missing yet from the // not frozen anything yet. Ensure that no blocks are missing yet from the
// key-value store, since that would mean we already had an old freezer. // key-value store, since that would mean we already had an old freezer.
@ -634,7 +634,7 @@ func printChainMetadata(db ethdb.KeyValueStore) {
fmt.Fprintf(os.Stderr, "\n\n") fmt.Fprintf(os.Stderr, "\n\n")
} }
// ReadChainMetadata returns a set of key/value pairs that contains informatin // ReadChainMetadata returns a set of key/value pairs that contains information
// about the database chain status. This can be used for diagnostic purposes // about the database chain status. This can be used for diagnostic purposes
// when investigating the state of the node. // when investigating the state of the node.
func ReadChainMetadata(db ethdb.KeyValueStore) [][]string { func ReadChainMetadata(db ethdb.KeyValueStore) [][]string {

View File

@ -23,7 +23,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb/pebble" "github.com/ethereum/go-ethereum/ethdb/pebble"
) )
// Pebble is unsuported on 32bit architecture // Pebble is unsupported on 32bit architecture
const PebbleEnabled = true const PebbleEnabled = true
// NewPebbleDBDatabase creates a persistent key-value database without a freezer // NewPebbleDBDatabase creates a persistent key-value database without a freezer

View File

@ -219,7 +219,7 @@ func (b *tableBatch) Put(key, value []byte) error {
return b.batch.Put(append([]byte(b.prefix), key...), value) return b.batch.Put(append([]byte(b.prefix), key...), value)
} }
// Delete inserts the a key removal into the batch for later committing. // Delete inserts a key removal into the batch for later committing.
func (b *tableBatch) Delete(key []byte) error { func (b *tableBatch) Delete(key []byte) error {
return b.batch.Delete(append([]byte(b.prefix), key...)) return b.batch.Delete(append([]byte(b.prefix), key...))
} }

View File

@ -364,11 +364,11 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou
func stackTrieGenerate(db ethdb.KeyValueWriter, scheme string, owner common.Hash, in chan trieKV, out chan common.Hash) { func stackTrieGenerate(db ethdb.KeyValueWriter, scheme string, owner common.Hash, in chan trieKV, out chan common.Hash) {
var nodeWriter trie.NodeWriteFunc var nodeWriter trie.NodeWriteFunc
if db != nil { if db != nil {
nodeWriter = func(owner common.Hash, path []byte, hash common.Hash, blob []byte) { nodeWriter = func(path []byte, hash common.Hash, blob []byte) {
rawdb.WriteTrieNode(db, owner, path, hash, blob, scheme) rawdb.WriteTrieNode(db, owner, path, hash, blob, scheme)
} }
} }
t := trie.NewStackTrieWithOwner(nodeWriter, owner) t := trie.NewStackTrie(nodeWriter)
for leaf := range in { for leaf := range in {
t.Update(leaf.key[:], leaf.value) t.Update(leaf.key[:], leaf.value)
} }

View File

@ -978,7 +978,7 @@ func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (boo
nodes = trienode.NewNodeSet(addrHash) nodes = trienode.NewNodeSet(addrHash)
slots = make(map[common.Hash][]byte) slots = make(map[common.Hash][]byte)
) )
stack := trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) { stack := trie.NewStackTrie(func(path []byte, hash common.Hash, blob []byte) {
nodes.AddNode(path, trienode.NewDeleted()) nodes.AddNode(path, trienode.NewDeleted())
size += common.StorageSize(len(path)) size += common.StorageSize(len(path))
}) })

View File

@ -37,7 +37,7 @@ var (
type triePrefetcher struct { type triePrefetcher struct {
db Database // Database to fetch trie nodes through db Database // Database to fetch trie nodes through
root common.Hash // Root hash of the account trie for metrics root common.Hash // Root hash of the account trie for metrics
fetches map[string]Trie // Partially or fully fetcher tries fetches map[string]Trie // Partially or fully fetched tries. Only populated for inactive copies.
fetchers map[string]*subfetcher // Subfetchers for each trie fetchers map[string]*subfetcher // Subfetchers for each trie
deliveryMissMeter metrics.Meter deliveryMissMeter metrics.Meter
@ -197,7 +197,10 @@ func (p *triePrefetcher) used(owner common.Hash, root common.Hash, used [][]byte
// trieID returns an unique trie identifier consists the trie owner and root hash. // trieID returns an unique trie identifier consists the trie owner and root hash.
func (p *triePrefetcher) trieID(owner common.Hash, root common.Hash) string { func (p *triePrefetcher) trieID(owner common.Hash, root common.Hash) string {
return string(append(owner.Bytes(), root.Bytes()...)) trieID := make([]byte, common.HashLength*2)
copy(trieID, owner.Bytes())
copy(trieID[common.HashLength:], root.Bytes())
return string(trieID)
} }
// subfetcher is a trie fetcher goroutine responsible for pulling entries for a // subfetcher is a trie fetcher goroutine responsible for pulling entries for a

View File

@ -24,7 +24,6 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc" "github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
@ -169,7 +168,7 @@ func applyTransaction(msg *Message, config *params.ChainConfig, gp *GasPool, sta
if tx.Type() == types.BlobTxType { if tx.Type() == types.BlobTxType {
receipt.BlobGasUsed = uint64(len(tx.BlobHashes()) * params.BlobTxBlobGasPerBlob) receipt.BlobGasUsed = uint64(len(tx.BlobHashes()) * params.BlobTxBlobGasPerBlob)
receipt.BlobGasPrice = eip4844.CalcBlobFee(*evm.Context.ExcessBlobGas) receipt.BlobGasPrice = evm.Context.BlobBaseFee
} }
// If the transaction created a contract, store the creation address in the receipt. // If the transaction created a contract, store the creation address in the receipt.

View File

@ -24,7 +24,6 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
cmath "github.com/ethereum/go-ethereum/common/math" cmath "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
@ -248,7 +247,7 @@ func (st *StateTransition) buyGas() error {
balanceCheck.Add(balanceCheck, blobBalanceCheck) balanceCheck.Add(balanceCheck, blobBalanceCheck)
// Pay for blobGasUsed * actual blob fee // Pay for blobGasUsed * actual blob fee
blobFee := new(big.Int).SetUint64(blobGas) blobFee := new(big.Int).SetUint64(blobGas)
blobFee.Mul(blobFee, eip4844.CalcBlobFee(*st.evm.Context.ExcessBlobGas)) blobFee.Mul(blobFee, st.evm.Context.BlobBaseFee)
mgval.Add(mgval, blobFee) mgval.Add(mgval, blobFee)
} }
} }
@ -329,7 +328,7 @@ func (st *StateTransition) preCheck() error {
if st.evm.ChainConfig().IsCancun(st.evm.Context.BlockNumber, st.evm.Context.Time) { if st.evm.ChainConfig().IsCancun(st.evm.Context.BlockNumber, st.evm.Context.Time) {
if st.blobGasUsed() > 0 { if st.blobGasUsed() > 0 {
// Check that the user is paying at least the current blob fee // Check that the user is paying at least the current blob fee
blobFee := eip4844.CalcBlobFee(*st.evm.Context.ExcessBlobGas) blobFee := st.evm.Context.BlobBaseFee
if st.msg.BlobGasFeeCap.Cmp(blobFee) < 0 { if st.msg.BlobGasFeeCap.Cmp(blobFee) < 0 {
return fmt.Errorf("%w: address %v have %v want %v", ErrBlobFeeCapTooLow, st.msg.From.Hex(), st.msg.BlobGasFeeCap, blobFee) return fmt.Errorf("%w: address %v have %v want %v", ErrBlobFeeCapTooLow, st.msg.From.Hex(), st.msg.BlobGasFeeCap, blobFee)
} }

View File

@ -97,6 +97,8 @@ type blobTxMeta struct {
execTipCap *uint256.Int // Needed to prioritize inclusion order across accounts and validate replacement price bump execTipCap *uint256.Int // Needed to prioritize inclusion order across accounts and validate replacement price bump
execFeeCap *uint256.Int // Needed to validate replacement price bump execFeeCap *uint256.Int // Needed to validate replacement price bump
blobFeeCap *uint256.Int // Needed to validate replacement price bump blobFeeCap *uint256.Int // Needed to validate replacement price bump
execGas uint64 // Needed to check inclusion validity before reading the blob
blobGas uint64 // Needed to check inclusion validity before reading the blob
basefeeJumps float64 // Absolute number of 1559 fee adjustments needed to reach the tx's fee cap basefeeJumps float64 // Absolute number of 1559 fee adjustments needed to reach the tx's fee cap
blobfeeJumps float64 // Absolute number of 4844 fee adjustments needed to reach the tx's blob fee cap blobfeeJumps float64 // Absolute number of 4844 fee adjustments needed to reach the tx's blob fee cap
@ -118,6 +120,8 @@ func newBlobTxMeta(id uint64, size uint32, tx *types.Transaction) *blobTxMeta {
execTipCap: uint256.MustFromBig(tx.GasTipCap()), execTipCap: uint256.MustFromBig(tx.GasTipCap()),
execFeeCap: uint256.MustFromBig(tx.GasFeeCap()), execFeeCap: uint256.MustFromBig(tx.GasFeeCap()),
blobFeeCap: uint256.MustFromBig(tx.BlobGasFeeCap()), blobFeeCap: uint256.MustFromBig(tx.BlobGasFeeCap()),
execGas: tx.Gas(),
blobGas: tx.BlobGas(),
} }
meta.basefeeJumps = dynamicFeeJumps(meta.execFeeCap) meta.basefeeJumps = dynamicFeeJumps(meta.execFeeCap)
meta.blobfeeJumps = dynamicFeeJumps(meta.blobFeeCap) meta.blobfeeJumps = dynamicFeeJumps(meta.blobFeeCap)
@ -307,8 +311,8 @@ type BlobPool struct {
spent map[common.Address]*uint256.Int // Expenditure tracking for individual accounts spent map[common.Address]*uint256.Int // Expenditure tracking for individual accounts
evict *evictHeap // Heap of cheapest accounts for eviction when full evict *evictHeap // Heap of cheapest accounts for eviction when full
eventFeed event.Feed // Event feed to send out new tx events on pool inclusion discoverFeed event.Feed // Event feed to send out new tx events on pool discovery (reorg excluded)
eventScope event.SubscriptionScope // Event scope to track and mass unsubscribe on termination insertFeed event.Feed // Event feed to send out new tx events on pool inclusion (reorg included)
lock sync.RWMutex // Mutex protecting the pool during reorg handling lock sync.RWMutex // Mutex protecting the pool during reorg handling
} }
@ -436,8 +440,6 @@ func (p *BlobPool) Close() error {
if err := p.store.Close(); err != nil { if err := p.store.Close(); err != nil {
errs = append(errs, err) errs = append(errs, err)
} }
p.eventScope.Close()
switch { switch {
case errs == nil: case errs == nil:
return nil return nil
@ -758,15 +760,21 @@ func (p *BlobPool) Reset(oldHead, newHead *types.Header) {
// Run the reorg between the old and new head and figure out which accounts // Run the reorg between the old and new head and figure out which accounts
// need to be rechecked and which transactions need to be readded // need to be rechecked and which transactions need to be readded
if reinject, inclusions := p.reorg(oldHead, newHead); reinject != nil { if reinject, inclusions := p.reorg(oldHead, newHead); reinject != nil {
var adds []*types.Transaction
for addr, txs := range reinject { for addr, txs := range reinject {
// Blindly push all the lost transactions back into the pool // Blindly push all the lost transactions back into the pool
for _, tx := range txs { for _, tx := range txs {
p.reinject(addr, tx.Hash()) if err := p.reinject(addr, tx.Hash()); err == nil {
adds = append(adds, tx.WithoutBlobTxSidecar())
}
} }
// Recheck the account's pooled transactions to drop included and // Recheck the account's pooled transactions to drop included and
// invalidated one // invalidated one
p.recheck(addr, inclusions) p.recheck(addr, inclusions)
} }
if len(adds) > 0 {
p.insertFeed.Send(core.NewTxsEvent{Txs: adds})
}
} }
// Flush out any blobs from limbo that are older than the latest finality // Flush out any blobs from limbo that are older than the latest finality
if p.chain.Config().IsCancun(p.head.Number, p.head.Time) { if p.chain.Config().IsCancun(p.head.Number, p.head.Time) {
@ -921,13 +929,13 @@ func (p *BlobPool) reorg(oldHead, newHead *types.Header) (map[common.Address][]*
// Note, the method will not initialize the eviction cache values as those will // Note, the method will not initialize the eviction cache values as those will
// be done once for all transactions belonging to an account after all individual // be done once for all transactions belonging to an account after all individual
// transactions are injected back into the pool. // transactions are injected back into the pool.
func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) { func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) error {
// Retrieve the associated blob from the limbo. Without the blobs, we cannot // Retrieve the associated blob from the limbo. Without the blobs, we cannot
// add the transaction back into the pool as it is not mineable. // add the transaction back into the pool as it is not mineable.
tx, err := p.limbo.pull(txhash) tx, err := p.limbo.pull(txhash)
if err != nil { if err != nil {
log.Error("Blobs unavailable, dropping reorged tx", "err", err) log.Error("Blobs unavailable, dropping reorged tx", "err", err)
return return err
} }
// TODO: seems like an easy optimization here would be getting the serialized tx // TODO: seems like an easy optimization here would be getting the serialized tx
// from limbo instead of re-serializing it here. // from limbo instead of re-serializing it here.
@ -936,12 +944,12 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) {
blob, err := rlp.EncodeToBytes(tx) blob, err := rlp.EncodeToBytes(tx)
if err != nil { if err != nil {
log.Error("Failed to encode transaction for storage", "hash", tx.Hash(), "err", err) log.Error("Failed to encode transaction for storage", "hash", tx.Hash(), "err", err)
return return err
} }
id, err := p.store.Put(blob) id, err := p.store.Put(blob)
if err != nil { if err != nil {
log.Error("Failed to write transaction into storage", "hash", tx.Hash(), "err", err) log.Error("Failed to write transaction into storage", "hash", tx.Hash(), "err", err)
return return err
} }
// Update the indixes and metrics // Update the indixes and metrics
@ -949,7 +957,7 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) {
if _, ok := p.index[addr]; !ok { if _, ok := p.index[addr]; !ok {
if err := p.reserve(addr, true); err != nil { if err := p.reserve(addr, true); err != nil {
log.Warn("Failed to reserve account for blob pool", "tx", tx.Hash(), "from", addr, "err", err) log.Warn("Failed to reserve account for blob pool", "tx", tx.Hash(), "from", addr, "err", err)
return return err
} }
p.index[addr] = []*blobTxMeta{meta} p.index[addr] = []*blobTxMeta{meta}
p.spent[addr] = meta.costCap p.spent[addr] = meta.costCap
@ -960,6 +968,7 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) {
} }
p.lookup[meta.hash] = meta.id p.lookup[meta.hash] = meta.id
p.stored += uint64(meta.size) p.stored += uint64(meta.size)
return nil
} }
// SetGasTip implements txpool.SubPool, allowing the blob pool's gas requirements // SetGasTip implements txpool.SubPool, allowing the blob pool's gas requirements
@ -1154,9 +1163,19 @@ func (p *BlobPool) Get(hash common.Hash) *types.Transaction {
// Add inserts a set of blob transactions into the pool if they pass validation (both // Add inserts a set of blob transactions into the pool if they pass validation (both
// consensus validity and pool restictions). // consensus validity and pool restictions).
func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error { func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error {
errs := make([]error, len(txs)) var (
adds = make([]*types.Transaction, 0, len(txs))
errs = make([]error, len(txs))
)
for i, tx := range txs { for i, tx := range txs {
errs[i] = p.add(tx) errs[i] = p.add(tx)
if errs[i] == nil {
adds = append(adds, tx.WithoutBlobTxSidecar())
}
}
if len(adds) > 0 {
p.discoverFeed.Send(core.NewTxsEvent{Txs: adds})
p.insertFeed.Send(core.NewTxsEvent{Txs: adds})
} }
return errs return errs
} }
@ -1384,6 +1403,8 @@ func (p *BlobPool) Pending(enforceTips bool) map[common.Address][]*txpool.LazyTr
Time: time.Now(), // TODO(karalabe): Maybe save these and use that? Time: time.Now(), // TODO(karalabe): Maybe save these and use that?
GasFeeCap: tx.execFeeCap.ToBig(), GasFeeCap: tx.execFeeCap.ToBig(),
GasTipCap: tx.execTipCap.ToBig(), GasTipCap: tx.execTipCap.ToBig(),
Gas: tx.execGas,
BlobGas: tx.blobGas,
}) })
} }
if len(lazies) > 0 { if len(lazies) > 0 {
@ -1468,10 +1489,14 @@ func (p *BlobPool) updateLimboMetrics() {
limboSlotusedGauge.Update(int64(slotused)) limboSlotusedGauge.Update(int64(slotused))
} }
// SubscribeTransactions registers a subscription of NewTxsEvent and // SubscribeTransactions registers a subscription for new transaction events,
// starts sending event to the given channel. // supporting feeding only newly seen or also resurrected transactions.
func (p *BlobPool) SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription { func (p *BlobPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription {
return p.eventScope.Track(p.eventFeed.Subscribe(ch)) if reorgs {
return p.insertFeed.Subscribe(ch)
} else {
return p.discoverFeed.Subscribe(ch)
}
} }
// Nonce returns the next nonce of an account, with all transactions executable // Nonce returns the next nonce of an account, with all transactions executable

View File

@ -208,7 +208,6 @@ type LegacyPool struct {
chain BlockChain chain BlockChain
gasTip atomic.Pointer[big.Int] gasTip atomic.Pointer[big.Int]
txFeed event.Feed txFeed event.Feed
scope event.SubscriptionScope
signer types.Signer signer types.Signer
mu sync.RWMutex mu sync.RWMutex
@ -404,9 +403,6 @@ func (pool *LegacyPool) loop() {
// Close terminates the transaction pool. // Close terminates the transaction pool.
func (pool *LegacyPool) Close() error { func (pool *LegacyPool) Close() error {
// Unsubscribe all subscriptions registered from txpool
pool.scope.Close()
// Terminate the pool reorger and return // Terminate the pool reorger and return
close(pool.reorgShutdownCh) close(pool.reorgShutdownCh)
pool.wg.Wait() pool.wg.Wait()
@ -425,10 +421,14 @@ func (pool *LegacyPool) Reset(oldHead, newHead *types.Header) {
<-wait <-wait
} }
// SubscribeTransactions registers a subscription of NewTxsEvent and // SubscribeTransactions registers a subscription for new transaction events,
// starts sending event to the given channel. // supporting feeding only newly seen or also resurrected transactions.
func (pool *LegacyPool) SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription { func (pool *LegacyPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription {
return pool.scope.Track(pool.txFeed.Subscribe(ch)) // The legacy pool has a very messed up internal shuffling, so it's kind of
// hard to separate newly discovered transaction from resurrected ones. This
// is because the new txs are added to the queue, resurrected ones too and
// reorgs run lazily, so separating the two would need a marker.
return pool.txFeed.Subscribe(ch)
} }
// SetGasTip updates the minimum gas tip required by the transaction pool for a // SetGasTip updates the minimum gas tip required by the transaction pool for a
@ -552,6 +552,8 @@ func (pool *LegacyPool) Pending(enforceTips bool) map[common.Address][]*txpool.L
Time: txs[i].Time(), Time: txs[i].Time(),
GasFeeCap: txs[i].GasFeeCap(), GasFeeCap: txs[i].GasFeeCap(),
GasTipCap: txs[i].GasTipCap(), GasTipCap: txs[i].GasTipCap(),
Gas: txs[i].Gas(),
BlobGas: txs[i].BlobGas(),
} }
} }
pending[addr] = lazies pending[addr] = lazies

View File

@ -205,7 +205,7 @@ func (m *sortedMap) Remove(nonce uint64) bool {
// removed from the list. // removed from the list.
// //
// Note, all transactions with nonces lower than start will also be returned to // Note, all transactions with nonces lower than start will also be returned to
// prevent getting into and invalid state. This is not something that should ever // prevent getting into an invalid state. This is not something that should ever
// happen but better to be self correcting than failing! // happen but better to be self correcting than failing!
func (m *sortedMap) Ready(start uint64) types.Transactions { func (m *sortedMap) Ready(start uint64) types.Transactions {
// Short circuit if no transactions are available // Short circuit if no transactions are available
@ -421,7 +421,7 @@ func (l *list) Remove(tx *types.Transaction) (bool, types.Transactions) {
// removed from the list. // removed from the list.
// //
// Note, all transactions with nonces lower than start will also be returned to // Note, all transactions with nonces lower than start will also be returned to
// prevent getting into and invalid state. This is not something that should ever // prevent getting into an invalid state. This is not something that should ever
// happen but better to be self correcting than failing! // happen but better to be self correcting than failing!
func (l *list) Ready(start uint64) types.Transactions { func (l *list) Ready(start uint64) types.Transactions {
txs := l.txs.Ready(start) txs := l.txs.Ready(start)

View File

@ -30,13 +30,16 @@ import (
// enough for the miner and other APIs to handle large batches of transactions; // enough for the miner and other APIs to handle large batches of transactions;
// and supports pulling up the entire transaction when really needed. // and supports pulling up the entire transaction when really needed.
type LazyTransaction struct { type LazyTransaction struct {
Pool SubPool // Transaction subpool to pull the real transaction up Pool LazyResolver // Transaction resolver to pull the real transaction up
Hash common.Hash // Transaction hash to pull up if needed Hash common.Hash // Transaction hash to pull up if needed
Tx *types.Transaction // Transaction if already resolved Tx *types.Transaction // Transaction if already resolved
Time time.Time // Time when the transaction was first seen Time time.Time // Time when the transaction was first seen
GasFeeCap *big.Int // Maximum fee per gas the transaction may consume GasFeeCap *big.Int // Maximum fee per gas the transaction may consume
GasTipCap *big.Int // Maximum miner tip per gas the transaction can pay GasTipCap *big.Int // Maximum miner tip per gas the transaction can pay
Gas uint64 // Amount of gas required by the transaction
BlobGas uint64 // Amount of blob gas required by the transaction
} }
// Resolve retrieves the full transaction belonging to a lazy handle if it is still // Resolve retrieves the full transaction belonging to a lazy handle if it is still
@ -48,6 +51,14 @@ func (ltx *LazyTransaction) Resolve() *types.Transaction {
return ltx.Tx return ltx.Tx
} }
// LazyResolver is a minimal interface needed for a transaction pool to satisfy
// resolving lazy transactions. It's mostly a helper to avoid the entire sub-
// pool being injected into the lazy transaction.
type LazyResolver interface {
// Get returns a transaction if it is contained in the pool, or nil otherwise.
Get(hash common.Hash) *types.Transaction
}
// AddressReserver is passed by the main transaction pool to subpools, so they // AddressReserver is passed by the main transaction pool to subpools, so they
// may request (and relinquish) exclusive access to certain addresses. // may request (and relinquish) exclusive access to certain addresses.
type AddressReserver func(addr common.Address, reserve bool) error type AddressReserver func(addr common.Address, reserve bool) error
@ -99,8 +110,10 @@ type SubPool interface {
// account and sorted by nonce. // account and sorted by nonce.
Pending(enforceTips bool) map[common.Address][]*LazyTransaction Pending(enforceTips bool) map[common.Address][]*LazyTransaction
// SubscribeTransactions subscribes to new transaction events. // SubscribeTransactions subscribes to new transaction events. The subscriber
SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription // can decide whether to receive notifications only for newly seen transactions
// or also for reorged out ones.
SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription
// Nonce returns the next nonce of an account, with all transactions executable // Nonce returns the next nonce of an account, with all transactions executable
// by the pool already applied on top. // by the pool already applied on top.

View File

@ -155,13 +155,15 @@ func (p *TxPool) Close() error {
if err := <-errc; err != nil { if err := <-errc; err != nil {
errs = append(errs, err) errs = append(errs, err)
} }
// Terminate each subpool // Terminate each subpool
for _, subpool := range p.subpools { for _, subpool := range p.subpools {
if err := subpool.Close(); err != nil { if err := subpool.Close(); err != nil {
errs = append(errs, err) errs = append(errs, err)
} }
} }
// Unsubscribe anyone still listening for tx events
p.subs.Close()
if len(errs) > 0 { if len(errs) > 0 {
return fmt.Errorf("subpool close errors: %v", errs) return fmt.Errorf("subpool close errors: %v", errs)
} }
@ -316,12 +318,12 @@ func (p *TxPool) Pending(enforceTips bool) map[common.Address][]*LazyTransaction
return txs return txs
} }
// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and starts sending // SubscribeTransactions registers a subscription for new transaction events,
// events to the given channel. // supporting feeding only newly seen or also resurrected transactions.
func (p *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { func (p *TxPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription {
subs := make([]event.Subscription, len(p.subpools)) subs := make([]event.Subscription, len(p.subpools))
for i, subpool := range p.subpools { for i, subpool := range p.subpools {
subs[i] = subpool.SubscribeTransactions(ch) subs[i] = subpool.SubscribeTransactions(ch, reorgs)
} }
return p.subs.Track(event.JoinSubscriptions(subs...)) return p.subs.Track(event.JoinSubscriptions(subs...))
} }

View File

@ -95,7 +95,7 @@ type DerivableList interface {
func encodeForDerive(list DerivableList, i int, buf *bytes.Buffer) []byte { func encodeForDerive(list DerivableList, i int, buf *bytes.Buffer) []byte {
buf.Reset() buf.Reset()
list.EncodeIndex(i, buf) list.EncodeIndex(i, buf)
// It's really unfortunate that we need to do perform this copy. // It's really unfortunate that we need to perform this copy.
// StackTrie holds onto the values until Hash is called, so the values // StackTrie holds onto the values until Hash is called, so the values
// written to it must not alias. // written to it must not alias.
return common.CopyBytes(buf.Bytes()) return common.CopyBytes(buf.Bytes())

View File

@ -87,7 +87,7 @@ func SlimAccountRLP(account StateAccount) []byte {
return data return data
} }
// FullAccount decodes the data on the 'slim RLP' format and return // FullAccount decodes the data on the 'slim RLP' format and returns
// the consensus format account. // the consensus format account.
func FullAccount(data []byte) (*StateAccount, error) { func FullAccount(data []byte) (*StateAccount, error) {
var slim SlimAccount var slim SlimAccount

View File

@ -168,7 +168,7 @@ func (tx *Transaction) DecodeRLP(s *rlp.Stream) error {
} }
// UnmarshalBinary decodes the canonical encoding of transactions. // UnmarshalBinary decodes the canonical encoding of transactions.
// It supports legacy RLP transactions and EIP2718 typed transactions. // It supports legacy RLP transactions and EIP-2718 typed transactions.
func (tx *Transaction) UnmarshalBinary(b []byte) error { func (tx *Transaction) UnmarshalBinary(b []byte) error {
if len(b) > 0 && b[0] > 0x7f { if len(b) > 0 && b[0] > 0x7f {
// It's a legacy transaction. // It's a legacy transaction.
@ -180,7 +180,7 @@ func (tx *Transaction) UnmarshalBinary(b []byte) error {
tx.setDecoded(&data, uint64(len(b))) tx.setDecoded(&data, uint64(len(b)))
return nil return nil
} }
// It's an EIP2718 typed transaction envelope. // It's an EIP-2718 typed transaction envelope.
inner, err := tx.decodeTyped(b) inner, err := tx.decodeTyped(b)
if err != nil { if err != nil {
return err return err
@ -395,7 +395,7 @@ func (tx *Transaction) BlobGasFeeCap() *big.Int {
return nil return nil
} }
// BlobHashes returns the hases of the blob commitments for blob transactions, nil otherwise. // BlobHashes returns the hashes of the blob commitments for blob transactions, nil otherwise.
func (tx *Transaction) BlobHashes() []common.Hash { func (tx *Transaction) BlobHashes() []common.Hash {
if blobtx, ok := tx.inner.(*BlobTx); ok { if blobtx, ok := tx.inner.(*BlobTx); ok {
return blobtx.BlobHashes return blobtx.BlobHashes

View File

@ -57,7 +57,7 @@ func MakeSigner(config *params.ChainConfig, blockNumber *big.Int, blockTime uint
} }
// LatestSigner returns the 'most permissive' Signer available for the given chain // LatestSigner returns the 'most permissive' Signer available for the given chain
// configuration. Specifically, this enables support of all types of transacrions // configuration. Specifically, this enables support of all types of transactions
// when their respective forks are scheduled to occur at any block number (or time) // when their respective forks are scheduled to occur at any block number (or time)
// in the chain config. // in the chain config.
// //

View File

@ -31,13 +31,13 @@ type ContractRef interface {
// AccountRef implements ContractRef. // AccountRef implements ContractRef.
// //
// Account references are used during EVM initialisation and // Account references are used during EVM initialisation and
// it's primary use is to fetch addresses. Removing this object // its primary use is to fetch addresses. Removing this object
// proves difficult because of the cached jump destinations which // proves difficult because of the cached jump destinations which
// are fetched from the parent contract (i.e. the caller), which // are fetched from the parent contract (i.e. the caller), which
// is a ContractRef. // is a ContractRef.
type AccountRef common.Address type AccountRef common.Address
// Address casts AccountRef to a Address // Address casts AccountRef to an Address
func (ar AccountRef) Address() common.Address { return (common.Address)(ar) } func (ar AccountRef) Address() common.Address { return (common.Address)(ar) }
// Contract represents an ethereum contract in the state database. It contains // Contract represents an ethereum contract in the state database. It contains

View File

@ -282,9 +282,15 @@ func opBlobHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([
return nil, nil return nil, nil
} }
// enable4844 applies EIP-4844 (DATAHASH opcode) // opBlobBaseFee implements BLOBBASEFEE opcode
func opBlobBaseFee(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
blobBaseFee, _ := uint256.FromBig(interpreter.evm.Context.BlobBaseFee)
scope.Stack.push(blobBaseFee)
return nil, nil
}
// enable4844 applies EIP-4844 (BLOBHASH opcode)
func enable4844(jt *JumpTable) { func enable4844(jt *JumpTable) {
// New opcode
jt[BLOBHASH] = &operation{ jt[BLOBHASH] = &operation{
execute: opBlobHash, execute: opBlobHash,
constantGas: GasFastestStep, constantGas: GasFastestStep,
@ -293,6 +299,16 @@ func enable4844(jt *JumpTable) {
} }
} }
// enable7516 applies EIP-7516 (BLOBBASEFEE opcode)
func enable7516(jt *JumpTable) {
jt[BLOBBASEFEE] = &operation{
execute: opBlobBaseFee,
constantGas: GasQuickStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
}
}
// enable6780 applies EIP-6780 (deactivate SELFDESTRUCT) // enable6780 applies EIP-6780 (deactivate SELFDESTRUCT)
func enable6780(jt *JumpTable) { func enable6780(jt *JumpTable) {
jt[SELFDESTRUCT] = &operation{ jt[SELFDESTRUCT] = &operation{

View File

@ -73,8 +73,8 @@ type BlockContext struct {
Time uint64 // Provides information for TIME Time uint64 // Provides information for TIME
Difficulty *big.Int // Provides information for DIFFICULTY Difficulty *big.Int // Provides information for DIFFICULTY
BaseFee *big.Int // Provides information for BASEFEE BaseFee *big.Int // Provides information for BASEFEE
BlobBaseFee *big.Int // Provides information for BLOBBASEFEE
Random *common.Hash // Provides information for PREVRANDAO Random *common.Hash // Provides information for PREVRANDAO
ExcessBlobGas *uint64 // ExcessBlobGas field in the header, needed to compute the data
} }
// TxContext provides the EVM with information about a transaction. // TxContext provides the EVM with information about a transaction.

View File

@ -104,7 +104,7 @@ func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySi
// Legacy rules should be applied if we are in Petersburg (removal of EIP-1283) // Legacy rules should be applied if we are in Petersburg (removal of EIP-1283)
// OR Constantinople is not active // OR Constantinople is not active
if evm.chainRules.IsPetersburg || !evm.chainRules.IsConstantinople { if evm.chainRules.IsPetersburg || !evm.chainRules.IsConstantinople {
// This checks for 3 scenario's and calculates gas accordingly: // This checks for 3 scenarios and calculates gas accordingly:
// //
// 1. From a zero-value address to a non-zero value (NEW VALUE) // 1. From a zero-value address to a non-zero value (NEW VALUE)
// 2. From a non-zero value address to a zero-value address (DELETE) // 2. From a non-zero value address to a zero-value address (DELETE)

View File

@ -45,7 +45,7 @@ type EVMInterpreter struct {
table *JumpTable table *JumpTable
hasher crypto.KeccakState // Keccak256 hasher instance shared across opcodes hasher crypto.KeccakState // Keccak256 hasher instance shared across opcodes
hasherBuf common.Hash // Keccak256 hasher result array shared aross opcodes hasherBuf common.Hash // Keccak256 hasher result array shared across opcodes
readOnly bool // Whether to throw on stateful modifications readOnly bool // Whether to throw on stateful modifications
returnData []byte // Last CALL's return data for subsequent reuse returnData []byte // Last CALL's return data for subsequent reuse

View File

@ -82,7 +82,8 @@ func validate(jt JumpTable) JumpTable {
func newCancunInstructionSet() JumpTable { func newCancunInstructionSet() JumpTable {
instructionSet := newShanghaiInstructionSet() instructionSet := newShanghaiInstructionSet()
enable4844(&instructionSet) // EIP-4844 (DATAHASH opcode) enable4844(&instructionSet) // EIP-4844 (BLOBHASH opcode)
enable7516(&instructionSet) // EIP-7516 (BLOBBASEFEE opcode)
enable1153(&instructionSet) // EIP-1153 "Transient Storage" enable1153(&instructionSet) // EIP-1153 "Transient Storage"
enable5656(&instructionSet) // EIP-5656 (MCOPY opcode) enable5656(&instructionSet) // EIP-5656 (MCOPY opcode)
enable6780(&instructionSet) // EIP-6780 SELFDESTRUCT only in same transaction enable6780(&instructionSet) // EIP-6780 SELFDESTRUCT only in same transaction

View File

@ -56,7 +56,7 @@ func LookupInstructionSet(rules params.Rules) (JumpTable, error) {
return newFrontierInstructionSet(), nil return newFrontierInstructionSet(), nil
} }
// Stack returns the mininum and maximum stack requirements. // Stack returns the minimum and maximum stack requirements.
func (op *operation) Stack() (int, int) { func (op *operation) Stack() (int, int) {
return op.minStack, op.maxStack return op.minStack, op.maxStack
} }

View File

@ -101,6 +101,7 @@ const (
SELFBALANCE OpCode = 0x47 SELFBALANCE OpCode = 0x47
BASEFEE OpCode = 0x48 BASEFEE OpCode = 0x48
BLOBHASH OpCode = 0x49 BLOBHASH OpCode = 0x49
BLOBBASEFEE OpCode = 0x4a
) )
// 0x50 range - 'storage' and execution. // 0x50 range - 'storage' and execution.
@ -287,6 +288,7 @@ var opCodeToString = map[OpCode]string{
SELFBALANCE: "SELFBALANCE", SELFBALANCE: "SELFBALANCE",
BASEFEE: "BASEFEE", BASEFEE: "BASEFEE",
BLOBHASH: "BLOBHASH", BLOBHASH: "BLOBHASH",
BLOBBASEFEE: "BLOBBASEFEE",
// 0x50 range - 'storage' and execution. // 0x50 range - 'storage' and execution.
POP: "POP", POP: "POP",
@ -444,6 +446,7 @@ var stringToOp = map[string]OpCode{
"CHAINID": CHAINID, "CHAINID": CHAINID,
"BASEFEE": BASEFEE, "BASEFEE": BASEFEE,
"BLOBHASH": BLOBHASH, "BLOBHASH": BLOBHASH,
"BLOBBASEFEE": BLOBBASEFEE,
"DELEGATECALL": DELEGATECALL, "DELEGATECALL": DELEGATECALL,
"STATICCALL": STATICCALL, "STATICCALL": STATICCALL,
"CODESIZE": CODESIZE, "CODESIZE": CODESIZE,

View File

@ -37,6 +37,7 @@ func NewEnv(cfg *Config) *vm.EVM {
Difficulty: cfg.Difficulty, Difficulty: cfg.Difficulty,
GasLimit: cfg.GasLimit, GasLimit: cfg.GasLimit,
BaseFee: cfg.BaseFee, BaseFee: cfg.BaseFee,
BlobBaseFee: cfg.BlobBaseFee,
Random: cfg.Random, Random: cfg.Random,
} }

View File

@ -44,6 +44,7 @@ type Config struct {
Debug bool Debug bool
EVMConfig vm.Config EVMConfig vm.Config
BaseFee *big.Int BaseFee *big.Int
BlobBaseFee *big.Int
BlobHashes []common.Hash BlobHashes []common.Hash
Random *common.Hash Random *common.Hash
@ -95,6 +96,9 @@ func setDefaults(cfg *Config) {
if cfg.BaseFee == nil { if cfg.BaseFee == nil {
cfg.BaseFee = big.NewInt(params.InitialBaseFee) cfg.BaseFee = big.NewInt(params.InitialBaseFee)
} }
if cfg.BlobBaseFee == nil {
cfg.BlobBaseFee = new(big.Int)
}
} }
// Execute executes the code using the input as call data during the execution. // Execute executes the code using the input as call data during the execution.

View File

@ -334,7 +334,7 @@ func (b *EthAPIBackend) TxPool() *txpool.TxPool {
} }
func (b *EthAPIBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { func (b *EthAPIBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
return b.eth.txPool.SubscribeNewTxsEvent(ch) return b.eth.txPool.SubscribeTransactions(ch, true)
} }
func (b *EthAPIBackend) SyncProgress() ethereum.SyncProgress { func (b *EthAPIBackend) SyncProgress() ethereum.SyncProgress {

View File

@ -133,8 +133,12 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
scheme, err := rawdb.ParseStateScheme(config.StateScheme, chainDb)
if err != nil {
return nil, err
}
// Try to recover offline state pruning only in hash-based. // Try to recover offline state pruning only in hash-based.
if config.StateScheme == rawdb.HashScheme { if scheme == rawdb.HashScheme {
if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb); err != nil { if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb); err != nil {
log.Error("Failed to recover state", "error", err) log.Error("Failed to recover state", "error", err)
} }
@ -194,7 +198,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
SnapshotLimit: config.SnapshotCache, SnapshotLimit: config.SnapshotCache,
Preimages: config.Preimages, Preimages: config.Preimages,
StateHistory: config.StateHistory, StateHistory: config.StateHistory,
StateScheme: config.StateScheme, StateScheme: scheme,
} }
) )
// Override the chain config with provided settings. // Override the chain config with provided settings.

View File

@ -513,7 +513,7 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe
block, err := engine.ExecutableDataToBlock(params, versionedHashes, beaconRoot) block, err := engine.ExecutableDataToBlock(params, versionedHashes, beaconRoot)
if err != nil { if err != nil {
log.Warn("Invalid NewPayload params", "params", params, "error", err) log.Warn("Invalid NewPayload params", "params", params, "error", err)
return engine.PayloadStatusV1{Status: engine.INVALID}, nil return api.invalid(err, nil), nil
} }
// Stash away the last update to warn the user if the beacon client goes offline // Stash away the last update to warn the user if the beacon client goes offline
api.lastNewPayloadLock.Lock() api.lastNewPayloadLock.Lock()
@ -694,20 +694,21 @@ func (api *ConsensusAPI) checkInvalidAncestor(check common.Hash, head common.Has
} }
} }
// invalid returns a response "INVALID" with the latest valid hash supplied by latest or to the current head // invalid returns a response "INVALID" with the latest valid hash supplied by latest.
// if no latestValid block was provided.
func (api *ConsensusAPI) invalid(err error, latestValid *types.Header) engine.PayloadStatusV1 { func (api *ConsensusAPI) invalid(err error, latestValid *types.Header) engine.PayloadStatusV1 {
currentHash := api.eth.BlockChain().CurrentBlock().Hash() var currentHash *common.Hash
if latestValid != nil { if latestValid != nil {
if latestValid.Difficulty.BitLen() != 0 {
// Set latest valid hash to 0x0 if parent is PoW block // Set latest valid hash to 0x0 if parent is PoW block
currentHash = common.Hash{} currentHash = &common.Hash{}
if latestValid.Difficulty.BitLen() == 0 { } else {
// Otherwise set latest valid hash to parent hash // Otherwise set latest valid hash to parent hash
currentHash = latestValid.Hash() h := latestValid.Hash()
currentHash = &h
} }
} }
errorMsg := err.Error() errorMsg := err.Error()
return engine.PayloadStatusV1{Status: engine.INVALID, LatestValidHash: &currentHash, ValidationError: &errorMsg} return engine.PayloadStatusV1{Status: engine.INVALID, LatestValidHash: currentHash, ValidationError: &errorMsg}
} }
// heartbeat loops indefinitely, and checks if there have been beacon client updates // heartbeat loops indefinitely, and checks if there have been beacon client updates
@ -776,7 +777,7 @@ func (api *ConsensusAPI) ExchangeCapabilities([]string) []string {
// GetPayloadBodiesByHashV1 implements engine_getPayloadBodiesByHashV1 which allows for retrieval of a list // GetPayloadBodiesByHashV1 implements engine_getPayloadBodiesByHashV1 which allows for retrieval of a list
// of block bodies by the engine api. // of block bodies by the engine api.
func (api *ConsensusAPI) GetPayloadBodiesByHashV1(hashes []common.Hash) []*engine.ExecutionPayloadBodyV1 { func (api *ConsensusAPI) GetPayloadBodiesByHashV1(hashes []common.Hash) []*engine.ExecutionPayloadBodyV1 {
var bodies = make([]*engine.ExecutionPayloadBodyV1, len(hashes)) bodies := make([]*engine.ExecutionPayloadBodyV1, len(hashes))
for i, hash := range hashes { for i, hash := range hashes {
block := api.eth.BlockChain().GetBlockByHash(hash) block := api.eth.BlockChain().GetBlockByHash(hash)
bodies[i] = getBody(block) bodies[i] = getBody(block)

View File

@ -199,7 +199,7 @@ func (c *SimulatedBeacon) sealBlock(withdrawals []*types.Withdrawal) error {
func (c *SimulatedBeacon) loopOnDemand() { func (c *SimulatedBeacon) loopOnDemand() {
var ( var (
newTxs = make(chan core.NewTxsEvent) newTxs = make(chan core.NewTxsEvent)
sub = c.eth.TxPool().SubscribeNewTxsEvent(newTxs) sub = c.eth.TxPool().SubscribeTransactions(newTxs, true)
) )
defer sub.Unsubscribe() defer sub.Unsubscribe()

View File

@ -20,7 +20,7 @@ import (
"sync" "sync"
"time" "time"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
@ -28,22 +28,26 @@ import (
) )
// FullSyncTester is an auxiliary service that allows Geth to perform full sync // FullSyncTester is an auxiliary service that allows Geth to perform full sync
// alone without consensus-layer attached. Users must specify a valid block as // alone without consensus-layer attached. Users must specify a valid block hash
// the sync target. This tester can be applied to different networks, no matter // as the sync target.
// it's pre-merge or post-merge, but only for full-sync. //
// This tester can be applied to different networks, no matter it's pre-merge or
// post-merge, but only for full-sync.
type FullSyncTester struct { type FullSyncTester struct {
api *ConsensusAPI stack *node.Node
block *types.Block backend *eth.Ethereum
target common.Hash
closed chan struct{} closed chan struct{}
wg sync.WaitGroup wg sync.WaitGroup
} }
// RegisterFullSyncTester registers the full-sync tester service into the node // RegisterFullSyncTester registers the full-sync tester service into the node
// stack for launching and stopping the service controlled by node. // stack for launching and stopping the service controlled by node.
func RegisterFullSyncTester(stack *node.Node, backend *eth.Ethereum, block *types.Block) (*FullSyncTester, error) { func RegisterFullSyncTester(stack *node.Node, backend *eth.Ethereum, target common.Hash) (*FullSyncTester, error) {
cl := &FullSyncTester{ cl := &FullSyncTester{
api: newConsensusAPIWithoutHeartbeat(backend), stack: stack,
block: block, backend: backend,
target: target,
closed: make(chan struct{}), closed: make(chan struct{}),
} }
stack.RegisterLifecycle(cl) stack.RegisterLifecycle(cl)
@ -56,29 +60,25 @@ func (tester *FullSyncTester) Start() error {
go func() { go func() {
defer tester.wg.Done() defer tester.wg.Done()
// Trigger beacon sync with the provided block hash as trusted
// chain head.
err := tester.backend.Downloader().BeaconDevSync(downloader.FullSync, tester.target, tester.closed)
if err != nil {
log.Info("Failed to trigger beacon sync", "err", err)
}
ticker := time.NewTicker(time.Second * 5) ticker := time.NewTicker(time.Second * 5)
defer ticker.Stop() defer ticker.Stop()
for { for {
select { select {
case <-ticker.C: case <-ticker.C:
// Don't bother downloader in case it's already syncing. // Stop in case the target block is already stored locally.
if tester.api.eth.Downloader().Synchronising() { if block := tester.backend.BlockChain().GetBlockByHash(tester.target); block != nil {
continue log.Info("Full-sync target reached", "number", block.NumberU64(), "hash", block.Hash())
} go tester.stack.Close() // async since we need to close ourselves
// Short circuit in case the target block is already stored
// locally. TODO(somehow terminate the node stack if target
// is reached).
if tester.api.eth.BlockChain().HasBlock(tester.block.Hash(), tester.block.NumberU64()) {
log.Info("Full-sync target reached", "number", tester.block.NumberU64(), "hash", tester.block.Hash())
return return
} }
// Trigger beacon sync with the provided block header as
// trusted chain head.
err := tester.api.eth.Downloader().BeaconSync(downloader.FullSync, tester.block.Header(), tester.block.Header())
if err != nil {
log.Info("Failed to beacon sync", "err", err)
}
case <-tester.closed: case <-tester.closed:
return return

View File

@ -0,0 +1,81 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package downloader
import (
"errors"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)
// BeaconDevSync is a development helper to test synchronization by providing
// a block hash instead of header to run the beacon sync against.
//
// The method will reach out to the network to retrieve the header of the sync
// target instead of receiving it from the consensus node.
//
// Note, this must not be used in live code. If the forkchcoice endpoint where
// to use this instead of giving us the payload first, then essentially nobody
// in the network would have the block yet that we'd attempt to retrieve.
func (d *Downloader) BeaconDevSync(mode SyncMode, hash common.Hash, stop chan struct{}) error {
// Be very loud that this code should not be used in a live node
log.Warn("----------------------------------")
log.Warn("Beacon syncing with hash as target", "hash", hash)
log.Warn("This is unhealthy for a live node!")
log.Warn("----------------------------------")
log.Info("Waiting for peers to retrieve sync target")
for {
// If the node is going down, unblock
select {
case <-stop:
return errors.New("stop requested")
default:
}
// Pick a random peer to sync from and keep retrying if none are yet
// available due to fresh startup
d.peers.lock.RLock()
var peer *peerConnection
for _, peer = range d.peers.peers {
break
}
d.peers.lock.RUnlock()
if peer == nil {
time.Sleep(time.Second)
continue
}
// Found a peer, attempt to retrieve the header whilst blocking and
// retry if it fails for whatever reason
log.Info("Attempting to retrieve sync target", "peer", peer.id)
headers, metas, err := d.fetchHeadersByHash(peer, hash, 1, 0, false)
if err != nil || len(headers) != 1 {
log.Warn("Failed to fetch sync target", "headers", len(headers), "err", err)
time.Sleep(time.Second)
continue
}
// Head header retrieved, if the hash matches, start the actual sync
if metas[0] != hash {
log.Error("Received invalid sync target", "want", hash, "have", metas[0])
time.Sleep(time.Second)
continue
}
return d.BeaconSync(mode, headers[0], headers[0])
}
}

View File

@ -286,11 +286,6 @@ func (d *Downloader) Progress() ethereum.SyncProgress {
} }
} }
// Synchronising returns whether the downloader is currently retrieving blocks.
func (d *Downloader) Synchronising() bool {
return d.synchronising.Load()
}
// RegisterPeer injects a new download peer into the set of block source to be // RegisterPeer injects a new download peer into the set of block source to be
// used for fetching hashes and blocks from. // used for fetching hashes and blocks from.
func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error { func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error {
@ -309,11 +304,6 @@ func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error {
return nil return nil
} }
// RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer.
func (d *Downloader) RegisterLightPeer(id string, version uint, peer LightPeer) error {
return d.RegisterPeer(id, version, &lightPeerWrapper{peer})
}
// UnregisterPeer remove a peer from the known list, preventing any action from // UnregisterPeer remove a peer from the known list, preventing any action from
// the specified peer. An effort is also made to return any pending fetches into // the specified peer. An effort is also made to return any pending fetches into
// the queue. // the queue.

View File

@ -177,7 +177,7 @@ func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header {
// function can be used to retrieve batches of headers from the particular peer. // function can be used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
// Service the header query via the live handler code // Service the header query via the live handler code
rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersPacket{ rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{ Origin: eth.HashOrNumber{
Hash: origin, Hash: origin,
}, },
@ -205,7 +205,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount i
} }
res := &eth.Response{ res := &eth.Response{
Req: req, Req: req,
Res: (*eth.BlockHeadersPacket)(&headers), Res: (*eth.BlockHeadersRequest)(&headers),
Meta: hashes, Meta: hashes,
Time: 1, Time: 1,
Done: make(chan error, 1), // Ignore the returned status Done: make(chan error, 1), // Ignore the returned status
@ -221,7 +221,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount i
// function can be used to retrieve batches of headers from the particular peer. // function can be used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
// Service the header query via the live handler code // Service the header query via the live handler code
rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersPacket{ rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{ Origin: eth.HashOrNumber{
Number: origin, Number: origin,
}, },
@ -249,7 +249,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int,
} }
res := &eth.Response{ res := &eth.Response{
Req: req, Req: req,
Res: (*eth.BlockHeadersPacket)(&headers), Res: (*eth.BlockHeadersRequest)(&headers),
Meta: hashes, Meta: hashes,
Time: 1, Time: 1,
Done: make(chan error, 1), // Ignore the returned status Done: make(chan error, 1), // Ignore the returned status
@ -286,7 +286,7 @@ func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *et
} }
res := &eth.Response{ res := &eth.Response{
Req: req, Req: req,
Res: (*eth.BlockBodiesPacket)(&bodies), Res: (*eth.BlockBodiesResponse)(&bodies),
Meta: [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes}, Meta: [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes},
Time: 1, Time: 1,
Done: make(chan error, 1), // Ignore the returned status Done: make(chan error, 1), // Ignore the returned status
@ -317,7 +317,7 @@ func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash, sink chan *
} }
res := &eth.Response{ res := &eth.Response{
Req: req, Req: req,
Res: (*eth.ReceiptsPacket)(&receipts), Res: (*eth.ReceiptsResponse)(&receipts),
Meta: hashes, Meta: hashes,
Time: 1, Time: 1,
Done: make(chan error, 1), // Ignore the returned status Done: make(chan error, 1), // Ignore the returned status
@ -437,9 +437,9 @@ func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
} }
} }
func TestCanonicalSynchronisation66Full(t *testing.T) { testCanonSync(t, eth.ETH66, FullSync) } func TestCanonicalSynchronisation68Full(t *testing.T) { testCanonSync(t, eth.ETH68, FullSync) }
func TestCanonicalSynchronisation66Snap(t *testing.T) { testCanonSync(t, eth.ETH66, SnapSync) } func TestCanonicalSynchronisation68Snap(t *testing.T) { testCanonSync(t, eth.ETH68, SnapSync) }
func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) } func TestCanonicalSynchronisation68Light(t *testing.T) { testCanonSync(t, eth.ETH68, LightSync) }
func TestCanonicalSynchronisation67Full(t *testing.T) { testCanonSync(t, eth.ETH67, FullSync) } func TestCanonicalSynchronisation67Full(t *testing.T) { testCanonSync(t, eth.ETH67, FullSync) }
func TestCanonicalSynchronisation67Snap(t *testing.T) { testCanonSync(t, eth.ETH67, SnapSync) } func TestCanonicalSynchronisation67Snap(t *testing.T) { testCanonSync(t, eth.ETH67, SnapSync) }
func TestCanonicalSynchronisation67Light(t *testing.T) { testCanonSync(t, eth.ETH67, LightSync) } func TestCanonicalSynchronisation67Light(t *testing.T) { testCanonSync(t, eth.ETH67, LightSync) }
@ -461,8 +461,8 @@ func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
// Tests that if a large batch of blocks are being downloaded, it is throttled // Tests that if a large batch of blocks are being downloaded, it is throttled
// until the cached blocks are retrieved. // until the cached blocks are retrieved.
func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) } func TestThrottling68Full(t *testing.T) { testThrottling(t, eth.ETH68, FullSync) }
func TestThrottling66Snap(t *testing.T) { testThrottling(t, eth.ETH66, SnapSync) } func TestThrottling68Snap(t *testing.T) { testThrottling(t, eth.ETH68, SnapSync) }
func TestThrottling67Full(t *testing.T) { testThrottling(t, eth.ETH67, FullSync) } func TestThrottling67Full(t *testing.T) { testThrottling(t, eth.ETH67, FullSync) }
func TestThrottling67Snap(t *testing.T) { testThrottling(t, eth.ETH67, SnapSync) } func TestThrottling67Snap(t *testing.T) { testThrottling(t, eth.ETH67, SnapSync) }
@ -543,9 +543,9 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
// Tests that simple synchronization against a forked chain works correctly. In // Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full // this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed. // binary search should be executed.
func TestForkedSync66Full(t *testing.T) { testForkedSync(t, eth.ETH66, FullSync) } func TestForkedSync68Full(t *testing.T) { testForkedSync(t, eth.ETH68, FullSync) }
func TestForkedSync66Snap(t *testing.T) { testForkedSync(t, eth.ETH66, SnapSync) } func TestForkedSync68Snap(t *testing.T) { testForkedSync(t, eth.ETH68, SnapSync) }
func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) } func TestForkedSync68Light(t *testing.T) { testForkedSync(t, eth.ETH68, LightSync) }
func TestForkedSync67Full(t *testing.T) { testForkedSync(t, eth.ETH67, FullSync) } func TestForkedSync67Full(t *testing.T) { testForkedSync(t, eth.ETH67, FullSync) }
func TestForkedSync67Snap(t *testing.T) { testForkedSync(t, eth.ETH67, SnapSync) } func TestForkedSync67Snap(t *testing.T) { testForkedSync(t, eth.ETH67, SnapSync) }
func TestForkedSync67Light(t *testing.T) { testForkedSync(t, eth.ETH67, LightSync) } func TestForkedSync67Light(t *testing.T) { testForkedSync(t, eth.ETH67, LightSync) }
@ -573,9 +573,9 @@ func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
// Tests that synchronising against a much shorter but much heavier fork works // Tests that synchronising against a much shorter but much heavier fork works
// currently and is not dropped. // currently and is not dropped.
func TestHeavyForkedSync66Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, FullSync) } func TestHeavyForkedSync68Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, FullSync) }
func TestHeavyForkedSync66Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, SnapSync) } func TestHeavyForkedSync68Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, SnapSync) }
func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) } func TestHeavyForkedSync68Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, LightSync) }
func TestHeavyForkedSync67Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, FullSync) } func TestHeavyForkedSync67Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, FullSync) }
func TestHeavyForkedSync67Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, SnapSync) } func TestHeavyForkedSync67Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, SnapSync) }
func TestHeavyForkedSync67Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, LightSync) } func TestHeavyForkedSync67Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, LightSync) }
@ -605,9 +605,9 @@ func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
// Tests that chain forks are contained within a certain interval of the current // Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding // chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains. // long dead chains.
func TestBoundedForkedSync66Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, FullSync) } func TestBoundedForkedSync68Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, FullSync) }
func TestBoundedForkedSync66Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, SnapSync) } func TestBoundedForkedSync68Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, SnapSync) }
func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) } func TestBoundedForkedSync68Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, LightSync) }
func TestBoundedForkedSync67Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, FullSync) } func TestBoundedForkedSync67Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, FullSync) }
func TestBoundedForkedSync67Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, SnapSync) } func TestBoundedForkedSync67Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, SnapSync) }
func TestBoundedForkedSync67Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, LightSync) } func TestBoundedForkedSync67Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, LightSync) }
@ -636,14 +636,14 @@ func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
// Tests that chain forks are contained within a certain interval of the current // Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they // chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths. // take different ancestor lookup paths.
func TestBoundedHeavyForkedSync66Full(t *testing.T) { func TestBoundedHeavyForkedSync68Full(t *testing.T) {
testBoundedHeavyForkedSync(t, eth.ETH66, FullSync) testBoundedHeavyForkedSync(t, eth.ETH68, FullSync)
} }
func TestBoundedHeavyForkedSync66Snap(t *testing.T) { func TestBoundedHeavyForkedSync68Snap(t *testing.T) {
testBoundedHeavyForkedSync(t, eth.ETH66, SnapSync) testBoundedHeavyForkedSync(t, eth.ETH68, SnapSync)
} }
func TestBoundedHeavyForkedSync66Light(t *testing.T) { func TestBoundedHeavyForkedSync68Light(t *testing.T) {
testBoundedHeavyForkedSync(t, eth.ETH66, LightSync) testBoundedHeavyForkedSync(t, eth.ETH68, LightSync)
} }
func TestBoundedHeavyForkedSync67Full(t *testing.T) { func TestBoundedHeavyForkedSync67Full(t *testing.T) {
testBoundedHeavyForkedSync(t, eth.ETH67, FullSync) testBoundedHeavyForkedSync(t, eth.ETH67, FullSync)
@ -678,9 +678,9 @@ func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
} }
// Tests that a canceled download wipes all previously accumulated state. // Tests that a canceled download wipes all previously accumulated state.
func TestCancel66Full(t *testing.T) { testCancel(t, eth.ETH66, FullSync) } func TestCancel68Full(t *testing.T) { testCancel(t, eth.ETH68, FullSync) }
func TestCancel66Snap(t *testing.T) { testCancel(t, eth.ETH66, SnapSync) } func TestCancel68Snap(t *testing.T) { testCancel(t, eth.ETH68, SnapSync) }
func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) } func TestCancel68Light(t *testing.T) { testCancel(t, eth.ETH68, LightSync) }
func TestCancel67Full(t *testing.T) { testCancel(t, eth.ETH67, FullSync) } func TestCancel67Full(t *testing.T) { testCancel(t, eth.ETH67, FullSync) }
func TestCancel67Snap(t *testing.T) { testCancel(t, eth.ETH67, SnapSync) } func TestCancel67Snap(t *testing.T) { testCancel(t, eth.ETH67, SnapSync) }
func TestCancel67Light(t *testing.T) { testCancel(t, eth.ETH67, LightSync) } func TestCancel67Light(t *testing.T) { testCancel(t, eth.ETH67, LightSync) }
@ -708,9 +708,9 @@ func testCancel(t *testing.T, protocol uint, mode SyncMode) {
} }
// Tests that synchronisation from multiple peers works as intended (multi thread sanity test). // Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation66Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, FullSync) } func TestMultiSynchronisation68Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, FullSync) }
func TestMultiSynchronisation66Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, SnapSync) } func TestMultiSynchronisation68Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, SnapSync) }
func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) } func TestMultiSynchronisation68Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, LightSync) }
func TestMultiSynchronisation67Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, FullSync) } func TestMultiSynchronisation67Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, FullSync) }
func TestMultiSynchronisation67Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, SnapSync) } func TestMultiSynchronisation67Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, SnapSync) }
func TestMultiSynchronisation67Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, LightSync) } func TestMultiSynchronisation67Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, LightSync) }
@ -735,9 +735,9 @@ func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
// Tests that synchronisations behave well in multi-version protocol environments // Tests that synchronisations behave well in multi-version protocol environments
// and not wreak havoc on other nodes in the network. // and not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation66Full(t *testing.T) { testMultiProtoSync(t, eth.ETH66, FullSync) } func TestMultiProtoSynchronisation68Full(t *testing.T) { testMultiProtoSync(t, eth.ETH68, FullSync) }
func TestMultiProtoSynchronisation66Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH66, SnapSync) } func TestMultiProtoSynchronisation68Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH68, SnapSync) }
func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) } func TestMultiProtoSynchronisation68Light(t *testing.T) { testMultiProtoSync(t, eth.ETH68, LightSync) }
func TestMultiProtoSynchronisation67Full(t *testing.T) { testMultiProtoSync(t, eth.ETH67, FullSync) } func TestMultiProtoSynchronisation67Full(t *testing.T) { testMultiProtoSync(t, eth.ETH67, FullSync) }
func TestMultiProtoSynchronisation67Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH67, SnapSync) } func TestMultiProtoSynchronisation67Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH67, SnapSync) }
func TestMultiProtoSynchronisation67Light(t *testing.T) { testMultiProtoSync(t, eth.ETH67, LightSync) } func TestMultiProtoSynchronisation67Light(t *testing.T) { testMultiProtoSync(t, eth.ETH67, LightSync) }
@ -750,7 +750,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
chain := testChainBase.shorten(blockCacheMaxItems - 15) chain := testChainBase.shorten(blockCacheMaxItems - 15)
// Create peers of every type // Create peers of every type
tester.newPeer("peer 66", eth.ETH66, chain.blocks[1:]) tester.newPeer("peer 68", eth.ETH68, chain.blocks[1:])
tester.newPeer("peer 67", eth.ETH67, chain.blocks[1:]) tester.newPeer("peer 67", eth.ETH67, chain.blocks[1:])
// Synchronise with the requested peer and make sure all blocks were retrieved // Synchronise with the requested peer and make sure all blocks were retrieved
@ -760,7 +760,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
assertOwnChain(t, tester, len(chain.blocks)) assertOwnChain(t, tester, len(chain.blocks))
// Check that no peers have been dropped off // Check that no peers have been dropped off
for _, version := range []int{66, 67} { for _, version := range []int{68, 67} {
peer := fmt.Sprintf("peer %d", version) peer := fmt.Sprintf("peer %d", version)
if _, ok := tester.peers[peer]; !ok { if _, ok := tester.peers[peer]; !ok {
t.Errorf("%s dropped", peer) t.Errorf("%s dropped", peer)
@ -770,9 +770,9 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
// Tests that if a block is empty (e.g. header only), no body request should be // Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself. // made, and instead the header should be assembled into a whole block in itself.
func TestEmptyShortCircuit66Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, FullSync) } func TestEmptyShortCircuit68Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, FullSync) }
func TestEmptyShortCircuit66Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, SnapSync) } func TestEmptyShortCircuit68Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, SnapSync) }
func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) } func TestEmptyShortCircuit68Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, LightSync) }
func TestEmptyShortCircuit67Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, FullSync) } func TestEmptyShortCircuit67Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, FullSync) }
func TestEmptyShortCircuit67Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, SnapSync) } func TestEmptyShortCircuit67Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, SnapSync) }
func TestEmptyShortCircuit67Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, LightSync) } func TestEmptyShortCircuit67Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, LightSync) }
@ -821,9 +821,9 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
// Tests that headers are enqueued continuously, preventing malicious nodes from // Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains. // stalling the downloader by feeding gapped header chains.
func TestMissingHeaderAttack66Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, FullSync) } func TestMissingHeaderAttack68Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, FullSync) }
func TestMissingHeaderAttack66Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, SnapSync) } func TestMissingHeaderAttack68Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, SnapSync) }
func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) } func TestMissingHeaderAttack68Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, LightSync) }
func TestMissingHeaderAttack67Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, FullSync) } func TestMissingHeaderAttack67Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, FullSync) }
func TestMissingHeaderAttack67Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, SnapSync) } func TestMissingHeaderAttack67Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, SnapSync) }
func TestMissingHeaderAttack67Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, LightSync) } func TestMissingHeaderAttack67Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, LightSync) }
@ -850,9 +850,9 @@ func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
// Tests that if requested headers are shifted (i.e. first is missing), the queue // Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering. // detects the invalid numbering.
func TestShiftedHeaderAttack66Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, FullSync) } func TestShiftedHeaderAttack68Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, FullSync) }
func TestShiftedHeaderAttack66Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, SnapSync) } func TestShiftedHeaderAttack68Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, SnapSync) }
func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) } func TestShiftedHeaderAttack68Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, LightSync) }
func TestShiftedHeaderAttack67Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, FullSync) } func TestShiftedHeaderAttack67Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, FullSync) }
func TestShiftedHeaderAttack67Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, SnapSync) } func TestShiftedHeaderAttack67Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, SnapSync) }
func TestShiftedHeaderAttack67Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, LightSync) } func TestShiftedHeaderAttack67Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, LightSync) }
@ -880,14 +880,14 @@ func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
// Tests that a peer advertising a high TD doesn't get to stall the downloader // Tests that a peer advertising a high TD doesn't get to stall the downloader
// afterwards by not sending any useful hashes. // afterwards by not sending any useful hashes.
func TestHighTDStarvationAttack66Full(t *testing.T) { func TestHighTDStarvationAttack68Full(t *testing.T) {
testHighTDStarvationAttack(t, eth.ETH66, FullSync) testHighTDStarvationAttack(t, eth.ETH68, FullSync)
} }
func TestHighTDStarvationAttack66Snap(t *testing.T) { func TestHighTDStarvationAttack68Snap(t *testing.T) {
testHighTDStarvationAttack(t, eth.ETH66, SnapSync) testHighTDStarvationAttack(t, eth.ETH68, SnapSync)
} }
func TestHighTDStarvationAttack66Light(t *testing.T) { func TestHighTDStarvationAttack68Light(t *testing.T) {
testHighTDStarvationAttack(t, eth.ETH66, LightSync) testHighTDStarvationAttack(t, eth.ETH68, LightSync)
} }
func TestHighTDStarvationAttack67Full(t *testing.T) { func TestHighTDStarvationAttack67Full(t *testing.T) {
testHighTDStarvationAttack(t, eth.ETH67, FullSync) testHighTDStarvationAttack(t, eth.ETH67, FullSync)
@ -911,7 +911,7 @@ func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
} }
// Tests that misbehaving peers are disconnected, whilst behaving ones are not. // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) } func TestBlockHeaderAttackerDropping68(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH68) }
func TestBlockHeaderAttackerDropping67(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH67) } func TestBlockHeaderAttackerDropping67(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH67) }
func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) { func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
@ -960,9 +960,9 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
// Tests that synchronisation progress (origin block number, current block number // Tests that synchronisation progress (origin block number, current block number
// and highest block number) is tracked and updated correctly. // and highest block number) is tracked and updated correctly.
func TestSyncProgress66Full(t *testing.T) { testSyncProgress(t, eth.ETH66, FullSync) } func TestSyncProgress68Full(t *testing.T) { testSyncProgress(t, eth.ETH68, FullSync) }
func TestSyncProgress66Snap(t *testing.T) { testSyncProgress(t, eth.ETH66, SnapSync) } func TestSyncProgress68Snap(t *testing.T) { testSyncProgress(t, eth.ETH68, SnapSync) }
func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) } func TestSyncProgress68Light(t *testing.T) { testSyncProgress(t, eth.ETH68, LightSync) }
func TestSyncProgress67Full(t *testing.T) { testSyncProgress(t, eth.ETH67, FullSync) } func TestSyncProgress67Full(t *testing.T) { testSyncProgress(t, eth.ETH67, FullSync) }
func TestSyncProgress67Snap(t *testing.T) { testSyncProgress(t, eth.ETH67, SnapSync) } func TestSyncProgress67Snap(t *testing.T) { testSyncProgress(t, eth.ETH67, SnapSync) }
func TestSyncProgress67Light(t *testing.T) { testSyncProgress(t, eth.ETH67, LightSync) } func TestSyncProgress67Light(t *testing.T) { testSyncProgress(t, eth.ETH67, LightSync) }
@ -1040,9 +1040,9 @@ func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.Sync
// Tests that synchronisation progress (origin block number and highest block // Tests that synchronisation progress (origin block number and highest block
// number) is tracked and updated correctly in case of a fork (or manual head // number) is tracked and updated correctly in case of a fork (or manual head
// revertal). // revertal).
func TestForkedSyncProgress66Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, FullSync) } func TestForkedSyncProgress68Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, FullSync) }
func TestForkedSyncProgress66Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, SnapSync) } func TestForkedSyncProgress68Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, SnapSync) }
func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) } func TestForkedSyncProgress68Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, LightSync) }
func TestForkedSyncProgress67Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, FullSync) } func TestForkedSyncProgress67Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, FullSync) }
func TestForkedSyncProgress67Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, SnapSync) } func TestForkedSyncProgress67Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, SnapSync) }
func TestForkedSyncProgress67Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, LightSync) } func TestForkedSyncProgress67Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, LightSync) }
@ -1114,9 +1114,9 @@ func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
// Tests that if synchronisation is aborted due to some failure, then the progress // Tests that if synchronisation is aborted due to some failure, then the progress
// origin is not updated in the next sync cycle, as it should be considered the // origin is not updated in the next sync cycle, as it should be considered the
// continuation of the previous sync and not a new instance. // continuation of the previous sync and not a new instance.
func TestFailedSyncProgress66Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, FullSync) } func TestFailedSyncProgress68Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, FullSync) }
func TestFailedSyncProgress66Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, SnapSync) } func TestFailedSyncProgress68Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, SnapSync) }
func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) } func TestFailedSyncProgress68Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, LightSync) }
func TestFailedSyncProgress67Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, FullSync) } func TestFailedSyncProgress67Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, FullSync) }
func TestFailedSyncProgress67Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, SnapSync) } func TestFailedSyncProgress67Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, SnapSync) }
func TestFailedSyncProgress67Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, LightSync) } func TestFailedSyncProgress67Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, LightSync) }
@ -1183,9 +1183,9 @@ func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
// Tests that if an attacker fakes a chain height, after the attack is detected, // Tests that if an attacker fakes a chain height, after the attack is detected,
// the progress height is successfully reduced at the next sync invocation. // the progress height is successfully reduced at the next sync invocation.
func TestFakedSyncProgress66Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, FullSync) } func TestFakedSyncProgress68Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, FullSync) }
func TestFakedSyncProgress66Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, SnapSync) } func TestFakedSyncProgress68Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, SnapSync) }
func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) } func TestFakedSyncProgress68Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, LightSync) }
func TestFakedSyncProgress67Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, FullSync) } func TestFakedSyncProgress67Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, FullSync) }
func TestFakedSyncProgress67Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, SnapSync) } func TestFakedSyncProgress67Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, SnapSync) }
func TestFakedSyncProgress67Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, LightSync) } func TestFakedSyncProgress67Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, LightSync) }
@ -1330,8 +1330,10 @@ func TestRemoteHeaderRequestSpan(t *testing.T) {
// Tests that peers below a pre-configured checkpoint block are prevented from // Tests that peers below a pre-configured checkpoint block are prevented from
// being fast-synced from, avoiding potential cheap eclipse attacks. // being fast-synced from, avoiding potential cheap eclipse attacks.
func TestBeaconSync66Full(t *testing.T) { testBeaconSync(t, eth.ETH66, FullSync) } func TestBeaconSync68Full(t *testing.T) { testBeaconSync(t, eth.ETH68, FullSync) }
func TestBeaconSync66Snap(t *testing.T) { testBeaconSync(t, eth.ETH66, SnapSync) } func TestBeaconSync68Snap(t *testing.T) { testBeaconSync(t, eth.ETH68, SnapSync) }
func TestBeaconSync67Full(t *testing.T) { testBeaconSync(t, eth.ETH67, FullSync) }
func TestBeaconSync67Snap(t *testing.T) { testBeaconSync(t, eth.ETH67, SnapSync) }
func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) { func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) {
//log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) //log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))

View File

@ -58,14 +58,14 @@ func (d *Downloader) fetchHeadersByHash(p *peerConnection, hash common.Hash, amo
case res := <-resCh: case res := <-resCh:
// Headers successfully retrieved, update the metrics // Headers successfully retrieved, update the metrics
headerReqTimer.Update(time.Since(start)) headerReqTimer.Update(time.Since(start))
headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersPacket)))) headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersRequest))))
// Don't reject the packet even if it turns out to be bad, downloader will // Don't reject the packet even if it turns out to be bad, downloader will
// disconnect the peer on its own terms. Simply delivery the headers to // disconnect the peer on its own terms. Simply delivery the headers to
// be processed by the caller // be processed by the caller
res.Done <- nil res.Done <- nil
return *res.Res.(*eth.BlockHeadersPacket), res.Meta.([]common.Hash), nil return *res.Res.(*eth.BlockHeadersRequest), res.Meta.([]common.Hash), nil
} }
} }
@ -103,13 +103,13 @@ func (d *Downloader) fetchHeadersByNumber(p *peerConnection, number uint64, amou
case res := <-resCh: case res := <-resCh:
// Headers successfully retrieved, update the metrics // Headers successfully retrieved, update the metrics
headerReqTimer.Update(time.Since(start)) headerReqTimer.Update(time.Since(start))
headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersPacket)))) headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersRequest))))
// Don't reject the packet even if it turns out to be bad, downloader will // Don't reject the packet even if it turns out to be bad, downloader will
// disconnect the peer on its own terms. Simply delivery the headers to // disconnect the peer on its own terms. Simply delivery the headers to
// be processed by the caller // be processed by the caller
res.Done <- nil res.Done <- nil
return *res.Res.(*eth.BlockHeadersPacket), res.Meta.([]common.Hash), nil return *res.Res.(*eth.BlockHeadersRequest), res.Meta.([]common.Hash), nil
} }
} }

View File

@ -89,7 +89,7 @@ func (q *bodyQueue) request(peer *peerConnection, req *fetchRequest, resCh chan
// deliver is responsible for taking a generic response packet from the concurrent // deliver is responsible for taking a generic response packet from the concurrent
// fetcher, unpacking the body data and delivering it to the downloader's queue. // fetcher, unpacking the body data and delivering it to the downloader's queue.
func (q *bodyQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { func (q *bodyQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) {
txs, uncles, withdrawals := packet.Res.(*eth.BlockBodiesPacket).Unpack() txs, uncles, withdrawals := packet.Res.(*eth.BlockBodiesResponse).Unpack()
hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes, withdrawal hashes} hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes, withdrawal hashes}
accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], uncles, hashsets[1], withdrawals, hashsets[2]) accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], uncles, hashsets[1], withdrawals, hashsets[2])

View File

@ -81,7 +81,7 @@ func (q *headerQueue) request(peer *peerConnection, req *fetchRequest, resCh cha
// deliver is responsible for taking a generic response packet from the concurrent // deliver is responsible for taking a generic response packet from the concurrent
// fetcher, unpacking the header data and delivering it to the downloader's queue. // fetcher, unpacking the header data and delivering it to the downloader's queue.
func (q *headerQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { func (q *headerQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) {
headers := *packet.Res.(*eth.BlockHeadersPacket) headers := *packet.Res.(*eth.BlockHeadersRequest)
hashes := packet.Meta.([]common.Hash) hashes := packet.Meta.([]common.Hash)
accepted, err := q.queue.DeliverHeaders(peer.id, headers, hashes, q.headerProcCh) accepted, err := q.queue.DeliverHeaders(peer.id, headers, hashes, q.headerProcCh)

View File

@ -88,7 +88,7 @@ func (q *receiptQueue) request(peer *peerConnection, req *fetchRequest, resCh ch
// deliver is responsible for taking a generic response packet from the concurrent // deliver is responsible for taking a generic response packet from the concurrent
// fetcher, unpacking the receipt data and delivering it to the downloader's queue. // fetcher, unpacking the receipt data and delivering it to the downloader's queue.
func (q *receiptQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { func (q *receiptQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) {
receipts := *packet.Res.(*eth.ReceiptsPacket) receipts := *packet.Res.(*eth.ReceiptsResponse)
hashes := packet.Meta.([]common.Hash) // {receipt hashes} hashes := packet.Meta.([]common.Hash) // {receipt hashes}
accepted, err := q.queue.DeliverReceipts(peer.id, receipts, hashes) accepted, err := q.queue.DeliverReceipts(peer.id, receipts, hashes)

View File

@ -55,39 +55,16 @@ type peerConnection struct {
lock sync.RWMutex lock sync.RWMutex
} }
// LightPeer encapsulates the methods required to synchronise with a remote light peer. // Peer encapsulates the methods required to synchronise with a remote full peer.
type LightPeer interface { type Peer interface {
Head() (common.Hash, *big.Int) Head() (common.Hash, *big.Int)
RequestHeadersByHash(common.Hash, int, int, bool, chan *eth.Response) (*eth.Request, error) RequestHeadersByHash(common.Hash, int, int, bool, chan *eth.Response) (*eth.Request, error)
RequestHeadersByNumber(uint64, int, int, bool, chan *eth.Response) (*eth.Request, error) RequestHeadersByNumber(uint64, int, int, bool, chan *eth.Response) (*eth.Request, error)
}
// Peer encapsulates the methods required to synchronise with a remote full peer.
type Peer interface {
LightPeer
RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error) RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error)
RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error) RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error)
} }
// lightPeerWrapper wraps a LightPeer struct, stubbing out the Peer-only methods.
type lightPeerWrapper struct {
peer LightPeer
}
func (w *lightPeerWrapper) Head() (common.Hash, *big.Int) { return w.peer.Head() }
func (w *lightPeerWrapper) RequestHeadersByHash(h common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
return w.peer.RequestHeadersByHash(h, amount, skip, reverse, sink)
}
func (w *lightPeerWrapper) RequestHeadersByNumber(i uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
return w.peer.RequestHeadersByNumber(i, amount, skip, reverse, sink)
}
func (w *lightPeerWrapper) RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error) {
panic("RequestBodies not supported in light client mode sync")
}
func (w *lightPeerWrapper) RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error) {
panic("RequestReceipts not supported in light client mode sync")
}
// newPeerConnection creates a new downloader peer. // newPeerConnection creates a new downloader peer.
func newPeerConnection(id string, version uint, peer Peer, logger log.Logger) *peerConnection { func newPeerConnection(id string, version uint, peer Peer, logger log.Logger) *peerConnection {
return &peerConnection{ return &peerConnection{

View File

@ -794,7 +794,7 @@ func (s *skeleton) executeTask(peer *peerConnection, req *headerRequest) {
case res := <-resCh: case res := <-resCh:
// Headers successfully retrieved, update the metrics // Headers successfully retrieved, update the metrics
headers := *res.Res.(*eth.BlockHeadersPacket) headers := *res.Res.(*eth.BlockHeadersRequest)
headerReqTimer.Update(time.Since(start)) headerReqTimer.Update(time.Since(start))
s.peers.rates.Update(peer.id, eth.BlockHeadersMsg, res.Time, len(headers)) s.peers.rates.Update(peer.id, eth.BlockHeadersMsg, res.Time, len(headers))

View File

@ -173,7 +173,7 @@ func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, ski
} }
res := &eth.Response{ res := &eth.Response{
Req: req, Req: req,
Res: (*eth.BlockHeadersPacket)(&headers), Res: (*eth.BlockHeadersRequest)(&headers),
Meta: hashes, Meta: hashes,
Time: 1, Time: 1,
Done: make(chan error), Done: make(chan error),
@ -811,7 +811,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) {
// Create a peer set to feed headers through // Create a peer set to feed headers through
peerset := newPeerSet() peerset := newPeerSet()
for _, peer := range tt.peers { for _, peer := range tt.peers {
peerset.Register(newPeerConnection(peer.id, eth.ETH66, peer, log.New("id", peer.id))) peerset.Register(newPeerConnection(peer.id, eth.ETH67, peer, log.New("id", peer.id)))
} }
// Create a peer dropper to track malicious peers // Create a peer dropper to track malicious peers
dropped := make(map[string]int) dropped := make(map[string]int)
@ -913,7 +913,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) {
skeleton.Sync(tt.newHead, nil, true) skeleton.Sync(tt.newHead, nil, true)
} }
if tt.newPeer != nil { if tt.newPeer != nil {
if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH66, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil { if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH67, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil {
t.Errorf("test %d: failed to register new peer: %v", i, err) t.Errorf("test %d: failed to register new peer: %v", i, err)
} }
} }

View File

@ -27,7 +27,6 @@ import (
"github.com/ethereum/go-ethereum/consensus/clique" "github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/txpool/blobpool" "github.com/ethereum/go-ethereum/core/txpool/blobpool"
"github.com/ethereum/go-ethereum/core/txpool/legacypool" "github.com/ethereum/go-ethereum/core/txpool/legacypool"
"github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/downloader"
@ -65,7 +64,6 @@ var Defaults = Config{
TxLookupLimit: 2350000, TxLookupLimit: 2350000,
TransactionHistory: 2350000, TransactionHistory: 2350000,
StateHistory: params.FullImmutabilityThreshold, StateHistory: params.FullImmutabilityThreshold,
StateScheme: rawdb.HashScheme,
LightPeers: 100, LightPeers: 100,
DatabaseCache: 512, DatabaseCache: 512,
TrieCleanCache: 154, TrieCleanCache: 154,
@ -84,7 +82,7 @@ var Defaults = Config{
//go:generate go run github.com/fjl/gencodec -type Config -formats toml -out gen_config.go //go:generate go run github.com/fjl/gencodec -type Config -formats toml -out gen_config.go
// Config contains configuration options for of the ETH and LES protocols. // Config contains configuration options for ETH and LES protocols.
type Config struct { type Config struct {
// The genesis block, which is inserted if the database is empty. // The genesis block, which is inserted if the database is empty.
// If nil, the Ethereum main net block is used. // If nil, the Ethereum main net block is used.
@ -106,7 +104,11 @@ type Config struct {
TxLookupLimit uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved. TxLookupLimit uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved.
TransactionHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved. TransactionHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved.
StateHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose state histories are reserved. StateHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose state histories are reserved.
StateScheme string `toml:",omitempty"` // State scheme used to store ethereum state and merkle trie nodes on top
// State scheme represents the scheme used to store ethereum states and trie
// nodes on top. It can be 'hash', 'path', or none which means use the scheme
// consistent with persistent state.
StateScheme string `toml:",omitempty"`
// RequiredBlocks is a set of block number -> hash mappings which must be in the // RequiredBlocks is a set of block number -> hash mappings which must be in the
// canonical chain of all remote peers. Setting the option makes geth verify the // canonical chain of all remote peers. Setting the option makes geth verify the
@ -184,7 +186,7 @@ func CreateConsensusEngine(config *params.ChainConfig, db ethdb.Database) (conse
return beacon.New(clique.New(config.Clique, db)), nil return beacon.New(clique.New(config.Clique, db)), nil
} }
// If defaulting to proof-of-work, enforce an already merged network since // If defaulting to proof-of-work, enforce an already merged network since
// we cannot run PoW algorithms and more, so we cannot even follow a chain // we cannot run PoW algorithms anymore, so we cannot even follow a chain
// not coordinated by a beacon node. // not coordinated by a beacon node.
if !config.TerminalTotalDifficultyPassed { if !config.TerminalTotalDifficultyPassed {
return nil, errors.New("ethash is only supported as a historical component of already merged networks") return nil, errors.New("ethash is only supported as a historical component of already merged networks")

View File

@ -483,7 +483,7 @@ func (f *BlockFetcher) loop() {
select { select {
case res := <-resCh: case res := <-resCh:
res.Done <- nil res.Done <- nil
f.FilterHeaders(peer, *res.Res.(*eth.BlockHeadersPacket), time.Now().Add(res.Time)) f.FilterHeaders(peer, *res.Res.(*eth.BlockHeadersRequest), time.Now().Add(res.Time))
case <-timeout.C: case <-timeout.C:
// The peer didn't respond in time. The request // The peer didn't respond in time. The request
@ -541,7 +541,7 @@ func (f *BlockFetcher) loop() {
case res := <-resCh: case res := <-resCh:
res.Done <- nil res.Done <- nil
// Ignoring withdrawals here, since the block fetcher is not used post-merge. // Ignoring withdrawals here, since the block fetcher is not used post-merge.
txs, uncles, _ := res.Res.(*eth.BlockBodiesPacket).Unpack() txs, uncles, _ := res.Res.(*eth.BlockBodiesResponse).Unpack()
f.FilterBodies(peer, txs, uncles, time.Now()) f.FilterBodies(peer, txs, uncles, time.Now())
case <-timeout.C: case <-timeout.C:

View File

@ -213,7 +213,7 @@ func (f *fetcherTester) makeHeaderFetcher(peer string, blocks map[common.Hash]*t
} }
res := &eth.Response{ res := &eth.Response{
Req: req, Req: req,
Res: (*eth.BlockHeadersPacket)(&headers), Res: (*eth.BlockHeadersRequest)(&headers),
Time: drift, Time: drift,
Done: make(chan error, 1), // Ignore the returned status Done: make(chan error, 1), // Ignore the returned status
} }
@ -255,7 +255,7 @@ func (f *fetcherTester) makeBodyFetcher(peer string, blocks map[common.Hash]*typ
} }
res := &eth.Response{ res := &eth.Response{
Req: req, Req: req,
Res: (*eth.BlockBodiesPacket)(&bodies), Res: (*eth.BlockBodiesResponse)(&bodies),
Time: drift, Time: drift,
Done: make(chan error, 1), // Ignore the returned status Done: make(chan error, 1), // Ignore the returned status
} }

View File

@ -20,6 +20,7 @@ import (
"bytes" "bytes"
"errors" "errors"
"fmt" "fmt"
"math"
mrand "math/rand" mrand "math/rand"
"sort" "sort"
"time" "time"
@ -38,16 +39,22 @@ const (
// can announce in a short time. // can announce in a short time.
maxTxAnnounces = 4096 maxTxAnnounces = 4096
// maxTxRetrievals is the maximum transaction number can be fetched in one // maxTxRetrievals is the maximum number of transactions that can be fetched
// request. The rationale to pick 256 is: // in one request. The rationale for picking 256 is to have a reasonabe lower
// - In eth protocol, the softResponseLimit is 2MB. Nowadays according to // bound for the transferred data (don't waste RTTs, transfer more meaningful
// Etherscan the average transaction size is around 200B, so in theory // batch sizes), but also have an upper bound on the sequentiality to allow
// we can include lots of transaction in a single protocol packet. // using our entire peerset for deliveries.
// - However the maximum size of a single transaction is raised to 128KB, //
// so pick a middle value here to ensure we can maximize the efficiency // This number also acts as a failsafe against malicious announces which might
// of the retrieval and response size overflow won't happen in most cases. // cause us to request more data than we'd expect.
maxTxRetrievals = 256 maxTxRetrievals = 256
// maxTxRetrievalSize is the max number of bytes that delivered transactions
// should weigh according to the announcements. The 128KB was chosen to limit
// retrieving a maximum of one blob transaction at a time to minimize hogging
// a connection between two peers.
maxTxRetrievalSize = 128 * 1024
// maxTxUnderpricedSetSize is the size of the underpriced transaction set that // maxTxUnderpricedSetSize is the size of the underpriced transaction set that
// is used to track recent transactions that have been dropped so we don't // is used to track recent transactions that have been dropped so we don't
// re-request them. // re-request them.
@ -105,6 +112,14 @@ var (
type txAnnounce struct { type txAnnounce struct {
origin string // Identifier of the peer originating the notification origin string // Identifier of the peer originating the notification
hashes []common.Hash // Batch of transaction hashes being announced hashes []common.Hash // Batch of transaction hashes being announced
metas []*txMetadata // Batch of metadatas associated with the hashes (nil before eth/68)
}
// txMetadata is a set of extra data transmitted along the announcement for better
// fetch scheduling.
type txMetadata struct {
kind byte // Transaction consensus type
size uint32 // Transaction size in bytes
} }
// txRequest represents an in-flight transaction retrieval request destined to // txRequest represents an in-flight transaction retrieval request destined to
@ -120,6 +135,7 @@ type txRequest struct {
type txDelivery struct { type txDelivery struct {
origin string // Identifier of the peer originating the notification origin string // Identifier of the peer originating the notification
hashes []common.Hash // Batch of transaction hashes having been delivered hashes []common.Hash // Batch of transaction hashes having been delivered
metas []txMetadata // Batch of metadatas associated with the delivered hashes
direct bool // Whether this is a direct reply or a broadcast direct bool // Whether this is a direct reply or a broadcast
} }
@ -157,11 +173,11 @@ type TxFetcher struct {
// broadcast without needing explicit request/reply round trips. // broadcast without needing explicit request/reply round trips.
waitlist map[common.Hash]map[string]struct{} // Transactions waiting for an potential broadcast waitlist map[common.Hash]map[string]struct{} // Transactions waiting for an potential broadcast
waittime map[common.Hash]mclock.AbsTime // Timestamps when transactions were added to the waitlist waittime map[common.Hash]mclock.AbsTime // Timestamps when transactions were added to the waitlist
waitslots map[string]map[common.Hash]struct{} // Waiting announcements grouped by peer (DoS protection) waitslots map[string]map[common.Hash]*txMetadata // Waiting announcements grouped by peer (DoS protection)
// Stage 2: Queue of transactions that waiting to be allocated to some peer // Stage 2: Queue of transactions that waiting to be allocated to some peer
// to be retrieved directly. // to be retrieved directly.
announces map[string]map[common.Hash]struct{} // Set of announced transactions, grouped by origin peer announces map[string]map[common.Hash]*txMetadata // Set of announced transactions, grouped by origin peer
announced map[common.Hash]map[string]struct{} // Set of download locations, grouped by transaction hash announced map[common.Hash]map[string]struct{} // Set of download locations, grouped by transaction hash
// Stage 3: Set of transactions currently being retrieved, some which may be // Stage 3: Set of transactions currently being retrieved, some which may be
@ -175,6 +191,7 @@ type TxFetcher struct {
hasTx func(common.Hash) bool // Retrieves a tx from the local txpool hasTx func(common.Hash) bool // Retrieves a tx from the local txpool
addTxs func([]*types.Transaction) []error // Insert a batch of transactions into local txpool addTxs func([]*types.Transaction) []error // Insert a batch of transactions into local txpool
fetchTxs func(string, []common.Hash) error // Retrieves a set of txs from a remote peer fetchTxs func(string, []common.Hash) error // Retrieves a set of txs from a remote peer
dropPeer func(string) // Drops a peer in case of announcement violation
step chan struct{} // Notification channel when the fetcher loop iterates step chan struct{} // Notification channel when the fetcher loop iterates
clock mclock.Clock // Time wrapper to simulate in tests clock mclock.Clock // Time wrapper to simulate in tests
@ -183,14 +200,14 @@ type TxFetcher struct {
// NewTxFetcher creates a transaction fetcher to retrieve transaction // NewTxFetcher creates a transaction fetcher to retrieve transaction
// based on hash announcements. // based on hash announcements.
func NewTxFetcher(hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error) *TxFetcher { func NewTxFetcher(hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, dropPeer func(string)) *TxFetcher {
return NewTxFetcherForTests(hasTx, addTxs, fetchTxs, mclock.System{}, nil) return NewTxFetcherForTests(hasTx, addTxs, fetchTxs, dropPeer, mclock.System{}, nil)
} }
// NewTxFetcherForTests is a testing method to mock out the realtime clock with // NewTxFetcherForTests is a testing method to mock out the realtime clock with
// a simulated version and the internal randomness with a deterministic one. // a simulated version and the internal randomness with a deterministic one.
func NewTxFetcherForTests( func NewTxFetcherForTests(
hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, dropPeer func(string),
clock mclock.Clock, rand *mrand.Rand) *TxFetcher { clock mclock.Clock, rand *mrand.Rand) *TxFetcher {
return &TxFetcher{ return &TxFetcher{
notify: make(chan *txAnnounce), notify: make(chan *txAnnounce),
@ -199,8 +216,8 @@ func NewTxFetcherForTests(
quit: make(chan struct{}), quit: make(chan struct{}),
waitlist: make(map[common.Hash]map[string]struct{}), waitlist: make(map[common.Hash]map[string]struct{}),
waittime: make(map[common.Hash]mclock.AbsTime), waittime: make(map[common.Hash]mclock.AbsTime),
waitslots: make(map[string]map[common.Hash]struct{}), waitslots: make(map[string]map[common.Hash]*txMetadata),
announces: make(map[string]map[common.Hash]struct{}), announces: make(map[string]map[common.Hash]*txMetadata),
announced: make(map[common.Hash]map[string]struct{}), announced: make(map[common.Hash]map[string]struct{}),
fetching: make(map[common.Hash]string), fetching: make(map[common.Hash]string),
requests: make(map[string]*txRequest), requests: make(map[string]*txRequest),
@ -209,6 +226,7 @@ func NewTxFetcherForTests(
hasTx: hasTx, hasTx: hasTx,
addTxs: addTxs, addTxs: addTxs,
fetchTxs: fetchTxs, fetchTxs: fetchTxs,
dropPeer: dropPeer,
clock: clock, clock: clock,
rand: rand, rand: rand,
} }
@ -216,7 +234,7 @@ func NewTxFetcherForTests(
// Notify announces the fetcher of the potential availability of a new batch of // Notify announces the fetcher of the potential availability of a new batch of
// transactions in the network. // transactions in the network.
func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error { func (f *TxFetcher) Notify(peer string, types []byte, sizes []uint32, hashes []common.Hash) error {
// Keep track of all the announced transactions // Keep track of all the announced transactions
txAnnounceInMeter.Mark(int64(len(hashes))) txAnnounceInMeter.Mark(int64(len(hashes)))
@ -226,28 +244,35 @@ func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error {
// still valuable to check here because it runs concurrent to the internal // still valuable to check here because it runs concurrent to the internal
// loop, so anything caught here is time saved internally. // loop, so anything caught here is time saved internally.
var ( var (
unknowns = make([]common.Hash, 0, len(hashes)) unknownHashes = make([]common.Hash, 0, len(hashes))
unknownMetas = make([]*txMetadata, 0, len(hashes))
duplicate int64 duplicate int64
underpriced int64 underpriced int64
) )
for _, hash := range hashes { for i, hash := range hashes {
switch { switch {
case f.hasTx(hash): case f.hasTx(hash):
duplicate++ duplicate++
case f.isKnownUnderpriced(hash): case f.isKnownUnderpriced(hash):
underpriced++ underpriced++
default: default:
unknowns = append(unknowns, hash) unknownHashes = append(unknownHashes, hash)
if types == nil {
unknownMetas = append(unknownMetas, nil)
} else {
unknownMetas = append(unknownMetas, &txMetadata{kind: types[i], size: sizes[i]})
}
} }
} }
txAnnounceKnownMeter.Mark(duplicate) txAnnounceKnownMeter.Mark(duplicate)
txAnnounceUnderpricedMeter.Mark(underpriced) txAnnounceUnderpricedMeter.Mark(underpriced)
// If anything's left to announce, push it into the internal loop // If anything's left to announce, push it into the internal loop
if len(unknowns) == 0 { if len(unknownHashes) == 0 {
return nil return nil
} }
announce := &txAnnounce{origin: peer, hashes: unknowns} announce := &txAnnounce{origin: peer, hashes: unknownHashes, metas: unknownMetas}
select { select {
case f.notify <- announce: case f.notify <- announce:
return nil return nil
@ -290,6 +315,7 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool)
// re-requesting them and dropping the peer in case of malicious transfers. // re-requesting them and dropping the peer in case of malicious transfers.
var ( var (
added = make([]common.Hash, 0, len(txs)) added = make([]common.Hash, 0, len(txs))
metas = make([]txMetadata, 0, len(txs))
) )
// proceed in batches // proceed in batches
for i := 0; i < len(txs); i += 128 { for i := 0; i < len(txs); i += 128 {
@ -325,6 +351,10 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool)
otherreject++ otherreject++
} }
added = append(added, batch[j].Hash()) added = append(added, batch[j].Hash())
metas = append(metas, txMetadata{
kind: batch[j].Type(),
size: uint32(batch[j].Size()),
})
} }
knownMeter.Mark(duplicate) knownMeter.Mark(duplicate)
underpricedMeter.Mark(underpriced) underpricedMeter.Mark(underpriced)
@ -337,7 +367,7 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool)
} }
} }
select { select {
case f.cleanup <- &txDelivery{origin: peer, hashes: added, direct: direct}: case f.cleanup <- &txDelivery{origin: peer, hashes: added, metas: metas, direct: direct}:
return nil return nil
case <-f.quit: case <-f.quit:
return errTerminated return errTerminated
@ -394,13 +424,15 @@ func (f *TxFetcher) loop() {
want := used + len(ann.hashes) want := used + len(ann.hashes)
if want > maxTxAnnounces { if want > maxTxAnnounces {
txAnnounceDOSMeter.Mark(int64(want - maxTxAnnounces)) txAnnounceDOSMeter.Mark(int64(want - maxTxAnnounces))
ann.hashes = ann.hashes[:want-maxTxAnnounces] ann.hashes = ann.hashes[:want-maxTxAnnounces]
ann.metas = ann.metas[:want-maxTxAnnounces]
} }
// All is well, schedule the remainder of the transactions // All is well, schedule the remainder of the transactions
idleWait := len(f.waittime) == 0 idleWait := len(f.waittime) == 0
_, oldPeer := f.announces[ann.origin] _, oldPeer := f.announces[ann.origin]
for _, hash := range ann.hashes { for i, hash := range ann.hashes {
// If the transaction is already downloading, add it to the list // If the transaction is already downloading, add it to the list
// of possible alternates (in case the current retrieval fails) and // of possible alternates (in case the current retrieval fails) and
// also account it for the peer. // also account it for the peer.
@ -409,9 +441,9 @@ func (f *TxFetcher) loop() {
// Stage 2 and 3 share the set of origins per tx // Stage 2 and 3 share the set of origins per tx
if announces := f.announces[ann.origin]; announces != nil { if announces := f.announces[ann.origin]; announces != nil {
announces[hash] = struct{}{} announces[hash] = ann.metas[i]
} else { } else {
f.announces[ann.origin] = map[common.Hash]struct{}{hash: {}} f.announces[ann.origin] = map[common.Hash]*txMetadata{hash: ann.metas[i]}
} }
continue continue
} }
@ -422,9 +454,9 @@ func (f *TxFetcher) loop() {
// Stage 2 and 3 share the set of origins per tx // Stage 2 and 3 share the set of origins per tx
if announces := f.announces[ann.origin]; announces != nil { if announces := f.announces[ann.origin]; announces != nil {
announces[hash] = struct{}{} announces[hash] = ann.metas[i]
} else { } else {
f.announces[ann.origin] = map[common.Hash]struct{}{hash: {}} f.announces[ann.origin] = map[common.Hash]*txMetadata{hash: ann.metas[i]}
} }
continue continue
} }
@ -432,12 +464,18 @@ func (f *TxFetcher) loop() {
// yet downloading, add the peer as an alternate origin in the // yet downloading, add the peer as an alternate origin in the
// waiting list. // waiting list.
if f.waitlist[hash] != nil { if f.waitlist[hash] != nil {
// Ignore double announcements from the same peer. This is
// especially important if metadata is also passed along to
// prevent malicious peers flip-flopping good/bad values.
if _, ok := f.waitlist[hash][ann.origin]; ok {
continue
}
f.waitlist[hash][ann.origin] = struct{}{} f.waitlist[hash][ann.origin] = struct{}{}
if waitslots := f.waitslots[ann.origin]; waitslots != nil { if waitslots := f.waitslots[ann.origin]; waitslots != nil {
waitslots[hash] = struct{}{} waitslots[hash] = ann.metas[i]
} else { } else {
f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: {}} f.waitslots[ann.origin] = map[common.Hash]*txMetadata{hash: ann.metas[i]}
} }
continue continue
} }
@ -446,9 +484,9 @@ func (f *TxFetcher) loop() {
f.waittime[hash] = f.clock.Now() f.waittime[hash] = f.clock.Now()
if waitslots := f.waitslots[ann.origin]; waitslots != nil { if waitslots := f.waitslots[ann.origin]; waitslots != nil {
waitslots[hash] = struct{}{} waitslots[hash] = ann.metas[i]
} else { } else {
f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: {}} f.waitslots[ann.origin] = map[common.Hash]*txMetadata{hash: ann.metas[i]}
} }
} }
// If a new item was added to the waitlist, schedule it into the fetcher // If a new item was added to the waitlist, schedule it into the fetcher
@ -474,9 +512,9 @@ func (f *TxFetcher) loop() {
f.announced[hash] = f.waitlist[hash] f.announced[hash] = f.waitlist[hash]
for peer := range f.waitlist[hash] { for peer := range f.waitlist[hash] {
if announces := f.announces[peer]; announces != nil { if announces := f.announces[peer]; announces != nil {
announces[hash] = struct{}{} announces[hash] = f.waitslots[peer][hash]
} else { } else {
f.announces[peer] = map[common.Hash]struct{}{hash: {}} f.announces[peer] = map[common.Hash]*txMetadata{hash: f.waitslots[peer][hash]}
} }
delete(f.waitslots[peer], hash) delete(f.waitslots[peer], hash)
if len(f.waitslots[peer]) == 0 { if len(f.waitslots[peer]) == 0 {
@ -545,10 +583,27 @@ func (f *TxFetcher) loop() {
case delivery := <-f.cleanup: case delivery := <-f.cleanup:
// Independent if the delivery was direct or broadcast, remove all // Independent if the delivery was direct or broadcast, remove all
// traces of the hash from internal trackers // traces of the hash from internal trackers. That said, compare any
for _, hash := range delivery.hashes { // advertised metadata with the real ones and drop bad peers.
for i, hash := range delivery.hashes {
if _, ok := f.waitlist[hash]; ok { if _, ok := f.waitlist[hash]; ok {
for peer, txset := range f.waitslots { for peer, txset := range f.waitslots {
if meta := txset[hash]; meta != nil {
if delivery.metas[i].kind != meta.kind {
log.Warn("Announced transaction type mismatch", "peer", peer, "tx", hash, "type", delivery.metas[i].kind, "ann", meta.kind)
f.dropPeer(peer)
} else if delivery.metas[i].size != meta.size {
log.Warn("Announced transaction size mismatch", "peer", peer, "tx", hash, "size", delivery.metas[i].size, "ann", meta.size)
if math.Abs(float64(delivery.metas[i].size)-float64(meta.size)) > 8 {
// Normally we should drop a peer considering this is a protocol violation.
// However, due to the RLP vs consensus format messyness, allow a few bytes
// wiggle-room where we only warn, but don't drop.
//
// TODO(karalabe): Get rid of this relaxation when clients are proven stable.
f.dropPeer(peer)
}
}
}
delete(txset, hash) delete(txset, hash)
if len(txset) == 0 { if len(txset) == 0 {
delete(f.waitslots, peer) delete(f.waitslots, peer)
@ -558,6 +613,22 @@ func (f *TxFetcher) loop() {
delete(f.waittime, hash) delete(f.waittime, hash)
} else { } else {
for peer, txset := range f.announces { for peer, txset := range f.announces {
if meta := txset[hash]; meta != nil {
if delivery.metas[i].kind != meta.kind {
log.Warn("Announced transaction type mismatch", "peer", peer, "tx", hash, "type", delivery.metas[i].kind, "ann", meta.kind)
f.dropPeer(peer)
} else if delivery.metas[i].size != meta.size {
log.Warn("Announced transaction size mismatch", "peer", peer, "tx", hash, "size", delivery.metas[i].size, "ann", meta.size)
if math.Abs(float64(delivery.metas[i].size)-float64(meta.size)) > 8 {
// Normally we should drop a peer considering this is a protocol violation.
// However, due to the RLP vs consensus format messyness, allow a few bytes
// wiggle-room where we only warn, but don't drop.
//
// TODO(karalabe): Get rid of this relaxation when clients are proven stable.
f.dropPeer(peer)
}
}
}
delete(txset, hash) delete(txset, hash)
if len(txset) == 0 { if len(txset) == 0 {
delete(f.announces, peer) delete(f.announces, peer)
@ -794,9 +865,15 @@ func (f *TxFetcher) scheduleFetches(timer *mclock.Timer, timeout chan struct{},
if len(f.announces[peer]) == 0 { if len(f.announces[peer]) == 0 {
return // continue in the for-each return // continue in the for-each
} }
hashes := make([]common.Hash, 0, maxTxRetrievals) var (
f.forEachHash(f.announces[peer], func(hash common.Hash) bool { hashes = make([]common.Hash, 0, maxTxRetrievals)
if _, ok := f.fetching[hash]; !ok { bytes uint64
)
f.forEachAnnounce(f.announces[peer], func(hash common.Hash, meta *txMetadata) bool {
// If the transaction is already fetching, skip to the next one
if _, ok := f.fetching[hash]; ok {
return true
}
// Mark the hash as fetching and stash away possible alternates // Mark the hash as fetching and stash away possible alternates
f.fetching[hash] = peer f.fetching[hash] = peer
@ -811,8 +888,13 @@ func (f *TxFetcher) scheduleFetches(timer *mclock.Timer, timeout chan struct{},
if len(hashes) >= maxTxRetrievals { if len(hashes) >= maxTxRetrievals {
return false // break in the for-each return false // break in the for-each
} }
if meta != nil { // Only set eth/68 and upwards
bytes += uint64(meta.size)
if bytes >= maxTxRetrievalSize {
return false
} }
return true // continue in the for-each }
return true // scheduled, try to add more
}) })
// If any hashes were allocated, request them from the peer // If any hashes were allocated, request them from the peer
if len(hashes) > 0 { if len(hashes) > 0 {
@ -857,27 +939,28 @@ func (f *TxFetcher) forEachPeer(peers map[string]struct{}, do func(peer string))
} }
} }
// forEachHash does a range loop over a map of hashes in production, but during // forEachAnnounce does a range loop over a map of announcements in production,
// testing it does a deterministic sorted random to allow reproducing issues. // but during testing it does a deterministic sorted random to allow reproducing
func (f *TxFetcher) forEachHash(hashes map[common.Hash]struct{}, do func(hash common.Hash) bool) { // issues.
func (f *TxFetcher) forEachAnnounce(announces map[common.Hash]*txMetadata, do func(hash common.Hash, meta *txMetadata) bool) {
// If we're running production, use whatever Go's map gives us // If we're running production, use whatever Go's map gives us
if f.rand == nil { if f.rand == nil {
for hash := range hashes { for hash, meta := range announces {
if !do(hash) { if !do(hash, meta) {
return return
} }
} }
return return
} }
// We're running the test suite, make iteration deterministic // We're running the test suite, make iteration deterministic
list := make([]common.Hash, 0, len(hashes)) list := make([]common.Hash, 0, len(announces))
for hash := range hashes { for hash := range announces {
list = append(list, hash) list = append(list, hash)
} }
sortHashes(list) sortHashes(list)
rotateHashes(list, f.rand.Intn(len(list))) rotateHashes(list, f.rand.Intn(len(list)))
for _, hash := range list { for _, hash := range list {
if !do(hash) { if !do(hash, announces[hash]) {
return return
} }
} }

View File

@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
) )
var ( var (
@ -41,9 +42,20 @@ var (
testTxsHashes = []common.Hash{testTxs[0].Hash(), testTxs[1].Hash(), testTxs[2].Hash(), testTxs[3].Hash()} testTxsHashes = []common.Hash{testTxs[0].Hash(), testTxs[1].Hash(), testTxs[2].Hash(), testTxs[3].Hash()}
) )
type announce struct {
hash common.Hash
kind *byte
size *uint32
}
func typeptr(t byte) *byte { return &t }
func sizeptr(n uint32) *uint32 { return &n }
type doTxNotify struct { type doTxNotify struct {
peer string peer string
hashes []common.Hash hashes []common.Hash
types []byte
sizes []uint32
} }
type doTxEnqueue struct { type doTxEnqueue struct {
peer string peer string
@ -57,7 +69,14 @@ type doWait struct {
type doDrop string type doDrop string
type doFunc func() type doFunc func()
type isWaitingWithMeta map[string][]announce
type isWaiting map[string][]common.Hash type isWaiting map[string][]common.Hash
type isScheduledWithMeta struct {
tracking map[string][]announce
fetching map[string][]common.Hash
dangling map[string][]common.Hash
}
type isScheduled struct { type isScheduled struct {
tracking map[string][]common.Hash tracking map[string][]common.Hash
fetching map[string][]common.Hash fetching map[string][]common.Hash
@ -81,6 +100,7 @@ func TestTransactionFetcherWaiting(t *testing.T) {
func(common.Hash) bool { return false }, func(common.Hash) bool { return false },
nil, nil,
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -162,6 +182,212 @@ func TestTransactionFetcherWaiting(t *testing.T) {
}) })
} }
// Tests that transaction announcements with associated metadata are added to a
// waitlist, and none of them are scheduled for retrieval until the wait expires.
//
// This test is an extended version of TestTransactionFetcherWaiting. It's mostly
// to cover the metadata checkes without bloating up the basic behavioral tests
// with all the useless extra fields.
func TestTransactionFetcherWaitingWithMeta(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
nil,
func(string, []common.Hash) error { return nil },
nil,
)
},
steps: []interface{}{
// Initial announcement to get something into the waitlist
doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{111, 222}},
isWaitingWithMeta(map[string][]announce{
"A": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
},
}),
// Announce from a new peer to check that no overwrite happens
doTxNotify{peer: "B", hashes: []common.Hash{{0x03}, {0x04}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{333, 444}},
isWaitingWithMeta(map[string][]announce{
"A": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
},
"B": {
{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
},
}),
// Announce clashing hashes but unique new peer
doTxNotify{peer: "C", hashes: []common.Hash{{0x01}, {0x04}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{111, 444}},
isWaitingWithMeta(map[string][]announce{
"A": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
},
"B": {
{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
},
"C": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
},
}),
// Announce existing and clashing hashes from existing peer. Clashes
// should not overwrite previous announcements.
doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x03}, {0x05}}, types: []byte{types.LegacyTxType, types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{999, 333, 555}},
isWaitingWithMeta(map[string][]announce{
"A": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
{common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(555)},
},
"B": {
{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
},
"C": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
},
}),
// Announce clashing hashes with conflicting metadata. Somebody will
// be in the wrong, but we don't know yet who.
doTxNotify{peer: "D", hashes: []common.Hash{{0x01}, {0x02}}, types: []byte{types.LegacyTxType, types.BlobTxType}, sizes: []uint32{999, 222}},
isWaitingWithMeta(map[string][]announce{
"A": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
{common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(555)},
},
"B": {
{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
},
"C": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
},
"D": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(999)},
{common.Hash{0x02}, typeptr(types.BlobTxType), sizeptr(222)},
},
}),
isScheduled{tracking: nil, fetching: nil},
// Wait for the arrival timeout which should move all expired items
// from the wait list to the scheduler
doWait{time: txArriveTimeout, step: true},
isWaiting(nil),
isScheduledWithMeta{
tracking: map[string][]announce{
"A": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
{common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(555)},
},
"B": {
{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
},
"C": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
},
"D": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(999)},
{common.Hash{0x02}, typeptr(types.BlobTxType), sizeptr(222)},
},
},
fetching: map[string][]common.Hash{ // Depends on deterministic test randomizer
"A": {{0x03}, {0x05}},
"C": {{0x01}, {0x04}},
"D": {{0x02}},
},
},
// Queue up a non-fetchable transaction and then trigger it with a new
// peer (weird case to test 1 line in the fetcher)
doTxNotify{peer: "C", hashes: []common.Hash{{0x06}, {0x07}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{666, 777}},
isWaitingWithMeta(map[string][]announce{
"C": {
{common.Hash{0x06}, typeptr(types.LegacyTxType), sizeptr(666)},
{common.Hash{0x07}, typeptr(types.LegacyTxType), sizeptr(777)},
},
}),
doWait{time: txArriveTimeout, step: true},
isScheduledWithMeta{
tracking: map[string][]announce{
"A": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
{common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(555)},
},
"B": {
{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
},
"C": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
{common.Hash{0x06}, typeptr(types.LegacyTxType), sizeptr(666)},
{common.Hash{0x07}, typeptr(types.LegacyTxType), sizeptr(777)},
},
"D": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(999)},
{common.Hash{0x02}, typeptr(types.BlobTxType), sizeptr(222)},
},
},
fetching: map[string][]common.Hash{
"A": {{0x03}, {0x05}},
"C": {{0x01}, {0x04}},
"D": {{0x02}},
},
},
doTxNotify{peer: "E", hashes: []common.Hash{{0x06}, {0x07}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{666, 777}},
isScheduledWithMeta{
tracking: map[string][]announce{
"A": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
{common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(555)},
},
"B": {
{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
},
"C": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
{common.Hash{0x06}, typeptr(types.LegacyTxType), sizeptr(666)},
{common.Hash{0x07}, typeptr(types.LegacyTxType), sizeptr(777)},
},
"D": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(999)},
{common.Hash{0x02}, typeptr(types.BlobTxType), sizeptr(222)},
},
"E": {
{common.Hash{0x06}, typeptr(types.LegacyTxType), sizeptr(666)},
{common.Hash{0x07}, typeptr(types.LegacyTxType), sizeptr(777)},
},
},
fetching: map[string][]common.Hash{
"A": {{0x03}, {0x05}},
"C": {{0x01}, {0x04}},
"D": {{0x02}},
"E": {{0x06}, {0x07}},
},
},
},
})
}
// Tests that transaction announcements skip the waiting list if they are // Tests that transaction announcements skip the waiting list if they are
// already scheduled. // already scheduled.
func TestTransactionFetcherSkipWaiting(t *testing.T) { func TestTransactionFetcherSkipWaiting(t *testing.T) {
@ -171,6 +397,7 @@ func TestTransactionFetcherSkipWaiting(t *testing.T) {
func(common.Hash) bool { return false }, func(common.Hash) bool { return false },
nil, nil,
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -234,6 +461,7 @@ func TestTransactionFetcherSingletonRequesting(t *testing.T) {
func(common.Hash) bool { return false }, func(common.Hash) bool { return false },
nil, nil,
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -313,6 +541,7 @@ func TestTransactionFetcherFailedRescheduling(t *testing.T) {
<-proceed <-proceed
return errors.New("peer disconnected") return errors.New("peer disconnected")
}, },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -382,6 +611,7 @@ func TestTransactionFetcherCleanup(t *testing.T) {
return make([]error, len(txs)) return make([]error, len(txs))
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -421,6 +651,7 @@ func TestTransactionFetcherCleanupEmpty(t *testing.T) {
return make([]error, len(txs)) return make([]error, len(txs))
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -459,6 +690,7 @@ func TestTransactionFetcherMissingRescheduling(t *testing.T) {
return make([]error, len(txs)) return make([]error, len(txs))
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -505,6 +737,7 @@ func TestTransactionFetcherMissingCleanup(t *testing.T) {
return make([]error, len(txs)) return make([]error, len(txs))
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -543,6 +776,7 @@ func TestTransactionFetcherBroadcasts(t *testing.T) {
return make([]error, len(txs)) return make([]error, len(txs))
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -591,6 +825,7 @@ func TestTransactionFetcherWaitTimerResets(t *testing.T) {
func(common.Hash) bool { return false }, func(common.Hash) bool { return false },
nil, nil,
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -648,6 +883,7 @@ func TestTransactionFetcherTimeoutRescheduling(t *testing.T) {
return make([]error, len(txs)) return make([]error, len(txs))
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -713,6 +949,7 @@ func TestTransactionFetcherTimeoutTimerResets(t *testing.T) {
func(common.Hash) bool { return false }, func(common.Hash) bool { return false },
nil, nil,
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -757,21 +994,21 @@ func TestTransactionFetcherTimeoutTimerResets(t *testing.T) {
}) })
} }
// Tests that if thousands of transactions are announces, only a small // Tests that if thousands of transactions are announced, only a small
// number of them will be requested at a time. // number of them will be requested at a time.
func TestTransactionFetcherRateLimiting(t *testing.T) { func TestTransactionFetcherRateLimiting(t *testing.T) {
// Create a slew of transactions and to announce them // Create a slew of transactions and announce them
var hashes []common.Hash var hashes []common.Hash
for i := 0; i < maxTxAnnounces; i++ { for i := 0; i < maxTxAnnounces; i++ {
hashes = append(hashes, common.Hash{byte(i / 256), byte(i % 256)}) hashes = append(hashes, common.Hash{byte(i / 256), byte(i % 256)})
} }
testTransactionFetcherParallel(t, txFetcherTest{ testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher { init: func() *TxFetcher {
return NewTxFetcher( return NewTxFetcher(
func(common.Hash) bool { return false }, func(common.Hash) bool { return false },
nil, nil,
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -792,6 +1029,68 @@ func TestTransactionFetcherRateLimiting(t *testing.T) {
}) })
} }
// Tests that if huge transactions are announced, only a small number of them will
// be requested at a time, to keep the responses below a resonable level.
func TestTransactionFetcherBandwidthLimiting(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
nil,
func(string, []common.Hash) error { return nil },
nil,
)
},
steps: []interface{}{
// Announce mid size transactions from A to verify that multiple
// ones can be piled into a single request.
doTxNotify{peer: "A",
hashes: []common.Hash{{0x01}, {0x02}, {0x03}, {0x04}},
types: []byte{types.LegacyTxType, types.LegacyTxType, types.LegacyTxType, types.LegacyTxType},
sizes: []uint32{48 * 1024, 48 * 1024, 48 * 1024, 48 * 1024},
},
// Announce exactly on the limit transactions to see that only one
// gets requested
doTxNotify{peer: "B",
hashes: []common.Hash{{0x05}, {0x06}},
types: []byte{types.LegacyTxType, types.LegacyTxType},
sizes: []uint32{maxTxRetrievalSize, maxTxRetrievalSize},
},
// Announce oversized blob transactions to see that overflows are ok
doTxNotify{peer: "C",
hashes: []common.Hash{{0x07}, {0x08}},
types: []byte{types.BlobTxType, types.BlobTxType},
sizes: []uint32{params.MaxBlobGasPerBlock, params.MaxBlobGasPerBlock},
},
doWait{time: txArriveTimeout, step: true},
isWaiting(nil),
isScheduledWithMeta{
tracking: map[string][]announce{
"A": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(48 * 1024)},
{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(48 * 1024)},
{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(48 * 1024)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(48 * 1024)},
},
"B": {
{common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(maxTxRetrievalSize)},
{common.Hash{0x06}, typeptr(types.LegacyTxType), sizeptr(maxTxRetrievalSize)},
},
"C": {
{common.Hash{0x07}, typeptr(types.BlobTxType), sizeptr(params.MaxBlobGasPerBlock)},
{common.Hash{0x08}, typeptr(types.BlobTxType), sizeptr(params.MaxBlobGasPerBlock)},
},
},
fetching: map[string][]common.Hash{
"A": {{0x02}, {0x03}, {0x04}},
"B": {{0x06}},
"C": {{0x08}},
},
},
},
})
}
// Tests that then number of transactions a peer is allowed to announce and/or // Tests that then number of transactions a peer is allowed to announce and/or
// request at the same time is hard capped. // request at the same time is hard capped.
func TestTransactionFetcherDoSProtection(t *testing.T) { func TestTransactionFetcherDoSProtection(t *testing.T) {
@ -810,6 +1109,7 @@ func TestTransactionFetcherDoSProtection(t *testing.T) {
func(common.Hash) bool { return false }, func(common.Hash) bool { return false },
nil, nil,
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -877,6 +1177,7 @@ func TestTransactionFetcherUnderpricedDedup(t *testing.T) {
return errs return errs
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -946,6 +1247,7 @@ func TestTransactionFetcherUnderpricedDoSProtection(t *testing.T) {
return errs return errs
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: append(steps, []interface{}{ steps: append(steps, []interface{}{
@ -968,6 +1270,7 @@ func TestTransactionFetcherOutOfBoundDeliveries(t *testing.T) {
return make([]error, len(txs)) return make([]error, len(txs))
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -1021,6 +1324,7 @@ func TestTransactionFetcherDrop(t *testing.T) {
return make([]error, len(txs)) return make([]error, len(txs))
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -1087,6 +1391,7 @@ func TestTransactionFetcherDropRescheduling(t *testing.T) {
return make([]error, len(txs)) return make([]error, len(txs))
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -1120,6 +1425,74 @@ func TestTransactionFetcherDropRescheduling(t *testing.T) {
}) })
} }
// Tests that announced transactions with the wrong transaction type or size will
// result in a dropped peer.
func TestInvalidAnnounceMetadata(t *testing.T) {
drop := make(chan string, 2)
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(txs []*types.Transaction) []error {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
func(peer string) { drop <- peer },
)
},
steps: []interface{}{
// Initial announcement to get something into the waitlist
doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}, types: []byte{testTxs[0].Type(), testTxs[1].Type()}, sizes: []uint32{uint32(testTxs[0].Size()), uint32(testTxs[1].Size())}},
isWaitingWithMeta(map[string][]announce{
"A": {
{testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(uint32(testTxs[0].Size()))},
{testTxsHashes[1], typeptr(testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))},
},
}),
// Announce from new peers conflicting transactions
doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{1024 + uint32(testTxs[0].Size())}},
doTxNotify{peer: "C", hashes: []common.Hash{testTxsHashes[1]}, types: []byte{1 + testTxs[1].Type()}, sizes: []uint32{uint32(testTxs[1].Size())}},
isWaitingWithMeta(map[string][]announce{
"A": {
{testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(uint32(testTxs[0].Size()))},
{testTxsHashes[1], typeptr(testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))},
},
"B": {
{testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(1024 + uint32(testTxs[0].Size()))},
},
"C": {
{testTxsHashes[1], typeptr(1 + testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))},
},
}),
// Schedule all the transactions for retrieval
doWait{time: txArriveTimeout, step: true},
isWaitingWithMeta(nil),
isScheduledWithMeta{
tracking: map[string][]announce{
"A": {
{testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(uint32(testTxs[0].Size()))},
{testTxsHashes[1], typeptr(testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))},
},
"B": {
{testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(1024 + uint32(testTxs[0].Size()))},
},
"C": {
{testTxsHashes[1], typeptr(1 + testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))},
},
},
fetching: map[string][]common.Hash{
"A": {testTxsHashes[0]},
"C": {testTxsHashes[1]},
},
},
// Deliver the transactions and wait for B to be dropped
doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1]}},
doFunc(func() { <-drop }),
doFunc(func() { <-drop }),
},
})
}
// This test reproduces a crash caught by the fuzzer. The root cause was a // This test reproduces a crash caught by the fuzzer. The root cause was a
// dangling transaction timing out and clashing on re-add with a concurrently // dangling transaction timing out and clashing on re-add with a concurrently
// announced one. // announced one.
@ -1132,6 +1505,7 @@ func TestTransactionFetcherFuzzCrash01(t *testing.T) {
return make([]error, len(txs)) return make([]error, len(txs))
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -1159,6 +1533,7 @@ func TestTransactionFetcherFuzzCrash02(t *testing.T) {
return make([]error, len(txs)) return make([]error, len(txs))
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -1188,6 +1563,7 @@ func TestTransactionFetcherFuzzCrash03(t *testing.T) {
return make([]error, len(txs)) return make([]error, len(txs))
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -1224,6 +1600,7 @@ func TestTransactionFetcherFuzzCrash04(t *testing.T) {
<-proceed <-proceed
return errors.New("peer disconnected") return errors.New("peer disconnected")
}, },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -1274,9 +1651,34 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
// Crunch through all the test steps and execute them // Crunch through all the test steps and execute them
for i, step := range tt.steps { for i, step := range tt.steps {
// Auto-expand certain steps to ones with metadata
switch old := step.(type) {
case isWaiting:
new := make(isWaitingWithMeta)
for peer, hashes := range old {
for _, hash := range hashes {
new[peer] = append(new[peer], announce{hash, nil, nil})
}
}
step = new
case isScheduled:
new := isScheduledWithMeta{
tracking: make(map[string][]announce),
fetching: old.fetching,
dangling: old.dangling,
}
for peer, hashes := range old.tracking {
for _, hash := range hashes {
new.tracking[peer] = append(new.tracking[peer], announce{hash, nil, nil})
}
}
step = new
}
// Process the original or expanded steps
switch step := step.(type) { switch step := step.(type) {
case doTxNotify: case doTxNotify:
if err := fetcher.Notify(step.peer, step.hashes); err != nil { if err := fetcher.Notify(step.peer, step.types, step.sizes, step.hashes); err != nil {
t.Errorf("step %d: %v", i, err) t.Errorf("step %d: %v", i, err)
} }
<-wait // Fetcher needs to process this, wait until it's done <-wait // Fetcher needs to process this, wait until it's done
@ -1307,24 +1709,34 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
case doFunc: case doFunc:
step() step()
case isWaiting: case isWaitingWithMeta:
// We need to check that the waiting list (stage 1) internals // We need to check that the waiting list (stage 1) internals
// match with the expected set. Check the peer->hash mappings // match with the expected set. Check the peer->hash mappings
// first. // first.
for peer, hashes := range step { for peer, announces := range step {
waiting := fetcher.waitslots[peer] waiting := fetcher.waitslots[peer]
if waiting == nil { if waiting == nil {
t.Errorf("step %d: peer %s missing from waitslots", i, peer) t.Errorf("step %d: peer %s missing from waitslots", i, peer)
continue continue
} }
for _, hash := range hashes { for _, ann := range announces {
if _, ok := waiting[hash]; !ok { if meta, ok := waiting[ann.hash]; !ok {
t.Errorf("step %d, peer %s: hash %x missing from waitslots", i, peer, hash) t.Errorf("step %d, peer %s: hash %x missing from waitslots", i, peer, ann.hash)
} else {
if (meta == nil && (ann.kind != nil || ann.size != nil)) ||
(meta != nil && (ann.kind == nil || ann.size == nil)) ||
(meta != nil && (meta.kind != *ann.kind || meta.size != *ann.size)) {
t.Errorf("step %d, peer %s, hash %x: waitslot metadata mismatch: want %v, have %v/%v", i, peer, ann.hash, meta, *ann.kind, *ann.size)
} }
} }
for hash := range waiting { }
if !containsHash(hashes, hash) { for hash, meta := range waiting {
t.Errorf("step %d, peer %s: hash %x extra in waitslots", i, peer, hash) ann := announce{hash: hash}
if meta != nil {
ann.kind, ann.size = &meta.kind, &meta.size
}
if !containsAnnounce(announces, ann) {
t.Errorf("step %d, peer %s: announce %v extra in waitslots", i, peer, ann)
} }
} }
} }
@ -1334,13 +1746,13 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
} }
} }
// Peer->hash sets correct, check the hash->peer and timeout sets // Peer->hash sets correct, check the hash->peer and timeout sets
for peer, hashes := range step { for peer, announces := range step {
for _, hash := range hashes { for _, ann := range announces {
if _, ok := fetcher.waitlist[hash][peer]; !ok { if _, ok := fetcher.waitlist[ann.hash][peer]; !ok {
t.Errorf("step %d, hash %x: peer %s missing from waitlist", i, hash, peer) t.Errorf("step %d, hash %x: peer %s missing from waitlist", i, ann.hash, peer)
} }
if _, ok := fetcher.waittime[hash]; !ok { if _, ok := fetcher.waittime[ann.hash]; !ok {
t.Errorf("step %d: hash %x missing from waittime", i, hash) t.Errorf("step %d: hash %x missing from waittime", i, ann.hash)
} }
} }
} }
@ -1349,15 +1761,15 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
t.Errorf("step %d, hash %x: empty peerset in waitlist", i, hash) t.Errorf("step %d, hash %x: empty peerset in waitlist", i, hash)
} }
for peer := range peers { for peer := range peers {
if !containsHash(step[peer], hash) { if !containsHashInAnnounces(step[peer], hash) {
t.Errorf("step %d, hash %x: peer %s extra in waitlist", i, hash, peer) t.Errorf("step %d, hash %x: peer %s extra in waitlist", i, hash, peer)
} }
} }
} }
for hash := range fetcher.waittime { for hash := range fetcher.waittime {
var found bool var found bool
for _, hashes := range step { for _, announces := range step {
if containsHash(hashes, hash) { if containsHashInAnnounces(announces, hash) {
found = true found = true
break break
} }
@ -1367,23 +1779,33 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
} }
} }
case isScheduled: case isScheduledWithMeta:
// Check that all scheduled announces are accounted for and no // Check that all scheduled announces are accounted for and no
// extra ones are present. // extra ones are present.
for peer, hashes := range step.tracking { for peer, announces := range step.tracking {
scheduled := fetcher.announces[peer] scheduled := fetcher.announces[peer]
if scheduled == nil { if scheduled == nil {
t.Errorf("step %d: peer %s missing from announces", i, peer) t.Errorf("step %d: peer %s missing from announces", i, peer)
continue continue
} }
for _, hash := range hashes { for _, ann := range announces {
if _, ok := scheduled[hash]; !ok { if meta, ok := scheduled[ann.hash]; !ok {
t.Errorf("step %d, peer %s: hash %x missing from announces", i, peer, hash) t.Errorf("step %d, peer %s: hash %x missing from announces", i, peer, ann.hash)
} else {
if (meta == nil && (ann.kind != nil || ann.size != nil)) ||
(meta != nil && (ann.kind == nil || ann.size == nil)) ||
(meta != nil && (meta.kind != *ann.kind || meta.size != *ann.size)) {
t.Errorf("step %d, peer %s, hash %x: announce metadata mismatch: want %v, have %v/%v", i, peer, ann.hash, meta, *ann.kind, *ann.size)
} }
} }
for hash := range scheduled { }
if !containsHash(hashes, hash) { for hash, meta := range scheduled {
t.Errorf("step %d, peer %s: hash %x extra in announces", i, peer, hash) ann := announce{hash: hash}
if meta != nil {
ann.kind, ann.size = &meta.kind, &meta.size
}
if !containsAnnounce(announces, ann) {
t.Errorf("step %d, peer %s: announce %x extra in announces", i, peer, hash)
} }
} }
} }
@ -1483,17 +1905,17 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
// retrieval but not actively being downloaded are tracked only // retrieval but not actively being downloaded are tracked only
// in the stage 2 `announced` map. // in the stage 2 `announced` map.
var queued []common.Hash var queued []common.Hash
for _, hashes := range step.tracking { for _, announces := range step.tracking {
for _, hash := range hashes { for _, ann := range announces {
var found bool var found bool
for _, hs := range step.fetching { for _, hs := range step.fetching {
if containsHash(hs, hash) { if containsHash(hs, ann.hash) {
found = true found = true
break break
} }
} }
if !found { if !found {
queued = append(queued, hash) queued = append(queued, ann.hash)
} }
} }
} }
@ -1526,6 +1948,42 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
} }
} }
// containsAnnounce returns whether an announcement is contained within a slice
// of announcements.
func containsAnnounce(slice []announce, ann announce) bool {
for _, have := range slice {
if have.hash == ann.hash {
if have.kind == nil || ann.kind == nil {
if have.kind != ann.kind {
return false
}
} else if *have.kind != *ann.kind {
return false
}
if have.size == nil || ann.size == nil {
if have.size != ann.size {
return false
}
} else if *have.size != *ann.size {
return false
}
return true
}
}
return false
}
// containsHashInAnnounces returns whether a hash is contained within a slice
// of announcements.
func containsHashInAnnounces(slice []announce, hash common.Hash) bool {
for _, have := range slice {
if have.hash == hash {
return true
}
}
return false
}
// containsHash returns whether a hash is contained within a hash slice. // containsHash returns whether a hash is contained within a hash slice.
func containsHash(slice []common.Hash, hash common.Hash) bool { func containsHash(slice []common.Hash, hash common.Hash) bool {
for _, have := range slice { for _, have := range slice {

View File

@ -55,9 +55,7 @@ const (
txMaxBroadcastSize = 4096 txMaxBroadcastSize = 4096
) )
var ( var syncChallengeTimeout = 15 * time.Second // Time allowance for a node to reply to the sync progress challenge
syncChallengeTimeout = 15 * time.Second // Time allowance for a node to reply to the sync progress challenge
)
// txPool defines the methods needed from a transaction pool implementation to // txPool defines the methods needed from a transaction pool implementation to
// support all the operations needed by the Ethereum chain protocols. // support all the operations needed by the Ethereum chain protocols.
@ -77,9 +75,10 @@ type txPool interface {
// The slice should be modifiable by the caller. // The slice should be modifiable by the caller.
Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction
// SubscribeNewTxsEvent should return an event subscription of // SubscribeTransactions subscribes to new transaction events. The subscriber
// NewTxsEvent and send events to the given channel. // can decide whether to receive notifications only for newly seen transactions
SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription // or also for reorged out ones.
SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription
} }
// handlerConfig is the collection of initialization parameters to create a full // handlerConfig is the collection of initialization parameters to create a full
@ -89,7 +88,7 @@ type handlerConfig struct {
Chain *core.BlockChain // Blockchain to serve data from Chain *core.BlockChain // Blockchain to serve data from
TxPool txPool // Transaction pool to propagate from TxPool txPool // Transaction pool to propagate from
Merger *consensus.Merger // The manager for eth1/2 transition Merger *consensus.Merger // The manager for eth1/2 transition
Network uint64 // Network identifier to adfvertise Network uint64 // Network identifier to advertise
Sync downloader.SyncMode // Whether to snap or full sync Sync downloader.SyncMode // Whether to snap or full sync
BloomCache uint64 // Megabytes to alloc for snap sync bloom BloomCache uint64 // Megabytes to alloc for snap sync bloom
EventMux *event.TypeMux // Legacy event mux, deprecate for `feed` EventMux *event.TypeMux // Legacy event mux, deprecate for `feed`
@ -255,7 +254,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
} }
td := new(big.Int).Add(ptd, block.Difficulty()) td := new(big.Int).Add(ptd, block.Difficulty())
if !h.chain.Config().IsTerminalPoWBlock(ptd, td) { if !h.chain.Config().IsTerminalPoWBlock(ptd, td) {
log.Info("Filtered out non-termimal pow block", "number", block.NumberU64(), "hash", block.Hash()) log.Info("Filtered out non-terminal pow block", "number", block.NumberU64(), "hash", block.Hash())
return 0, nil return 0, nil
} }
if err := h.chain.InsertBlockWithoutSetHead(block); err != nil { if err := h.chain.InsertBlockWithoutSetHead(block); err != nil {
@ -278,7 +277,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
addTxs := func(txs []*types.Transaction) []error { addTxs := func(txs []*types.Transaction) []error {
return h.txpool.Add(txs, false, false) return h.txpool.Add(txs, false, false)
} }
h.txFetcher = fetcher.NewTxFetcher(h.txpool.Has, addTxs, fetchTx) h.txFetcher = fetcher.NewTxFetcher(h.txpool.Has, addTxs, fetchTx, h.removePeer)
h.chainSync = newChainSyncer(h) h.chainSync = newChainSyncer(h)
return h, nil return h, nil
} }
@ -416,7 +415,7 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {
select { select {
case res := <-resCh: case res := <-resCh:
headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersPacket)) headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersRequest))
if len(headers) == 0 { if len(headers) == 0 {
// Required blocks are allowed to be missing if the remote // Required blocks are allowed to be missing if the remote
// node is not yet synced // node is not yet synced
@ -463,7 +462,7 @@ func (h *handler) runSnapExtension(peer *snap.Peer, handler snap.Handler) error
snap.EgressRegistrationErrorMeter.Mark(1) snap.EgressRegistrationErrorMeter.Mark(1)
} }
} }
peer.Log().Warn("Snapshot extension registration failed", "err", err) peer.Log().Debug("Snapshot extension registration failed", "err", err)
return err return err
} }
return handler(peer) return handler(peer)
@ -511,10 +510,10 @@ func (h *handler) unregisterPeer(id string) {
func (h *handler) Start(maxPeers int) { func (h *handler) Start(maxPeers int) {
h.maxPeers = maxPeers h.maxPeers = maxPeers
// broadcast transactions // broadcast and announce transactions (only new ones, not resurrected ones)
h.wg.Add(1) h.wg.Add(1)
h.txsCh = make(chan core.NewTxsEvent, txChanSize) h.txsCh = make(chan core.NewTxsEvent, txChanSize)
h.txsSub = h.txpool.SubscribeNewTxsEvent(h.txsCh) h.txsSub = h.txpool.SubscribeTransactions(h.txsCh, false)
go h.txBroadcastLoop() go h.txBroadcastLoop()
// broadcast mined blocks // broadcast mined blocks
@ -594,26 +593,33 @@ func (h *handler) BroadcastBlock(block *types.Block, propagate bool) {
} }
// BroadcastTransactions will propagate a batch of transactions // BroadcastTransactions will propagate a batch of transactions
// - To a square root of all peers // - To a square root of all peers for non-blob transactions
// - And, separately, as announcements to all peers which are not known to // - And, separately, as announcements to all peers which are not known to
// already have the given transaction. // already have the given transaction.
func (h *handler) BroadcastTransactions(txs types.Transactions) { func (h *handler) BroadcastTransactions(txs types.Transactions) {
var ( var (
annoCount int // Count of announcements made blobTxs int // Number of blob transactions to announce only
annoPeers int largeTxs int // Number of large transactions to announce only
directCount int // Count of the txs sent directly to peers
directPeers int // Count of the peers that were sent transactions directly directCount int // Number of transactions sent directly to peers (duplicates included)
directPeers int // Number of peers that were sent transactions directly
annCount int // Number of transactions announced across all peers (duplicates included)
annPeers int // Number of peers announced about transactions
txset = make(map[*ethPeer][]common.Hash) // Set peer->hash to transfer directly txset = make(map[*ethPeer][]common.Hash) // Set peer->hash to transfer directly
annos = make(map[*ethPeer][]common.Hash) // Set peer->hash to announce annos = make(map[*ethPeer][]common.Hash) // Set peer->hash to announce
) )
// Broadcast transactions to a batch of peers not knowing about it // Broadcast transactions to a batch of peers not knowing about it
for _, tx := range txs { for _, tx := range txs {
peers := h.peers.peersWithoutTransaction(tx.Hash()) peers := h.peers.peersWithoutTransaction(tx.Hash())
var numDirect int var numDirect int
if tx.Size() <= txMaxBroadcastSize { switch {
case tx.Type() == types.BlobTxType:
blobTxs++
case tx.Size() > txMaxBroadcastSize:
largeTxs++
default:
numDirect = int(math.Sqrt(float64(len(peers)))) numDirect = int(math.Sqrt(float64(len(peers))))
} }
// Send the tx unconditionally to a subset of our peers // Send the tx unconditionally to a subset of our peers
@ -631,13 +637,12 @@ func (h *handler) BroadcastTransactions(txs types.Transactions) {
peer.AsyncSendTransactions(hashes) peer.AsyncSendTransactions(hashes)
} }
for peer, hashes := range annos { for peer, hashes := range annos {
annoPeers++ annPeers++
annoCount += len(hashes) annCount += len(hashes)
peer.AsyncSendPooledTransactionHashes(hashes) peer.AsyncSendPooledTransactionHashes(hashes)
} }
log.Debug("Transaction broadcast", "txs", len(txs), log.Debug("Distributed transactions", "plaintxs", len(txs)-blobTxs-largeTxs, "blobtxs", blobTxs, "largetxs", largeTxs,
"announce packs", annoPeers, "announced hashes", annoCount, "bcastpeers", directPeers, "bcastcount", directCount, "annpeers", annPeers, "anncount", annCount)
"tx packs", directPeers, "broadcast txs", directCount)
} }
// minedBroadcastLoop sends mined blocks to connected peers. // minedBroadcastLoop sends mined blocks to connected peers.

View File

@ -17,6 +17,7 @@
package eth package eth
import ( import (
"errors"
"fmt" "fmt"
"math/big" "math/big"
"time" "time"
@ -66,16 +67,21 @@ func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
case *eth.NewBlockPacket: case *eth.NewBlockPacket:
return h.handleBlockBroadcast(peer, packet.Block, packet.TD) return h.handleBlockBroadcast(peer, packet.Block, packet.TD)
case *eth.NewPooledTransactionHashesPacket66: case *eth.NewPooledTransactionHashesPacket67:
return h.txFetcher.Notify(peer.ID(), *packet) return h.txFetcher.Notify(peer.ID(), nil, nil, *packet)
case *eth.NewPooledTransactionHashesPacket68: case *eth.NewPooledTransactionHashesPacket68:
return h.txFetcher.Notify(peer.ID(), packet.Hashes) return h.txFetcher.Notify(peer.ID(), packet.Types, packet.Sizes, packet.Hashes)
case *eth.TransactionsPacket: case *eth.TransactionsPacket:
for _, tx := range *packet {
if tx.Type() == types.BlobTxType {
return errors.New("disallowed broadcast blob transaction")
}
}
return h.txFetcher.Enqueue(peer.ID(), *packet, false) return h.txFetcher.Enqueue(peer.ID(), *packet, false)
case *eth.PooledTransactionsPacket: case *eth.PooledTransactionsResponse:
return h.txFetcher.Enqueue(peer.ID(), *packet, true) return h.txFetcher.Enqueue(peer.ID(), *packet, true)
default: default:
@ -90,9 +96,7 @@ func (h *ethHandler) handleBlockAnnounces(peer *eth.Peer, hashes []common.Hash,
// the chain already entered the pos stage and disconnect the // the chain already entered the pos stage and disconnect the
// remote peer. // remote peer.
if h.merger.PoSFinalized() { if h.merger.PoSFinalized() {
// TODO (MariusVanDerWijden) drop non-updated peers after the merge return errors.New("disallowed block announcement")
return nil
// return errors.New("unexpected block announces")
} }
// Schedule all the unknown hashes for retrieval // Schedule all the unknown hashes for retrieval
var ( var (
@ -118,9 +122,7 @@ func (h *ethHandler) handleBlockBroadcast(peer *eth.Peer, block *types.Block, td
// the chain already entered the pos stage and disconnect the // the chain already entered the pos stage and disconnect the
// remote peer. // remote peer.
if h.merger.PoSFinalized() { if h.merger.PoSFinalized() {
// TODO (MariusVanDerWijden) drop non-updated peers after the merge return errors.New("disallowed block broadcast")
return nil
// return errors.New("unexpected block announces")
} }
// Schedule the block for import // Schedule the block for import
h.blockFetcher.Enqueue(peer.ID(), block) h.blockFetcher.Enqueue(peer.ID(), block)

View File

@ -58,7 +58,7 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
h.blockBroadcasts.Send(packet.Block) h.blockBroadcasts.Send(packet.Block)
return nil return nil
case *eth.NewPooledTransactionHashesPacket66: case *eth.NewPooledTransactionHashesPacket67:
h.txAnnounces.Send(([]common.Hash)(*packet)) h.txAnnounces.Send(([]common.Hash)(*packet))
return nil return nil
@ -70,7 +70,7 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
h.txBroadcasts.Send(([]*types.Transaction)(*packet)) h.txBroadcasts.Send(([]*types.Transaction)(*packet))
return nil return nil
case *eth.PooledTransactionsPacket: case *eth.PooledTransactionsResponse:
h.txBroadcasts.Send(([]*types.Transaction)(*packet)) h.txBroadcasts.Send(([]*types.Transaction)(*packet))
return nil return nil
@ -81,7 +81,6 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
// Tests that peers are correctly accepted (or rejected) based on the advertised // Tests that peers are correctly accepted (or rejected) based on the advertised
// fork IDs in the protocol handshake. // fork IDs in the protocol handshake.
func TestForkIDSplit66(t *testing.T) { testForkIDSplit(t, eth.ETH66) }
func TestForkIDSplit67(t *testing.T) { testForkIDSplit(t, eth.ETH67) } func TestForkIDSplit67(t *testing.T) { testForkIDSplit(t, eth.ETH67) }
func TestForkIDSplit68(t *testing.T) { testForkIDSplit(t, eth.ETH68) } func TestForkIDSplit68(t *testing.T) { testForkIDSplit(t, eth.ETH68) }
@ -237,7 +236,6 @@ func testForkIDSplit(t *testing.T, protocol uint) {
} }
// Tests that received transactions are added to the local pool. // Tests that received transactions are added to the local pool.
func TestRecvTransactions66(t *testing.T) { testRecvTransactions(t, eth.ETH66) }
func TestRecvTransactions67(t *testing.T) { testRecvTransactions(t, eth.ETH67) } func TestRecvTransactions67(t *testing.T) { testRecvTransactions(t, eth.ETH67) }
func TestRecvTransactions68(t *testing.T) { testRecvTransactions(t, eth.ETH68) } func TestRecvTransactions68(t *testing.T) { testRecvTransactions(t, eth.ETH68) }
@ -251,7 +249,7 @@ func testRecvTransactions(t *testing.T, protocol uint) {
handler.handler.synced.Store(true) // mark synced to accept transactions handler.handler.synced.Store(true) // mark synced to accept transactions
txs := make(chan core.NewTxsEvent) txs := make(chan core.NewTxsEvent)
sub := handler.txpool.SubscribeNewTxsEvent(txs) sub := handler.txpool.SubscribeTransactions(txs, false)
defer sub.Unsubscribe() defer sub.Unsubscribe()
// Create a source peer to send messages through and a sink handler to receive them // Create a source peer to send messages through and a sink handler to receive them
@ -296,7 +294,6 @@ func testRecvTransactions(t *testing.T, protocol uint) {
} }
// This test checks that pending transactions are sent. // This test checks that pending transactions are sent.
func TestSendTransactions66(t *testing.T) { testSendTransactions(t, eth.ETH66) }
func TestSendTransactions67(t *testing.T) { testSendTransactions(t, eth.ETH67) } func TestSendTransactions67(t *testing.T) { testSendTransactions(t, eth.ETH67) }
func TestSendTransactions68(t *testing.T) { testSendTransactions(t, eth.ETH68) } func TestSendTransactions68(t *testing.T) { testSendTransactions(t, eth.ETH68) }
@ -356,7 +353,7 @@ func testSendTransactions(t *testing.T, protocol uint) {
seen := make(map[common.Hash]struct{}) seen := make(map[common.Hash]struct{})
for len(seen) < len(insert) { for len(seen) < len(insert) {
switch protocol { switch protocol {
case 66, 67, 68: case 67, 68:
select { select {
case hashes := <-anns: case hashes := <-anns:
for _, hash := range hashes { for _, hash := range hashes {
@ -382,7 +379,6 @@ func testSendTransactions(t *testing.T, protocol uint) {
// Tests that transactions get propagated to all attached peers, either via direct // Tests that transactions get propagated to all attached peers, either via direct
// broadcasts or via announcements/retrievals. // broadcasts or via announcements/retrievals.
func TestTransactionPropagation66(t *testing.T) { testTransactionPropagation(t, eth.ETH66) }
func TestTransactionPropagation67(t *testing.T) { testTransactionPropagation(t, eth.ETH67) } func TestTransactionPropagation67(t *testing.T) { testTransactionPropagation(t, eth.ETH67) }
func TestTransactionPropagation68(t *testing.T) { testTransactionPropagation(t, eth.ETH68) } func TestTransactionPropagation68(t *testing.T) { testTransactionPropagation(t, eth.ETH68) }
@ -428,7 +424,7 @@ func testTransactionPropagation(t *testing.T, protocol uint) {
for i := 0; i < len(sinks); i++ { for i := 0; i < len(sinks); i++ {
txChs[i] = make(chan core.NewTxsEvent, 1024) txChs[i] = make(chan core.NewTxsEvent, 1024)
sub := sinks[i].txpool.SubscribeNewTxsEvent(txChs[i]) sub := sinks[i].txpool.SubscribeTransactions(txChs[i], false)
defer sub.Unsubscribe() defer sub.Unsubscribe()
} }
// Fill the source pool with transactions and wait for them at the sinks // Fill the source pool with transactions and wait for them at the sinks
@ -490,8 +486,8 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) {
defer sourcePipe.Close() defer sourcePipe.Close()
defer sinkPipe.Close() defer sinkPipe.Close()
sourcePeer := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil) sourcePeer := eth.NewPeer(eth.ETH67, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil)
sinkPeer := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil) sinkPeer := eth.NewPeer(eth.ETH67, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil)
defer sourcePeer.Close() defer sourcePeer.Close()
defer sinkPeer.Close() defer sinkPeer.Close()
@ -543,7 +539,6 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) {
// Tests that a propagated malformed block (uncles or transactions don't match // Tests that a propagated malformed block (uncles or transactions don't match
// with the hashes in the header) gets discarded and not broadcast forward. // with the hashes in the header) gets discarded and not broadcast forward.
func TestBroadcastMalformedBlock66(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH66) }
func TestBroadcastMalformedBlock67(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH67) } func TestBroadcastMalformedBlock67(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH67) }
func TestBroadcastMalformedBlock68(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH68) } func TestBroadcastMalformedBlock68(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH68) }

View File

@ -113,15 +113,17 @@ func (p *testTxPool) Pending(enforceTips bool) map[common.Address][]*txpool.Lazy
Time: tx.Time(), Time: tx.Time(),
GasFeeCap: tx.GasFeeCap(), GasFeeCap: tx.GasFeeCap(),
GasTipCap: tx.GasTipCap(), GasTipCap: tx.GasTipCap(),
Gas: tx.Gas(),
BlobGas: tx.BlobGas(),
}) })
} }
} }
return pending return pending
} }
// SubscribeNewTxsEvent should return an event subscription of NewTxsEvent and // SubscribeTransactions should return an event subscription of NewTxsEvent and
// send events to the given channel. // send events to the given channel.
func (p *testTxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { func (p *testTxPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription {
return p.txFeed.Subscribe(ch) return p.txFeed.Subscribe(ch)
} }

View File

@ -18,6 +18,7 @@ package eth
import ( import (
"errors" "errors"
"fmt"
"math/big" "math/big"
"sync" "sync"
@ -74,7 +75,7 @@ func (ps *peerSet) registerSnapExtension(peer *snap.Peer) error {
// Reject the peer if it advertises `snap` without `eth` as `snap` is only a // Reject the peer if it advertises `snap` without `eth` as `snap` is only a
// satellite protocol meaningful with the chain selection of `eth` // satellite protocol meaningful with the chain selection of `eth`
if !peer.RunningCap(eth.ProtocolName, eth.ProtocolVersions) { if !peer.RunningCap(eth.ProtocolName, eth.ProtocolVersions) {
return errSnapWithoutEth return fmt.Errorf("%w: have %v", errSnapWithoutEth, peer.Caps())
} }
// Ensure nobody can double connect // Ensure nobody can double connect
ps.lock.Lock() ps.lock.Lock()

View File

@ -23,7 +23,6 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
@ -45,10 +44,6 @@ const (
// nowadays, the practical limit will always be softResponseLimit. // nowadays, the practical limit will always be softResponseLimit.
maxBodiesServe = 1024 maxBodiesServe = 1024
// maxNodeDataServe is the maximum number of state trie nodes to serve. This
// number is there to limit the number of disk lookups.
maxNodeDataServe = 1024
// maxReceiptsServe is the maximum number of block receipts to serve. This // maxReceiptsServe is the maximum number of block receipts to serve. This
// number is mostly there to limit the number of disk lookups. With block // number is mostly there to limit the number of disk lookups. With block
// containing 200+ transactions nowadays, the practical limit will always // containing 200+ transactions nowadays, the practical limit will always
@ -98,12 +93,12 @@ type TxPool interface {
func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2p.Protocol { func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2p.Protocol {
protocols := make([]p2p.Protocol, 0, len(ProtocolVersions)) protocols := make([]p2p.Protocol, 0, len(ProtocolVersions))
for _, version := range ProtocolVersions { for _, version := range ProtocolVersions {
version := version // Closure // Blob transactions require eth/68 announcements, disable everything else
if version <= ETH67 && backend.Chain().Config().CancunTime != nil {
// Path scheme does not support GetNodeData, don't advertise eth66 on it
if version <= ETH66 && backend.Chain().TrieDB().Scheme() == rawdb.PathScheme {
continue continue
} }
version := version // Closure
protocols = append(protocols, p2p.Protocol{ protocols = append(protocols, p2p.Protocol{
Name: ProtocolName, Name: ProtocolName,
Version: version, Version: version,
@ -171,36 +166,19 @@ type Decoder interface {
Time() time.Time Time() time.Time
} }
var eth66 = map[uint64]msgHandler{
NewBlockHashesMsg: handleNewBlockhashes,
NewBlockMsg: handleNewBlock,
TransactionsMsg: handleTransactions,
NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes66,
GetBlockHeadersMsg: handleGetBlockHeaders66,
BlockHeadersMsg: handleBlockHeaders66,
GetBlockBodiesMsg: handleGetBlockBodies66,
BlockBodiesMsg: handleBlockBodies66,
GetNodeDataMsg: handleGetNodeData66,
NodeDataMsg: handleNodeData66,
GetReceiptsMsg: handleGetReceipts66,
ReceiptsMsg: handleReceipts66,
GetPooledTransactionsMsg: handleGetPooledTransactions66,
PooledTransactionsMsg: handlePooledTransactions66,
}
var eth67 = map[uint64]msgHandler{ var eth67 = map[uint64]msgHandler{
NewBlockHashesMsg: handleNewBlockhashes, NewBlockHashesMsg: handleNewBlockhashes,
NewBlockMsg: handleNewBlock, NewBlockMsg: handleNewBlock,
TransactionsMsg: handleTransactions, TransactionsMsg: handleTransactions,
NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes66, NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes67,
GetBlockHeadersMsg: handleGetBlockHeaders66, GetBlockHeadersMsg: handleGetBlockHeaders,
BlockHeadersMsg: handleBlockHeaders66, BlockHeadersMsg: handleBlockHeaders,
GetBlockBodiesMsg: handleGetBlockBodies66, GetBlockBodiesMsg: handleGetBlockBodies,
BlockBodiesMsg: handleBlockBodies66, BlockBodiesMsg: handleBlockBodies,
GetReceiptsMsg: handleGetReceipts66, GetReceiptsMsg: handleGetReceipts,
ReceiptsMsg: handleReceipts66, ReceiptsMsg: handleReceipts,
GetPooledTransactionsMsg: handleGetPooledTransactions66, GetPooledTransactionsMsg: handleGetPooledTransactions,
PooledTransactionsMsg: handlePooledTransactions66, PooledTransactionsMsg: handlePooledTransactions,
} }
var eth68 = map[uint64]msgHandler{ var eth68 = map[uint64]msgHandler{
@ -208,14 +186,14 @@ var eth68 = map[uint64]msgHandler{
NewBlockMsg: handleNewBlock, NewBlockMsg: handleNewBlock,
TransactionsMsg: handleTransactions, TransactionsMsg: handleTransactions,
NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes68, NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes68,
GetBlockHeadersMsg: handleGetBlockHeaders66, GetBlockHeadersMsg: handleGetBlockHeaders,
BlockHeadersMsg: handleBlockHeaders66, BlockHeadersMsg: handleBlockHeaders,
GetBlockBodiesMsg: handleGetBlockBodies66, GetBlockBodiesMsg: handleGetBlockBodies,
BlockBodiesMsg: handleBlockBodies66, BlockBodiesMsg: handleBlockBodies,
GetReceiptsMsg: handleGetReceipts66, GetReceiptsMsg: handleGetReceipts,
ReceiptsMsg: handleReceipts66, ReceiptsMsg: handleReceipts,
GetPooledTransactionsMsg: handleGetPooledTransactions66, GetPooledTransactionsMsg: handleGetPooledTransactions,
PooledTransactionsMsg: handlePooledTransactions66, PooledTransactionsMsg: handlePooledTransactions,
} }
// handleMessage is invoked whenever an inbound message is received from a remote // handleMessage is invoked whenever an inbound message is received from a remote
@ -231,14 +209,10 @@ func handleMessage(backend Backend, peer *Peer) error {
} }
defer msg.Discard() defer msg.Discard()
var handlers = eth66 var handlers = eth67
if peer.Version() == ETH67 {
handlers = eth67
}
if peer.Version() >= ETH68 { if peer.Version() >= ETH68 {
handlers = eth68 handlers = eth68
} }
// Track the amount of time it takes to serve the request and run the handler // Track the amount of time it takes to serve the request and run the handler
if metrics.Enabled { if metrics.Enabled {
h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code) h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code)

View File

@ -28,7 +28,6 @@ import (
"github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/txpool/legacypool" "github.com/ethereum/go-ethereum/core/txpool/legacypool"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
@ -151,7 +150,6 @@ func (b *testBackend) Handle(*Peer, Packet) error {
} }
// Tests that block headers can be retrieved from a remote chain based on user queries. // Tests that block headers can be retrieved from a remote chain based on user queries.
func TestGetBlockHeaders66(t *testing.T) { testGetBlockHeaders(t, ETH66) }
func TestGetBlockHeaders67(t *testing.T) { testGetBlockHeaders(t, ETH67) } func TestGetBlockHeaders67(t *testing.T) { testGetBlockHeaders(t, ETH67) }
func TestGetBlockHeaders68(t *testing.T) { testGetBlockHeaders(t, ETH68) } func TestGetBlockHeaders68(t *testing.T) { testGetBlockHeaders(t, ETH68) }
@ -178,29 +176,29 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
// Create a batch of tests for various scenarios // Create a batch of tests for various scenarios
limit := uint64(maxHeadersServe) limit := uint64(maxHeadersServe)
tests := []struct { tests := []struct {
query *GetBlockHeadersPacket // The query to execute for header retrieval query *GetBlockHeadersRequest // The query to execute for header retrieval
expect []common.Hash // The hashes of the block whose headers are expected expect []common.Hash // The hashes of the block whose headers are expected
}{ }{
// A single random block should be retrievable by hash // A single random block should be retrievable by hash
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1}, &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1},
[]common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()}, []common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()},
}, },
// A single random block should be retrievable by number // A single random block should be retrievable by number
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 1}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 1},
[]common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()}, []common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()},
}, },
// Multiple headers should be retrievable in both directions // Multiple headers should be retrievable in both directions
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 3}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 3},
[]common.Hash{ []common.Hash{
backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit / 2).Hash(),
backend.chain.GetBlockByNumber(limit/2 + 1).Hash(), backend.chain.GetBlockByNumber(limit/2 + 1).Hash(),
backend.chain.GetBlockByNumber(limit/2 + 2).Hash(), backend.chain.GetBlockByNumber(limit/2 + 2).Hash(),
}, },
}, { }, {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true},
[]common.Hash{ []common.Hash{
backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit / 2).Hash(),
backend.chain.GetBlockByNumber(limit/2 - 1).Hash(), backend.chain.GetBlockByNumber(limit/2 - 1).Hash(),
@ -209,14 +207,14 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
}, },
// Multiple headers with skip lists should be retrievable // Multiple headers with skip lists should be retrievable
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3},
[]common.Hash{ []common.Hash{
backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit / 2).Hash(),
backend.chain.GetBlockByNumber(limit/2 + 4).Hash(), backend.chain.GetBlockByNumber(limit/2 + 4).Hash(),
backend.chain.GetBlockByNumber(limit/2 + 8).Hash(), backend.chain.GetBlockByNumber(limit/2 + 8).Hash(),
}, },
}, { }, {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true},
[]common.Hash{ []common.Hash{
backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit / 2).Hash(),
backend.chain.GetBlockByNumber(limit/2 - 4).Hash(), backend.chain.GetBlockByNumber(limit/2 - 4).Hash(),
@ -225,31 +223,31 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
}, },
// The chain endpoints should be retrievable // The chain endpoints should be retrievable
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: 0}, Amount: 1}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 0}, Amount: 1},
[]common.Hash{backend.chain.GetBlockByNumber(0).Hash()}, []common.Hash{backend.chain.GetBlockByNumber(0).Hash()},
}, },
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 1}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 1},
[]common.Hash{backend.chain.CurrentBlock().Hash()}, []common.Hash{backend.chain.CurrentBlock().Hash()},
}, },
{ // If the peer requests a bit into the future, we deliver what we have { // If the peer requests a bit into the future, we deliver what we have
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 10}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 10},
[]common.Hash{backend.chain.CurrentBlock().Hash()}, []common.Hash{backend.chain.CurrentBlock().Hash()},
}, },
// Ensure protocol limits are honored // Ensure protocol limits are honored
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 1}, Amount: limit + 10, Reverse: true}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 1}, Amount: limit + 10, Reverse: true},
getHashes(backend.chain.CurrentBlock().Number.Uint64(), limit), getHashes(backend.chain.CurrentBlock().Number.Uint64(), limit),
}, },
// Check that requesting more than available is handled gracefully // Check that requesting more than available is handled gracefully
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 3, Amount: 3}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 3, Amount: 3},
[]common.Hash{ []common.Hash{
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(), backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(),
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64()).Hash(), backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64()).Hash(),
}, },
}, { }, {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true},
[]common.Hash{ []common.Hash{
backend.chain.GetBlockByNumber(4).Hash(), backend.chain.GetBlockByNumber(4).Hash(),
backend.chain.GetBlockByNumber(0).Hash(), backend.chain.GetBlockByNumber(0).Hash(),
@ -257,13 +255,13 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
}, },
// Check that requesting more than available is handled gracefully, even if mid skip // Check that requesting more than available is handled gracefully, even if mid skip
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 2, Amount: 3}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 2, Amount: 3},
[]common.Hash{ []common.Hash{
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(), backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(),
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 1).Hash(), backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 1).Hash(),
}, },
}, { }, {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true},
[]common.Hash{ []common.Hash{
backend.chain.GetBlockByNumber(4).Hash(), backend.chain.GetBlockByNumber(4).Hash(),
backend.chain.GetBlockByNumber(1).Hash(), backend.chain.GetBlockByNumber(1).Hash(),
@ -271,7 +269,7 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
}, },
// Check a corner case where requesting more can iterate past the endpoints // Check a corner case where requesting more can iterate past the endpoints
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: 2}, Amount: 5, Reverse: true}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 2}, Amount: 5, Reverse: true},
[]common.Hash{ []common.Hash{
backend.chain.GetBlockByNumber(2).Hash(), backend.chain.GetBlockByNumber(2).Hash(),
backend.chain.GetBlockByNumber(1).Hash(), backend.chain.GetBlockByNumber(1).Hash(),
@ -280,24 +278,24 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
}, },
// Check a corner case where skipping overflow loops back into the chain start // Check a corner case where skipping overflow loops back into the chain start
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1}, &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1},
[]common.Hash{ []common.Hash{
backend.chain.GetBlockByNumber(3).Hash(), backend.chain.GetBlockByNumber(3).Hash(),
}, },
}, },
// Check a corner case where skipping overflow loops back to the same header // Check a corner case where skipping overflow loops back to the same header
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64}, &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64},
[]common.Hash{ []common.Hash{
backend.chain.GetBlockByNumber(1).Hash(), backend.chain.GetBlockByNumber(1).Hash(),
}, },
}, },
// Check that non existing headers aren't returned // Check that non existing headers aren't returned
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Hash: unknown}, Amount: 1}, &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: unknown}, Amount: 1},
[]common.Hash{}, []common.Hash{},
}, { }, {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() + 1}, Amount: 1}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() + 1}, Amount: 1},
[]common.Hash{}, []common.Hash{},
}, },
} }
@ -309,13 +307,13 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
headers = append(headers, backend.chain.GetBlockByHash(hash).Header()) headers = append(headers, backend.chain.GetBlockByHash(hash).Header())
} }
// Send the hash request and verify the response // Send the hash request and verify the response
p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket66{ p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket{
RequestId: 123, RequestId: 123,
GetBlockHeadersPacket: tt.query, GetBlockHeadersRequest: tt.query,
}) })
if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, &BlockHeadersPacket66{ if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, &BlockHeadersPacket{
RequestId: 123, RequestId: 123,
BlockHeadersPacket: headers, BlockHeadersRequest: headers,
}); err != nil { }); err != nil {
t.Errorf("test %d: headers mismatch: %v", i, err) t.Errorf("test %d: headers mismatch: %v", i, err)
} }
@ -324,11 +322,11 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
if origin := backend.chain.GetBlockByNumber(tt.query.Origin.Number); origin != nil { if origin := backend.chain.GetBlockByNumber(tt.query.Origin.Number); origin != nil {
tt.query.Origin.Hash, tt.query.Origin.Number = origin.Hash(), 0 tt.query.Origin.Hash, tt.query.Origin.Number = origin.Hash(), 0
p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket66{ p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket{
RequestId: 456, RequestId: 456,
GetBlockHeadersPacket: tt.query, GetBlockHeadersRequest: tt.query,
}) })
expected := &BlockHeadersPacket66{RequestId: 456, BlockHeadersPacket: headers} expected := &BlockHeadersPacket{RequestId: 456, BlockHeadersRequest: headers}
if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, expected); err != nil { if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, expected); err != nil {
t.Errorf("test %d by hash: headers mismatch: %v", i, err) t.Errorf("test %d by hash: headers mismatch: %v", i, err)
} }
@ -338,7 +336,6 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
} }
// Tests that block contents can be retrieved from a remote chain based on their hashes. // Tests that block contents can be retrieved from a remote chain based on their hashes.
func TestGetBlockBodies66(t *testing.T) { testGetBlockBodies(t, ETH66) }
func TestGetBlockBodies67(t *testing.T) { testGetBlockBodies(t, ETH67) } func TestGetBlockBodies67(t *testing.T) { testGetBlockBodies(t, ETH67) }
func TestGetBlockBodies68(t *testing.T) { testGetBlockBodies(t, ETH68) } func TestGetBlockBodies68(t *testing.T) { testGetBlockBodies(t, ETH68) }
@ -420,139 +417,20 @@ func testGetBlockBodies(t *testing.T, protocol uint) {
} }
// Send the hash request and verify the response // Send the hash request and verify the response
p2p.Send(peer.app, GetBlockBodiesMsg, &GetBlockBodiesPacket66{ p2p.Send(peer.app, GetBlockBodiesMsg, &GetBlockBodiesPacket{
RequestId: 123, RequestId: 123,
GetBlockBodiesPacket: hashes, GetBlockBodiesRequest: hashes,
}) })
if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, &BlockBodiesPacket66{ if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, &BlockBodiesPacket{
RequestId: 123, RequestId: 123,
BlockBodiesPacket: bodies, BlockBodiesResponse: bodies,
}); err != nil { }); err != nil {
t.Fatalf("test %d: bodies mismatch: %v", i, err) t.Fatalf("test %d: bodies mismatch: %v", i, err)
} }
} }
} }
// Tests that the state trie nodes can be retrieved based on hashes.
// The third argument signals whether the protocol version under test is
// expected to drop the connection instead of serving node data: eth/66 still
// answers GetNodeData, later versions are expected to disconnect.
func TestGetNodeData66(t *testing.T) { testGetNodeData(t, ETH66, false) }
func TestGetNodeData67(t *testing.T) { testGetNodeData(t, ETH67, true) }
func TestGetNodeData68(t *testing.T) { testGetNodeData(t, ETH68, true) }
// testGetNodeData exercises the legacy GetNodeData/NodeData message pair: it
// assembles a short chain with a few simple transactions, requests every
// hash-keyed database entry from the peer and, if the protocol still serves
// the message, rebuilds the state from the returned blobs and cross-checks
// account balances against the live chain.
//
// drop signals that the protocol version under test no longer supports
// GetNodeData, in which case the peer is expected to disconnect instead of
// replying.
func testGetNodeData(t *testing.T, protocol uint, drop bool) {
	t.Parallel()
	// Define three accounts to simulate transactions with
	acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
	acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
	acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey)
	acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey)
	signer := types.HomesteadSigner{}
	// Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_makers_test)
	generator := func(i int, block *core.BlockGen) {
		switch i {
		case 0:
			// In block 1, the test bank sends account #1 some ether.
			tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(10_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testKey)
			block.AddTx(tx)
		case 1:
			// In block 2, the test bank sends some more ether to account #1.
			// acc1Addr passes it on to account #2.
			tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testKey)
			tx2, _ := types.SignTx(types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, acc1Key)
			block.AddTx(tx1)
			block.AddTx(tx2)
		case 2:
			// Block 3 is empty but was mined by account #2.
			block.SetCoinbase(acc2Addr)
			block.SetExtra([]byte("yeehaw"))
		case 3:
			// Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data).
			b2 := block.PrevBlock(1).Header()
			b2.Extra = []byte("foo")
			block.AddUncle(b2)
			b3 := block.PrevBlock(2).Header()
			b3.Extra = []byte("foo")
			block.AddUncle(b3)
		}
	}
	// Assemble the test environment
	backend := newTestBackendWithGenerator(4, false, generator)
	defer backend.close()
	peer, _ := newTestPeer("peer", protocol, backend)
	defer peer.close()
	// Collect all state tree hashes. Keys of exactly hash length in the
	// backing database are assumed to be trie nodes or code entries.
	var hashes []common.Hash
	it := backend.db.NewIterator(nil, nil)
	for it.Next() {
		if key := it.Key(); len(key) == common.HashLength {
			hashes = append(hashes, common.BytesToHash(key))
		}
	}
	it.Release()
	// Request all hashes.
	p2p.Send(peer.app, GetNodeDataMsg, &GetNodeDataPacket66{
		RequestId:         123,
		GetNodeDataPacket: hashes,
	})
	msg, err := peer.app.ReadMsg()
	if !drop {
		if err != nil {
			t.Fatalf("failed to read node data response: %v", err)
		}
	} else {
		// A non-supporting protocol is expected to sever the connection
		// rather than answer, so a read error here means success.
		if err != nil {
			return
		}
		t.Fatalf("succeeded to read node data response on non-supporting protocol: %v", msg)
	}
	if msg.Code != NodeDataMsg {
		t.Fatalf("response packet code mismatch: have %x, want %x", msg.Code, NodeDataMsg)
	}
	var res NodeDataPacket66
	if err := msg.Decode(&res); err != nil {
		t.Fatalf("failed to decode response node data: %v", err)
	}
	// Verify that all hashes correspond to the requested data.
	data := res.NodeDataPacket
	for i, want := range hashes {
		if hash := crypto.Keccak256Hash(data[i]); hash != want {
			t.Errorf("data hash mismatch: have %x, want %x", hash, want)
		}
	}
	// Reconstruct state tree from the received data.
	reconstructDB := rawdb.NewMemoryDatabase()
	for i := 0; i < len(data); i++ {
		rawdb.WriteLegacyTrieNode(reconstructDB, hashes[i], data[i])
	}
	// Sanity check whether all state matches.
	accounts := []common.Address{testAddr, acc1Addr, acc2Addr}
	for i := uint64(0); i <= backend.chain.CurrentBlock().Number.Uint64(); i++ {
		root := backend.chain.GetBlockByNumber(i).Root()
		reconstructed, _ := state.New(root, state.NewDatabase(reconstructDB), nil)
		for j, acc := range accounts {
			// Compare the balance seen by the live chain against the one
			// derived from the reconstructed state.
			state, _ := backend.chain.StateAt(root)
			bw := state.GetBalance(acc)
			bh := reconstructed.GetBalance(acc)
			if (bw == nil) != (bh == nil) {
				t.Errorf("block %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw)
			}
			if bw != nil && bh != nil && bw.Cmp(bh) != 0 {
				t.Errorf("block %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw)
			}
		}
	}
}
// Tests that the transaction receipts can be retrieved based on hashes. // Tests that the transaction receipts can be retrieved based on hashes.
func TestGetBlockReceipts66(t *testing.T) { testGetBlockReceipts(t, ETH66) }
func TestGetBlockReceipts67(t *testing.T) { testGetBlockReceipts(t, ETH67) } func TestGetBlockReceipts67(t *testing.T) { testGetBlockReceipts(t, ETH67) }
func TestGetBlockReceipts68(t *testing.T) { testGetBlockReceipts(t, ETH68) } func TestGetBlockReceipts68(t *testing.T) { testGetBlockReceipts(t, ETH68) }
@ -613,13 +491,13 @@ func testGetBlockReceipts(t *testing.T, protocol uint) {
receipts = append(receipts, backend.chain.GetReceiptsByHash(block.Hash())) receipts = append(receipts, backend.chain.GetReceiptsByHash(block.Hash()))
} }
// Send the hash request and verify the response // Send the hash request and verify the response
p2p.Send(peer.app, GetReceiptsMsg, &GetReceiptsPacket66{ p2p.Send(peer.app, GetReceiptsMsg, &GetReceiptsPacket{
RequestId: 123, RequestId: 123,
GetReceiptsPacket: hashes, GetReceiptsRequest: hashes,
}) })
if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, &ReceiptsPacket66{ if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, &ReceiptsPacket{
RequestId: 123, RequestId: 123,
ReceiptsPacket: receipts, ReceiptsResponse: receipts,
}); err != nil { }); err != nil {
t.Errorf("receipts mismatch: %v", err) t.Errorf("receipts mismatch: %v", err)
} }

View File

@ -22,27 +22,25 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
) )
// handleGetBlockHeaders66 is the eth/66 version of handleGetBlockHeaders func handleGetBlockHeaders(backend Backend, msg Decoder, peer *Peer) error {
func handleGetBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error {
// Decode the complex header query // Decode the complex header query
var query GetBlockHeadersPacket66 var query GetBlockHeadersPacket
if err := msg.Decode(&query); err != nil { if err := msg.Decode(&query); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
} }
response := ServiceGetBlockHeadersQuery(backend.Chain(), query.GetBlockHeadersPacket, peer) response := ServiceGetBlockHeadersQuery(backend.Chain(), query.GetBlockHeadersRequest, peer)
return peer.ReplyBlockHeadersRLP(query.RequestId, response) return peer.ReplyBlockHeadersRLP(query.RequestId, response)
} }
// ServiceGetBlockHeadersQuery assembles the response to a header query. It is // ServiceGetBlockHeadersQuery assembles the response to a header query. It is
// exposed to allow external packages to test protocol behavior. // exposed to allow external packages to test protocol behavior.
func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersPacket, peer *Peer) []rlp.RawValue { func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersRequest, peer *Peer) []rlp.RawValue {
if query.Skip == 0 { if query.Skip == 0 {
// The fast path: when the request is for a contiguous segment of headers. // The fast path: when the request is for a contiguous segment of headers.
return serviceContiguousBlockHeaderQuery(chain, query) return serviceContiguousBlockHeaderQuery(chain, query)
@ -51,7 +49,7 @@ func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersP
} }
} }
func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersPacket, peer *Peer) []rlp.RawValue { func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersRequest, peer *Peer) []rlp.RawValue {
hashMode := query.Origin.Hash != (common.Hash{}) hashMode := query.Origin.Hash != (common.Hash{})
first := true first := true
maxNonCanonical := uint64(100) maxNonCanonical := uint64(100)
@ -140,7 +138,7 @@ func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBloc
return headers return headers
} }
func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersPacket) []rlp.RawValue { func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersRequest) []rlp.RawValue {
count := query.Amount count := query.Amount
if count > maxHeadersServe { if count > maxHeadersServe {
count = maxHeadersServe count = maxHeadersServe
@ -203,19 +201,19 @@ func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHe
} }
} }
func handleGetBlockBodies66(backend Backend, msg Decoder, peer *Peer) error { func handleGetBlockBodies(backend Backend, msg Decoder, peer *Peer) error {
// Decode the block body retrieval message // Decode the block body retrieval message
var query GetBlockBodiesPacket66 var query GetBlockBodiesPacket
if err := msg.Decode(&query); err != nil { if err := msg.Decode(&query); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
} }
response := ServiceGetBlockBodiesQuery(backend.Chain(), query.GetBlockBodiesPacket) response := ServiceGetBlockBodiesQuery(backend.Chain(), query.GetBlockBodiesRequest)
return peer.ReplyBlockBodiesRLP(query.RequestId, response) return peer.ReplyBlockBodiesRLP(query.RequestId, response)
} }
// ServiceGetBlockBodiesQuery assembles the response to a body query. It is // ServiceGetBlockBodiesQuery assembles the response to a body query. It is
// exposed to allow external packages to test protocol behavior. // exposed to allow external packages to test protocol behavior.
func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesPacket) []rlp.RawValue { func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesRequest) []rlp.RawValue {
// Gather blocks until the fetch or network limits is reached // Gather blocks until the fetch or network limits is reached
var ( var (
bytes int bytes int
@ -234,60 +232,19 @@ func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesPack
return bodies return bodies
} }
func handleGetNodeData66(backend Backend, msg Decoder, peer *Peer) error { func handleGetReceipts(backend Backend, msg Decoder, peer *Peer) error {
// Decode the trie node data retrieval message
var query GetNodeDataPacket66
if err := msg.Decode(&query); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
}
response := ServiceGetNodeDataQuery(backend.Chain(), query.GetNodeDataPacket)
return peer.ReplyNodeData(query.RequestId, response)
}
// ServiceGetNodeDataQuery assembles the response to a node data query. It is
// exposed to allow external packages to test protocol behavior.
func ServiceGetNodeDataQuery(chain *core.BlockChain, query GetNodeDataPacket) [][]byte {
	// Hash-keyed node retrieval is unavailable when the trie is stored in the
	// path-based scheme, so serve nothing at all in that case.
	if chain.TrieDB().Scheme() == rawdb.PathScheme {
		return nil
	}
	var (
		total   int      // accumulated byte size of the collected entries
		entries [][]byte // node/code blobs gathered for the response
	)
	for idx, hash := range query {
		// Stop once any of the serving limits is hit: payload size, entry
		// count, or the total number of lookups performed.
		if total >= softResponseLimit || len(entries) >= maxNodeDataServe || idx >= 2*maxNodeDataServe {
			break
		}
		// Try the trie database first; if the entry is absent, fall back to
		// the prefixed contract code store to avoid unnecessary lookups.
		blob, err := chain.TrieDB().Node(hash)
		if len(blob) == 0 || err != nil {
			blob, err = chain.ContractCodeWithPrefix(hash)
		}
		if err == nil && len(blob) > 0 {
			entries = append(entries, blob)
			total += len(blob)
		}
	}
	return entries
}
func handleGetReceipts66(backend Backend, msg Decoder, peer *Peer) error {
// Decode the block receipts retrieval message // Decode the block receipts retrieval message
var query GetReceiptsPacket66 var query GetReceiptsPacket
if err := msg.Decode(&query); err != nil { if err := msg.Decode(&query); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
} }
response := ServiceGetReceiptsQuery(backend.Chain(), query.GetReceiptsPacket) response := ServiceGetReceiptsQuery(backend.Chain(), query.GetReceiptsRequest)
return peer.ReplyReceiptsRLP(query.RequestId, response) return peer.ReplyReceiptsRLP(query.RequestId, response)
} }
// ServiceGetReceiptsQuery assembles the response to a receipt query. It is // ServiceGetReceiptsQuery assembles the response to a receipt query. It is
// exposed to allow external packages to test protocol behavior. // exposed to allow external packages to test protocol behavior.
func ServiceGetReceiptsQuery(chain *core.BlockChain, query GetReceiptsPacket) []rlp.RawValue { func ServiceGetReceiptsQuery(chain *core.BlockChain, query GetReceiptsRequest) []rlp.RawValue {
// Gather state data until the fetch or network limits is reached // Gather state data until the fetch or network limits is reached
var ( var (
bytes int bytes int
@ -356,15 +313,15 @@ func handleNewBlock(backend Backend, msg Decoder, peer *Peer) error {
return backend.Handle(peer, ann) return backend.Handle(peer, ann)
} }
func handleBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error { func handleBlockHeaders(backend Backend, msg Decoder, peer *Peer) error {
// A batch of headers arrived to one of our previous requests // A batch of headers arrived to one of our previous requests
res := new(BlockHeadersPacket66) res := new(BlockHeadersPacket)
if err := msg.Decode(res); err != nil { if err := msg.Decode(res); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
} }
metadata := func() interface{} { metadata := func() interface{} {
hashes := make([]common.Hash, len(res.BlockHeadersPacket)) hashes := make([]common.Hash, len(res.BlockHeadersRequest))
for i, header := range res.BlockHeadersPacket { for i, header := range res.BlockHeadersRequest {
hashes[i] = header.Hash() hashes[i] = header.Hash()
} }
return hashes return hashes
@ -372,24 +329,24 @@ func handleBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error {
return peer.dispatchResponse(&Response{ return peer.dispatchResponse(&Response{
id: res.RequestId, id: res.RequestId,
code: BlockHeadersMsg, code: BlockHeadersMsg,
Res: &res.BlockHeadersPacket, Res: &res.BlockHeadersRequest,
}, metadata) }, metadata)
} }
func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error { func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error {
// A batch of block bodies arrived to one of our previous requests // A batch of block bodies arrived to one of our previous requests
res := new(BlockBodiesPacket66) res := new(BlockBodiesPacket)
if err := msg.Decode(res); err != nil { if err := msg.Decode(res); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
} }
metadata := func() interface{} { metadata := func() interface{} {
var ( var (
txsHashes = make([]common.Hash, len(res.BlockBodiesPacket)) txsHashes = make([]common.Hash, len(res.BlockBodiesResponse))
uncleHashes = make([]common.Hash, len(res.BlockBodiesPacket)) uncleHashes = make([]common.Hash, len(res.BlockBodiesResponse))
withdrawalHashes = make([]common.Hash, len(res.BlockBodiesPacket)) withdrawalHashes = make([]common.Hash, len(res.BlockBodiesResponse))
) )
hasher := trie.NewStackTrie(nil) hasher := trie.NewStackTrie(nil)
for i, body := range res.BlockBodiesPacket { for i, body := range res.BlockBodiesResponse {
txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher) txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher)
uncleHashes[i] = types.CalcUncleHash(body.Uncles) uncleHashes[i] = types.CalcUncleHash(body.Uncles)
if body.Withdrawals != nil { if body.Withdrawals != nil {
@ -401,33 +358,20 @@ func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error {
return peer.dispatchResponse(&Response{ return peer.dispatchResponse(&Response{
id: res.RequestId, id: res.RequestId,
code: BlockBodiesMsg, code: BlockBodiesMsg,
Res: &res.BlockBodiesPacket, Res: &res.BlockBodiesResponse,
}, metadata) }, metadata)
} }
func handleNodeData66(backend Backend, msg Decoder, peer *Peer) error { func handleReceipts(backend Backend, msg Decoder, peer *Peer) error {
// A batch of node state data arrived to one of our previous requests
res := new(NodeDataPacket66)
if err := msg.Decode(res); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
}
return peer.dispatchResponse(&Response{
id: res.RequestId,
code: NodeDataMsg,
Res: &res.NodeDataPacket,
}, nil) // No post-processing, we're not using this packet anymore
}
func handleReceipts66(backend Backend, msg Decoder, peer *Peer) error {
// A batch of receipts arrived to one of our previous requests // A batch of receipts arrived to one of our previous requests
res := new(ReceiptsPacket66) res := new(ReceiptsPacket)
if err := msg.Decode(res); err != nil { if err := msg.Decode(res); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
} }
metadata := func() interface{} { metadata := func() interface{} {
hasher := trie.NewStackTrie(nil) hasher := trie.NewStackTrie(nil)
hashes := make([]common.Hash, len(res.ReceiptsPacket)) hashes := make([]common.Hash, len(res.ReceiptsResponse))
for i, receipt := range res.ReceiptsPacket { for i, receipt := range res.ReceiptsResponse {
hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher) hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher)
} }
return hashes return hashes
@ -435,17 +379,17 @@ func handleReceipts66(backend Backend, msg Decoder, peer *Peer) error {
return peer.dispatchResponse(&Response{ return peer.dispatchResponse(&Response{
id: res.RequestId, id: res.RequestId,
code: ReceiptsMsg, code: ReceiptsMsg,
Res: &res.ReceiptsPacket, Res: &res.ReceiptsResponse,
}, metadata) }, metadata)
} }
func handleNewPooledTransactionHashes66(backend Backend, msg Decoder, peer *Peer) error { func handleNewPooledTransactionHashes67(backend Backend, msg Decoder, peer *Peer) error {
// New transaction announcement arrived, make sure we have // New transaction announcement arrived, make sure we have
// a valid and fresh chain to handle them // a valid and fresh chain to handle them
if !backend.AcceptTxs() { if !backend.AcceptTxs() {
return nil return nil
} }
ann := new(NewPooledTransactionHashesPacket66) ann := new(NewPooledTransactionHashesPacket67)
if err := msg.Decode(ann); err != nil { if err := msg.Decode(ann); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
} }
@ -476,17 +420,17 @@ func handleNewPooledTransactionHashes68(backend Backend, msg Decoder, peer *Peer
return backend.Handle(peer, ann) return backend.Handle(peer, ann)
} }
func handleGetPooledTransactions66(backend Backend, msg Decoder, peer *Peer) error { func handleGetPooledTransactions(backend Backend, msg Decoder, peer *Peer) error {
// Decode the pooled transactions retrieval message // Decode the pooled transactions retrieval message
var query GetPooledTransactionsPacket66 var query GetPooledTransactionsPacket
if err := msg.Decode(&query); err != nil { if err := msg.Decode(&query); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
} }
hashes, txs := answerGetPooledTransactions(backend, query.GetPooledTransactionsPacket, peer) hashes, txs := answerGetPooledTransactions(backend, query.GetPooledTransactionsRequest)
return peer.ReplyPooledTransactionsRLP(query.RequestId, hashes, txs) return peer.ReplyPooledTransactionsRLP(query.RequestId, hashes, txs)
} }
func answerGetPooledTransactions(backend Backend, query GetPooledTransactionsPacket, peer *Peer) ([]common.Hash, []rlp.RawValue) { func answerGetPooledTransactions(backend Backend, query GetPooledTransactionsRequest) ([]common.Hash, []rlp.RawValue) {
// Gather transactions until the fetch or network limits is reached // Gather transactions until the fetch or network limits is reached
var ( var (
bytes int bytes int
@ -534,17 +478,17 @@ func handleTransactions(backend Backend, msg Decoder, peer *Peer) error {
return backend.Handle(peer, &txs) return backend.Handle(peer, &txs)
} }
func handlePooledTransactions66(backend Backend, msg Decoder, peer *Peer) error { func handlePooledTransactions(backend Backend, msg Decoder, peer *Peer) error {
// Transactions arrived, make sure we have a valid and fresh chain to handle them // Transactions arrived, make sure we have a valid and fresh chain to handle them
if !backend.AcceptTxs() { if !backend.AcceptTxs() {
return nil return nil
} }
// Transactions can be processed, parse all of them and deliver to the pool // Transactions can be processed, parse all of them and deliver to the pool
var txs PooledTransactionsPacket66 var txs PooledTransactionsPacket
if err := msg.Decode(&txs); err != nil { if err := msg.Decode(&txs); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
} }
for i, tx := range txs.PooledTransactionsPacket { for i, tx := range txs.PooledTransactionsResponse {
// Validate and mark the remote transaction // Validate and mark the remote transaction
if tx == nil { if tx == nil {
return fmt.Errorf("%w: transaction %d is nil", errDecode, i) return fmt.Errorf("%w: transaction %d is nil", errDecode, i)
@ -553,5 +497,5 @@ func handlePooledTransactions66(backend Backend, msg Decoder, peer *Peer) error
} }
requestTracker.Fulfil(peer.id, peer.version, PooledTransactionsMsg, txs.RequestId) requestTracker.Fulfil(peer.id, peer.version, PooledTransactionsMsg, txs.RequestId)
return backend.Handle(peer, &txs.PooledTransactionsPacket) return backend.Handle(peer, &txs.PooledTransactionsResponse)
} }

View File

@ -27,7 +27,8 @@ import (
) )
// Tests that handshake failures are detected and reported correctly. // Tests that handshake failures are detected and reported correctly.
func TestHandshake66(t *testing.T) { testHandshake(t, ETH66) } func TestHandshake67(t *testing.T) { testHandshake(t, ETH67) }
func TestHandshake68(t *testing.T) { testHandshake(t, ETH68) }
func testHandshake(t *testing.T, protocol uint) { func testHandshake(t *testing.T, protocol uint) {
t.Parallel() t.Parallel()

View File

@ -219,7 +219,7 @@ func (p *Peer) AsyncSendTransactions(hashes []common.Hash) {
func (p *Peer) sendPooledTransactionHashes66(hashes []common.Hash) error { func (p *Peer) sendPooledTransactionHashes66(hashes []common.Hash) error {
// Mark all the transactions as known, but ensure we don't overflow our limits // Mark all the transactions as known, but ensure we don't overflow our limits
p.knownTxs.Add(hashes...) p.knownTxs.Add(hashes...)
return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket66(hashes)) return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket67(hashes))
} }
// sendPooledTransactionHashes68 sends transaction hashes (tagged with their type // sendPooledTransactionHashes68 sends transaction hashes (tagged with their type
@ -248,15 +248,15 @@ func (p *Peer) AsyncSendPooledTransactionHashes(hashes []common.Hash) {
} }
} }
// ReplyPooledTransactionsRLP is the eth/66 version of SendPooledTransactionsRLP. // ReplyPooledTransactionsRLP is the response to RequestTxs.
func (p *Peer) ReplyPooledTransactionsRLP(id uint64, hashes []common.Hash, txs []rlp.RawValue) error { func (p *Peer) ReplyPooledTransactionsRLP(id uint64, hashes []common.Hash, txs []rlp.RawValue) error {
// Mark all the transactions as known, but ensure we don't overflow our limits // Mark all the transactions as known, but ensure we don't overflow our limits
p.knownTxs.Add(hashes...) p.knownTxs.Add(hashes...)
// Not packed into PooledTransactionsPacket to avoid RLP decoding // Not packed into PooledTransactionsResponse to avoid RLP decoding
return p2p.Send(p.rw, PooledTransactionsMsg, &PooledTransactionsRLPPacket66{ return p2p.Send(p.rw, PooledTransactionsMsg, &PooledTransactionsRLPPacket{
RequestId: id, RequestId: id,
PooledTransactionsRLPPacket: txs, PooledTransactionsRLPResponse: txs,
}) })
} }
@ -309,36 +309,28 @@ func (p *Peer) AsyncSendNewBlock(block *types.Block, td *big.Int) {
} }
} }
// ReplyBlockHeadersRLP is the eth/66 response to GetBlockHeaders. // ReplyBlockHeadersRLP is the response to GetBlockHeaders.
func (p *Peer) ReplyBlockHeadersRLP(id uint64, headers []rlp.RawValue) error { func (p *Peer) ReplyBlockHeadersRLP(id uint64, headers []rlp.RawValue) error {
return p2p.Send(p.rw, BlockHeadersMsg, &BlockHeadersRLPPacket66{ return p2p.Send(p.rw, BlockHeadersMsg, &BlockHeadersRLPPacket{
RequestId: id, RequestId: id,
BlockHeadersRLPPacket: headers, BlockHeadersRLPResponse: headers,
}) })
} }
// ReplyBlockBodiesRLP is the eth/66 response to GetBlockBodies. // ReplyBlockBodiesRLP is the response to GetBlockBodies.
func (p *Peer) ReplyBlockBodiesRLP(id uint64, bodies []rlp.RawValue) error { func (p *Peer) ReplyBlockBodiesRLP(id uint64, bodies []rlp.RawValue) error {
// Not packed into BlockBodiesPacket to avoid RLP decoding // Not packed into BlockBodiesResponse to avoid RLP decoding
return p2p.Send(p.rw, BlockBodiesMsg, &BlockBodiesRLPPacket66{ return p2p.Send(p.rw, BlockBodiesMsg, &BlockBodiesRLPPacket{
RequestId: id, RequestId: id,
BlockBodiesRLPPacket: bodies, BlockBodiesRLPResponse: bodies,
}) })
} }
// ReplyNodeData is the eth/66 response to GetNodeData. // ReplyReceiptsRLP is the response to GetReceipts.
func (p *Peer) ReplyNodeData(id uint64, data [][]byte) error {
return p2p.Send(p.rw, NodeDataMsg, &NodeDataPacket66{
RequestId: id,
NodeDataPacket: data,
})
}
// ReplyReceiptsRLP is the eth/66 response to GetReceipts.
func (p *Peer) ReplyReceiptsRLP(id uint64, receipts []rlp.RawValue) error { func (p *Peer) ReplyReceiptsRLP(id uint64, receipts []rlp.RawValue) error {
return p2p.Send(p.rw, ReceiptsMsg, &ReceiptsRLPPacket66{ return p2p.Send(p.rw, ReceiptsMsg, &ReceiptsRLPPacket{
RequestId: id, RequestId: id,
ReceiptsRLPPacket: receipts, ReceiptsRLPResponse: receipts,
}) })
} }
@ -353,9 +345,9 @@ func (p *Peer) RequestOneHeader(hash common.Hash, sink chan *Response) (*Request
sink: sink, sink: sink,
code: GetBlockHeadersMsg, code: GetBlockHeadersMsg,
want: BlockHeadersMsg, want: BlockHeadersMsg,
data: &GetBlockHeadersPacket66{ data: &GetBlockHeadersPacket{
RequestId: id, RequestId: id,
GetBlockHeadersPacket: &GetBlockHeadersPacket{ GetBlockHeadersRequest: &GetBlockHeadersRequest{
Origin: HashOrNumber{Hash: hash}, Origin: HashOrNumber{Hash: hash},
Amount: uint64(1), Amount: uint64(1),
Skip: uint64(0), Skip: uint64(0),
@ -380,9 +372,9 @@ func (p *Peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, re
sink: sink, sink: sink,
code: GetBlockHeadersMsg, code: GetBlockHeadersMsg,
want: BlockHeadersMsg, want: BlockHeadersMsg,
data: &GetBlockHeadersPacket66{ data: &GetBlockHeadersPacket{
RequestId: id, RequestId: id,
GetBlockHeadersPacket: &GetBlockHeadersPacket{ GetBlockHeadersRequest: &GetBlockHeadersRequest{
Origin: HashOrNumber{Hash: origin}, Origin: HashOrNumber{Hash: origin},
Amount: uint64(amount), Amount: uint64(amount),
Skip: uint64(skip), Skip: uint64(skip),
@ -407,9 +399,9 @@ func (p *Peer) RequestHeadersByNumber(origin uint64, amount int, skip int, rever
sink: sink, sink: sink,
code: GetBlockHeadersMsg, code: GetBlockHeadersMsg,
want: BlockHeadersMsg, want: BlockHeadersMsg,
data: &GetBlockHeadersPacket66{ data: &GetBlockHeadersPacket{
RequestId: id, RequestId: id,
GetBlockHeadersPacket: &GetBlockHeadersPacket{ GetBlockHeadersRequest: &GetBlockHeadersRequest{
Origin: HashOrNumber{Number: origin}, Origin: HashOrNumber{Number: origin},
Amount: uint64(amount), Amount: uint64(amount),
Skip: uint64(skip), Skip: uint64(skip),
@ -434,31 +426,9 @@ func (p *Peer) RequestBodies(hashes []common.Hash, sink chan *Response) (*Reques
sink: sink, sink: sink,
code: GetBlockBodiesMsg, code: GetBlockBodiesMsg,
want: BlockBodiesMsg, want: BlockBodiesMsg,
data: &GetBlockBodiesPacket66{ data: &GetBlockBodiesPacket{
RequestId: id, RequestId: id,
GetBlockBodiesPacket: hashes, GetBlockBodiesRequest: hashes,
},
}
if err := p.dispatchRequest(req); err != nil {
return nil, err
}
return req, nil
}
// RequestNodeData fetches a batch of arbitrary data from a node's known state
// data, corresponding to the specified hashes.
func (p *Peer) RequestNodeData(hashes []common.Hash, sink chan *Response) (*Request, error) {
p.Log().Debug("Fetching batch of state data", "count", len(hashes))
id := rand.Uint64()
req := &Request{
id: id,
sink: sink,
code: GetNodeDataMsg,
want: NodeDataMsg,
data: &GetNodeDataPacket66{
RequestId: id,
GetNodeDataPacket: hashes,
}, },
} }
if err := p.dispatchRequest(req); err != nil { if err := p.dispatchRequest(req); err != nil {
@ -477,9 +447,9 @@ func (p *Peer) RequestReceipts(hashes []common.Hash, sink chan *Response) (*Requ
sink: sink, sink: sink,
code: GetReceiptsMsg, code: GetReceiptsMsg,
want: ReceiptsMsg, want: ReceiptsMsg,
data: &GetReceiptsPacket66{ data: &GetReceiptsPacket{
RequestId: id, RequestId: id,
GetReceiptsPacket: hashes, GetReceiptsRequest: hashes,
}, },
} }
if err := p.dispatchRequest(req); err != nil { if err := p.dispatchRequest(req); err != nil {
@ -494,9 +464,9 @@ func (p *Peer) RequestTxs(hashes []common.Hash) error {
id := rand.Uint64() id := rand.Uint64()
requestTracker.Track(p.id, p.version, GetPooledTransactionsMsg, PooledTransactionsMsg, id) requestTracker.Track(p.id, p.version, GetPooledTransactionsMsg, PooledTransactionsMsg, id)
return p2p.Send(p.rw, GetPooledTransactionsMsg, &GetPooledTransactionsPacket66{ return p2p.Send(p.rw, GetPooledTransactionsMsg, &GetPooledTransactionsPacket{
RequestId: id, RequestId: id,
GetPooledTransactionsPacket: hashes, GetPooledTransactionsRequest: hashes,
}) })
} }

View File

@ -30,7 +30,6 @@ import (
// Constants to match up protocol versions and messages // Constants to match up protocol versions and messages
const ( const (
ETH66 = 66
ETH67 = 67 ETH67 = 67
ETH68 = 68 ETH68 = 68
) )
@ -41,11 +40,11 @@ const ProtocolName = "eth"
// ProtocolVersions are the supported versions of the `eth` protocol (first // ProtocolVersions are the supported versions of the `eth` protocol (first
// is primary). // is primary).
var ProtocolVersions = []uint{ETH68, ETH67, ETH66} var ProtocolVersions = []uint{ETH68, ETH67}
// protocolLengths are the number of implemented message corresponding to // protocolLengths are the number of implemented message corresponding to
// different protocol versions. // different protocol versions.
var protocolLengths = map[uint]uint64{ETH68: 17, ETH67: 17, ETH66: 17} var protocolLengths = map[uint]uint64{ETH68: 17, ETH67: 17}
// maxMessageSize is the maximum cap on the size of a protocol message. // maxMessageSize is the maximum cap on the size of a protocol message.
const maxMessageSize = 10 * 1024 * 1024 const maxMessageSize = 10 * 1024 * 1024
@ -62,8 +61,6 @@ const (
NewPooledTransactionHashesMsg = 0x08 NewPooledTransactionHashesMsg = 0x08
GetPooledTransactionsMsg = 0x09 GetPooledTransactionsMsg = 0x09
PooledTransactionsMsg = 0x0a PooledTransactionsMsg = 0x0a
GetNodeDataMsg = 0x0d
NodeDataMsg = 0x0e
GetReceiptsMsg = 0x0f GetReceiptsMsg = 0x0f
ReceiptsMsg = 0x10 ReceiptsMsg = 0x10
) )
@ -85,7 +82,7 @@ type Packet interface {
Kind() byte // Kind returns the message type. Kind() byte // Kind returns the message type.
} }
// StatusPacket is the network packet for the status message for eth/64 and later. // StatusPacket is the network packet for the status message.
type StatusPacket struct { type StatusPacket struct {
ProtocolVersion uint32 ProtocolVersion uint32
NetworkID uint64 NetworkID uint64
@ -118,18 +115,18 @@ func (p *NewBlockHashesPacket) Unpack() ([]common.Hash, []uint64) {
// TransactionsPacket is the network packet for broadcasting new transactions. // TransactionsPacket is the network packet for broadcasting new transactions.
type TransactionsPacket []*types.Transaction type TransactionsPacket []*types.Transaction
// GetBlockHeadersPacket represents a block header query. // GetBlockHeadersRequest represents a block header query.
type GetBlockHeadersPacket struct { type GetBlockHeadersRequest struct {
Origin HashOrNumber // Block from which to retrieve headers Origin HashOrNumber // Block from which to retrieve headers
Amount uint64 // Maximum number of headers to retrieve Amount uint64 // Maximum number of headers to retrieve
Skip uint64 // Blocks to skip between consecutive headers Skip uint64 // Blocks to skip between consecutive headers
Reverse bool // Query direction (false = rising towards latest, true = falling towards genesis) Reverse bool // Query direction (false = rising towards latest, true = falling towards genesis)
} }
// GetBlockHeadersPacket66 represents a block header query over eth/66 // GetBlockHeadersPacket represents a block header query with request ID wrapping.
type GetBlockHeadersPacket66 struct { type GetBlockHeadersPacket struct {
RequestId uint64 RequestId uint64
*GetBlockHeadersPacket *GetBlockHeadersRequest
} }
// HashOrNumber is a combined field for specifying an origin block. // HashOrNumber is a combined field for specifying an origin block.
@ -168,23 +165,23 @@ func (hn *HashOrNumber) DecodeRLP(s *rlp.Stream) error {
} }
} }
// BlockHeadersPacket represents a block header response. // BlockHeadersRequest represents a block header response.
type BlockHeadersPacket []*types.Header type BlockHeadersRequest []*types.Header
// BlockHeadersPacket66 represents a block header response over eth/66. // BlockHeadersPacket represents a block header response over with request ID wrapping.
type BlockHeadersPacket66 struct { type BlockHeadersPacket struct {
RequestId uint64 RequestId uint64
BlockHeadersPacket BlockHeadersRequest
} }
// BlockHeadersRLPPacket represents a block header response, to use when we already // BlockHeadersRLPResponse represents a block header response, to use when we already
// have the headers rlp encoded. // have the headers rlp encoded.
type BlockHeadersRLPPacket []rlp.RawValue type BlockHeadersRLPResponse []rlp.RawValue
// BlockHeadersRLPPacket66 represents a block header response over eth/66. // BlockHeadersRLPPacket represents a block header response with request ID wrapping.
type BlockHeadersRLPPacket66 struct { type BlockHeadersRLPPacket struct {
RequestId uint64 RequestId uint64
BlockHeadersRLPPacket BlockHeadersRLPResponse
} }
// NewBlockPacket is the network packet for the block propagation message. // NewBlockPacket is the network packet for the block propagation message.
@ -206,33 +203,34 @@ func (request *NewBlockPacket) sanityCheck() error {
return nil return nil
} }
// GetBlockBodiesPacket represents a block body query. // GetBlockBodiesRequest represents a block body query.
type GetBlockBodiesPacket []common.Hash type GetBlockBodiesRequest []common.Hash
// GetBlockBodiesPacket66 represents a block body query over eth/66. // GetBlockBodiesPacket represents a block body query with request ID wrapping.
type GetBlockBodiesPacket66 struct { type GetBlockBodiesPacket struct {
RequestId uint64 RequestId uint64
GetBlockBodiesPacket GetBlockBodiesRequest
} }
// BlockBodiesPacket is the network packet for block content distribution. // BlockBodiesResponse is the network packet for block content distribution.
type BlockBodiesPacket []*BlockBody type BlockBodiesResponse []*BlockBody
// BlockBodiesPacket66 is the network packet for block content distribution over eth/66. // BlockBodiesPacket is the network packet for block content distribution with
type BlockBodiesPacket66 struct { // request ID wrapping.
type BlockBodiesPacket struct {
RequestId uint64 RequestId uint64
BlockBodiesPacket BlockBodiesResponse
} }
// BlockBodiesRLPPacket is used for replying to block body requests, in cases // BlockBodiesRLPResponse is used for replying to block body requests, in cases
// where we already have them RLP-encoded, and thus can avoid the decode-encode // where we already have them RLP-encoded, and thus can avoid the decode-encode
// roundtrip. // roundtrip.
type BlockBodiesRLPPacket []rlp.RawValue type BlockBodiesRLPResponse []rlp.RawValue
// BlockBodiesRLPPacket66 is the BlockBodiesRLPPacket over eth/66 // BlockBodiesRLPPacket is the BlockBodiesRLPResponse with request ID wrapping.
type BlockBodiesRLPPacket66 struct { type BlockBodiesRLPPacket struct {
RequestId uint64 RequestId uint64
BlockBodiesRLPPacket BlockBodiesRLPResponse
} }
// BlockBody represents the data content of a single block. // BlockBody represents the data content of a single block.
@ -244,7 +242,7 @@ type BlockBody struct {
// Unpack retrieves the transactions and uncles from the range packet and returns // Unpack retrieves the transactions and uncles from the range packet and returns
// them in a split flat format that's more consistent with the internal data structures. // them in a split flat format that's more consistent with the internal data structures.
func (p *BlockBodiesPacket) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal) { func (p *BlockBodiesResponse) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal) {
// TODO(matt): add support for withdrawals to fetchers // TODO(matt): add support for withdrawals to fetchers
var ( var (
txset = make([][]*types.Transaction, len(*p)) txset = make([][]*types.Transaction, len(*p))
@ -257,53 +255,36 @@ func (p *BlockBodiesPacket) Unpack() ([][]*types.Transaction, [][]*types.Header,
return txset, uncleset, withdrawalset return txset, uncleset, withdrawalset
} }
// GetNodeDataPacket represents a trie node data query. // GetReceiptsRequest represents a block receipts query.
type GetNodeDataPacket []common.Hash type GetReceiptsRequest []common.Hash
// GetNodeDataPacket66 represents a trie node data query over eth/66. // GetReceiptsPacket represents a block receipts query with request ID wrapping.
type GetNodeDataPacket66 struct { type GetReceiptsPacket struct {
RequestId uint64 RequestId uint64
GetNodeDataPacket GetReceiptsRequest
} }
// NodeDataPacket is the network packet for trie node data distribution. // ReceiptsResponse is the network packet for block receipts distribution.
type NodeDataPacket [][]byte type ReceiptsResponse [][]*types.Receipt
// NodeDataPacket66 is the network packet for trie node data distribution over eth/66. // ReceiptsPacket is the network packet for block receipts distribution with
type NodeDataPacket66 struct { // request ID wrapping.
type ReceiptsPacket struct {
RequestId uint64 RequestId uint64
NodeDataPacket ReceiptsResponse
} }
// GetReceiptsPacket represents a block receipts query. // ReceiptsRLPResponse is used for receipts, when we already have it encoded
type GetReceiptsPacket []common.Hash type ReceiptsRLPResponse []rlp.RawValue
// GetReceiptsPacket66 represents a block receipts query over eth/66. // ReceiptsRLPPacket is ReceiptsRLPResponse with request ID wrapping.
type GetReceiptsPacket66 struct { type ReceiptsRLPPacket struct {
RequestId uint64 RequestId uint64
GetReceiptsPacket ReceiptsRLPResponse
} }
// ReceiptsPacket is the network packet for block receipts distribution. // NewPooledTransactionHashesPacket67 represents a transaction announcement packet on eth/67.
type ReceiptsPacket [][]*types.Receipt type NewPooledTransactionHashesPacket67 []common.Hash
// ReceiptsPacket66 is the network packet for block receipts distribution over eth/66.
type ReceiptsPacket66 struct {
RequestId uint64
ReceiptsPacket
}
// ReceiptsRLPPacket is used for receipts, when we already have it encoded
type ReceiptsRLPPacket []rlp.RawValue
// ReceiptsRLPPacket66 is the eth-66 version of ReceiptsRLPPacket
type ReceiptsRLPPacket66 struct {
RequestId uint64
ReceiptsRLPPacket
}
// NewPooledTransactionHashesPacket66 represents a transaction announcement packet on eth/66 and eth/67.
type NewPooledTransactionHashesPacket66 []common.Hash
// NewPooledTransactionHashesPacket68 represents a transaction announcement packet on eth/68 and newer. // NewPooledTransactionHashesPacket68 represents a transaction announcement packet on eth/68 and newer.
type NewPooledTransactionHashesPacket68 struct { type NewPooledTransactionHashesPacket68 struct {
@ -312,31 +293,33 @@ type NewPooledTransactionHashesPacket68 struct {
Hashes []common.Hash Hashes []common.Hash
} }
// GetPooledTransactionsPacket represents a transaction query. // GetPooledTransactionsRequest represents a transaction query.
type GetPooledTransactionsPacket []common.Hash type GetPooledTransactionsRequest []common.Hash
type GetPooledTransactionsPacket66 struct { // GetPooledTransactionsPacket represents a transaction query with request ID wrapping.
type GetPooledTransactionsPacket struct {
RequestId uint64 RequestId uint64
GetPooledTransactionsPacket GetPooledTransactionsRequest
} }
// PooledTransactionsPacket is the network packet for transaction distribution. // PooledTransactionsResponse is the network packet for transaction distribution.
type PooledTransactionsPacket []*types.Transaction type PooledTransactionsResponse []*types.Transaction
// PooledTransactionsPacket66 is the network packet for transaction distribution over eth/66. // PooledTransactionsPacket is the network packet for transaction distribution
type PooledTransactionsPacket66 struct { // with request ID wrapping.
type PooledTransactionsPacket struct {
RequestId uint64 RequestId uint64
PooledTransactionsPacket PooledTransactionsResponse
} }
// PooledTransactionsRLPPacket is the network packet for transaction distribution, used // PooledTransactionsRLPResponse is the network packet for transaction distribution, used
// in the cases we already have them in rlp-encoded form // in the cases we already have them in rlp-encoded form
type PooledTransactionsRLPPacket []rlp.RawValue type PooledTransactionsRLPResponse []rlp.RawValue
// PooledTransactionsRLPPacket66 is the eth/66 form of PooledTransactionsRLPPacket // PooledTransactionsRLPPacket is PooledTransactionsRLPResponse with request ID wrapping.
type PooledTransactionsRLPPacket66 struct { type PooledTransactionsRLPPacket struct {
RequestId uint64 RequestId uint64
PooledTransactionsRLPPacket PooledTransactionsRLPResponse
} }
func (*StatusPacket) Name() string { return "Status" } func (*StatusPacket) Name() string { return "Status" }
@ -348,40 +331,34 @@ func (*NewBlockHashesPacket) Kind() byte { return NewBlockHashesMsg }
func (*TransactionsPacket) Name() string { return "Transactions" } func (*TransactionsPacket) Name() string { return "Transactions" }
func (*TransactionsPacket) Kind() byte { return TransactionsMsg } func (*TransactionsPacket) Kind() byte { return TransactionsMsg }
func (*GetBlockHeadersPacket) Name() string { return "GetBlockHeaders" } func (*GetBlockHeadersRequest) Name() string { return "GetBlockHeaders" }
func (*GetBlockHeadersPacket) Kind() byte { return GetBlockHeadersMsg } func (*GetBlockHeadersRequest) Kind() byte { return GetBlockHeadersMsg }
func (*BlockHeadersPacket) Name() string { return "BlockHeaders" } func (*BlockHeadersRequest) Name() string { return "BlockHeaders" }
func (*BlockHeadersPacket) Kind() byte { return BlockHeadersMsg } func (*BlockHeadersRequest) Kind() byte { return BlockHeadersMsg }
func (*GetBlockBodiesPacket) Name() string { return "GetBlockBodies" } func (*GetBlockBodiesRequest) Name() string { return "GetBlockBodies" }
func (*GetBlockBodiesPacket) Kind() byte { return GetBlockBodiesMsg } func (*GetBlockBodiesRequest) Kind() byte { return GetBlockBodiesMsg }
func (*BlockBodiesPacket) Name() string { return "BlockBodies" } func (*BlockBodiesResponse) Name() string { return "BlockBodies" }
func (*BlockBodiesPacket) Kind() byte { return BlockBodiesMsg } func (*BlockBodiesResponse) Kind() byte { return BlockBodiesMsg }
func (*NewBlockPacket) Name() string { return "NewBlock" } func (*NewBlockPacket) Name() string { return "NewBlock" }
func (*NewBlockPacket) Kind() byte { return NewBlockMsg } func (*NewBlockPacket) Kind() byte { return NewBlockMsg }
func (*NewPooledTransactionHashesPacket66) Name() string { return "NewPooledTransactionHashes" } func (*NewPooledTransactionHashesPacket67) Name() string { return "NewPooledTransactionHashes" }
func (*NewPooledTransactionHashesPacket66) Kind() byte { return NewPooledTransactionHashesMsg } func (*NewPooledTransactionHashesPacket67) Kind() byte { return NewPooledTransactionHashesMsg }
func (*NewPooledTransactionHashesPacket68) Name() string { return "NewPooledTransactionHashes" } func (*NewPooledTransactionHashesPacket68) Name() string { return "NewPooledTransactionHashes" }
func (*NewPooledTransactionHashesPacket68) Kind() byte { return NewPooledTransactionHashesMsg } func (*NewPooledTransactionHashesPacket68) Kind() byte { return NewPooledTransactionHashesMsg }
func (*GetPooledTransactionsPacket) Name() string { return "GetPooledTransactions" } func (*GetPooledTransactionsRequest) Name() string { return "GetPooledTransactions" }
func (*GetPooledTransactionsPacket) Kind() byte { return GetPooledTransactionsMsg } func (*GetPooledTransactionsRequest) Kind() byte { return GetPooledTransactionsMsg }
func (*PooledTransactionsPacket) Name() string { return "PooledTransactions" } func (*PooledTransactionsResponse) Name() string { return "PooledTransactions" }
func (*PooledTransactionsPacket) Kind() byte { return PooledTransactionsMsg } func (*PooledTransactionsResponse) Kind() byte { return PooledTransactionsMsg }
func (*GetNodeDataPacket) Name() string { return "GetNodeData" } func (*GetReceiptsRequest) Name() string { return "GetReceipts" }
func (*GetNodeDataPacket) Kind() byte { return GetNodeDataMsg } func (*GetReceiptsRequest) Kind() byte { return GetReceiptsMsg }
func (*NodeDataPacket) Name() string { return "NodeData" } func (*ReceiptsResponse) Name() string { return "Receipts" }
func (*NodeDataPacket) Kind() byte { return NodeDataMsg } func (*ReceiptsResponse) Kind() byte { return ReceiptsMsg }
func (*GetReceiptsPacket) Name() string { return "GetReceipts" }
func (*GetReceiptsPacket) Kind() byte { return GetReceiptsMsg }
func (*ReceiptsPacket) Name() string { return "Receipts" }
func (*ReceiptsPacket) Kind() byte { return ReceiptsMsg }

View File

@ -35,19 +35,19 @@ func TestGetBlockHeadersDataEncodeDecode(t *testing.T) {
} }
// Assemble some table driven tests // Assemble some table driven tests
tests := []struct { tests := []struct {
packet *GetBlockHeadersPacket packet *GetBlockHeadersRequest
fail bool fail bool
}{ }{
// Providing the origin as either a hash or a number should both work // Providing the origin as either a hash or a number should both work
{fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 314}}}, {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 314}}},
{fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash}}}, {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash}}},
// Providing arbitrary query field should also work // Providing arbitrary query field should also work
{fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}}, {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}},
{fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}}, {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}},
// Providing both the origin hash and origin number must fail // Providing both the origin hash and origin number must fail
{fail: true, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash, Number: 314}}}, {fail: true, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash, Number: 314}}},
} }
// Iterate over each of the tests and try to encode and then decode // Iterate over each of the tests and try to encode and then decode
for i, tt := range tests { for i, tt := range tests {
@ -58,7 +58,7 @@ func TestGetBlockHeadersDataEncodeDecode(t *testing.T) {
t.Fatalf("test %d: encode should have failed", i) t.Fatalf("test %d: encode should have failed", i)
} }
if !tt.fail { if !tt.fail {
packet := new(GetBlockHeadersPacket) packet := new(GetBlockHeadersRequest)
if err := rlp.DecodeBytes(bytes, packet); err != nil { if err := rlp.DecodeBytes(bytes, packet); err != nil {
t.Fatalf("test %d: failed to decode packet: %v", i, err) t.Fatalf("test %d: failed to decode packet: %v", i, err)
} }
@ -70,46 +70,40 @@ func TestGetBlockHeadersDataEncodeDecode(t *testing.T) {
} }
} }
// TestEth66EmptyMessages tests encoding of empty eth66 messages // TestEmptyMessages tests encoding of empty messages.
func TestEth66EmptyMessages(t *testing.T) { func TestEmptyMessages(t *testing.T) {
// All empty messages encodes to the same format // All empty messages encodes to the same format
want := common.FromHex("c4820457c0") want := common.FromHex("c4820457c0")
for i, msg := range []interface{}{ for i, msg := range []interface{}{
// Headers // Headers
GetBlockHeadersPacket66{1111, nil}, GetBlockHeadersPacket{1111, nil},
BlockHeadersPacket66{1111, nil}, BlockHeadersPacket{1111, nil},
// Bodies // Bodies
GetBlockBodiesPacket66{1111, nil}, GetBlockBodiesPacket{1111, nil},
BlockBodiesPacket66{1111, nil}, BlockBodiesPacket{1111, nil},
BlockBodiesRLPPacket66{1111, nil}, BlockBodiesRLPPacket{1111, nil},
// Node data
GetNodeDataPacket66{1111, nil},
NodeDataPacket66{1111, nil},
// Receipts // Receipts
GetReceiptsPacket66{1111, nil}, GetReceiptsPacket{1111, nil},
ReceiptsPacket66{1111, nil}, ReceiptsPacket{1111, nil},
// Transactions // Transactions
GetPooledTransactionsPacket66{1111, nil}, GetPooledTransactionsPacket{1111, nil},
PooledTransactionsPacket66{1111, nil}, PooledTransactionsPacket{1111, nil},
PooledTransactionsRLPPacket66{1111, nil}, PooledTransactionsRLPPacket{1111, nil},
// Headers // Headers
BlockHeadersPacket66{1111, BlockHeadersPacket([]*types.Header{})}, BlockHeadersPacket{1111, BlockHeadersRequest([]*types.Header{})},
// Bodies // Bodies
GetBlockBodiesPacket66{1111, GetBlockBodiesPacket([]common.Hash{})}, GetBlockBodiesPacket{1111, GetBlockBodiesRequest([]common.Hash{})},
BlockBodiesPacket66{1111, BlockBodiesPacket([]*BlockBody{})}, BlockBodiesPacket{1111, BlockBodiesResponse([]*BlockBody{})},
BlockBodiesRLPPacket66{1111, BlockBodiesRLPPacket([]rlp.RawValue{})}, BlockBodiesRLPPacket{1111, BlockBodiesRLPResponse([]rlp.RawValue{})},
// Node data
GetNodeDataPacket66{1111, GetNodeDataPacket([]common.Hash{})},
NodeDataPacket66{1111, NodeDataPacket([][]byte{})},
// Receipts // Receipts
GetReceiptsPacket66{1111, GetReceiptsPacket([]common.Hash{})}, GetReceiptsPacket{1111, GetReceiptsRequest([]common.Hash{})},
ReceiptsPacket66{1111, ReceiptsPacket([][]*types.Receipt{})}, ReceiptsPacket{1111, ReceiptsResponse([][]*types.Receipt{})},
// Transactions // Transactions
GetPooledTransactionsPacket66{1111, GetPooledTransactionsPacket([]common.Hash{})}, GetPooledTransactionsPacket{1111, GetPooledTransactionsRequest([]common.Hash{})},
PooledTransactionsPacket66{1111, PooledTransactionsPacket([]*types.Transaction{})}, PooledTransactionsPacket{1111, PooledTransactionsResponse([]*types.Transaction{})},
PooledTransactionsRLPPacket66{1111, PooledTransactionsRLPPacket([]rlp.RawValue{})}, PooledTransactionsRLPPacket{1111, PooledTransactionsRLPResponse([]rlp.RawValue{})},
} { } {
if have, _ := rlp.EncodeToBytes(msg); !bytes.Equal(have, want) { if have, _ := rlp.EncodeToBytes(msg); !bytes.Equal(have, want) {
t.Errorf("test %d, type %T, have\n\t%x\nwant\n\t%x", i, msg, have, want) t.Errorf("test %d, type %T, have\n\t%x\nwant\n\t%x", i, msg, have, want)
@ -117,8 +111,8 @@ func TestEth66EmptyMessages(t *testing.T) {
} }
} }
// TestEth66Messages tests the encoding of all redefined eth66 messages // TestMessages tests the encoding of all messages.
func TestEth66Messages(t *testing.T) { func TestMessages(t *testing.T) {
// Some basic structs used during testing // Some basic structs used during testing
var ( var (
header *types.Header header *types.Header
@ -169,10 +163,6 @@ func TestEth66Messages(t *testing.T) {
common.HexToHash("deadc0de"), common.HexToHash("deadc0de"),
common.HexToHash("feedbeef"), common.HexToHash("feedbeef"),
} }
byteSlices := [][]byte{
common.FromHex("deadc0de"),
common.FromHex("feedbeef"),
}
// init the receipts // init the receipts
{ {
receipts = []*types.Receipt{ receipts = []*types.Receipt{
@ -203,59 +193,51 @@ func TestEth66Messages(t *testing.T) {
want []byte want []byte
}{ }{
{ {
GetBlockHeadersPacket66{1111, &GetBlockHeadersPacket{HashOrNumber{hashes[0], 0}, 5, 5, false}}, GetBlockHeadersPacket{1111, &GetBlockHeadersRequest{HashOrNumber{hashes[0], 0}, 5, 5, false}},
common.FromHex("e8820457e4a000000000000000000000000000000000000000000000000000000000deadc0de050580"), common.FromHex("e8820457e4a000000000000000000000000000000000000000000000000000000000deadc0de050580"),
}, },
{ {
GetBlockHeadersPacket66{1111, &GetBlockHeadersPacket{HashOrNumber{common.Hash{}, 9999}, 5, 5, false}}, GetBlockHeadersPacket{1111, &GetBlockHeadersRequest{HashOrNumber{common.Hash{}, 9999}, 5, 5, false}},
common.FromHex("ca820457c682270f050580"), common.FromHex("ca820457c682270f050580"),
}, },
{ {
BlockHeadersPacket66{1111, BlockHeadersPacket{header}}, BlockHeadersPacket{1111, BlockHeadersRequest{header}},
common.FromHex("f90202820457f901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"), 
common.FromHex("f90202820457f901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"),
}, },
{ {
GetBlockBodiesPacket66{1111, GetBlockBodiesPacket(hashes)}, GetBlockBodiesPacket{1111, GetBlockBodiesRequest(hashes)},
common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"),
}, },
{ {
BlockBodiesPacket66{1111, BlockBodiesPacket([]*BlockBody{blockBody})}, BlockBodiesPacket{1111, BlockBodiesResponse([]*BlockBody{blockBody})},
common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"), 
common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"),
}, },
{ // Identical to non-rlp-shortcut version { // Identical to non-rlp-shortcut version
BlockBodiesRLPPacket66{1111, BlockBodiesRLPPacket([]rlp.RawValue{blockBodyRlp})}, BlockBodiesRLPPacket{1111, BlockBodiesRLPResponse([]rlp.RawValue{blockBodyRlp})},
common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"), 
common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"),
}, },
{ {
GetNodeDataPacket66{1111, GetNodeDataPacket(hashes)}, GetReceiptsPacket{1111, GetReceiptsRequest(hashes)},
common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"),
}, },
{ {
NodeDataPacket66{1111, NodeDataPacket(byteSlices)}, ReceiptsPacket{1111, ReceiptsResponse([][]*types.Receipt{receipts})},
common.FromHex("ce820457ca84deadc0de84feedbeef"),
},
{
GetReceiptsPacket66{1111, GetReceiptsPacket(hashes)},
common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"),
},
{
ReceiptsPacket66{1111, ReceiptsPacket([][]*types.Receipt{receipts})},
common.FromHex("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"), common.FromHex("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"),
}, },
{ {
ReceiptsRLPPacket66{1111, ReceiptsRLPPacket([]rlp.RawValue{receiptsRlp})}, ReceiptsRLPPacket{1111, ReceiptsRLPResponse([]rlp.RawValue{receiptsRlp})},
common.FromHex("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"), common.FromHex("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"),
}, },
{ {
GetPooledTransactionsPacket66{1111, GetPooledTransactionsPacket(hashes)}, GetPooledTransactionsPacket{1111, GetPooledTransactionsRequest(hashes)},
common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"),
}, },
{ {
PooledTransactionsPacket66{1111, PooledTransactionsPacket(txs)}, PooledTransactionsPacket{1111, PooledTransactionsResponse(txs)},
common.FromHex("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"), common.FromHex("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"),
}, },
{ {
PooledTransactionsRLPPacket66{1111, PooledTransactionsRLPPacket(txRlps)}, PooledTransactionsRLPPacket{1111, PooledTransactionsRLPResponse(txRlps)},
common.FromHex("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"), common.FromHex("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"),
}, },
} { } {

View File

@ -24,13 +24,13 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/trienode"
) )
const ( const (
@ -321,7 +321,7 @@ func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePac
it.Release() it.Release()
// Generate the Merkle proofs for the first and last account // Generate the Merkle proofs for the first and last account
proof := light.NewNodeSet() proof := trienode.NewProofSet()
if err := tr.Prove(req.Origin[:], proof); err != nil { if err := tr.Prove(req.Origin[:], proof); err != nil {
log.Warn("Failed to prove account range", "origin", req.Origin, "err", err) log.Warn("Failed to prove account range", "origin", req.Origin, "err", err)
return nil, nil return nil, nil
@ -333,7 +333,7 @@ func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePac
} }
} }
var proofs [][]byte var proofs [][]byte
for _, blob := range proof.NodeList() { for _, blob := range proof.List() {
proofs = append(proofs, blob) proofs = append(proofs, blob)
} }
return accounts, proofs return accounts, proofs
@ -427,7 +427,7 @@ func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesP
if err != nil { if err != nil {
return nil, nil return nil, nil
} }
proof := light.NewNodeSet() proof := trienode.NewProofSet()
if err := stTrie.Prove(origin[:], proof); err != nil { if err := stTrie.Prove(origin[:], proof); err != nil {
log.Warn("Failed to prove storage range", "origin", req.Origin, "err", err) log.Warn("Failed to prove storage range", "origin", req.Origin, "err", err)
return nil, nil return nil, nil
@ -438,7 +438,7 @@ func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesP
return nil, nil return nil, nil
} }
} }
for _, blob := range proof.NodeList() { for _, blob := range proof.List() {
proofs = append(proofs, blob) proofs = append(proofs, blob)
} }
// Proof terminates the reply as proofs are only added if a node // Proof terminates the reply as proofs are only added if a node

View File

@ -37,11 +37,11 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/msgrate" "github.com/ethereum/go-ethereum/p2p/msgrate"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/trienode"
"golang.org/x/crypto/sha3" "golang.org/x/crypto/sha3"
) )
@ -738,8 +738,8 @@ func (s *Syncer) loadSyncStatus() {
s.accountBytes += common.StorageSize(len(key) + len(value)) s.accountBytes += common.StorageSize(len(key) + len(value))
}, },
} }
task.genTrie = trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, val []byte) { task.genTrie = trie.NewStackTrie(func(path []byte, hash common.Hash, val []byte) {
rawdb.WriteTrieNode(task.genBatch, owner, path, hash, val, s.scheme) rawdb.WriteTrieNode(task.genBatch, common.Hash{}, path, hash, val, s.scheme)
}) })
for accountHash, subtasks := range task.SubTasks { for accountHash, subtasks := range task.SubTasks {
for _, subtask := range subtasks { for _, subtask := range subtasks {
@ -751,9 +751,10 @@ func (s *Syncer) loadSyncStatus() {
s.storageBytes += common.StorageSize(len(key) + len(value)) s.storageBytes += common.StorageSize(len(key) + len(value))
}, },
} }
subtask.genTrie = trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) { owner := accountHash // local assignment for stacktrie writer closure
subtask.genTrie = trie.NewStackTrie(func(path []byte, hash common.Hash, val []byte) {
rawdb.WriteTrieNode(subtask.genBatch, owner, path, hash, val, s.scheme) rawdb.WriteTrieNode(subtask.genBatch, owner, path, hash, val, s.scheme)
}, accountHash) })
} }
} }
} }
@ -810,8 +811,8 @@ func (s *Syncer) loadSyncStatus() {
Last: last, Last: last,
SubTasks: make(map[common.Hash][]*storageTask), SubTasks: make(map[common.Hash][]*storageTask),
genBatch: batch, genBatch: batch,
genTrie: trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, val []byte) { genTrie: trie.NewStackTrie(func(path []byte, hash common.Hash, val []byte) {
rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme) rawdb.WriteTrieNode(batch, common.Hash{}, path, hash, val, s.scheme)
}), }),
}) })
log.Debug("Created account sync task", "from", next, "last", last) log.Debug("Created account sync task", "from", next, "last", last)
@ -2004,14 +2005,15 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
s.storageBytes += common.StorageSize(len(key) + len(value)) s.storageBytes += common.StorageSize(len(key) + len(value))
}, },
} }
owner := account // local assignment for stacktrie writer closure
tasks = append(tasks, &storageTask{ tasks = append(tasks, &storageTask{
Next: common.Hash{}, Next: common.Hash{},
Last: r.End(), Last: r.End(),
root: acc.Root, root: acc.Root,
genBatch: batch, genBatch: batch,
genTrie: trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) { genTrie: trie.NewStackTrie(func(path []byte, hash common.Hash, val []byte) {
rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme) rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme)
}, account), }),
}) })
for r.Next() { for r.Next() {
batch := ethdb.HookedBatch{ batch := ethdb.HookedBatch{
@ -2025,9 +2027,9 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
Last: r.End(), Last: r.End(),
root: acc.Root, root: acc.Root,
genBatch: batch, genBatch: batch,
genTrie: trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) { genTrie: trie.NewStackTrie(func(path []byte, hash common.Hash, val []byte) {
rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme) rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme)
}, account), }),
}) })
} }
for _, task := range tasks { for _, task := range tasks {
@ -2072,9 +2074,10 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
slots += len(res.hashes[i]) slots += len(res.hashes[i])
if i < len(res.hashes)-1 || res.subTask == nil { if i < len(res.hashes)-1 || res.subTask == nil {
tr := trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) { // no need to make local reassignment of account: this closure does not outlive the loop
rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme) tr := trie.NewStackTrie(func(path []byte, hash common.Hash, val []byte) {
}, account) rawdb.WriteTrieNode(batch, account, path, hash, val, s.scheme)
})
for j := 0; j < len(res.hashes[i]); j++ { for j := 0; j < len(res.hashes[i]); j++ {
tr.Update(res.hashes[i][j][:], res.slots[i][j]) tr.Update(res.hashes[i][j][:], res.slots[i][j])
} }
@ -2394,11 +2397,11 @@ func (s *Syncer) OnAccounts(peer SyncPeer, id uint64, hashes []common.Hash, acco
for i, key := range hashes { for i, key := range hashes {
keys[i] = common.CopyBytes(key[:]) keys[i] = common.CopyBytes(key[:])
} }
nodes := make(light.NodeList, len(proof)) nodes := make(trienode.ProofList, len(proof))
for i, node := range proof { for i, node := range proof {
nodes[i] = node nodes[i] = node
} }
proofdb := nodes.NodeSet() proofdb := nodes.Set()
var end []byte var end []byte
if len(keys) > 0 { if len(keys) > 0 {
@ -2639,7 +2642,7 @@ func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slo
for j, key := range hashes[i] { for j, key := range hashes[i] {
keys[j] = common.CopyBytes(key[:]) keys[j] = common.CopyBytes(key[:])
} }
nodes := make(light.NodeList, 0, len(proof)) nodes := make(trienode.ProofList, 0, len(proof))
if i == len(hashes)-1 { if i == len(hashes)-1 {
for _, node := range proof { for _, node := range proof {
nodes = append(nodes, node) nodes = append(nodes, node)
@ -2658,7 +2661,7 @@ func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slo
} else { } else {
// A proof was attached, the response is only partial, check that the // A proof was attached, the response is only partial, check that the
// returned data is indeed part of the storage trie // returned data is indeed part of the storage trie
proofdb := nodes.NodeSet() proofdb := nodes.Set()
var end []byte var end []byte
if len(keys) > 0 { if len(keys) > 0 {

View File

@ -31,7 +31,6 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
@ -273,7 +272,7 @@ func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.H
// Unless we send the entire trie, we need to supply proofs // Unless we send the entire trie, we need to supply proofs
// Actually, we need to supply proofs either way! This seems to be an implementation // Actually, we need to supply proofs either way! This seems to be an implementation
// quirk in go-ethereum // quirk in go-ethereum
proof := light.NewNodeSet() proof := trienode.NewProofSet()
if err := t.accountTrie.Prove(origin[:], proof); err != nil { if err := t.accountTrie.Prove(origin[:], proof); err != nil {
t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err) t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err)
} }
@ -283,7 +282,7 @@ func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.H
t.logger.Error("Could not prove last item", "error", err) t.logger.Error("Could not prove last item", "error", err)
} }
} }
for _, blob := range proof.NodeList() { for _, blob := range proof.List() {
proofs = append(proofs, blob) proofs = append(proofs, blob)
} }
return keys, vals, proofs return keys, vals, proofs
@ -353,7 +352,7 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm
if originHash != (common.Hash{}) || (abort && len(keys) > 0) { if originHash != (common.Hash{}) || (abort && len(keys) > 0) {
// If we're aborting, we need to prove the first and last item // If we're aborting, we need to prove the first and last item
// This terminates the response (and thus the loop) // This terminates the response (and thus the loop)
proof := light.NewNodeSet() proof := trienode.NewProofSet()
stTrie := t.storageTries[account] stTrie := t.storageTries[account]
// Here's a potential gotcha: when constructing the proof, we cannot // Here's a potential gotcha: when constructing the proof, we cannot
@ -368,7 +367,7 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm
t.logger.Error("Could not prove last item", "error", err) t.logger.Error("Could not prove last item", "error", err)
} }
} }
for _, blob := range proof.NodeList() { for _, blob := range proof.List() {
proofs = append(proofs, blob) proofs = append(proofs, blob)
} }
break break
@ -411,7 +410,7 @@ func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, acco
if exit { if exit {
// If we're aborting, we need to prove the first and last item // If we're aborting, we need to prove the first and last item
// This terminates the response (and thus the loop) // This terminates the response (and thus the loop)
proof := light.NewNodeSet() proof := trienode.NewProofSet()
stTrie := t.storageTries[account] stTrie := t.storageTries[account]
// Here's a potential gotcha: when constructing the proof, we cannot // Here's a potential gotcha: when constructing the proof, we cannot
@ -427,7 +426,7 @@ func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, acco
t.logger.Error("Could not prove last item", "error", err) t.logger.Error("Could not prove last item", "error", err)
} }
} }
for _, blob := range proof.NodeList() { for _, blob := range proof.List() {
proofs = append(proofs, blob) proofs = append(proofs, blob)
} }
break break
@ -599,9 +598,10 @@ func testSyncBloatedProof(t *testing.T, scheme string) {
vals = append(vals, entry.v) vals = append(vals, entry.v)
} }
// The proofs // The proofs
proof := light.NewNodeSet() proof := trienode.NewProofSet()
if err := t.accountTrie.Prove(origin[:], proof); err != nil { if err := t.accountTrie.Prove(origin[:], proof); err != nil {
t.logger.Error("Could not prove origin", "origin", origin, "error", err) t.logger.Error("Could not prove origin", "origin", origin, "error", err)
t.logger.Error("Could not prove origin", "origin", origin, "error", err)
} }
// The bloat: add proof of every single element // The bloat: add proof of every single element
for _, entry := range t.accountValues { for _, entry := range t.accountValues {
@ -614,7 +614,7 @@ func testSyncBloatedProof(t *testing.T, scheme string) {
keys = append(keys[:1], keys[2:]...) keys = append(keys[:1], keys[2:]...)
vals = append(vals[:1], vals[2:]...) vals = append(vals[:1], vals[2:]...)
} }
for _, blob := range proof.NodeList() { for _, blob := range proof.List() {
proofs = append(proofs, blob) proofs = append(proofs, blob)
} }
if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil { if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {

View File

@ -28,8 +28,8 @@ import (
) )
// Tests that snap sync is disabled after a successful sync cycle. // Tests that snap sync is disabled after a successful sync cycle.
func TestSnapSyncDisabling66(t *testing.T) { testSnapSyncDisabling(t, eth.ETH66, snap.SNAP1) }
func TestSnapSyncDisabling67(t *testing.T) { testSnapSyncDisabling(t, eth.ETH67, snap.SNAP1) } func TestSnapSyncDisabling67(t *testing.T) { testSnapSyncDisabling(t, eth.ETH67, snap.SNAP1) }
func TestSnapSyncDisabling68(t *testing.T) { testSnapSyncDisabling(t, eth.ETH68, snap.SNAP1) }
// Tests that snap sync gets disabled as soon as a real block is successfully // Tests that snap sync gets disabled as soon as a real block is successfully
// imported into the blockchain. // imported into the blockchain.

View File

@ -22,6 +22,7 @@ package leveldb
import ( import (
"fmt" "fmt"
"strings"
"sync" "sync"
"time" "time"
@ -245,6 +246,11 @@ func (db *Database) NewSnapshot() (ethdb.Snapshot, error) {
// Stat returns a particular internal stat of the database. // Stat returns a particular internal stat of the database.
func (db *Database) Stat(property string) (string, error) { func (db *Database) Stat(property string) (string, error) {
if property == "" {
property = "leveldb.stats"
} else if !strings.HasPrefix(property, "leveldb.") {
property = "leveldb." + property
}
return db.db.GetProperty(property) return db.db.GetProperty(property)
} }

Some files were not shown because too many files have changed in this diff Show More