forked from cerc-io/plugeth
cmd/devp2p, eth: drop eth/66 (#28239)
* cmd/devp2p, eth: drop eth/66
* eth/protocols/eth: yes sir, linter
parent 7963c4e808
commit bc6d184872
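In short: eth/66 support is dropped from the devp2p test suite and the downloader, the 66-suffixed wrapper types lose their suffix (for example GetBlockHeadersPacket66 becomes GetBlockHeadersPacket), and the payload types move from a Packet naming scheme to Request/Response (GetBlockHeadersPacket becomes GetBlockHeadersRequest, BlockBodiesPacket becomes BlockBodiesResponse, and so on). Below is a minimal sketch of building a header request against the renamed types; the standalone main wrapper and the go-ethereum module path are assumptions for illustration, while the type and field names are taken from the hunks that follow:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth/protocols/eth"
)

func main() {
	// After this change the request-ID wrapper is eth.GetBlockHeadersPacket
	// (formerly GetBlockHeadersPacket66) and it embeds the payload type
	// eth.GetBlockHeadersRequest (formerly GetBlockHeadersPacket).
	req := &eth.GetBlockHeadersPacket{
		RequestId: 33,
		GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
			Origin: eth.HashOrNumber{Number: 1},
			Amount: 2,
			Skip:   1,
		},
	}
	fmt.Printf("requesting %d headers starting at block %d\n", req.Amount, req.Origin.Number)
}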
@@ -145,7 +145,7 @@ func TestChain_GetHeaders(t *testing.T) {
 }{
 {
 req: GetBlockHeaders{
-GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
 Origin: eth.HashOrNumber{Number: uint64(2)},
 Amount: uint64(5),
 Skip: 1,
@@ -162,7 +162,7 @@ func TestChain_GetHeaders(t *testing.T) {
 },
 {
 req: GetBlockHeaders{
-GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
 Origin: eth.HashOrNumber{Number: uint64(chain.Len() - 1)},
 Amount: uint64(3),
 Skip: 0,
@@ -177,7 +177,7 @@ func TestChain_GetHeaders(t *testing.T) {
 },
 {
 req: GetBlockHeaders{
-GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
 Origin: eth.HashOrNumber{Hash: chain.Head().Hash()},
 Amount: uint64(1),
 Skip: 0,
@@ -62,7 +62,6 @@ func (s *Suite) dial() (*Conn, error) {
 }
 // set default p2p capabilities
 conn.caps = []p2p.Cap{
-{Name: "eth", Version: 66},
 {Name: "eth", Version: 67},
 {Name: "eth", Version: 68},
 }
@@ -238,7 +237,7 @@ func (c *Conn) readAndServe(chain *Chain, timeout time.Duration) Message {
 }
 resp := &BlockHeaders{
 RequestId: msg.ReqID(),
-BlockHeadersPacket: eth.BlockHeadersPacket(headers),
+BlockHeadersRequest: eth.BlockHeadersRequest(headers),
 }
 if err := c.Write(resp); err != nil {
 return errorf("could not write to connection: %v", err)
@@ -267,7 +266,7 @@ func (c *Conn) headersRequest(request *GetBlockHeaders, chain *Chain, reqID uint
 if !ok {
 return nil, fmt.Errorf("unexpected message received: %s", pretty.Sdump(msg))
 }
-headers := []*types.Header(resp.BlockHeadersPacket)
+headers := []*types.Header(resp.BlockHeadersRequest)
 return headers, nil
 }

@@ -379,7 +378,7 @@ func (s *Suite) waitForBlockImport(conn *Conn, block *types.Block) error {
 conn.SetReadDeadline(time.Now().Add(20 * time.Second))
 // create request
 req := &GetBlockHeaders{
-GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
 Origin: eth.HashOrNumber{Hash: block.Hash()},
 Amount: 1,
 },
@@ -605,7 +604,7 @@ func (s *Suite) hashAnnounce() error {
 }
 err = sendConn.Write(&BlockHeaders{
 RequestId: blockHeaderReq.ReqID(),
-BlockHeadersPacket: eth.BlockHeadersPacket{nextBlock.Header()},
+BlockHeadersRequest: eth.BlockHeadersRequest{nextBlock.Header()},
 })
 if err != nil {
 return fmt.Errorf("failed to write to connection: %v", err)
@@ -112,7 +112,7 @@ func (s *Suite) TestGetBlockHeaders(t *utesting.T) {
 }
 // write request
 req := &GetBlockHeaders{
-GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
 Origin: eth.HashOrNumber{Hash: s.chain.blocks[1].Hash()},
 Amount: 2,
 Skip: 1,
@@ -150,7 +150,7 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) {
 // create two requests
 req1 := &GetBlockHeaders{
 RequestId: uint64(111),
-GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
 Origin: eth.HashOrNumber{
 Hash: s.chain.blocks[1].Hash(),
 },
@@ -161,7 +161,7 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) {
 }
 req2 := &GetBlockHeaders{
 RequestId: uint64(222),
-GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
 Origin: eth.HashOrNumber{
 Hash: s.chain.blocks[1].Hash(),
 },
@@ -201,10 +201,10 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) {
 if err != nil {
 t.Fatalf("failed to get expected headers for request 2: %v", err)
 }
-if !headersMatch(expected1, headers1.BlockHeadersPacket) {
+if !headersMatch(expected1, headers1.BlockHeadersRequest) {
 t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1)
 }
-if !headersMatch(expected2, headers2.BlockHeadersPacket) {
+if !headersMatch(expected2, headers2.BlockHeadersRequest) {
 t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2)
 }
 }
@@ -224,7 +224,7 @@ func (s *Suite) TestSameRequestID(t *utesting.T) {
 reqID := uint64(1234)
 request1 := &GetBlockHeaders{
 RequestId: reqID,
-GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
 Origin: eth.HashOrNumber{
 Number: 1,
 },
@@ -233,7 +233,7 @@ func (s *Suite) TestSameRequestID(t *utesting.T) {
 }
 request2 := &GetBlockHeaders{
 RequestId: reqID,
-GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
 Origin: eth.HashOrNumber{
 Number: 33,
 },
@@ -270,10 +270,10 @@ func (s *Suite) TestSameRequestID(t *utesting.T) {
 if err != nil {
 t.Fatalf("failed to get expected block headers: %v", err)
 }
-if !headersMatch(expected1, headers1.BlockHeadersPacket) {
+if !headersMatch(expected1, headers1.BlockHeadersRequest) {
 t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1)
 }
-if !headersMatch(expected2, headers2.BlockHeadersPacket) {
+if !headersMatch(expected2, headers2.BlockHeadersRequest) {
 t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2)
 }
 }
@@ -290,7 +290,7 @@ func (s *Suite) TestZeroRequestID(t *utesting.T) {
 t.Fatalf("peering failed: %v", err)
 }
 req := &GetBlockHeaders{
-GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
 Origin: eth.HashOrNumber{Number: 0},
 Amount: 2,
 },
@@ -322,7 +322,7 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) {
 // create block bodies request
 req := &GetBlockBodies{
 RequestId: uint64(55),
-GetBlockBodiesPacket: eth.GetBlockBodiesPacket{
+GetBlockBodiesRequest: eth.GetBlockBodiesRequest{
 s.chain.blocks[54].Hash(),
 s.chain.blocks[75].Hash(),
 },
@@ -336,11 +336,11 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) {
 if !ok {
 t.Fatalf("unexpected: %s", pretty.Sdump(msg))
 }
-bodies := resp.BlockBodiesPacket
+bodies := resp.BlockBodiesResponse
 t.Logf("received %d block bodies", len(bodies))
-if len(bodies) != len(req.GetBlockBodiesPacket) {
+if len(bodies) != len(req.GetBlockBodiesRequest) {
 t.Fatalf("wrong bodies in response: expected %d bodies, "+
-"got %d", len(req.GetBlockBodiesPacket), len(bodies))
+"got %d", len(req.GetBlockBodiesRequest), len(bodies))
 }
 }

@@ -482,7 +482,7 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) {
 }
 getTxReq := &GetPooledTransactions{
 RequestId: 1234,
-GetPooledTransactionsPacket: hashes,
+GetPooledTransactionsRequest: hashes,
 }
 if err = conn.Write(getTxReq); err != nil {
 t.Fatalf("could not write to conn: %v", err)
@@ -490,7 +490,7 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) {
 // check that all received transactions match those that were sent to node
 switch msg := conn.waitForResponse(s.chain, timeout, getTxReq.RequestId).(type) {
 case *PooledTransactions:
-for _, gotTx := range msg.PooledTransactionsPacket {
+for _, gotTx := range msg.PooledTransactionsResponse {
 if _, exists := hashMap[gotTx.Hash()]; !exists {
 t.Fatalf("unexpected tx received: %v", gotTx.Hash())
 }
@@ -547,8 +547,8 @@ func (s *Suite) TestNewPooledTxs(t *utesting.T) {
 msg := conn.readAndServe(s.chain, timeout)
 switch msg := msg.(type) {
 case *GetPooledTransactions:
-if len(msg.GetPooledTransactionsPacket) != len(hashes) {
+if len(msg.GetPooledTransactionsRequest) != len(hashes) {
-t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsPacket))
+t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsRequest))
 }
 return

@@ -99,24 +99,24 @@ func (msg Transactions) Code() int { return 18 }
 func (msg Transactions) ReqID() uint64 { return 18 }

 // GetBlockHeaders represents a block header query.
-type GetBlockHeaders eth.GetBlockHeadersPacket66
+type GetBlockHeaders eth.GetBlockHeadersPacket

 func (msg GetBlockHeaders) Code() int { return 19 }
 func (msg GetBlockHeaders) ReqID() uint64 { return msg.RequestId }

-type BlockHeaders eth.BlockHeadersPacket66
+type BlockHeaders eth.BlockHeadersPacket

 func (msg BlockHeaders) Code() int { return 20 }
 func (msg BlockHeaders) ReqID() uint64 { return msg.RequestId }

 // GetBlockBodies represents a GetBlockBodies request
-type GetBlockBodies eth.GetBlockBodiesPacket66
+type GetBlockBodies eth.GetBlockBodiesPacket

 func (msg GetBlockBodies) Code() int { return 21 }
 func (msg GetBlockBodies) ReqID() uint64 { return msg.RequestId }

 // BlockBodies is the network packet for block content distribution.
-type BlockBodies eth.BlockBodiesPacket66
+type BlockBodies eth.BlockBodiesPacket

 func (msg BlockBodies) Code() int { return 22 }
 func (msg BlockBodies) ReqID() uint64 { return msg.RequestId }
@@ -128,7 +128,7 @@ func (msg NewBlock) Code() int { return 23 }
 func (msg NewBlock) ReqID() uint64 { return 0 }

 // NewPooledTransactionHashes66 is the network packet for the tx hash propagation message.
-type NewPooledTransactionHashes66 eth.NewPooledTransactionHashesPacket66
+type NewPooledTransactionHashes66 eth.NewPooledTransactionHashesPacket67

 func (msg NewPooledTransactionHashes66) Code() int { return 24 }
 func (msg NewPooledTransactionHashes66) ReqID() uint64 { return 0 }
@@ -139,12 +139,12 @@ type NewPooledTransactionHashes eth.NewPooledTransactionHashesPacket68
 func (msg NewPooledTransactionHashes) Code() int { return 24 }
 func (msg NewPooledTransactionHashes) ReqID() uint64 { return 0 }

-type GetPooledTransactions eth.GetPooledTransactionsPacket66
+type GetPooledTransactions eth.GetPooledTransactionsPacket

 func (msg GetPooledTransactions) Code() int { return 25 }
 func (msg GetPooledTransactions) ReqID() uint64 { return msg.RequestId }

-type PooledTransactions eth.PooledTransactionsPacket66
+type PooledTransactions eth.PooledTransactionsPacket

 func (msg PooledTransactions) Code() int { return 26 }
 func (msg PooledTransactions) ReqID() uint64 { return msg.RequestId }
@@ -180,25 +180,25 @@ func (c *Conn) Read() Message {
 case (Status{}).Code():
 msg = new(Status)
 case (GetBlockHeaders{}).Code():
-ethMsg := new(eth.GetBlockHeadersPacket66)
+ethMsg := new(eth.GetBlockHeadersPacket)
 if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
 return errorf("could not rlp decode message: %v", err)
 }
 return (*GetBlockHeaders)(ethMsg)
 case (BlockHeaders{}).Code():
-ethMsg := new(eth.BlockHeadersPacket66)
+ethMsg := new(eth.BlockHeadersPacket)
 if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
 return errorf("could not rlp decode message: %v", err)
 }
 return (*BlockHeaders)(ethMsg)
 case (GetBlockBodies{}).Code():
-ethMsg := new(eth.GetBlockBodiesPacket66)
+ethMsg := new(eth.GetBlockBodiesPacket)
 if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
 return errorf("could not rlp decode message: %v", err)
 }
 return (*GetBlockBodies)(ethMsg)
 case (BlockBodies{}).Code():
-ethMsg := new(eth.BlockBodiesPacket66)
+ethMsg := new(eth.BlockBodiesPacket)
 if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
 return errorf("could not rlp decode message: %v", err)
 }
@@ -217,13 +217,13 @@ func (c *Conn) Read() Message {
 }
 msg = new(NewPooledTransactionHashes66)
 case (GetPooledTransactions{}.Code()):
-ethMsg := new(eth.GetPooledTransactionsPacket66)
+ethMsg := new(eth.GetPooledTransactionsPacket)
 if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
 return errorf("could not rlp decode message: %v", err)
 }
 return (*GetPooledTransactions)(ethMsg)
 case (PooledTransactions{}.Code()):
-ethMsg := new(eth.PooledTransactionsPacket66)
+ethMsg := new(eth.PooledTransactionsPacket)
 if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
 return errorf("could not rlp decode message: %v", err)
 }
@@ -177,7 +177,7 @@ func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header {
 // function can be used to retrieve batches of headers from the particular peer.
 func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
 // Service the header query via the live handler code
-rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersPacket{
+rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersRequest{
 Origin: eth.HashOrNumber{
 Hash: origin,
 },
@@ -205,7 +205,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount i
 }
 res := &eth.Response{
 Req: req,
-Res: (*eth.BlockHeadersPacket)(&headers),
+Res: (*eth.BlockHeadersRequest)(&headers),
 Meta: hashes,
 Time: 1,
 Done: make(chan error, 1), // Ignore the returned status
@@ -221,7 +221,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount i
 // function can be used to retrieve batches of headers from the particular peer.
 func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
 // Service the header query via the live handler code
-rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersPacket{
+rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersRequest{
 Origin: eth.HashOrNumber{
 Number: origin,
 },
@@ -249,7 +249,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int,
 }
 res := &eth.Response{
 Req: req,
-Res: (*eth.BlockHeadersPacket)(&headers),
+Res: (*eth.BlockHeadersRequest)(&headers),
 Meta: hashes,
 Time: 1,
 Done: make(chan error, 1), // Ignore the returned status
@@ -286,7 +286,7 @@ func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *et
 }
 res := &eth.Response{
 Req: req,
-Res: (*eth.BlockBodiesPacket)(&bodies),
+Res: (*eth.BlockBodiesResponse)(&bodies),
 Meta: [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes},
 Time: 1,
 Done: make(chan error, 1), // Ignore the returned status
@@ -317,7 +317,7 @@ func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash, sink chan *
 }
 res := &eth.Response{
 Req: req,
-Res: (*eth.ReceiptsPacket)(&receipts),
+Res: (*eth.ReceiptsResponse)(&receipts),
 Meta: hashes,
 Time: 1,
 Done: make(chan error, 1), // Ignore the returned status
@@ -437,9 +437,9 @@ func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
 }
 }

-func TestCanonicalSynchronisation66Full(t *testing.T) { testCanonSync(t, eth.ETH66, FullSync) }
+func TestCanonicalSynchronisation68Full(t *testing.T) { testCanonSync(t, eth.ETH68, FullSync) }
-func TestCanonicalSynchronisation66Snap(t *testing.T) { testCanonSync(t, eth.ETH66, SnapSync) }
+func TestCanonicalSynchronisation68Snap(t *testing.T) { testCanonSync(t, eth.ETH68, SnapSync) }
-func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) }
+func TestCanonicalSynchronisation68Light(t *testing.T) { testCanonSync(t, eth.ETH68, LightSync) }
 func TestCanonicalSynchronisation67Full(t *testing.T) { testCanonSync(t, eth.ETH67, FullSync) }
 func TestCanonicalSynchronisation67Snap(t *testing.T) { testCanonSync(t, eth.ETH67, SnapSync) }
 func TestCanonicalSynchronisation67Light(t *testing.T) { testCanonSync(t, eth.ETH67, LightSync) }
@@ -461,8 +461,8 @@ func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {

 // Tests that if a large batch of blocks are being downloaded, it is throttled
 // until the cached blocks are retrieved.
-func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) }
+func TestThrottling68Full(t *testing.T) { testThrottling(t, eth.ETH68, FullSync) }
-func TestThrottling66Snap(t *testing.T) { testThrottling(t, eth.ETH66, SnapSync) }
+func TestThrottling68Snap(t *testing.T) { testThrottling(t, eth.ETH68, SnapSync) }
 func TestThrottling67Full(t *testing.T) { testThrottling(t, eth.ETH67, FullSync) }
 func TestThrottling67Snap(t *testing.T) { testThrottling(t, eth.ETH67, SnapSync) }

@@ -543,9 +543,9 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
 // Tests that simple synchronization against a forked chain works correctly. In
 // this test common ancestor lookup should *not* be short circuited, and a full
 // binary search should be executed.
-func TestForkedSync66Full(t *testing.T) { testForkedSync(t, eth.ETH66, FullSync) }
+func TestForkedSync68Full(t *testing.T) { testForkedSync(t, eth.ETH68, FullSync) }
-func TestForkedSync66Snap(t *testing.T) { testForkedSync(t, eth.ETH66, SnapSync) }
+func TestForkedSync68Snap(t *testing.T) { testForkedSync(t, eth.ETH68, SnapSync) }
-func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) }
+func TestForkedSync68Light(t *testing.T) { testForkedSync(t, eth.ETH68, LightSync) }
 func TestForkedSync67Full(t *testing.T) { testForkedSync(t, eth.ETH67, FullSync) }
 func TestForkedSync67Snap(t *testing.T) { testForkedSync(t, eth.ETH67, SnapSync) }
 func TestForkedSync67Light(t *testing.T) { testForkedSync(t, eth.ETH67, LightSync) }
@@ -573,9 +573,9 @@ func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {

 // Tests that synchronising against a much shorter but much heavier fork works
 // currently and is not dropped.
-func TestHeavyForkedSync66Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, FullSync) }
+func TestHeavyForkedSync68Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, FullSync) }
-func TestHeavyForkedSync66Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, SnapSync) }
+func TestHeavyForkedSync68Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, SnapSync) }
-func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) }
+func TestHeavyForkedSync68Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, LightSync) }
 func TestHeavyForkedSync67Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, FullSync) }
 func TestHeavyForkedSync67Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, SnapSync) }
 func TestHeavyForkedSync67Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, LightSync) }
@@ -605,9 +605,9 @@ func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
 // Tests that chain forks are contained within a certain interval of the current
 // chain head, ensuring that malicious peers cannot waste resources by feeding
 // long dead chains.
-func TestBoundedForkedSync66Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, FullSync) }
+func TestBoundedForkedSync68Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, FullSync) }
-func TestBoundedForkedSync66Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, SnapSync) }
+func TestBoundedForkedSync68Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, SnapSync) }
-func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) }
+func TestBoundedForkedSync68Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, LightSync) }
 func TestBoundedForkedSync67Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, FullSync) }
 func TestBoundedForkedSync67Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, SnapSync) }
 func TestBoundedForkedSync67Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, LightSync) }
@@ -636,14 +636,14 @@ func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
 // Tests that chain forks are contained within a certain interval of the current
 // chain head for short but heavy forks too. These are a bit special because they
 // take different ancestor lookup paths.
-func TestBoundedHeavyForkedSync66Full(t *testing.T) {
+func TestBoundedHeavyForkedSync68Full(t *testing.T) {
-testBoundedHeavyForkedSync(t, eth.ETH66, FullSync)
+testBoundedHeavyForkedSync(t, eth.ETH68, FullSync)
 }
-func TestBoundedHeavyForkedSync66Snap(t *testing.T) {
+func TestBoundedHeavyForkedSync68Snap(t *testing.T) {
-testBoundedHeavyForkedSync(t, eth.ETH66, SnapSync)
+testBoundedHeavyForkedSync(t, eth.ETH68, SnapSync)
 }
-func TestBoundedHeavyForkedSync66Light(t *testing.T) {
+func TestBoundedHeavyForkedSync68Light(t *testing.T) {
-testBoundedHeavyForkedSync(t, eth.ETH66, LightSync)
+testBoundedHeavyForkedSync(t, eth.ETH68, LightSync)
 }
 func TestBoundedHeavyForkedSync67Full(t *testing.T) {
 testBoundedHeavyForkedSync(t, eth.ETH67, FullSync)
@@ -678,9 +678,9 @@ func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
 }

 // Tests that a canceled download wipes all previously accumulated state.
-func TestCancel66Full(t *testing.T) { testCancel(t, eth.ETH66, FullSync) }
+func TestCancel68Full(t *testing.T) { testCancel(t, eth.ETH68, FullSync) }
-func TestCancel66Snap(t *testing.T) { testCancel(t, eth.ETH66, SnapSync) }
+func TestCancel68Snap(t *testing.T) { testCancel(t, eth.ETH68, SnapSync) }
-func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) }
+func TestCancel68Light(t *testing.T) { testCancel(t, eth.ETH68, LightSync) }
 func TestCancel67Full(t *testing.T) { testCancel(t, eth.ETH67, FullSync) }
 func TestCancel67Snap(t *testing.T) { testCancel(t, eth.ETH67, SnapSync) }
 func TestCancel67Light(t *testing.T) { testCancel(t, eth.ETH67, LightSync) }
@@ -708,9 +708,9 @@ func testCancel(t *testing.T, protocol uint, mode SyncMode) {
 }

 // Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
-func TestMultiSynchronisation66Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, FullSync) }
+func TestMultiSynchronisation68Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, FullSync) }
-func TestMultiSynchronisation66Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, SnapSync) }
+func TestMultiSynchronisation68Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, SnapSync) }
-func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) }
+func TestMultiSynchronisation68Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, LightSync) }
 func TestMultiSynchronisation67Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, FullSync) }
 func TestMultiSynchronisation67Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, SnapSync) }
 func TestMultiSynchronisation67Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, LightSync) }
@@ -735,9 +735,9 @@ func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {

 // Tests that synchronisations behave well in multi-version protocol environments
 // and not wreak havoc on other nodes in the network.
-func TestMultiProtoSynchronisation66Full(t *testing.T) { testMultiProtoSync(t, eth.ETH66, FullSync) }
+func TestMultiProtoSynchronisation68Full(t *testing.T) { testMultiProtoSync(t, eth.ETH68, FullSync) }
-func TestMultiProtoSynchronisation66Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH66, SnapSync) }
+func TestMultiProtoSynchronisation68Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH68, SnapSync) }
-func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) }
+func TestMultiProtoSynchronisation68Light(t *testing.T) { testMultiProtoSync(t, eth.ETH68, LightSync) }
 func TestMultiProtoSynchronisation67Full(t *testing.T) { testMultiProtoSync(t, eth.ETH67, FullSync) }
 func TestMultiProtoSynchronisation67Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH67, SnapSync) }
 func TestMultiProtoSynchronisation67Light(t *testing.T) { testMultiProtoSync(t, eth.ETH67, LightSync) }
@@ -750,7 +750,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
 chain := testChainBase.shorten(blockCacheMaxItems - 15)

 // Create peers of every type
-tester.newPeer("peer 66", eth.ETH66, chain.blocks[1:])
+tester.newPeer("peer 68", eth.ETH68, chain.blocks[1:])
 tester.newPeer("peer 67", eth.ETH67, chain.blocks[1:])

 // Synchronise with the requested peer and make sure all blocks were retrieved
@@ -760,7 +760,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
 assertOwnChain(t, tester, len(chain.blocks))

 // Check that no peers have been dropped off
-for _, version := range []int{66, 67} {
+for _, version := range []int{68, 67} {
 peer := fmt.Sprintf("peer %d", version)
 if _, ok := tester.peers[peer]; !ok {
 t.Errorf("%s dropped", peer)
@@ -770,9 +770,9 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {

 // Tests that if a block is empty (e.g. header only), no body request should be
 // made, and instead the header should be assembled into a whole block in itself.
-func TestEmptyShortCircuit66Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, FullSync) }
+func TestEmptyShortCircuit68Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, FullSync) }
-func TestEmptyShortCircuit66Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, SnapSync) }
+func TestEmptyShortCircuit68Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, SnapSync) }
-func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) }
+func TestEmptyShortCircuit68Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, LightSync) }
 func TestEmptyShortCircuit67Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, FullSync) }
 func TestEmptyShortCircuit67Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, SnapSync) }
 func TestEmptyShortCircuit67Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, LightSync) }
@@ -821,9 +821,9 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {

 // Tests that headers are enqueued continuously, preventing malicious nodes from
 // stalling the downloader by feeding gapped header chains.
-func TestMissingHeaderAttack66Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, FullSync) }
+func TestMissingHeaderAttack68Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, FullSync) }
-func TestMissingHeaderAttack66Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, SnapSync) }
+func TestMissingHeaderAttack68Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, SnapSync) }
-func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) }
+func TestMissingHeaderAttack68Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, LightSync) }
 func TestMissingHeaderAttack67Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, FullSync) }
 func TestMissingHeaderAttack67Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, SnapSync) }
 func TestMissingHeaderAttack67Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, LightSync) }
@@ -850,9 +850,9 @@ func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {

 // Tests that if requested headers are shifted (i.e. first is missing), the queue
 // detects the invalid numbering.
-func TestShiftedHeaderAttack66Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, FullSync) }
+func TestShiftedHeaderAttack68Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, FullSync) }
-func TestShiftedHeaderAttack66Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, SnapSync) }
+func TestShiftedHeaderAttack68Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, SnapSync) }
-func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) }
+func TestShiftedHeaderAttack68Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, LightSync) }
 func TestShiftedHeaderAttack67Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, FullSync) }
 func TestShiftedHeaderAttack67Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, SnapSync) }
 func TestShiftedHeaderAttack67Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, LightSync) }
@@ -880,14 +880,14 @@ func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {

 // Tests that a peer advertising a high TD doesn't get to stall the downloader
 // afterwards by not sending any useful hashes.
-func TestHighTDStarvationAttack66Full(t *testing.T) {
+func TestHighTDStarvationAttack68Full(t *testing.T) {
-testHighTDStarvationAttack(t, eth.ETH66, FullSync)
+testHighTDStarvationAttack(t, eth.ETH68, FullSync)
 }
-func TestHighTDStarvationAttack66Snap(t *testing.T) {
+func TestHighTDStarvationAttack68Snap(t *testing.T) {
-testHighTDStarvationAttack(t, eth.ETH66, SnapSync)
+testHighTDStarvationAttack(t, eth.ETH68, SnapSync)
 }
-func TestHighTDStarvationAttack66Light(t *testing.T) {
+func TestHighTDStarvationAttack68Light(t *testing.T) {
-testHighTDStarvationAttack(t, eth.ETH66, LightSync)
+testHighTDStarvationAttack(t, eth.ETH68, LightSync)
 }
 func TestHighTDStarvationAttack67Full(t *testing.T) {
 testHighTDStarvationAttack(t, eth.ETH67, FullSync)
@@ -911,7 +911,7 @@ func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
 }

 // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
-func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) }
+func TestBlockHeaderAttackerDropping68(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH68) }
 func TestBlockHeaderAttackerDropping67(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH67) }

 func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
@@ -960,9 +960,9 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {

 // Tests that synchronisation progress (origin block number, current block number
 // and highest block number) is tracked and updated correctly.
-func TestSyncProgress66Full(t *testing.T) { testSyncProgress(t, eth.ETH66, FullSync) }
+func TestSyncProgress68Full(t *testing.T) { testSyncProgress(t, eth.ETH68, FullSync) }
-func TestSyncProgress66Snap(t *testing.T) { testSyncProgress(t, eth.ETH66, SnapSync) }
+func TestSyncProgress68Snap(t *testing.T) { testSyncProgress(t, eth.ETH68, SnapSync) }
-func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) }
+func TestSyncProgress68Light(t *testing.T) { testSyncProgress(t, eth.ETH68, LightSync) }
 func TestSyncProgress67Full(t *testing.T) { testSyncProgress(t, eth.ETH67, FullSync) }
 func TestSyncProgress67Snap(t *testing.T) { testSyncProgress(t, eth.ETH67, SnapSync) }
 func TestSyncProgress67Light(t *testing.T) { testSyncProgress(t, eth.ETH67, LightSync) }
@@ -1040,9 +1040,9 @@ func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.Sync
 // Tests that synchronisation progress (origin block number and highest block
 // number) is tracked and updated correctly in case of a fork (or manual head
 // revertal).
-func TestForkedSyncProgress66Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, FullSync) }
+func TestForkedSyncProgress68Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, FullSync) }
-func TestForkedSyncProgress66Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, SnapSync) }
+func TestForkedSyncProgress68Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, SnapSync) }
-func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) }
+func TestForkedSyncProgress68Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, LightSync) }
 func TestForkedSyncProgress67Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, FullSync) }
 func TestForkedSyncProgress67Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, SnapSync) }
 func TestForkedSyncProgress67Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, LightSync) }
@@ -1114,9 +1114,9 @@ func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
 // Tests that if synchronisation is aborted due to some failure, then the progress
 // origin is not updated in the next sync cycle, as it should be considered the
 // continuation of the previous sync and not a new instance.
-func TestFailedSyncProgress66Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, FullSync) }
+func TestFailedSyncProgress68Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, FullSync) }
-func TestFailedSyncProgress66Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, SnapSync) }
+func TestFailedSyncProgress68Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, SnapSync) }
-func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) }
+func TestFailedSyncProgress68Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, LightSync) }
 func TestFailedSyncProgress67Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, FullSync) }
 func TestFailedSyncProgress67Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, SnapSync) }
 func TestFailedSyncProgress67Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, LightSync) }
@@ -1183,9 +1183,9 @@ func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {

 // Tests that if an attacker fakes a chain height, after the attack is detected,
 // the progress height is successfully reduced at the next sync invocation.
-func TestFakedSyncProgress66Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, FullSync) }
+func TestFakedSyncProgress68Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, FullSync) }
-func TestFakedSyncProgress66Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, SnapSync) }
+func TestFakedSyncProgress68Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, SnapSync) }
-func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) }
+func TestFakedSyncProgress68Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, LightSync) }
 func TestFakedSyncProgress67Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, FullSync) }
 func TestFakedSyncProgress67Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, SnapSync) }
 func TestFakedSyncProgress67Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, LightSync) }
@@ -1330,8 +1330,10 @@ func TestRemoteHeaderRequestSpan(t *testing.T) {

 // Tests that peers below a pre-configured checkpoint block are prevented from
 // being fast-synced from, avoiding potential cheap eclipse attacks.
-func TestBeaconSync66Full(t *testing.T) { testBeaconSync(t, eth.ETH66, FullSync) }
+func TestBeaconSync68Full(t *testing.T) { testBeaconSync(t, eth.ETH68, FullSync) }
-func TestBeaconSync66Snap(t *testing.T) { testBeaconSync(t, eth.ETH66, SnapSync) }
+func TestBeaconSync68Snap(t *testing.T) { testBeaconSync(t, eth.ETH68, SnapSync) }
+func TestBeaconSync67Full(t *testing.T) { testBeaconSync(t, eth.ETH67, FullSync) }
+func TestBeaconSync67Snap(t *testing.T) { testBeaconSync(t, eth.ETH67, SnapSync) }

 func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) {
 //log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
@ -58,14 +58,14 @@ func (d *Downloader) fetchHeadersByHash(p *peerConnection, hash common.Hash, amo
|
|||||||
case res := <-resCh:
|
case res := <-resCh:
|
||||||
// Headers successfully retrieved, update the metrics
|
// Headers successfully retrieved, update the metrics
|
||||||
headerReqTimer.Update(time.Since(start))
|
headerReqTimer.Update(time.Since(start))
|
||||||
headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersPacket))))
|
headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersRequest))))
|
||||||
|
|
||||||
// Don't reject the packet even if it turns out to be bad, downloader will
|
// Don't reject the packet even if it turns out to be bad, downloader will
|
||||||
// disconnect the peer on its own terms. Simply delivery the headers to
|
// disconnect the peer on its own terms. Simply delivery the headers to
|
||||||
// be processed by the caller
|
// be processed by the caller
|
||||||
res.Done <- nil
|
res.Done <- nil
|
||||||
|
|
||||||
return *res.Res.(*eth.BlockHeadersPacket), res.Meta.([]common.Hash), nil
|
return *res.Res.(*eth.BlockHeadersRequest), res.Meta.([]common.Hash), nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -103,13 +103,13 @@ func (d *Downloader) fetchHeadersByNumber(p *peerConnection, number uint64, amou
|
|||||||
case res := <-resCh:
|
case res := <-resCh:
|
||||||
// Headers successfully retrieved, update the metrics
|
// Headers successfully retrieved, update the metrics
|
||||||
headerReqTimer.Update(time.Since(start))
|
headerReqTimer.Update(time.Since(start))
|
||||||
headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersPacket))))
|
headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersRequest))))
|
||||||
|
|
||||||
// Don't reject the packet even if it turns out to be bad, downloader will
|
// Don't reject the packet even if it turns out to be bad, downloader will
|
||||||
// disconnect the peer on its own terms. Simply delivery the headers to
|
// disconnect the peer on its own terms. Simply delivery the headers to
|
||||||
// be processed by the caller
|
// be processed by the caller
|
||||||
res.Done <- nil
|
res.Done <- nil
|
||||||
|
|
||||||
return *res.Res.(*eth.BlockHeadersPacket), res.Meta.([]common.Hash), nil
|
return *res.Res.(*eth.BlockHeadersRequest), res.Meta.([]common.Hash), nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -89,7 +89,7 @@ func (q *bodyQueue) request(peer *peerConnection, req *fetchRequest, resCh chan
 // deliver is responsible for taking a generic response packet from the concurrent
 // fetcher, unpacking the body data and delivering it to the downloader's queue.
 func (q *bodyQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) {
-txs, uncles, withdrawals := packet.Res.(*eth.BlockBodiesPacket).Unpack()
+txs, uncles, withdrawals := packet.Res.(*eth.BlockBodiesResponse).Unpack()
 hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes, withdrawal hashes}

 accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], uncles, hashsets[1], withdrawals, hashsets[2])

@ -81,7 +81,7 @@ func (q *headerQueue) request(peer *peerConnection, req *fetchRequest, resCh cha
 // deliver is responsible for taking a generic response packet from the concurrent
 // fetcher, unpacking the header data and delivering it to the downloader's queue.
 func (q *headerQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) {
-headers := *packet.Res.(*eth.BlockHeadersPacket)
+headers := *packet.Res.(*eth.BlockHeadersRequest)
 hashes := packet.Meta.([]common.Hash)

 accepted, err := q.queue.DeliverHeaders(peer.id, headers, hashes, q.headerProcCh)

@ -88,7 +88,7 @@ func (q *receiptQueue) request(peer *peerConnection, req *fetchRequest, resCh ch
 // deliver is responsible for taking a generic response packet from the concurrent
 // fetcher, unpacking the receipt data and delivering it to the downloader's queue.
 func (q *receiptQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) {
-receipts := *packet.Res.(*eth.ReceiptsPacket)
+receipts := *packet.Res.(*eth.ReceiptsResponse)
 hashes := packet.Meta.([]common.Hash) // {receipt hashes}

 accepted, err := q.queue.DeliverReceipts(peer.id, receipts, hashes)

@ -794,7 +794,7 @@ func (s *skeleton) executeTask(peer *peerConnection, req *headerRequest) {

 case res := <-resCh:
 // Headers successfully retrieved, update the metrics
-headers := *res.Res.(*eth.BlockHeadersPacket)
+headers := *res.Res.(*eth.BlockHeadersRequest)

 headerReqTimer.Update(time.Since(start))
 s.peers.rates.Update(peer.id, eth.BlockHeadersMsg, res.Time, len(headers))

@ -173,7 +173,7 @@ func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, ski
 }
 res := &eth.Response{
 Req: req,
-Res: (*eth.BlockHeadersPacket)(&headers),
+Res: (*eth.BlockHeadersRequest)(&headers),
 Meta: hashes,
 Time: 1,
 Done: make(chan error),

@ -811,7 +811,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) {
 // Create a peer set to feed headers through
 peerset := newPeerSet()
 for _, peer := range tt.peers {
-peerset.Register(newPeerConnection(peer.id, eth.ETH66, peer, log.New("id", peer.id)))
+peerset.Register(newPeerConnection(peer.id, eth.ETH67, peer, log.New("id", peer.id)))
 }
 // Create a peer dropper to track malicious peers
 dropped := make(map[string]int)

@ -913,7 +913,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) {
 skeleton.Sync(tt.newHead, nil, true)
 }
 if tt.newPeer != nil {
-if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH66, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil {
+if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH67, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil {
 t.Errorf("test %d: failed to register new peer: %v", i, err)
 }
 }

@ -483,7 +483,7 @@ func (f *BlockFetcher) loop() {
 select {
 case res := <-resCh:
 res.Done <- nil
-f.FilterHeaders(peer, *res.Res.(*eth.BlockHeadersPacket), time.Now().Add(res.Time))
+f.FilterHeaders(peer, *res.Res.(*eth.BlockHeadersRequest), time.Now().Add(res.Time))

 case <-timeout.C:
 // The peer didn't respond in time. The request

@ -541,7 +541,7 @@ func (f *BlockFetcher) loop() {
 case res := <-resCh:
 res.Done <- nil
 // Ignoring withdrawals here, since the block fetcher is not used post-merge.
-txs, uncles, _ := res.Res.(*eth.BlockBodiesPacket).Unpack()
+txs, uncles, _ := res.Res.(*eth.BlockBodiesResponse).Unpack()
 f.FilterBodies(peer, txs, uncles, time.Now())

 case <-timeout.C:
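For orientation (again, not from the commit itself): the body paths above call Unpack on the renamed *eth.BlockBodiesResponse exactly as they did on the old *eth.BlockBodiesPacket. A small sketch under that assumption, with a hand-built response:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
)

func main() {
	// One empty body, just to exercise the call; real responses come off the wire.
	bodies := eth.BlockBodiesResponse{
		{Transactions: []*types.Transaction{}, Uncles: []*types.Header{}},
	}
	// Unpack splits the bodies into per-block transaction, uncle and
	// withdrawal slices, mirroring the fetcher code in this diff.
	txs, uncles, withdrawals := bodies.Unpack()
	fmt.Println(len(txs), len(uncles), len(withdrawals))
}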
@ -213,7 +213,7 @@ func (f *fetcherTester) makeHeaderFetcher(peer string, blocks map[common.Hash]*t
 }
 res := &eth.Response{
 Req: req,
-Res: (*eth.BlockHeadersPacket)(&headers),
+Res: (*eth.BlockHeadersRequest)(&headers),
 Time: drift,
 Done: make(chan error, 1), // Ignore the returned status
 }

@ -255,7 +255,7 @@ func (f *fetcherTester) makeBodyFetcher(peer string, blocks map[common.Hash]*typ
 }
 res := &eth.Response{
 Req: req,
-Res: (*eth.BlockBodiesPacket)(&bodies),
+Res: (*eth.BlockBodiesResponse)(&bodies),
 Time: drift,
 Done: make(chan error, 1), // Ignore the returned status
 }

@ -414,7 +414,7 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {

 select {
 case res := <-resCh:
-headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersPacket))
+headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersRequest))
 if len(headers) == 0 {
 // Required blocks are allowed to be missing if the remote
 // node is not yet synced

@ -66,7 +66,7 @@ func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
 case *eth.NewBlockPacket:
 return h.handleBlockBroadcast(peer, packet.Block, packet.TD)

-case *eth.NewPooledTransactionHashesPacket66:
+case *eth.NewPooledTransactionHashesPacket67:
 return h.txFetcher.Notify(peer.ID(), *packet)

 case *eth.NewPooledTransactionHashesPacket68:

@ -75,7 +75,7 @@ func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
 case *eth.TransactionsPacket:
 return h.txFetcher.Enqueue(peer.ID(), *packet, false)

-case *eth.PooledTransactionsPacket:
+case *eth.PooledTransactionsResponse:
 return h.txFetcher.Enqueue(peer.ID(), *packet, true)

 default:
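The two Handle hunks above (and the test handler below) only touch the case labels of a type switch over eth.Packet. A compact illustration of the post-change shape, offered as a sketch rather than the real handler:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth/protocols/eth"
)

// describe is an illustrative stand-in for a Handle implementation, showing
// only the renamed cases from this diff; it is not the handler itself.
func describe(packet eth.Packet) string {
	switch packet.(type) {
	case *eth.NewPooledTransactionHashesPacket67:
		return "eth/67 announcement: transaction hashes only"
	case *eth.NewPooledTransactionHashesPacket68:
		return "eth/68 announcement: types, sizes and hashes"
	case *eth.PooledTransactionsResponse:
		return "reply to a GetPooledTransactions request"
	default:
		return "some other packet: " + packet.Name()
	}
}

func main() {
	fmt.Println(describe(new(eth.PooledTransactionsResponse)))
}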
@ -58,7 +58,7 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
 h.blockBroadcasts.Send(packet.Block)
 return nil

-case *eth.NewPooledTransactionHashesPacket66:
+case *eth.NewPooledTransactionHashesPacket67:
 h.txAnnounces.Send(([]common.Hash)(*packet))
 return nil

@ -70,7 +70,7 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
 h.txBroadcasts.Send(([]*types.Transaction)(*packet))
 return nil

-case *eth.PooledTransactionsPacket:
+case *eth.PooledTransactionsResponse:
 h.txBroadcasts.Send(([]*types.Transaction)(*packet))
 return nil

@ -81,7 +81,6 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error {

 // Tests that peers are correctly accepted (or rejected) based on the advertised
 // fork IDs in the protocol handshake.
-func TestForkIDSplit66(t *testing.T) { testForkIDSplit(t, eth.ETH66) }
 func TestForkIDSplit67(t *testing.T) { testForkIDSplit(t, eth.ETH67) }
 func TestForkIDSplit68(t *testing.T) { testForkIDSplit(t, eth.ETH68) }

@ -237,7 +236,6 @@ func testForkIDSplit(t *testing.T, protocol uint) {
 }

 // Tests that received transactions are added to the local pool.
-func TestRecvTransactions66(t *testing.T) { testRecvTransactions(t, eth.ETH66) }
 func TestRecvTransactions67(t *testing.T) { testRecvTransactions(t, eth.ETH67) }
 func TestRecvTransactions68(t *testing.T) { testRecvTransactions(t, eth.ETH68) }

@ -296,7 +294,6 @@ func testRecvTransactions(t *testing.T, protocol uint) {
 }

 // This test checks that pending transactions are sent.
-func TestSendTransactions66(t *testing.T) { testSendTransactions(t, eth.ETH66) }
 func TestSendTransactions67(t *testing.T) { testSendTransactions(t, eth.ETH67) }
 func TestSendTransactions68(t *testing.T) { testSendTransactions(t, eth.ETH68) }

@ -356,7 +353,7 @@ func testSendTransactions(t *testing.T, protocol uint) {
 seen := make(map[common.Hash]struct{})
 for len(seen) < len(insert) {
 switch protocol {
-case 66, 67, 68:
+case 67, 68:
 select {
 case hashes := <-anns:
 for _, hash := range hashes {

@ -382,7 +379,6 @@ func testSendTransactions(t *testing.T, protocol uint) {

 // Tests that transactions get propagated to all attached peers, either via direct
 // broadcasts or via announcements/retrievals.
-func TestTransactionPropagation66(t *testing.T) { testTransactionPropagation(t, eth.ETH66) }
 func TestTransactionPropagation67(t *testing.T) { testTransactionPropagation(t, eth.ETH67) }
 func TestTransactionPropagation68(t *testing.T) { testTransactionPropagation(t, eth.ETH68) }

@ -490,8 +486,8 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) {
 defer sourcePipe.Close()
 defer sinkPipe.Close()

-sourcePeer := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil)
+sourcePeer := eth.NewPeer(eth.ETH67, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil)
-sinkPeer := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil)
+sinkPeer := eth.NewPeer(eth.ETH67, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil)
 defer sourcePeer.Close()
 defer sinkPeer.Close()

@ -543,7 +539,6 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) {

 // Tests that a propagated malformed block (uncles or transactions don't match
 // with the hashes in the header) gets discarded and not broadcast forward.
-func TestBroadcastMalformedBlock66(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH66) }
 func TestBroadcastMalformedBlock67(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH67) }
 func TestBroadcastMalformedBlock68(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH68) }

@ -23,7 +23,6 @@ import (

 "github.com/ethereum/go-ethereum/common"
 "github.com/ethereum/go-ethereum/core"
-"github.com/ethereum/go-ethereum/core/rawdb"
 "github.com/ethereum/go-ethereum/core/types"
 "github.com/ethereum/go-ethereum/metrics"
 "github.com/ethereum/go-ethereum/p2p"

@ -45,10 +44,6 @@ const (
 // nowadays, the practical limit will always be softResponseLimit.
 maxBodiesServe = 1024

-// maxNodeDataServe is the maximum number of state trie nodes to serve. This
-// number is there to limit the number of disk lookups.
-maxNodeDataServe = 1024
-
 // maxReceiptsServe is the maximum number of block receipts to serve. This
 // number is mostly there to limit the number of disk lookups. With block
 // containing 200+ transactions nowadays, the practical limit will always

@ -100,10 +95,6 @@ func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2
 for _, version := range ProtocolVersions {
 version := version // Closure

-// Path scheme does not support GetNodeData, don't advertise eth66 on it
-if version <= ETH66 && backend.Chain().TrieDB().Scheme() == rawdb.PathScheme {
-continue
-}
 protocols = append(protocols, p2p.Protocol{
 Name: ProtocolName,
 Version: version,
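With GetNodeData gone there is no longer a reason to special-case the path scheme in MakeProtocols: every version in ProtocolVersions is advertised unconditionally. A trivial check of what gets advertised, assuming the exported ProtocolName and ProtocolVersions variables of the eth package:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth/protocols/eth"
)

func main() {
	// Expected to list only eth/68 and eth/67 after this change; the exact
	// contents depend on the checked-out revision.
	fmt.Println(eth.ProtocolName, eth.ProtocolVersions)
}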
@ -171,36 +162,19 @@ type Decoder interface {
 Time() time.Time
 }

-var eth66 = map[uint64]msgHandler{
-NewBlockHashesMsg: handleNewBlockhashes,
-NewBlockMsg: handleNewBlock,
-TransactionsMsg: handleTransactions,
-NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes66,
-GetBlockHeadersMsg: handleGetBlockHeaders66,
-BlockHeadersMsg: handleBlockHeaders66,
-GetBlockBodiesMsg: handleGetBlockBodies66,
-BlockBodiesMsg: handleBlockBodies66,
-GetNodeDataMsg: handleGetNodeData66,
-NodeDataMsg: handleNodeData66,
-GetReceiptsMsg: handleGetReceipts66,
-ReceiptsMsg: handleReceipts66,
-GetPooledTransactionsMsg: handleGetPooledTransactions66,
-PooledTransactionsMsg: handlePooledTransactions66,
-}
-
 var eth67 = map[uint64]msgHandler{
 NewBlockHashesMsg: handleNewBlockhashes,
 NewBlockMsg: handleNewBlock,
 TransactionsMsg: handleTransactions,
-NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes66,
+NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes67,
-GetBlockHeadersMsg: handleGetBlockHeaders66,
+GetBlockHeadersMsg: handleGetBlockHeaders,
-BlockHeadersMsg: handleBlockHeaders66,
+BlockHeadersMsg: handleBlockHeaders,
-GetBlockBodiesMsg: handleGetBlockBodies66,
+GetBlockBodiesMsg: handleGetBlockBodies,
-BlockBodiesMsg: handleBlockBodies66,
+BlockBodiesMsg: handleBlockBodies,
-GetReceiptsMsg: handleGetReceipts66,
+GetReceiptsMsg: handleGetReceipts,
-ReceiptsMsg: handleReceipts66,
+ReceiptsMsg: handleReceipts,
-GetPooledTransactionsMsg: handleGetPooledTransactions66,
+GetPooledTransactionsMsg: handleGetPooledTransactions,
-PooledTransactionsMsg: handlePooledTransactions66,
+PooledTransactionsMsg: handlePooledTransactions,
 }

 var eth68 = map[uint64]msgHandler{

@ -208,14 +182,14 @@ var eth68 = map[uint64]msgHandler{
 NewBlockMsg: handleNewBlock,
 TransactionsMsg: handleTransactions,
 NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes68,
-GetBlockHeadersMsg: handleGetBlockHeaders66,
+GetBlockHeadersMsg: handleGetBlockHeaders,
-BlockHeadersMsg: handleBlockHeaders66,
+BlockHeadersMsg: handleBlockHeaders,
-GetBlockBodiesMsg: handleGetBlockBodies66,
+GetBlockBodiesMsg: handleGetBlockBodies,
-BlockBodiesMsg: handleBlockBodies66,
+BlockBodiesMsg: handleBlockBodies,
-GetReceiptsMsg: handleGetReceipts66,
+GetReceiptsMsg: handleGetReceipts,
-ReceiptsMsg: handleReceipts66,
+ReceiptsMsg: handleReceipts,
-GetPooledTransactionsMsg: handleGetPooledTransactions66,
+GetPooledTransactionsMsg: handleGetPooledTransactions,
-PooledTransactionsMsg: handlePooledTransactions66,
+PooledTransactionsMsg: handlePooledTransactions,
 }

 // handleMessage is invoked whenever an inbound message is received from a remote

@ -231,14 +205,10 @@ func handleMessage(backend Backend, peer *Peer) error {
 }
 defer msg.Discard()

-var handlers = eth66
+var handlers = eth67
-if peer.Version() == ETH67 {
-handlers = eth67
-}
 if peer.Version() >= ETH68 {
 handlers = eth68
 }

 // Track the amount of time it takes to serve the request and run the handler
 if metrics.Enabled {
 h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code)
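handleMessage now starts from the eth/67 table and only switches to eth/68 for newer peers; the eth/66 table and its extra branch are gone. A cut-down, self-contained model of that dispatch follows; all names and the message code here are illustrative, not the package's unexported tables.

package main

import "fmt"

// handlerFn and the two tables model the package-private msgHandler maps;
// 0x08 stands in for NewPooledTransactionHashesMsg. Illustrative only.
type handlerFn func() string

var (
	eth67Table = map[uint64]handlerFn{
		0x08: func() string { return "handleNewPooledTransactionHashes67" },
	}
	eth68Table = map[uint64]handlerFn{
		0x08: func() string { return "handleNewPooledTransactionHashes68" },
	}
)

// dispatch mirrors the post-change selection in handleMessage: eth/67 is the
// default (and lowest supported) version, eth/68 overrides it.
func dispatch(version, code uint64) string {
	handlers := eth67Table
	if version >= 68 {
		handlers = eth68Table
	}
	if h, ok := handlers[code]; ok {
		return h()
	}
	return "invalid message code"
}

func main() {
	fmt.Println(dispatch(67, 0x08))
	fmt.Println(dispatch(68, 0x08))
}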
@ -28,7 +28,6 @@ import (
 "github.com/ethereum/go-ethereum/consensus/ethash"
 "github.com/ethereum/go-ethereum/core"
 "github.com/ethereum/go-ethereum/core/rawdb"
-"github.com/ethereum/go-ethereum/core/state"
 "github.com/ethereum/go-ethereum/core/txpool"
 "github.com/ethereum/go-ethereum/core/txpool/legacypool"
 "github.com/ethereum/go-ethereum/core/types"

@ -151,7 +150,6 @@ func (b *testBackend) Handle(*Peer, Packet) error {
 }

 // Tests that block headers can be retrieved from a remote chain based on user queries.
-func TestGetBlockHeaders66(t *testing.T) { testGetBlockHeaders(t, ETH66) }
 func TestGetBlockHeaders67(t *testing.T) { testGetBlockHeaders(t, ETH67) }
 func TestGetBlockHeaders68(t *testing.T) { testGetBlockHeaders(t, ETH68) }

@ -178,29 +176,29 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
 // Create a batch of tests for various scenarios
 limit := uint64(maxHeadersServe)
 tests := []struct {
-query *GetBlockHeadersPacket // The query to execute for header retrieval
+query *GetBlockHeadersRequest // The query to execute for header retrieval
 expect []common.Hash // The hashes of the block whose headers are expected
 }{
 // A single random block should be retrievable by hash
 {
-&GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1},
+&GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1},
 []common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()},
 },
 // A single random block should be retrievable by number
 {
-&GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 1},
+&GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 1},
 []common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()},
 },
 // Multiple headers should be retrievable in both directions
 {
-&GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 3},
+&GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 3},
 []common.Hash{
 backend.chain.GetBlockByNumber(limit / 2).Hash(),
 backend.chain.GetBlockByNumber(limit/2 + 1).Hash(),
 backend.chain.GetBlockByNumber(limit/2 + 2).Hash(),
 },
 }, {
-&GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true},
+&GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true},
 []common.Hash{
 backend.chain.GetBlockByNumber(limit / 2).Hash(),
 backend.chain.GetBlockByNumber(limit/2 - 1).Hash(),
@ -209,14 +207,14 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
 },
 // Multiple headers with skip lists should be retrievable
 {
-&GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3},
+&GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3},
 []common.Hash{
 backend.chain.GetBlockByNumber(limit / 2).Hash(),
 backend.chain.GetBlockByNumber(limit/2 + 4).Hash(),
 backend.chain.GetBlockByNumber(limit/2 + 8).Hash(),
 },
 }, {
-&GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true},
+&GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true},
 []common.Hash{
 backend.chain.GetBlockByNumber(limit / 2).Hash(),
 backend.chain.GetBlockByNumber(limit/2 - 4).Hash(),

@ -225,31 +223,31 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
 },
 // The chain endpoints should be retrievable
 {
-&GetBlockHeadersPacket{Origin: HashOrNumber{Number: 0}, Amount: 1},
+&GetBlockHeadersRequest{Origin: HashOrNumber{Number: 0}, Amount: 1},
 []common.Hash{backend.chain.GetBlockByNumber(0).Hash()},
 },
 {
-&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 1},
+&GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 1},
 []common.Hash{backend.chain.CurrentBlock().Hash()},
 },
 { // If the peer requests a bit into the future, we deliver what we have
-&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 10},
+&GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 10},
 []common.Hash{backend.chain.CurrentBlock().Hash()},
 },
 // Ensure protocol limits are honored
 {
-&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 1}, Amount: limit + 10, Reverse: true},
+&GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 1}, Amount: limit + 10, Reverse: true},
 getHashes(backend.chain.CurrentBlock().Number.Uint64(), limit),
 },
 // Check that requesting more than available is handled gracefully
 {
-&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 3, Amount: 3},
+&GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 3, Amount: 3},
 []common.Hash{
 backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(),
 backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64()).Hash(),
 },
 }, {
-&GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true},
+&GetBlockHeadersRequest{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true},
 []common.Hash{
 backend.chain.GetBlockByNumber(4).Hash(),
 backend.chain.GetBlockByNumber(0).Hash(),
|
|||||||
},
|
},
|
||||||
// Check that requesting more than available is handled gracefully, even if mid skip
|
// Check that requesting more than available is handled gracefully, even if mid skip
|
||||||
{
|
{
|
||||||
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 2, Amount: 3},
|
&GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 2, Amount: 3},
|
||||||
[]common.Hash{
|
[]common.Hash{
|
||||||
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(),
|
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(),
|
||||||
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 1).Hash(),
|
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 1).Hash(),
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true},
|
&GetBlockHeadersRequest{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true},
|
||||||
[]common.Hash{
|
[]common.Hash{
|
||||||
backend.chain.GetBlockByNumber(4).Hash(),
|
backend.chain.GetBlockByNumber(4).Hash(),
|
||||||
backend.chain.GetBlockByNumber(1).Hash(),
|
backend.chain.GetBlockByNumber(1).Hash(),
|
||||||
@ -271,7 +269,7 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
|
|||||||
},
|
},
|
||||||
// Check a corner case where requesting more can iterate past the endpoints
|
// Check a corner case where requesting more can iterate past the endpoints
|
||||||
{
|
{
|
||||||
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: 2}, Amount: 5, Reverse: true},
|
&GetBlockHeadersRequest{Origin: HashOrNumber{Number: 2}, Amount: 5, Reverse: true},
|
||||||
[]common.Hash{
|
[]common.Hash{
|
||||||
backend.chain.GetBlockByNumber(2).Hash(),
|
backend.chain.GetBlockByNumber(2).Hash(),
|
||||||
backend.chain.GetBlockByNumber(1).Hash(),
|
backend.chain.GetBlockByNumber(1).Hash(),
|
||||||
@ -280,24 +278,24 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
|
|||||||
},
|
},
|
||||||
// Check a corner case where skipping overflow loops back into the chain start
|
// Check a corner case where skipping overflow loops back into the chain start
|
||||||
{
|
{
|
||||||
&GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1},
|
&GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1},
|
||||||
[]common.Hash{
|
[]common.Hash{
|
||||||
backend.chain.GetBlockByNumber(3).Hash(),
|
backend.chain.GetBlockByNumber(3).Hash(),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
// Check a corner case where skipping overflow loops back to the same header
|
// Check a corner case where skipping overflow loops back to the same header
|
||||||
{
|
{
|
||||||
&GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64},
|
&GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64},
|
||||||
[]common.Hash{
|
[]common.Hash{
|
||||||
backend.chain.GetBlockByNumber(1).Hash(),
|
backend.chain.GetBlockByNumber(1).Hash(),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
// Check that non existing headers aren't returned
|
// Check that non existing headers aren't returned
|
||||||
{
|
{
|
||||||
&GetBlockHeadersPacket{Origin: HashOrNumber{Hash: unknown}, Amount: 1},
|
&GetBlockHeadersRequest{Origin: HashOrNumber{Hash: unknown}, Amount: 1},
|
||||||
[]common.Hash{},
|
[]common.Hash{},
|
||||||
}, {
|
}, {
|
||||||
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() + 1}, Amount: 1},
|
&GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() + 1}, Amount: 1},
|
||||||
[]common.Hash{},
|
[]common.Hash{},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@ -309,13 +307,13 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
 headers = append(headers, backend.chain.GetBlockByHash(hash).Header())
 }
 // Send the hash request and verify the response
-p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
+p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket{
 RequestId: 123,
-GetBlockHeadersPacket: tt.query,
+GetBlockHeadersRequest: tt.query,
 })
-if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, &BlockHeadersPacket66{
+if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, &BlockHeadersPacket{
 RequestId: 123,
-BlockHeadersPacket: headers,
+BlockHeadersRequest: headers,
 }); err != nil {
 t.Errorf("test %d: headers mismatch: %v", i, err)
 }

@ -324,11 +322,11 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
 if origin := backend.chain.GetBlockByNumber(tt.query.Origin.Number); origin != nil {
 tt.query.Origin.Hash, tt.query.Origin.Number = origin.Hash(), 0

-p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
+p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket{
 RequestId: 456,
-GetBlockHeadersPacket: tt.query,
+GetBlockHeadersRequest: tt.query,
 })
-expected := &BlockHeadersPacket66{RequestId: 456, BlockHeadersPacket: headers}
+expected := &BlockHeadersPacket{RequestId: 456, BlockHeadersRequest: headers}
 if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, expected); err != nil {
 t.Errorf("test %d by hash: headers mismatch: %v", i, err)
 }

@ -338,7 +336,6 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
 }

 // Tests that block contents can be retrieved from a remote chain based on their hashes.
-func TestGetBlockBodies66(t *testing.T) { testGetBlockBodies(t, ETH66) }
 func TestGetBlockBodies67(t *testing.T) { testGetBlockBodies(t, ETH67) }
 func TestGetBlockBodies68(t *testing.T) { testGetBlockBodies(t, ETH68) }
@ -420,139 +417,20 @@ func testGetBlockBodies(t *testing.T, protocol uint) {
 }

 // Send the hash request and verify the response
-p2p.Send(peer.app, GetBlockBodiesMsg, &GetBlockBodiesPacket66{
+p2p.Send(peer.app, GetBlockBodiesMsg, &GetBlockBodiesPacket{
 RequestId: 123,
-GetBlockBodiesPacket: hashes,
+GetBlockBodiesRequest: hashes,
 })
-if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, &BlockBodiesPacket66{
+if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, &BlockBodiesPacket{
 RequestId: 123,
-BlockBodiesPacket: bodies,
+BlockBodiesResponse: bodies,
 }); err != nil {
 t.Fatalf("test %d: bodies mismatch: %v", i, err)
 }
 }
 }

-// Tests that the state trie nodes can be retrieved based on hashes.
-func TestGetNodeData66(t *testing.T) { testGetNodeData(t, ETH66, false) }
-func TestGetNodeData67(t *testing.T) { testGetNodeData(t, ETH67, true) }
-func TestGetNodeData68(t *testing.T) { testGetNodeData(t, ETH68, true) }
-
-func testGetNodeData(t *testing.T, protocol uint, drop bool) {
-t.Parallel()
-
-// Define three accounts to simulate transactions with
-acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
-acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
-acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey)
-acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey)
-
-signer := types.HomesteadSigner{}
-// Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_makers_test)
-generator := func(i int, block *core.BlockGen) {
-switch i {
-case 0:
-// In block 1, the test bank sends account #1 some ether.
-tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(10_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testKey)
-block.AddTx(tx)
-case 1:
-// In block 2, the test bank sends some more ether to account #1.
-// acc1Addr passes it on to account #2.
-tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testKey)
-tx2, _ := types.SignTx(types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, acc1Key)
-block.AddTx(tx1)
-block.AddTx(tx2)
-case 2:
-// Block 3 is empty but was mined by account #2.
-block.SetCoinbase(acc2Addr)
-block.SetExtra([]byte("yeehaw"))
-case 3:
-// Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data).
-b2 := block.PrevBlock(1).Header()
-b2.Extra = []byte("foo")
-block.AddUncle(b2)
-b3 := block.PrevBlock(2).Header()
-b3.Extra = []byte("foo")
-block.AddUncle(b3)
-}
-}
-// Assemble the test environment
-backend := newTestBackendWithGenerator(4, false, generator)
-defer backend.close()
-
-peer, _ := newTestPeer("peer", protocol, backend)
-defer peer.close()
-
-// Collect all state tree hashes.
-var hashes []common.Hash
-it := backend.db.NewIterator(nil, nil)
-for it.Next() {
-if key := it.Key(); len(key) == common.HashLength {
-hashes = append(hashes, common.BytesToHash(key))
-}
-}
-it.Release()
-
-// Request all hashes.
-p2p.Send(peer.app, GetNodeDataMsg, &GetNodeDataPacket66{
-RequestId: 123,
-GetNodeDataPacket: hashes,
-})
-msg, err := peer.app.ReadMsg()
-if !drop {
-if err != nil {
-t.Fatalf("failed to read node data response: %v", err)
-}
-} else {
-if err != nil {
-return
-}
-t.Fatalf("succeeded to read node data response on non-supporting protocol: %v", msg)
-}
-if msg.Code != NodeDataMsg {
-t.Fatalf("response packet code mismatch: have %x, want %x", msg.Code, NodeDataMsg)
-}
-var res NodeDataPacket66
-if err := msg.Decode(&res); err != nil {
-t.Fatalf("failed to decode response node data: %v", err)
-}
-
-// Verify that all hashes correspond to the requested data.
-data := res.NodeDataPacket
-for i, want := range hashes {
-if hash := crypto.Keccak256Hash(data[i]); hash != want {
-t.Errorf("data hash mismatch: have %x, want %x", hash, want)
-}
-}
-
-// Reconstruct state tree from the received data.
-reconstructDB := rawdb.NewMemoryDatabase()
-for i := 0; i < len(data); i++ {
-rawdb.WriteLegacyTrieNode(reconstructDB, hashes[i], data[i])
-}
-
-// Sanity check whether all state matches.
-accounts := []common.Address{testAddr, acc1Addr, acc2Addr}
-for i := uint64(0); i <= backend.chain.CurrentBlock().Number.Uint64(); i++ {
-root := backend.chain.GetBlockByNumber(i).Root()
-reconstructed, _ := state.New(root, state.NewDatabase(reconstructDB), nil)
-for j, acc := range accounts {
-state, _ := backend.chain.StateAt(root)
-bw := state.GetBalance(acc)
-bh := reconstructed.GetBalance(acc)
-
-if (bw == nil) != (bh == nil) {
-t.Errorf("block %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw)
-}
-if bw != nil && bh != nil && bw.Cmp(bh) != 0 {
-t.Errorf("block %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw)
-}
-}
-}
-}
-
 // Tests that the transaction receipts can be retrieved based on hashes.
-func TestGetBlockReceipts66(t *testing.T) { testGetBlockReceipts(t, ETH66) }
 func TestGetBlockReceipts67(t *testing.T) { testGetBlockReceipts(t, ETH67) }
 func TestGetBlockReceipts68(t *testing.T) { testGetBlockReceipts(t, ETH68) }
@ -613,13 +491,13 @@ func testGetBlockReceipts(t *testing.T, protocol uint) {
 receipts = append(receipts, backend.chain.GetReceiptsByHash(block.Hash()))
 }
 // Send the hash request and verify the response
-p2p.Send(peer.app, GetReceiptsMsg, &GetReceiptsPacket66{
+p2p.Send(peer.app, GetReceiptsMsg, &GetReceiptsPacket{
 RequestId: 123,
-GetReceiptsPacket: hashes,
+GetReceiptsRequest: hashes,
 })
-if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, &ReceiptsPacket66{
+if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, &ReceiptsPacket{
 RequestId: 123,
-ReceiptsPacket: receipts,
+ReceiptsResponse: receipts,
 }); err != nil {
 t.Errorf("receipts mismatch: %v", err)
 }

@ -22,27 +22,25 @@ import (

 "github.com/ethereum/go-ethereum/common"
 "github.com/ethereum/go-ethereum/core"
-"github.com/ethereum/go-ethereum/core/rawdb"
 "github.com/ethereum/go-ethereum/core/types"
 "github.com/ethereum/go-ethereum/log"
 "github.com/ethereum/go-ethereum/rlp"
 "github.com/ethereum/go-ethereum/trie"
 )

-// handleGetBlockHeaders66 is the eth/66 version of handleGetBlockHeaders
-func handleGetBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error {
+func handleGetBlockHeaders(backend Backend, msg Decoder, peer *Peer) error {
 // Decode the complex header query
-var query GetBlockHeadersPacket66
+var query GetBlockHeadersPacket
 if err := msg.Decode(&query); err != nil {
 return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
 }
-response := ServiceGetBlockHeadersQuery(backend.Chain(), query.GetBlockHeadersPacket, peer)
+response := ServiceGetBlockHeadersQuery(backend.Chain(), query.GetBlockHeadersRequest, peer)
 return peer.ReplyBlockHeadersRLP(query.RequestId, response)
 }

 // ServiceGetBlockHeadersQuery assembles the response to a header query. It is
 // exposed to allow external packages to test protocol behavior.
-func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersPacket, peer *Peer) []rlp.RawValue {
+func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersRequest, peer *Peer) []rlp.RawValue {
 if query.Skip == 0 {
 // The fast path: when the request is for a contiguous segment of headers.
 return serviceContiguousBlockHeaderQuery(chain, query)
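handleGetBlockHeaders begins by RLP-decoding the wire payload into the renamed wrapper before handing the embedded query to ServiceGetBlockHeadersQuery. The round trip below reproduces just the decode step by hand, under the assumption that GetBlockHeadersPacket keeps the RequestId plus embedded *GetBlockHeadersRequest layout used throughout this diff; it is a sketch, not the handler.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth/protocols/eth"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// Encode a request the way a peer would put it on the wire...
	req := &eth.GetBlockHeadersPacket{
		RequestId: 7,
		GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
			Origin:  eth.HashOrNumber{Number: 42},
			Amount:  8,
			Reverse: true,
		},
	}
	blob, err := rlp.EncodeToBytes(req)
	if err != nil {
		panic(err)
	}
	// ...and decode it back, which is what msg.Decode(&query) does in the handler.
	var query eth.GetBlockHeadersPacket
	if err := rlp.DecodeBytes(blob, &query); err != nil {
		panic(err)
	}
	fmt.Println(query.RequestId, query.Origin.Number, query.Amount, query.Reverse)
}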
@ -51,7 +49,7 @@ func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersP
 }
 }

-func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersPacket, peer *Peer) []rlp.RawValue {
+func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersRequest, peer *Peer) []rlp.RawValue {
 hashMode := query.Origin.Hash != (common.Hash{})
 first := true
 maxNonCanonical := uint64(100)

@ -140,7 +138,7 @@ func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBloc
 return headers
 }

-func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersPacket) []rlp.RawValue {
+func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersRequest) []rlp.RawValue {
 count := query.Amount
 if count > maxHeadersServe {
 count = maxHeadersServe

@ -203,19 +201,19 @@ func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHe
 }
 }

-func handleGetBlockBodies66(backend Backend, msg Decoder, peer *Peer) error {
+func handleGetBlockBodies(backend Backend, msg Decoder, peer *Peer) error {
 // Decode the block body retrieval message
-var query GetBlockBodiesPacket66
+var query GetBlockBodiesPacket
 if err := msg.Decode(&query); err != nil {
 return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
 }
-response := ServiceGetBlockBodiesQuery(backend.Chain(), query.GetBlockBodiesPacket)
+response := ServiceGetBlockBodiesQuery(backend.Chain(), query.GetBlockBodiesRequest)
 return peer.ReplyBlockBodiesRLP(query.RequestId, response)
 }

 // ServiceGetBlockBodiesQuery assembles the response to a body query. It is
 // exposed to allow external packages to test protocol behavior.
-func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesPacket) []rlp.RawValue {
+func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesRequest) []rlp.RawValue {
 // Gather blocks until the fetch or network limits is reached
 var (
 bytes int

@ -234,60 +232,19 @@ func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesPack
 return bodies
 }

-func handleGetNodeData66(backend Backend, msg Decoder, peer *Peer) error {
-// Decode the trie node data retrieval message
-var query GetNodeDataPacket66
-if err := msg.Decode(&query); err != nil {
-return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
-}
-response := ServiceGetNodeDataQuery(backend.Chain(), query.GetNodeDataPacket)
-return peer.ReplyNodeData(query.RequestId, response)
-}
-
-// ServiceGetNodeDataQuery assembles the response to a node data query. It is
-// exposed to allow external packages to test protocol behavior.
-func ServiceGetNodeDataQuery(chain *core.BlockChain, query GetNodeDataPacket) [][]byte {
-// Request nodes by hash is not supported in path-based scheme.
-if chain.TrieDB().Scheme() == rawdb.PathScheme {
-return nil
-}
-// Gather state data until the fetch or network limits is reached
-var (
-bytes int
-nodes [][]byte
-)
-for lookups, hash := range query {
-if bytes >= softResponseLimit || len(nodes) >= maxNodeDataServe ||
-lookups >= 2*maxNodeDataServe {
-break
-}
-// Retrieve the requested state entry
-entry, err := chain.TrieDB().Node(hash)
-if len(entry) == 0 || err != nil {
-// Read the contract code with prefix only to save unnecessary lookups.
-entry, err = chain.ContractCodeWithPrefix(hash)
-}
-if err == nil && len(entry) > 0 {
-nodes = append(nodes, entry)
-bytes += len(entry)
-}
-}
-return nodes
-}
-
-func handleGetReceipts66(backend Backend, msg Decoder, peer *Peer) error {
+func handleGetReceipts(backend Backend, msg Decoder, peer *Peer) error {
 // Decode the block receipts retrieval message
-var query GetReceiptsPacket66
+var query GetReceiptsPacket
 if err := msg.Decode(&query); err != nil {
 return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
 }
-response := ServiceGetReceiptsQuery(backend.Chain(), query.GetReceiptsPacket)
+response := ServiceGetReceiptsQuery(backend.Chain(), query.GetReceiptsRequest)
 return peer.ReplyReceiptsRLP(query.RequestId, response)
 }

 // ServiceGetReceiptsQuery assembles the response to a receipt query. It is
 // exposed to allow external packages to test protocol behavior.
-func ServiceGetReceiptsQuery(chain *core.BlockChain, query GetReceiptsPacket) []rlp.RawValue {
+func ServiceGetReceiptsQuery(chain *core.BlockChain, query GetReceiptsRequest) []rlp.RawValue {
 // Gather state data until the fetch or network limits is reached
 var (
 bytes int
@ -356,15 +313,15 @@ func handleNewBlock(backend Backend, msg Decoder, peer *Peer) error {
|
|||||||
return backend.Handle(peer, ann)
|
return backend.Handle(peer, ann)
|
||||||
}
|
}
|
||||||
|
|
||||||
func handleBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error {
|
func handleBlockHeaders(backend Backend, msg Decoder, peer *Peer) error {
|
||||||
// A batch of headers arrived to one of our previous requests
|
// A batch of headers arrived to one of our previous requests
|
||||||
res := new(BlockHeadersPacket66)
|
res := new(BlockHeadersPacket)
|
||||||
if err := msg.Decode(res); err != nil {
|
if err := msg.Decode(res); err != nil {
|
||||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||||
}
|
}
|
||||||
metadata := func() interface{} {
|
metadata := func() interface{} {
|
||||||
hashes := make([]common.Hash, len(res.BlockHeadersPacket))
|
hashes := make([]common.Hash, len(res.BlockHeadersRequest))
|
||||||
for i, header := range res.BlockHeadersPacket {
|
for i, header := range res.BlockHeadersRequest {
|
||||||
hashes[i] = header.Hash()
|
hashes[i] = header.Hash()
|
||||||
}
|
}
|
||||||
return hashes
|
return hashes
|
||||||
@ -372,24 +329,24 @@ func handleBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error {
|
|||||||
return peer.dispatchResponse(&Response{
|
return peer.dispatchResponse(&Response{
|
||||||
id: res.RequestId,
|
id: res.RequestId,
|
||||||
code: BlockHeadersMsg,
|
code: BlockHeadersMsg,
|
||||||
Res: &res.BlockHeadersPacket,
|
Res: &res.BlockHeadersRequest,
|
||||||
}, metadata)
|
}, metadata)
|
||||||
}
|
}
|
||||||
|
|
||||||
func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error {
|
func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error {
|
||||||
// A batch of block bodies arrived to one of our previous requests
|
// A batch of block bodies arrived to one of our previous requests
|
||||||
res := new(BlockBodiesPacket66)
|
res := new(BlockBodiesPacket)
|
||||||
if err := msg.Decode(res); err != nil {
|
if err := msg.Decode(res); err != nil {
|
||||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||||
}
|
}
|
||||||
metadata := func() interface{} {
|
metadata := func() interface{} {
|
||||||
var (
|
var (
|
||||||
txsHashes = make([]common.Hash, len(res.BlockBodiesPacket))
|
txsHashes = make([]common.Hash, len(res.BlockBodiesResponse))
|
||||||
uncleHashes = make([]common.Hash, len(res.BlockBodiesPacket))
|
uncleHashes = make([]common.Hash, len(res.BlockBodiesResponse))
|
||||||
withdrawalHashes = make([]common.Hash, len(res.BlockBodiesPacket))
|
withdrawalHashes = make([]common.Hash, len(res.BlockBodiesResponse))
|
||||||
)
|
)
|
||||||
hasher := trie.NewStackTrie(nil)
|
hasher := trie.NewStackTrie(nil)
|
||||||
for i, body := range res.BlockBodiesPacket {
|
for i, body := range res.BlockBodiesResponse {
|
||||||
txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher)
|
txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher)
|
||||||
uncleHashes[i] = types.CalcUncleHash(body.Uncles)
|
uncleHashes[i] = types.CalcUncleHash(body.Uncles)
|
||||||
if body.Withdrawals != nil {
|
if body.Withdrawals != nil {
|
||||||
@ -401,33 +358,20 @@ func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error {
|
|||||||
return peer.dispatchResponse(&Response{
|
return peer.dispatchResponse(&Response{
|
||||||
id: res.RequestId,
|
id: res.RequestId,
|
||||||
code: BlockBodiesMsg,
|
code: BlockBodiesMsg,
|
||||||
Res: &res.BlockBodiesPacket,
|
Res: &res.BlockBodiesResponse,
|
||||||
}, metadata)
|
}, metadata)
|
||||||
}
|
}
|
||||||
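The metadata closure above recomputes, for each delivered body, the commitments its header is supposed to carry: the transaction root via a StackTrie, the uncle hash, and (when present) the withdrawals root. A small sketch of that derivation, assuming only the go-ethereum core/types and trie packages; an empty body must land on the canonical empty roots:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	// Derive the transaction root the same way the metadata closure does:
	// feed the list through types.DeriveSha backed by a fresh StackTrie.
	txRoot := types.DeriveSha(types.Transactions{}, trie.NewStackTrie(nil))

	// An empty uncle list hashes to the well-known empty uncle hash.
	uncleHash := types.CalcUncleHash(nil)

	fmt.Println(txRoot == types.EmptyRootHash, uncleHash == types.EmptyUncleHash)
}
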
|
|
||||||
func handleNodeData66(backend Backend, msg Decoder, peer *Peer) error {
|
func handleReceipts(backend Backend, msg Decoder, peer *Peer) error {
|
||||||
// A batch of node state data arrived to one of our previous requests
|
|
||||||
res := new(NodeDataPacket66)
|
|
||||||
if err := msg.Decode(res); err != nil {
|
|
||||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
|
||||||
}
|
|
||||||
return peer.dispatchResponse(&Response{
|
|
||||||
id: res.RequestId,
|
|
||||||
code: NodeDataMsg,
|
|
||||||
Res: &res.NodeDataPacket,
|
|
||||||
}, nil) // No post-processing, we're not using this packet anymore
|
|
||||||
}
|
|
||||||
|
|
||||||
func handleReceipts66(backend Backend, msg Decoder, peer *Peer) error {
|
|
||||||
// A batch of receipts arrived to one of our previous requests
|
// A batch of receipts arrived to one of our previous requests
|
||||||
res := new(ReceiptsPacket66)
|
res := new(ReceiptsPacket)
|
||||||
if err := msg.Decode(res); err != nil {
|
if err := msg.Decode(res); err != nil {
|
||||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||||
}
|
}
|
||||||
metadata := func() interface{} {
|
metadata := func() interface{} {
|
||||||
hasher := trie.NewStackTrie(nil)
|
hasher := trie.NewStackTrie(nil)
|
||||||
hashes := make([]common.Hash, len(res.ReceiptsPacket))
|
hashes := make([]common.Hash, len(res.ReceiptsResponse))
|
||||||
for i, receipt := range res.ReceiptsPacket {
|
for i, receipt := range res.ReceiptsResponse {
|
||||||
hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher)
|
hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher)
|
||||||
}
|
}
|
||||||
return hashes
|
return hashes
|
||||||
@ -435,17 +379,17 @@ func handleReceipts66(backend Backend, msg Decoder, peer *Peer) error {
|
|||||||
return peer.dispatchResponse(&Response{
|
return peer.dispatchResponse(&Response{
|
||||||
id: res.RequestId,
|
id: res.RequestId,
|
||||||
code: ReceiptsMsg,
|
code: ReceiptsMsg,
|
||||||
Res: &res.ReceiptsPacket,
|
Res: &res.ReceiptsResponse,
|
||||||
}, metadata)
|
}, metadata)
|
||||||
}
|
}
|
||||||
|
|
||||||
func handleNewPooledTransactionHashes66(backend Backend, msg Decoder, peer *Peer) error {
|
func handleNewPooledTransactionHashes67(backend Backend, msg Decoder, peer *Peer) error {
|
||||||
// New transaction announcement arrived, make sure we have
|
// New transaction announcement arrived, make sure we have
|
||||||
// a valid and fresh chain to handle them
|
// a valid and fresh chain to handle them
|
||||||
if !backend.AcceptTxs() {
|
if !backend.AcceptTxs() {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
ann := new(NewPooledTransactionHashesPacket66)
|
ann := new(NewPooledTransactionHashesPacket67)
|
||||||
if err := msg.Decode(ann); err != nil {
|
if err := msg.Decode(ann); err != nil {
|
||||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||||
}
|
}
|
||||||
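For reference, the two announcement formats the handlers above and below deal with differ only in payload shape: eth/67 announces bare hashes, while eth/68 tags every hash with its transaction type and encoded size so peers can schedule fetches. A hedged sketch; only the Hashes field of the eth/68 struct is visible in this diff, the Types and Sizes field names are taken from upstream go-ethereum, and the size value is made up:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	hash := common.HexToHash("deadc0de")

	// eth/67: the announcement is just a list of hashes.
	ann67 := eth.NewPooledTransactionHashesPacket67{hash}

	// eth/68: type and encoded size travel alongside each hash.
	ann68 := eth.NewPooledTransactionHashesPacket68{
		Types:  []byte{types.DynamicFeeTxType},
		Sizes:  []uint32{128}, // hypothetical encoded length
		Hashes: []common.Hash{hash},
	}

	enc67, _ := rlp.EncodeToBytes(ann67)
	enc68, _ := rlp.EncodeToBytes(&ann68)
	fmt.Printf("eth/67 announcement: %d bytes, eth/68 announcement: %d bytes\n", len(enc67), len(enc68))
}
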
@ -476,17 +420,17 @@ func handleNewPooledTransactionHashes68(backend Backend, msg Decoder, peer *Peer
|
|||||||
return backend.Handle(peer, ann)
|
return backend.Handle(peer, ann)
|
||||||
}
|
}
|
||||||
|
|
||||||
func handleGetPooledTransactions66(backend Backend, msg Decoder, peer *Peer) error {
|
func handleGetPooledTransactions(backend Backend, msg Decoder, peer *Peer) error {
|
||||||
// Decode the pooled transactions retrieval message
|
// Decode the pooled transactions retrieval message
|
||||||
var query GetPooledTransactionsPacket66
|
var query GetPooledTransactionsPacket
|
||||||
if err := msg.Decode(&query); err != nil {
|
if err := msg.Decode(&query); err != nil {
|
||||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||||
}
|
}
|
||||||
hashes, txs := answerGetPooledTransactions(backend, query.GetPooledTransactionsPacket, peer)
|
hashes, txs := answerGetPooledTransactions(backend, query.GetPooledTransactionsRequest, peer)
|
||||||
return peer.ReplyPooledTransactionsRLP(query.RequestId, hashes, txs)
|
return peer.ReplyPooledTransactionsRLP(query.RequestId, hashes, txs)
|
||||||
}
|
}
|
||||||
|
|
||||||
func answerGetPooledTransactions(backend Backend, query GetPooledTransactionsPacket, peer *Peer) ([]common.Hash, []rlp.RawValue) {
|
func answerGetPooledTransactions(backend Backend, query GetPooledTransactionsRequest, peer *Peer) ([]common.Hash, []rlp.RawValue) {
|
||||||
// Gather transactions until the fetch or network limits is reached
|
// Gather transactions until the fetch or network limits is reached
|
||||||
var (
|
var (
|
||||||
bytes int
|
bytes int
|
||||||
@ -534,17 +478,17 @@ func handleTransactions(backend Backend, msg Decoder, peer *Peer) error {
|
|||||||
return backend.Handle(peer, &txs)
|
return backend.Handle(peer, &txs)
|
||||||
}
|
}
|
||||||
|
|
||||||
func handlePooledTransactions66(backend Backend, msg Decoder, peer *Peer) error {
|
func handlePooledTransactions(backend Backend, msg Decoder, peer *Peer) error {
|
||||||
// Transactions arrived, make sure we have a valid and fresh chain to handle them
|
// Transactions arrived, make sure we have a valid and fresh chain to handle them
|
||||||
if !backend.AcceptTxs() {
|
if !backend.AcceptTxs() {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
// Transactions can be processed, parse all of them and deliver to the pool
|
// Transactions can be processed, parse all of them and deliver to the pool
|
||||||
var txs PooledTransactionsPacket66
|
var txs PooledTransactionsPacket
|
||||||
if err := msg.Decode(&txs); err != nil {
|
if err := msg.Decode(&txs); err != nil {
|
||||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||||
}
|
}
|
||||||
for i, tx := range txs.PooledTransactionsPacket {
|
for i, tx := range txs.PooledTransactionsResponse {
|
||||||
// Validate and mark the remote transaction
|
// Validate and mark the remote transaction
|
||||||
if tx == nil {
|
if tx == nil {
|
||||||
return fmt.Errorf("%w: transaction %d is nil", errDecode, i)
|
return fmt.Errorf("%w: transaction %d is nil", errDecode, i)
|
||||||
@ -553,5 +497,5 @@ func handlePooledTransactions66(backend Backend, msg Decoder, peer *Peer) error
|
|||||||
}
|
}
|
||||||
requestTracker.Fulfil(peer.id, peer.version, PooledTransactionsMsg, txs.RequestId)
|
requestTracker.Fulfil(peer.id, peer.version, PooledTransactionsMsg, txs.RequestId)
|
||||||
|
|
||||||
return backend.Handle(peer, &txs.PooledTransactionsPacket)
|
return backend.Handle(peer, &txs.PooledTransactionsResponse)
|
||||||
}
|
}
|
||||||
|
@ -27,7 +27,8 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Tests that handshake failures are detected and reported correctly.
|
// Tests that handshake failures are detected and reported correctly.
|
||||||
func TestHandshake66(t *testing.T) { testHandshake(t, ETH66) }
|
func TestHandshake67(t *testing.T) { testHandshake(t, ETH67) }
|
||||||
|
func TestHandshake68(t *testing.T) { testHandshake(t, ETH68) }
|
||||||
|
|
||||||
func testHandshake(t *testing.T, protocol uint) {
|
func testHandshake(t *testing.T, protocol uint) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
@ -219,7 +219,7 @@ func (p *Peer) AsyncSendTransactions(hashes []common.Hash) {
|
|||||||
func (p *Peer) sendPooledTransactionHashes66(hashes []common.Hash) error {
|
func (p *Peer) sendPooledTransactionHashes66(hashes []common.Hash) error {
|
||||||
// Mark all the transactions as known, but ensure we don't overflow our limits
|
// Mark all the transactions as known, but ensure we don't overflow our limits
|
||||||
p.knownTxs.Add(hashes...)
|
p.knownTxs.Add(hashes...)
|
||||||
return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket66(hashes))
|
return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket67(hashes))
|
||||||
}
|
}
|
||||||
|
|
||||||
// sendPooledTransactionHashes68 sends transaction hashes (tagged with their type
|
// sendPooledTransactionHashes68 sends transaction hashes (tagged with their type
|
||||||
@ -248,15 +248,15 @@ func (p *Peer) AsyncSendPooledTransactionHashes(hashes []common.Hash) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReplyPooledTransactionsRLP is the eth/66 version of SendPooledTransactionsRLP.
|
// ReplyPooledTransactionsRLP is the response to RequestTxs.
|
||||||
func (p *Peer) ReplyPooledTransactionsRLP(id uint64, hashes []common.Hash, txs []rlp.RawValue) error {
|
func (p *Peer) ReplyPooledTransactionsRLP(id uint64, hashes []common.Hash, txs []rlp.RawValue) error {
|
||||||
// Mark all the transactions as known, but ensure we don't overflow our limits
|
// Mark all the transactions as known, but ensure we don't overflow our limits
|
||||||
p.knownTxs.Add(hashes...)
|
p.knownTxs.Add(hashes...)
|
||||||
|
|
||||||
// Not packed into PooledTransactionsPacket to avoid RLP decoding
|
// Not packed into PooledTransactionsResponse to avoid RLP decoding
|
||||||
return p2p.Send(p.rw, PooledTransactionsMsg, &PooledTransactionsRLPPacket66{
|
return p2p.Send(p.rw, PooledTransactionsMsg, &PooledTransactionsRLPPacket{
|
||||||
RequestId: id,
|
RequestId: id,
|
||||||
PooledTransactionsRLPPacket: txs,
|
PooledTransactionsRLPResponse: txs,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
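ReplyPooledTransactionsRLP above is one of several *RLP reply helpers that skip re-encoding: transactions already held in encoded form are wrapped as rlp.RawValue entries and written to the wire verbatim. A minimal sketch of why that shortcut is byte-for-byte equivalent, assuming only the go-ethereum rlp package (the string payload stands in for encoded transactions):

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	payload := []string{"deadc0de", "feedbeef"} // stand-ins for encoded transactions

	// Path 1: encode the structured payload inside the request-id wrapper.
	direct, _ := rlp.EncodeToBytes(&struct {
		RequestId uint64
		Payload   []string
	}{1111, payload})

	// Path 2: pre-encode each element once, then splice the raw bytes in,
	// mirroring the []rlp.RawValue field of the *RLPPacket reply types.
	var rawItems []rlp.RawValue
	for _, item := range payload {
		enc, _ := rlp.EncodeToBytes(item)
		rawItems = append(rawItems, enc)
	}
	raw, _ := rlp.EncodeToBytes(&struct {
		RequestId uint64
		Payload   []rlp.RawValue
	}{1111, rawItems})

	fmt.Println(bytes.Equal(direct, raw)) // true: the wire bytes are identical
}
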
@ -309,36 +309,28 @@ func (p *Peer) AsyncSendNewBlock(block *types.Block, td *big.Int) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReplyBlockHeadersRLP is the eth/66 response to GetBlockHeaders.
|
// ReplyBlockHeadersRLP is the response to GetBlockHeaders.
|
||||||
func (p *Peer) ReplyBlockHeadersRLP(id uint64, headers []rlp.RawValue) error {
|
func (p *Peer) ReplyBlockHeadersRLP(id uint64, headers []rlp.RawValue) error {
|
||||||
return p2p.Send(p.rw, BlockHeadersMsg, &BlockHeadersRLPPacket66{
|
return p2p.Send(p.rw, BlockHeadersMsg, &BlockHeadersRLPPacket{
|
||||||
RequestId: id,
|
RequestId: id,
|
||||||
BlockHeadersRLPPacket: headers,
|
BlockHeadersRLPResponse: headers,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReplyBlockBodiesRLP is the eth/66 response to GetBlockBodies.
|
// ReplyBlockBodiesRLP is the response to GetBlockBodies.
|
||||||
func (p *Peer) ReplyBlockBodiesRLP(id uint64, bodies []rlp.RawValue) error {
|
func (p *Peer) ReplyBlockBodiesRLP(id uint64, bodies []rlp.RawValue) error {
|
||||||
// Not packed into BlockBodiesPacket to avoid RLP decoding
|
// Not packed into BlockBodiesResponse to avoid RLP decoding
|
||||||
return p2p.Send(p.rw, BlockBodiesMsg, &BlockBodiesRLPPacket66{
|
return p2p.Send(p.rw, BlockBodiesMsg, &BlockBodiesRLPPacket{
|
||||||
RequestId: id,
|
RequestId: id,
|
||||||
BlockBodiesRLPPacket: bodies,
|
BlockBodiesRLPResponse: bodies,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReplyNodeData is the eth/66 response to GetNodeData.
|
// ReplyReceiptsRLP is the response to GetReceipts.
|
||||||
func (p *Peer) ReplyNodeData(id uint64, data [][]byte) error {
|
|
||||||
return p2p.Send(p.rw, NodeDataMsg, &NodeDataPacket66{
|
|
||||||
RequestId: id,
|
|
||||||
NodeDataPacket: data,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReplyReceiptsRLP is the eth/66 response to GetReceipts.
|
|
||||||
func (p *Peer) ReplyReceiptsRLP(id uint64, receipts []rlp.RawValue) error {
|
func (p *Peer) ReplyReceiptsRLP(id uint64, receipts []rlp.RawValue) error {
|
||||||
return p2p.Send(p.rw, ReceiptsMsg, &ReceiptsRLPPacket66{
|
return p2p.Send(p.rw, ReceiptsMsg, &ReceiptsRLPPacket{
|
||||||
RequestId: id,
|
RequestId: id,
|
||||||
ReceiptsRLPPacket: receipts,
|
ReceiptsRLPResponse: receipts,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -353,9 +345,9 @@ func (p *Peer) RequestOneHeader(hash common.Hash, sink chan *Response) (*Request
|
|||||||
sink: sink,
|
sink: sink,
|
||||||
code: GetBlockHeadersMsg,
|
code: GetBlockHeadersMsg,
|
||||||
want: BlockHeadersMsg,
|
want: BlockHeadersMsg,
|
||||||
data: &GetBlockHeadersPacket66{
|
data: &GetBlockHeadersPacket{
|
||||||
RequestId: id,
|
RequestId: id,
|
||||||
GetBlockHeadersPacket: &GetBlockHeadersPacket{
|
GetBlockHeadersRequest: &GetBlockHeadersRequest{
|
||||||
Origin: HashOrNumber{Hash: hash},
|
Origin: HashOrNumber{Hash: hash},
|
||||||
Amount: uint64(1),
|
Amount: uint64(1),
|
||||||
Skip: uint64(0),
|
Skip: uint64(0),
|
||||||
@ -380,9 +372,9 @@ func (p *Peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, re
|
|||||||
sink: sink,
|
sink: sink,
|
||||||
code: GetBlockHeadersMsg,
|
code: GetBlockHeadersMsg,
|
||||||
want: BlockHeadersMsg,
|
want: BlockHeadersMsg,
|
||||||
data: &GetBlockHeadersPacket66{
|
data: &GetBlockHeadersPacket{
|
||||||
RequestId: id,
|
RequestId: id,
|
||||||
GetBlockHeadersPacket: &GetBlockHeadersPacket{
|
GetBlockHeadersRequest: &GetBlockHeadersRequest{
|
||||||
Origin: HashOrNumber{Hash: origin},
|
Origin: HashOrNumber{Hash: origin},
|
||||||
Amount: uint64(amount),
|
Amount: uint64(amount),
|
||||||
Skip: uint64(skip),
|
Skip: uint64(skip),
|
||||||
@ -407,9 +399,9 @@ func (p *Peer) RequestHeadersByNumber(origin uint64, amount int, skip int, rever
|
|||||||
sink: sink,
|
sink: sink,
|
||||||
code: GetBlockHeadersMsg,
|
code: GetBlockHeadersMsg,
|
||||||
want: BlockHeadersMsg,
|
want: BlockHeadersMsg,
|
||||||
data: &GetBlockHeadersPacket66{
|
data: &GetBlockHeadersPacket{
|
||||||
RequestId: id,
|
RequestId: id,
|
||||||
GetBlockHeadersPacket: &GetBlockHeadersPacket{
|
GetBlockHeadersRequest: &GetBlockHeadersRequest{
|
||||||
Origin: HashOrNumber{Number: origin},
|
Origin: HashOrNumber{Number: origin},
|
||||||
Amount: uint64(amount),
|
Amount: uint64(amount),
|
||||||
Skip: uint64(skip),
|
Skip: uint64(skip),
|
||||||
@ -434,31 +426,9 @@ func (p *Peer) RequestBodies(hashes []common.Hash, sink chan *Response) (*Reques
|
|||||||
sink: sink,
|
sink: sink,
|
||||||
code: GetBlockBodiesMsg,
|
code: GetBlockBodiesMsg,
|
||||||
want: BlockBodiesMsg,
|
want: BlockBodiesMsg,
|
||||||
data: &GetBlockBodiesPacket66{
|
data: &GetBlockBodiesPacket{
|
||||||
RequestId: id,
|
RequestId: id,
|
||||||
GetBlockBodiesPacket: hashes,
|
GetBlockBodiesRequest: hashes,
|
||||||
},
|
|
||||||
}
|
|
||||||
if err := p.dispatchRequest(req); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return req, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RequestNodeData fetches a batch of arbitrary data from a node's known state
|
|
||||||
// data, corresponding to the specified hashes.
|
|
||||||
func (p *Peer) RequestNodeData(hashes []common.Hash, sink chan *Response) (*Request, error) {
|
|
||||||
p.Log().Debug("Fetching batch of state data", "count", len(hashes))
|
|
||||||
id := rand.Uint64()
|
|
||||||
|
|
||||||
req := &Request{
|
|
||||||
id: id,
|
|
||||||
sink: sink,
|
|
||||||
code: GetNodeDataMsg,
|
|
||||||
want: NodeDataMsg,
|
|
||||||
data: &GetNodeDataPacket66{
|
|
||||||
RequestId: id,
|
|
||||||
GetNodeDataPacket: hashes,
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
if err := p.dispatchRequest(req); err != nil {
|
if err := p.dispatchRequest(req); err != nil {
|
||||||
@ -477,9 +447,9 @@ func (p *Peer) RequestReceipts(hashes []common.Hash, sink chan *Response) (*Requ
|
|||||||
sink: sink,
|
sink: sink,
|
||||||
code: GetReceiptsMsg,
|
code: GetReceiptsMsg,
|
||||||
want: ReceiptsMsg,
|
want: ReceiptsMsg,
|
||||||
data: &GetReceiptsPacket66{
|
data: &GetReceiptsPacket{
|
||||||
RequestId: id,
|
RequestId: id,
|
||||||
GetReceiptsPacket: hashes,
|
GetReceiptsRequest: hashes,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
if err := p.dispatchRequest(req); err != nil {
|
if err := p.dispatchRequest(req); err != nil {
|
||||||
@ -494,9 +464,9 @@ func (p *Peer) RequestTxs(hashes []common.Hash) error {
|
|||||||
id := rand.Uint64()
|
id := rand.Uint64()
|
||||||
|
|
||||||
requestTracker.Track(p.id, p.version, GetPooledTransactionsMsg, PooledTransactionsMsg, id)
|
requestTracker.Track(p.id, p.version, GetPooledTransactionsMsg, PooledTransactionsMsg, id)
|
||||||
return p2p.Send(p.rw, GetPooledTransactionsMsg, &GetPooledTransactionsPacket66{
|
return p2p.Send(p.rw, GetPooledTransactionsMsg, &GetPooledTransactionsPacket{
|
||||||
RequestId: id,
|
RequestId: id,
|
||||||
GetPooledTransactionsPacket: hashes,
|
GetPooledTransactionsRequest: hashes,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -30,7 +30,6 @@ import (
|
|||||||
|
|
||||||
// Constants to match up protocol versions and messages
|
// Constants to match up protocol versions and messages
|
||||||
const (
|
const (
|
||||||
ETH66 = 66
|
|
||||||
ETH67 = 67
|
ETH67 = 67
|
||||||
ETH68 = 68
|
ETH68 = 68
|
||||||
)
|
)
|
||||||
@ -41,11 +40,11 @@ const ProtocolName = "eth"
|
|||||||
|
|
||||||
// ProtocolVersions are the supported versions of the `eth` protocol (first
|
// ProtocolVersions are the supported versions of the `eth` protocol (first
|
||||||
// is primary).
|
// is primary).
|
||||||
var ProtocolVersions = []uint{ETH68, ETH67, ETH66}
|
var ProtocolVersions = []uint{ETH68, ETH67}
|
||||||
|
|
||||||
// protocolLengths are the number of implemented message corresponding to
|
// protocolLengths are the number of implemented message corresponding to
|
||||||
// different protocol versions.
|
// different protocol versions.
|
||||||
var protocolLengths = map[uint]uint64{ETH68: 17, ETH67: 17, ETH66: 17}
|
var protocolLengths = map[uint]uint64{ETH68: 17, ETH67: 17}
|
||||||
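With eth/66 gone, the version list and the per-version message counts shrink to eth/67 and eth/68, which is what a node now advertises during the devp2p handshake. A small sketch of turning ProtocolVersions into capability entries, assuming the upstream go-ethereum module path:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth/protocols/eth"
	"github.com/ethereum/go-ethereum/p2p"
)

func main() {
	// Advertise one devp2p capability per supported protocol version; after
	// this change the list contains only eth/68 and eth/67.
	caps := make([]p2p.Cap, 0, len(eth.ProtocolVersions))
	for _, version := range eth.ProtocolVersions {
		caps = append(caps, p2p.Cap{Name: eth.ProtocolName, Version: version})
	}
	fmt.Println(caps) // [eth/68 eth/67]
}
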
|
|
||||||
// maxMessageSize is the maximum cap on the size of a protocol message.
|
// maxMessageSize is the maximum cap on the size of a protocol message.
|
||||||
const maxMessageSize = 10 * 1024 * 1024
|
const maxMessageSize = 10 * 1024 * 1024
|
||||||
@ -62,8 +61,6 @@ const (
|
|||||||
NewPooledTransactionHashesMsg = 0x08
|
NewPooledTransactionHashesMsg = 0x08
|
||||||
GetPooledTransactionsMsg = 0x09
|
GetPooledTransactionsMsg = 0x09
|
||||||
PooledTransactionsMsg = 0x0a
|
PooledTransactionsMsg = 0x0a
|
||||||
GetNodeDataMsg = 0x0d
|
|
||||||
NodeDataMsg = 0x0e
|
|
||||||
GetReceiptsMsg = 0x0f
|
GetReceiptsMsg = 0x0f
|
||||||
ReceiptsMsg = 0x10
|
ReceiptsMsg = 0x10
|
||||||
)
|
)
|
||||||
@ -85,7 +82,7 @@ type Packet interface {
|
|||||||
Kind() byte // Kind returns the message type.
|
Kind() byte // Kind returns the message type.
|
||||||
}
|
}
|
||||||
|
|
||||||
// StatusPacket is the network packet for the status message for eth/64 and later.
|
// StatusPacket is the network packet for the status message.
|
||||||
type StatusPacket struct {
|
type StatusPacket struct {
|
||||||
ProtocolVersion uint32
|
ProtocolVersion uint32
|
||||||
NetworkID uint64
|
NetworkID uint64
|
||||||
@ -118,18 +115,18 @@ func (p *NewBlockHashesPacket) Unpack() ([]common.Hash, []uint64) {
|
|||||||
// TransactionsPacket is the network packet for broadcasting new transactions.
|
// TransactionsPacket is the network packet for broadcasting new transactions.
|
||||||
type TransactionsPacket []*types.Transaction
|
type TransactionsPacket []*types.Transaction
|
||||||
|
|
||||||
// GetBlockHeadersPacket represents a block header query.
|
// GetBlockHeadersRequest represents a block header query.
|
||||||
type GetBlockHeadersPacket struct {
|
type GetBlockHeadersRequest struct {
|
||||||
Origin HashOrNumber // Block from which to retrieve headers
|
Origin HashOrNumber // Block from which to retrieve headers
|
||||||
Amount uint64 // Maximum number of headers to retrieve
|
Amount uint64 // Maximum number of headers to retrieve
|
||||||
Skip uint64 // Blocks to skip between consecutive headers
|
Skip uint64 // Blocks to skip between consecutive headers
|
||||||
Reverse bool // Query direction (false = rising towards latest, true = falling towards genesis)
|
Reverse bool // Query direction (false = rising towards latest, true = falling towards genesis)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetBlockHeadersPacket66 represents a block header query over eth/66
|
// GetBlockHeadersPacket represents a block header query with request ID wrapping.
|
||||||
type GetBlockHeadersPacket66 struct {
|
type GetBlockHeadersPacket struct {
|
||||||
RequestId uint64
|
RequestId uint64
|
||||||
*GetBlockHeadersPacket
|
*GetBlockHeadersRequest
|
||||||
}
|
}
|
||||||
|
|
||||||
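GetBlockHeadersRequest is the raw query and GetBlockHeadersPacket its request-ID-wrapped form, with HashOrNumber (defined next) selecting the origin either by hash or by number, never both, as the encode/decode tests later in this diff assert. A short construction sketch, assuming the upstream go-ethereum module path; the values are arbitrary:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth/protocols/eth"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// A reverse header query anchored at a block number, wrapped with a
	// request id the way the header-fetching helpers build it.
	query := &eth.GetBlockHeadersPacket{
		RequestId: 1111,
		GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
			Origin:  eth.HashOrNumber{Number: 9999},
			Amount:  5,
			Skip:    0,
			Reverse: true,
		},
	}
	enc, err := rlp.EncodeToBytes(query)
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded query: %x\n", enc)
}
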
// HashOrNumber is a combined field for specifying an origin block.
|
// HashOrNumber is a combined field for specifying an origin block.
|
||||||
@ -168,23 +165,23 @@ func (hn *HashOrNumber) DecodeRLP(s *rlp.Stream) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// BlockHeadersPacket represents a block header response.
|
// BlockHeadersRequest represents a block header response.
|
||||||
type BlockHeadersPacket []*types.Header
|
type BlockHeadersRequest []*types.Header
|
||||||
|
|
||||||
// BlockHeadersPacket66 represents a block header response over eth/66.
|
// BlockHeadersPacket represents a block header response with request ID wrapping.
|
||||||
type BlockHeadersPacket66 struct {
|
type BlockHeadersPacket struct {
|
||||||
RequestId uint64
|
RequestId uint64
|
||||||
BlockHeadersPacket
|
BlockHeadersRequest
|
||||||
}
|
}
|
||||||
|
|
||||||
// BlockHeadersRLPPacket represents a block header response, to use when we already
|
// BlockHeadersRLPResponse represents a block header response, to use when we already
|
||||||
// have the headers rlp encoded.
|
// have the headers rlp encoded.
|
||||||
type BlockHeadersRLPPacket []rlp.RawValue
|
type BlockHeadersRLPResponse []rlp.RawValue
|
||||||
|
|
||||||
// BlockHeadersRLPPacket66 represents a block header response over eth/66.
|
// BlockHeadersRLPPacket represents a block header response with request ID wrapping.
|
||||||
type BlockHeadersRLPPacket66 struct {
|
type BlockHeadersRLPPacket struct {
|
||||||
RequestId uint64
|
RequestId uint64
|
||||||
BlockHeadersRLPPacket
|
BlockHeadersRLPResponse
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewBlockPacket is the network packet for the block propagation message.
|
// NewBlockPacket is the network packet for the block propagation message.
|
||||||
@ -206,33 +203,34 @@ func (request *NewBlockPacket) sanityCheck() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetBlockBodiesPacket represents a block body query.
|
// GetBlockBodiesRequest represents a block body query.
|
||||||
type GetBlockBodiesPacket []common.Hash
|
type GetBlockBodiesRequest []common.Hash
|
||||||
|
|
||||||
// GetBlockBodiesPacket66 represents a block body query over eth/66.
|
// GetBlockBodiesPacket represents a block body query with request ID wrapping.
|
||||||
type GetBlockBodiesPacket66 struct {
|
type GetBlockBodiesPacket struct {
|
||||||
RequestId uint64
|
RequestId uint64
|
||||||
GetBlockBodiesPacket
|
GetBlockBodiesRequest
|
||||||
}
|
}
|
||||||
|
|
||||||
// BlockBodiesPacket is the network packet for block content distribution.
|
// BlockBodiesResponse is the network packet for block content distribution.
|
||||||
type BlockBodiesPacket []*BlockBody
|
type BlockBodiesResponse []*BlockBody
|
||||||
|
|
||||||
// BlockBodiesPacket66 is the network packet for block content distribution over eth/66.
|
// BlockBodiesPacket is the network packet for block content distribution with
|
||||||
type BlockBodiesPacket66 struct {
|
// request ID wrapping.
|
||||||
|
type BlockBodiesPacket struct {
|
||||||
RequestId uint64
|
RequestId uint64
|
||||||
BlockBodiesPacket
|
BlockBodiesResponse
|
||||||
}
|
}
|
||||||
|
|
||||||
// BlockBodiesRLPPacket is used for replying to block body requests, in cases
|
// BlockBodiesRLPResponse is used for replying to block body requests, in cases
|
||||||
// where we already have them RLP-encoded, and thus can avoid the decode-encode
|
// where we already have them RLP-encoded, and thus can avoid the decode-encode
|
||||||
// roundtrip.
|
// roundtrip.
|
||||||
type BlockBodiesRLPPacket []rlp.RawValue
|
type BlockBodiesRLPResponse []rlp.RawValue
|
||||||
|
|
||||||
// BlockBodiesRLPPacket66 is the BlockBodiesRLPPacket over eth/66
|
// BlockBodiesRLPPacket is the BlockBodiesRLPResponse with request ID wrapping.
|
||||||
type BlockBodiesRLPPacket66 struct {
|
type BlockBodiesRLPPacket struct {
|
||||||
RequestId uint64
|
RequestId uint64
|
||||||
BlockBodiesRLPPacket
|
BlockBodiesRLPResponse
|
||||||
}
|
}
|
||||||
|
|
||||||
// BlockBody represents the data content of a single block.
|
// BlockBody represents the data content of a single block.
|
||||||
@ -244,7 +242,7 @@ type BlockBody struct {
|
|||||||
|
|
||||||
// Unpack retrieves the transactions and uncles from the range packet and returns
|
// Unpack retrieves the transactions and uncles from the range packet and returns
|
||||||
// them in a split flat format that's more consistent with the internal data structures.
|
// them in a split flat format that's more consistent with the internal data structures.
|
||||||
func (p *BlockBodiesPacket) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal) {
|
func (p *BlockBodiesResponse) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal) {
|
||||||
// TODO(matt): add support for withdrawals to fetchers
|
// TODO(matt): add support for withdrawals to fetchers
|
||||||
var (
|
var (
|
||||||
txset = make([][]*types.Transaction, len(*p))
|
txset = make([][]*types.Transaction, len(*p))
|
||||||
@ -257,53 +255,36 @@ func (p *BlockBodiesPacket) Unpack() ([][]*types.Transaction, [][]*types.Header,
|
|||||||
return txset, uncleset, withdrawalset
|
return txset, uncleset, withdrawalset
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetNodeDataPacket represents a trie node data query.
|
// GetReceiptsRequest represents a block receipts query.
|
||||||
type GetNodeDataPacket []common.Hash
|
type GetReceiptsRequest []common.Hash
|
||||||
|
|
||||||
// GetNodeDataPacket66 represents a trie node data query over eth/66.
|
// GetReceiptsPacket represents a block receipts query with request ID wrapping.
|
||||||
type GetNodeDataPacket66 struct {
|
type GetReceiptsPacket struct {
|
||||||
RequestId uint64
|
RequestId uint64
|
||||||
GetNodeDataPacket
|
GetReceiptsRequest
|
||||||
}
|
}
|
||||||
|
|
||||||
// NodeDataPacket is the network packet for trie node data distribution.
|
// ReceiptsResponse is the network packet for block receipts distribution.
|
||||||
type NodeDataPacket [][]byte
|
type ReceiptsResponse [][]*types.Receipt
|
||||||
|
|
||||||
// NodeDataPacket66 is the network packet for trie node data distribution over eth/66.
|
// ReceiptsPacket is the network packet for block receipts distribution with
|
||||||
type NodeDataPacket66 struct {
|
// request ID wrapping.
|
||||||
|
type ReceiptsPacket struct {
|
||||||
RequestId uint64
|
RequestId uint64
|
||||||
NodeDataPacket
|
ReceiptsResponse
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetReceiptsPacket represents a block receipts query.
|
// ReceiptsRLPResponse is used for receipts when we already have them encoded
|
||||||
type GetReceiptsPacket []common.Hash
|
type ReceiptsRLPResponse []rlp.RawValue
|
||||||
|
|
||||||
// GetReceiptsPacket66 represents a block receipts query over eth/66.
|
// ReceiptsRLPPacket is ReceiptsRLPResponse with request ID wrapping.
|
||||||
type GetReceiptsPacket66 struct {
|
type ReceiptsRLPPacket struct {
|
||||||
RequestId uint64
|
RequestId uint64
|
||||||
GetReceiptsPacket
|
ReceiptsRLPResponse
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReceiptsPacket is the network packet for block receipts distribution.
|
// NewPooledTransactionHashesPacket67 represents a transaction announcement packet on eth/67.
|
||||||
type ReceiptsPacket [][]*types.Receipt
|
type NewPooledTransactionHashesPacket67 []common.Hash
|
||||||
|
|
||||||
// ReceiptsPacket66 is the network packet for block receipts distribution over eth/66.
|
|
||||||
type ReceiptsPacket66 struct {
|
|
||||||
RequestId uint64
|
|
||||||
ReceiptsPacket
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReceiptsRLPPacket is used for receipts, when we already have it encoded
|
|
||||||
type ReceiptsRLPPacket []rlp.RawValue
|
|
||||||
|
|
||||||
// ReceiptsRLPPacket66 is the eth-66 version of ReceiptsRLPPacket
|
|
||||||
type ReceiptsRLPPacket66 struct {
|
|
||||||
RequestId uint64
|
|
||||||
ReceiptsRLPPacket
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewPooledTransactionHashesPacket66 represents a transaction announcement packet on eth/66 and eth/67.
|
|
||||||
type NewPooledTransactionHashesPacket66 []common.Hash
|
|
||||||
|
|
||||||
// NewPooledTransactionHashesPacket68 represents a transaction announcement packet on eth/68 and newer.
|
// NewPooledTransactionHashesPacket68 represents a transaction announcement packet on eth/68 and newer.
|
||||||
type NewPooledTransactionHashesPacket68 struct {
|
type NewPooledTransactionHashesPacket68 struct {
|
||||||
@ -312,31 +293,33 @@ type NewPooledTransactionHashesPacket68 struct {
|
|||||||
Hashes []common.Hash
|
Hashes []common.Hash
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetPooledTransactionsPacket represents a transaction query.
|
// GetPooledTransactionsRequest represents a transaction query.
|
||||||
type GetPooledTransactionsPacket []common.Hash
|
type GetPooledTransactionsRequest []common.Hash
|
||||||
|
|
||||||
type GetPooledTransactionsPacket66 struct {
|
// GetPooledTransactionsPacket represents a transaction query with request ID wrapping.
|
||||||
|
type GetPooledTransactionsPacket struct {
|
||||||
RequestId uint64
|
RequestId uint64
|
||||||
GetPooledTransactionsPacket
|
GetPooledTransactionsRequest
|
||||||
}
|
}
|
||||||
|
|
||||||
// PooledTransactionsPacket is the network packet for transaction distribution.
|
// PooledTransactionsResponse is the network packet for transaction distribution.
|
||||||
type PooledTransactionsPacket []*types.Transaction
|
type PooledTransactionsResponse []*types.Transaction
|
||||||
|
|
||||||
// PooledTransactionsPacket66 is the network packet for transaction distribution over eth/66.
|
// PooledTransactionsPacket is the network packet for transaction distribution
|
||||||
type PooledTransactionsPacket66 struct {
|
// with request ID wrapping.
|
||||||
|
type PooledTransactionsPacket struct {
|
||||||
RequestId uint64
|
RequestId uint64
|
||||||
PooledTransactionsPacket
|
PooledTransactionsResponse
|
||||||
}
|
}
|
||||||
|
|
||||||
// PooledTransactionsRLPPacket is the network packet for transaction distribution, used
|
// PooledTransactionsRLPResponse is the network packet for transaction distribution, used
|
||||||
// in the cases we already have them in rlp-encoded form
|
// in the cases we already have them in rlp-encoded form
|
||||||
type PooledTransactionsRLPPacket []rlp.RawValue
|
type PooledTransactionsRLPResponse []rlp.RawValue
|
||||||
|
|
||||||
// PooledTransactionsRLPPacket66 is the eth/66 form of PooledTransactionsRLPPacket
|
// PooledTransactionsRLPPacket is PooledTransactionsRLPResponse with request ID wrapping.
|
||||||
type PooledTransactionsRLPPacket66 struct {
|
type PooledTransactionsRLPPacket struct {
|
||||||
RequestId uint64
|
RequestId uint64
|
||||||
PooledTransactionsRLPPacket
|
PooledTransactionsRLPResponse
|
||||||
}
|
}
|
||||||
|
|
||||||
func (*StatusPacket) Name() string { return "Status" }
|
func (*StatusPacket) Name() string { return "Status" }
|
||||||
@ -348,40 +331,34 @@ func (*NewBlockHashesPacket) Kind() byte { return NewBlockHashesMsg }
|
|||||||
func (*TransactionsPacket) Name() string { return "Transactions" }
|
func (*TransactionsPacket) Name() string { return "Transactions" }
|
||||||
func (*TransactionsPacket) Kind() byte { return TransactionsMsg }
|
func (*TransactionsPacket) Kind() byte { return TransactionsMsg }
|
||||||
|
|
||||||
func (*GetBlockHeadersPacket) Name() string { return "GetBlockHeaders" }
|
func (*GetBlockHeadersRequest) Name() string { return "GetBlockHeaders" }
|
||||||
func (*GetBlockHeadersPacket) Kind() byte { return GetBlockHeadersMsg }
|
func (*GetBlockHeadersRequest) Kind() byte { return GetBlockHeadersMsg }
|
||||||
|
|
||||||
func (*BlockHeadersPacket) Name() string { return "BlockHeaders" }
|
func (*BlockHeadersRequest) Name() string { return "BlockHeaders" }
|
||||||
func (*BlockHeadersPacket) Kind() byte { return BlockHeadersMsg }
|
func (*BlockHeadersRequest) Kind() byte { return BlockHeadersMsg }
|
||||||
|
|
||||||
func (*GetBlockBodiesPacket) Name() string { return "GetBlockBodies" }
|
func (*GetBlockBodiesRequest) Name() string { return "GetBlockBodies" }
|
||||||
func (*GetBlockBodiesPacket) Kind() byte { return GetBlockBodiesMsg }
|
func (*GetBlockBodiesRequest) Kind() byte { return GetBlockBodiesMsg }
|
||||||
|
|
||||||
func (*BlockBodiesPacket) Name() string { return "BlockBodies" }
|
func (*BlockBodiesResponse) Name() string { return "BlockBodies" }
|
||||||
func (*BlockBodiesPacket) Kind() byte { return BlockBodiesMsg }
|
func (*BlockBodiesResponse) Kind() byte { return BlockBodiesMsg }
|
||||||
|
|
||||||
func (*NewBlockPacket) Name() string { return "NewBlock" }
|
func (*NewBlockPacket) Name() string { return "NewBlock" }
|
||||||
func (*NewBlockPacket) Kind() byte { return NewBlockMsg }
|
func (*NewBlockPacket) Kind() byte { return NewBlockMsg }
|
||||||
|
|
||||||
func (*NewPooledTransactionHashesPacket66) Name() string { return "NewPooledTransactionHashes" }
|
func (*NewPooledTransactionHashesPacket67) Name() string { return "NewPooledTransactionHashes" }
|
||||||
func (*NewPooledTransactionHashesPacket66) Kind() byte { return NewPooledTransactionHashesMsg }
|
func (*NewPooledTransactionHashesPacket67) Kind() byte { return NewPooledTransactionHashesMsg }
|
||||||
func (*NewPooledTransactionHashesPacket68) Name() string { return "NewPooledTransactionHashes" }
|
func (*NewPooledTransactionHashesPacket68) Name() string { return "NewPooledTransactionHashes" }
|
||||||
func (*NewPooledTransactionHashesPacket68) Kind() byte { return NewPooledTransactionHashesMsg }
|
func (*NewPooledTransactionHashesPacket68) Kind() byte { return NewPooledTransactionHashesMsg }
|
||||||
|
|
||||||
func (*GetPooledTransactionsPacket) Name() string { return "GetPooledTransactions" }
|
func (*GetPooledTransactionsRequest) Name() string { return "GetPooledTransactions" }
|
||||||
func (*GetPooledTransactionsPacket) Kind() byte { return GetPooledTransactionsMsg }
|
func (*GetPooledTransactionsRequest) Kind() byte { return GetPooledTransactionsMsg }
|
||||||
|
|
||||||
func (*PooledTransactionsPacket) Name() string { return "PooledTransactions" }
|
func (*PooledTransactionsResponse) Name() string { return "PooledTransactions" }
|
||||||
func (*PooledTransactionsPacket) Kind() byte { return PooledTransactionsMsg }
|
func (*PooledTransactionsResponse) Kind() byte { return PooledTransactionsMsg }
|
||||||
|
|
||||||
func (*GetNodeDataPacket) Name() string { return "GetNodeData" }
|
func (*GetReceiptsRequest) Name() string { return "GetReceipts" }
|
||||||
func (*GetNodeDataPacket) Kind() byte { return GetNodeDataMsg }
|
func (*GetReceiptsRequest) Kind() byte { return GetReceiptsMsg }
|
||||||
|
|
||||||
func (*NodeDataPacket) Name() string { return "NodeData" }
|
func (*ReceiptsResponse) Name() string { return "Receipts" }
|
||||||
func (*NodeDataPacket) Kind() byte { return NodeDataMsg }
|
func (*ReceiptsResponse) Kind() byte { return ReceiptsMsg }
|
||||||
|
|
||||||
func (*GetReceiptsPacket) Name() string { return "GetReceipts" }
|
|
||||||
func (*GetReceiptsPacket) Kind() byte { return GetReceiptsMsg }
|
|
||||||
|
|
||||||
func (*ReceiptsPacket) Name() string { return "Receipts" }
|
|
||||||
func (*ReceiptsPacket) Kind() byte { return ReceiptsMsg }
|
|
||||||
|
@ -35,19 +35,19 @@ func TestGetBlockHeadersDataEncodeDecode(t *testing.T) {
|
|||||||
}
|
}
|
||||||
// Assemble some table driven tests
|
// Assemble some table driven tests
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
packet *GetBlockHeadersPacket
|
packet *GetBlockHeadersRequest
|
||||||
fail bool
|
fail bool
|
||||||
}{
|
}{
|
||||||
// Providing the origin as either a hash or a number should both work
|
// Providing the origin as either a hash or a number should both work
|
||||||
{fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 314}}},
|
{fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 314}}},
|
||||||
{fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash}}},
|
{fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash}}},
|
||||||
|
|
||||||
// Providing arbitrary query field should also work
|
// Providing arbitrary query field should also work
|
||||||
{fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}},
|
{fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}},
|
||||||
{fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}},
|
{fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}},
|
||||||
|
|
||||||
// Providing both the origin hash and origin number must fail
|
// Providing both the origin hash and origin number must fail
|
||||||
{fail: true, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash, Number: 314}}},
|
{fail: true, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash, Number: 314}}},
|
||||||
}
|
}
|
||||||
// Iterate over each of the tests and try to encode and then decode
|
// Iterate over each of the tests and try to encode and then decode
|
||||||
for i, tt := range tests {
|
for i, tt := range tests {
|
||||||
@ -58,7 +58,7 @@ func TestGetBlockHeadersDataEncodeDecode(t *testing.T) {
|
|||||||
t.Fatalf("test %d: encode should have failed", i)
|
t.Fatalf("test %d: encode should have failed", i)
|
||||||
}
|
}
|
||||||
if !tt.fail {
|
if !tt.fail {
|
||||||
packet := new(GetBlockHeadersPacket)
|
packet := new(GetBlockHeadersRequest)
|
||||||
if err := rlp.DecodeBytes(bytes, packet); err != nil {
|
if err := rlp.DecodeBytes(bytes, packet); err != nil {
|
||||||
t.Fatalf("test %d: failed to decode packet: %v", i, err)
|
t.Fatalf("test %d: failed to decode packet: %v", i, err)
|
||||||
}
|
}
|
||||||
@ -70,46 +70,40 @@ func TestGetBlockHeadersDataEncodeDecode(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestEth66EmptyMessages tests encoding of empty eth66 messages
|
// TestEmptyMessages tests encoding of empty messages.
|
||||||
func TestEth66EmptyMessages(t *testing.T) {
|
func TestEmptyMessages(t *testing.T) {
|
||||||
// All empty messages encodes to the same format
|
// All empty messages encodes to the same format
|
||||||
want := common.FromHex("c4820457c0")
|
want := common.FromHex("c4820457c0")
|
||||||
|
|
||||||
for i, msg := range []interface{}{
|
for i, msg := range []interface{}{
|
||||||
// Headers
|
// Headers
|
||||||
GetBlockHeadersPacket66{1111, nil},
|
GetBlockHeadersPacket{1111, nil},
|
||||||
BlockHeadersPacket66{1111, nil},
|
BlockHeadersPacket{1111, nil},
|
||||||
// Bodies
|
// Bodies
|
||||||
GetBlockBodiesPacket66{1111, nil},
|
GetBlockBodiesPacket{1111, nil},
|
||||||
BlockBodiesPacket66{1111, nil},
|
BlockBodiesPacket{1111, nil},
|
||||||
BlockBodiesRLPPacket66{1111, nil},
|
BlockBodiesRLPPacket{1111, nil},
|
||||||
// Node data
|
|
||||||
GetNodeDataPacket66{1111, nil},
|
|
||||||
NodeDataPacket66{1111, nil},
|
|
||||||
// Receipts
|
// Receipts
|
||||||
GetReceiptsPacket66{1111, nil},
|
GetReceiptsPacket{1111, nil},
|
||||||
ReceiptsPacket66{1111, nil},
|
ReceiptsPacket{1111, nil},
|
||||||
// Transactions
|
// Transactions
|
||||||
GetPooledTransactionsPacket66{1111, nil},
|
GetPooledTransactionsPacket{1111, nil},
|
||||||
PooledTransactionsPacket66{1111, nil},
|
PooledTransactionsPacket{1111, nil},
|
||||||
PooledTransactionsRLPPacket66{1111, nil},
|
PooledTransactionsRLPPacket{1111, nil},
|
||||||
|
|
||||||
// Headers
|
// Headers
|
||||||
BlockHeadersPacket66{1111, BlockHeadersPacket([]*types.Header{})},
|
BlockHeadersPacket{1111, BlockHeadersRequest([]*types.Header{})},
|
||||||
// Bodies
|
// Bodies
|
||||||
GetBlockBodiesPacket66{1111, GetBlockBodiesPacket([]common.Hash{})},
|
GetBlockBodiesPacket{1111, GetBlockBodiesRequest([]common.Hash{})},
|
||||||
BlockBodiesPacket66{1111, BlockBodiesPacket([]*BlockBody{})},
|
BlockBodiesPacket{1111, BlockBodiesResponse([]*BlockBody{})},
|
||||||
BlockBodiesRLPPacket66{1111, BlockBodiesRLPPacket([]rlp.RawValue{})},
|
BlockBodiesRLPPacket{1111, BlockBodiesRLPResponse([]rlp.RawValue{})},
|
||||||
// Node data
|
|
||||||
GetNodeDataPacket66{1111, GetNodeDataPacket([]common.Hash{})},
|
|
||||||
NodeDataPacket66{1111, NodeDataPacket([][]byte{})},
|
|
||||||
// Receipts
|
// Receipts
|
||||||
GetReceiptsPacket66{1111, GetReceiptsPacket([]common.Hash{})},
|
GetReceiptsPacket{1111, GetReceiptsRequest([]common.Hash{})},
|
||||||
ReceiptsPacket66{1111, ReceiptsPacket([][]*types.Receipt{})},
|
ReceiptsPacket{1111, ReceiptsResponse([][]*types.Receipt{})},
|
||||||
// Transactions
|
// Transactions
|
||||||
GetPooledTransactionsPacket66{1111, GetPooledTransactionsPacket([]common.Hash{})},
|
GetPooledTransactionsPacket{1111, GetPooledTransactionsRequest([]common.Hash{})},
|
||||||
PooledTransactionsPacket66{1111, PooledTransactionsPacket([]*types.Transaction{})},
|
PooledTransactionsPacket{1111, PooledTransactionsResponse([]*types.Transaction{})},
|
||||||
PooledTransactionsRLPPacket66{1111, PooledTransactionsRLPPacket([]rlp.RawValue{})},
|
PooledTransactionsRLPPacket{1111, PooledTransactionsRLPResponse([]rlp.RawValue{})},
|
||||||
} {
|
} {
|
||||||
if have, _ := rlp.EncodeToBytes(msg); !bytes.Equal(have, want) {
|
if have, _ := rlp.EncodeToBytes(msg); !bytes.Equal(have, want) {
|
||||||
t.Errorf("test %d, type %T, have\n\t%x\nwant\n\t%x", i, msg, have, want)
|
t.Errorf("test %d, type %T, have\n\t%x\nwant\n\t%x", i, msg, have, want)
|
||||||
@ -117,8 +111,8 @@ func TestEth66EmptyMessages(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
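The shared constant in the empty-message test above decomposes by hand: c4 opens a four-byte list, 820457 is the request id 1111, and c0 is the empty payload, which is why every request-ID-wrapped message with a nil body encodes identically. A quick check, assuming only the go-ethereum rlp package:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// Decode the shared empty-message bytes into the generic wrapper shape:
	// a request id followed by an (empty) payload list.
	var decoded struct {
		RequestId uint64
		Payload   []rlp.RawValue
	}
	if err := rlp.DecodeBytes([]byte{0xc4, 0x82, 0x04, 0x57, 0xc0}, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.RequestId, len(decoded.Payload)) // 1111 0
}
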
|
|
||||||
// TestEth66Messages tests the encoding of all redefined eth66 messages
|
// TestMessages tests the encoding of all messages.
|
||||||
func TestEth66Messages(t *testing.T) {
|
func TestMessages(t *testing.T) {
|
||||||
// Some basic structs used during testing
|
// Some basic structs used during testing
|
||||||
var (
|
var (
|
||||||
header *types.Header
|
header *types.Header
|
||||||
@ -169,10 +163,6 @@ func TestEth66Messages(t *testing.T) {
|
|||||||
common.HexToHash("deadc0de"),
|
common.HexToHash("deadc0de"),
|
||||||
common.HexToHash("feedbeef"),
|
common.HexToHash("feedbeef"),
|
||||||
}
|
}
|
||||||
byteSlices := [][]byte{
|
|
||||||
common.FromHex("deadc0de"),
|
|
||||||
common.FromHex("feedbeef"),
|
|
||||||
}
|
|
||||||
// init the receipts
|
// init the receipts
|
||||||
{
|
{
|
||||||
receipts = []*types.Receipt{
|
receipts = []*types.Receipt{
|
||||||
@ -203,59 +193,51 @@ func TestEth66Messages(t *testing.T) {
|
|||||||
want []byte
|
want []byte
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
GetBlockHeadersPacket66{1111, &GetBlockHeadersPacket{HashOrNumber{hashes[0], 0}, 5, 5, false}},
|
GetBlockHeadersPacket{1111, &GetBlockHeadersRequest{HashOrNumber{hashes[0], 0}, 5, 5, false}},
|
||||||
common.FromHex("e8820457e4a000000000000000000000000000000000000000000000000000000000deadc0de050580"),
|
common.FromHex("e8820457e4a000000000000000000000000000000000000000000000000000000000deadc0de050580"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
GetBlockHeadersPacket66{1111, &GetBlockHeadersPacket{HashOrNumber{common.Hash{}, 9999}, 5, 5, false}},
|
GetBlockHeadersPacket{1111, &GetBlockHeadersRequest{HashOrNumber{common.Hash{}, 9999}, 5, 5, false}},
|
||||||
common.FromHex("ca820457c682270f050580"),
|
common.FromHex("ca820457c682270f050580"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
BlockHeadersPacket66{1111, BlockHeadersPacket{header}},
|
BlockHeadersPacket{1111, BlockHeadersRequest{header}},
|
||||||
common.FromHex("f90202820457f901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"),
|
common.FromHex("f90202820457f901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
GetBlockBodiesPacket66{1111, GetBlockBodiesPacket(hashes)},
|
GetBlockBodiesPacket{1111, GetBlockBodiesRequest(hashes)},
|
||||||
common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"),
|
common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
BlockBodiesPacket66{1111, BlockBodiesPacket([]*BlockBody{blockBody})},
|
BlockBodiesPacket{1111, BlockBodiesResponse([]*BlockBody{blockBody})},
|
||||||
common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"),
|
common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"),
|
||||||
},
|
},
|
||||||
{ // Identical to non-rlp-shortcut version
|
{ // Identical to non-rlp-shortcut version
|
||||||
BlockBodiesRLPPacket66{1111, BlockBodiesRLPPacket([]rlp.RawValue{blockBodyRlp})},
|
BlockBodiesRLPPacket{1111, BlockBodiesRLPResponse([]rlp.RawValue{blockBodyRlp})},
|
||||||
common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"),
|
common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
GetNodeDataPacket66{1111, GetNodeDataPacket(hashes)},
|
GetReceiptsPacket{1111, GetReceiptsRequest(hashes)},
|
||||||
common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"),
|
common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
NodeDataPacket66{1111, NodeDataPacket(byteSlices)},
|
ReceiptsPacket{1111, ReceiptsResponse([][]*types.Receipt{receipts})},
|
||||||
common.FromHex("ce820457ca84deadc0de84feedbeef"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
GetReceiptsPacket66{1111, GetReceiptsPacket(hashes)},
|
|
||||||
common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
ReceiptsPacket66{1111, ReceiptsPacket([][]*types.Receipt{receipts})},
|
|
||||||
common.FromHex("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"),
|
common.FromHex("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
ReceiptsRLPPacket66{1111, ReceiptsRLPPacket([]rlp.RawValue{receiptsRlp})},
|
ReceiptsRLPPacket{1111, ReceiptsRLPResponse([]rlp.RawValue{receiptsRlp})},
|
||||||
common.FromHex("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"),
|
common.FromHex("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
GetPooledTransactionsPacket66{1111, GetPooledTransactionsPacket(hashes)},
|
GetPooledTransactionsPacket{1111, GetPooledTransactionsRequest(hashes)},
|
||||||
common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"),
|
common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
PooledTransactionsPacket66{1111, PooledTransactionsPacket(txs)},
|
PooledTransactionsPacket{1111, PooledTransactionsResponse(txs)},
|
||||||
common.FromHex("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"),
|
common.FromHex("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
PooledTransactionsRLPPacket66{1111, PooledTransactionsRLPPacket(txRlps)},
|
PooledTransactionsRLPPacket{1111, PooledTransactionsRLPResponse(txRlps)},
|
||||||
common.FromHex("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"),
|
common.FromHex("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"),
|
||||||
},
|
},
|
||||||
} {
|
} {
|
||||||
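Note (not part of the diff): the fixture bytes above are plain RLP of a request-ID wrapper followed by the payload, which is why every fixture begins with 820457 (the RLP encoding of request ID 1111) right after its list header. Below is a minimal standalone sketch that reproduces the f847820457... GetReceipts fixture; the getReceiptsPacket struct is an illustrative stand-in for the renamed wrapper type, and the go-ethereum rlp and common packages are assumed to be available under their usual import paths.

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
)

// getReceiptsPacket mirrors the shape the test exercises after the rename:
// a request ID followed by the block hashes whose receipts are requested.
// It is a local stand-in, not the eth package's own GetReceiptsPacket type.
type getReceiptsPacket struct {
	RequestId uint64
	Hashes    []common.Hash
}

func main() {
	enc, err := rlp.EncodeToBytes(&getReceiptsPacket{
		RequestId: 1111, // encodes to 820457
		Hashes: []common.Hash{
			common.HexToHash("deadc0de"), // left-padded to 32 bytes
			common.HexToHash("feedbeef"),
		},
	})
	if err != nil {
		panic(err)
	}
	// Prints f847820457f842a0...deadc0dea0...feedbeef, matching the fixture above.
	fmt.Println(hex.EncodeToString(enc))
}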
@ -28,8 +28,8 @@ import (
 )

 // Tests that snap sync is disabled after a successful sync cycle.
-func TestSnapSyncDisabling66(t *testing.T) { testSnapSyncDisabling(t, eth.ETH66, snap.SNAP1) }
 func TestSnapSyncDisabling67(t *testing.T) { testSnapSyncDisabling(t, eth.ETH67, snap.SNAP1) }
+func TestSnapSyncDisabling68(t *testing.T) { testSnapSyncDisabling(t, eth.ETH68, snap.SNAP1) }

 // Tests that snap sync gets disabled as soon as a real block is successfully
 // imported into the blockchain.
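Note (not part of the diff): with eth/66 gone, the two remaining per-version wrappers above could also be collapsed into a single table-driven test. A hedged sketch, assuming testSnapSyncDisabling keeps the (t, ethVersion, snapVersion) signature used in the hunk and that fmt is added to the file's import block:

// Possible consolidation of the version-specific wrappers; purely illustrative.
func TestSnapSyncDisabling(t *testing.T) {
	for _, version := range []uint{eth.ETH67, eth.ETH68} {
		version := version // capture loop variable for the subtest closure
		t.Run(fmt.Sprintf("eth/%d", version), func(t *testing.T) {
			testSnapSyncDisabling(t, version, snap.SNAP1)
		})
	}
}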