les: removed les/1 protocol messages

Zsolt Felfoldi 2019-04-05 17:01:51 +02:00
parent 3996bc1ad9
commit 5515f364ae
11 changed files with 92 additions and 506 deletions

View File

@ -179,8 +179,6 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
func lesTopic(genesisHash common.Hash, protocolVersion uint) discv5.Topic {
var name string
switch protocolVersion {
case lpv1:
name = "LES"
case lpv2:
name = "LES2"
default:

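For context, with the lpv1 case removed the discovery topic helper presumably collapses to the les/2 branch only. The sketch below is illustrative: the default branch and the return expression sit outside the visible hunk and are assumptions, not part of this commit.

package lessketch

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/p2p/discv5"
)

const lpv2 = 2 // assumed constant, mirrors protocol.go later in this diff

// lesTopicSketch shows the assumed post-removal shape of lesTopic: only the
// "LES2" topic name remains for advertising on the discv5 topic discovery network.
func lesTopicSketch(genesisHash common.Hash, protocolVersion uint) discv5.Topic {
	var name string
	switch protocolVersion {
	case lpv2:
		name = "LES2"
	default:
		panic(nil)
	}
	// Topic string format assumed: protocol name plus a genesis hash prefix.
	return discv5.Topic(name + "@" + common.Bytes2Hex(genesisHash.Bytes()[0:8]))
}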
View File

@ -39,11 +39,8 @@ var (
GetBlockBodiesMsg: {0, 700000},
GetReceiptsMsg: {0, 1000000},
GetCodeMsg: {0, 450000},
GetProofsV1Msg: {0, 600000},
GetProofsV2Msg: {0, 600000},
GetHeaderProofsMsg: {0, 1000000},
GetHelperTrieProofsMsg: {0, 1000000},
SendTxMsg: {0, 450000},
SendTxV2Msg: {0, 450000},
GetTxStatusMsg: {0, 250000},
}
@ -53,11 +50,8 @@ var (
GetBlockBodiesMsg: {0, 40},
GetReceiptsMsg: {0, 40},
GetCodeMsg: {0, 80},
GetProofsV1Msg: {0, 80},
GetProofsV2Msg: {0, 80},
GetHeaderProofsMsg: {0, 20},
GetHelperTrieProofsMsg: {0, 20},
SendTxMsg: {0, 66000},
SendTxV2Msg: {0, 66000},
GetTxStatusMsg: {0, 50},
}
@ -67,11 +61,8 @@ var (
GetBlockBodiesMsg: {0, 100000},
GetReceiptsMsg: {0, 200000},
GetCodeMsg: {0, 50000},
GetProofsV1Msg: {0, 4000},
GetProofsV2Msg: {0, 4000},
GetHeaderProofsMsg: {0, 4000},
GetHelperTrieProofsMsg: {0, 4000},
SendTxMsg: {0, 0},
SendTxV2Msg: {0, 100},
GetTxStatusMsg: {0, 100},
}
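The three tables above pair each request message with a {baseCost, reqCost} tuple. A minimal sketch of how such a pair is typically turned into a per-request charge, consistent with the fcCosts arithmetic visible in peer.go further down in this diff, using simplified stand-in types:

package lessketch

// costEntry mirrors the {baseCost, reqCost} pairs in the tables above.
type costEntry struct {
	baseCost, reqCost uint64
}

// requestCost charges a flat base cost plus a per-item cost for a request
// carrying `amount` items, matching the buffer accounting used by the
// flow-control code (compare GetTxRelayCost later in this diff).
func requestCost(c costEntry, amount uint64) uint64 {
	return c.baseCost + c.reqCost*amount
}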

View File

@ -772,80 +772,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
Obj: resp.Receipts,
}
case GetProofsV1Msg:
p.Log().Trace("Received proofs request")
// Decode the retrieval message
var req struct {
ReqID uint64
Reqs []ProofReq
}
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Gather state data until the fetch or network limits are reached
var (
bytes int
proofs proofsData
)
reqCnt := len(req.Reqs)
if !accept(req.ReqID, uint64(reqCnt), MaxProofsFetch) {
return errResp(ErrRequestRejected, "")
}
go func() {
for i, req := range req.Reqs {
if i != 0 && !task.waitOrStop() {
return
}
// Look up the root hash belonging to the request
number := rawdb.ReadHeaderNumber(pm.chainDb, req.BHash)
if number == nil {
p.Log().Warn("Failed to retrieve block num for proof", "hash", req.BHash)
continue
}
header := rawdb.ReadHeader(pm.chainDb, req.BHash, *number)
if header == nil {
p.Log().Warn("Failed to retrieve header for proof", "block", *number, "hash", req.BHash)
continue
}
// Open the account or storage trie for the request
statedb := pm.blockchain.StateCache()
var trie state.Trie
switch len(req.AccKey) {
case 0:
// No account key specified, open an account trie
trie, err = statedb.OpenTrie(header.Root)
if trie == nil || err != nil {
p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "root", header.Root, "err", err)
continue
}
default:
// Account key specified, open a storage trie
account, err := pm.getAccount(statedb.TrieDB(), header.Root, common.BytesToHash(req.AccKey))
if err != nil {
p.Log().Warn("Failed to retrieve account for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(req.AccKey), "err", err)
continue
}
trie, err = statedb.OpenStorageTrie(common.BytesToHash(req.AccKey), account.Root)
if trie == nil || err != nil {
p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(req.AccKey), "root", account.Root, "err", err)
continue
}
}
// Prove the user's request from the account or storage trie
var proof light.NodeList
if err := trie.Prove(req.Key, 0, &proof); err != nil {
p.Log().Warn("Failed to prove state request", "block", header.Number, "hash", header.Hash(), "err", err)
continue
}
proofs = append(proofs, proof)
if bytes += proof.DataSize(); bytes >= softResponseLimit {
break
}
}
sendResponse(req.ReqID, uint64(reqCnt), p.ReplyProofs(req.ReqID, proofs), task.done())
}()
case GetProofsV2Msg:
p.Log().Trace("Received les/2 proofs request")
// Decode the retrieval message
@ -927,27 +853,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
sendResponse(req.ReqID, uint64(reqCnt), p.ReplyProofsV2(req.ReqID, nodes.NodeList()), task.done())
}()
case ProofsV1Msg:
if pm.odr == nil {
return errResp(ErrUnexpectedResponse, "")
}
p.Log().Trace("Received proofs response")
// A batch of merkle proofs arrived to one of our previous requests
var resp struct {
ReqID, BV uint64
Data []light.NodeList
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
deliverMsg = &Msg{
MsgType: MsgProofsV1,
ReqID: resp.ReqID,
Obj: resp.Data,
}
case ProofsV2Msg:
if pm.odr == nil {
return errResp(ErrUnexpectedResponse, "")
@ -969,54 +874,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
Obj: resp.Data,
}
case GetHeaderProofsMsg:
p.Log().Trace("Received headers proof request")
// Decode the retrieval message
var req struct {
ReqID uint64
Reqs []ChtReq
}
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Gather state data until the fetch or network limits are reached
var (
bytes int
proofs []ChtResp
)
reqCnt := len(req.Reqs)
if !accept(req.ReqID, uint64(reqCnt), MaxHelperTrieProofsFetch) {
return errResp(ErrRequestRejected, "")
}
go func() {
trieDb := trie.NewDatabase(rawdb.NewTable(pm.chainDb, light.ChtTablePrefix))
for i, req := range req.Reqs {
if i != 0 && !task.waitOrStop() {
return
}
if header := pm.blockchain.GetHeaderByNumber(req.BlockNum); header != nil {
sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, req.ChtNum*pm.iConfig.ChtSize-1)
if root := light.GetChtRoot(pm.chainDb, req.ChtNum-1, sectionHead); root != (common.Hash{}) {
trie, err := trie.New(root, trieDb)
if err != nil {
continue
}
var encNumber [8]byte
binary.BigEndian.PutUint64(encNumber[:], req.BlockNum)
var proof light.NodeList
trie.Prove(encNumber[:], 0, &proof)
proofs = append(proofs, ChtResp{Header: header, Proof: proof})
if bytes += proof.DataSize() + estHeaderRlpSize; bytes >= softResponseLimit {
break
}
}
}
}
sendResponse(req.ReqID, uint64(reqCnt), p.ReplyHeaderProofs(req.ReqID, proofs), task.done())
}()
case GetHelperTrieProofsMsg:
p.Log().Trace("Received helper trie proof request")
// Decode the retrieval message
@ -1081,26 +938,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
sendResponse(req.ReqID, uint64(reqCnt), p.ReplyHelperTrieProofs(req.ReqID, HelperTrieResps{Proofs: nodes.NodeList(), AuxData: auxData}), task.done())
}()
case HeaderProofsMsg:
if pm.odr == nil {
return errResp(ErrUnexpectedResponse, "")
}
p.Log().Trace("Received headers proof response")
var resp struct {
ReqID, BV uint64
Data []ChtResp
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
deliverMsg = &Msg{
MsgType: MsgHeaderProofs,
ReqID: resp.ReqID,
Obj: resp.Data,
}
case HelperTrieProofsMsg:
if pm.odr == nil {
return errResp(ErrUnexpectedResponse, "")
@ -1122,29 +959,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
Obj: resp.Data,
}
case SendTxMsg:
if pm.txpool == nil {
return errResp(ErrRequestRejected, "")
}
// Transactions arrived, parse all of them and deliver to the pool
var txs []*types.Transaction
if err := msg.Decode(&txs); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
reqCnt := len(txs)
if !accept(0, uint64(reqCnt), MaxTxSend) {
return errResp(ErrRequestRejected, "")
}
go func() {
for i, tx := range txs {
if i != 0 && !task.waitOrStop() {
return
}
pm.txpool.AddRemotes([]*types.Transaction{tx})
}
sendResponse(0, uint64(reqCnt), nil, task.done())
}()
case SendTxV2Msg:
if pm.txpool == nil {
return errResp(ErrRequestRejected, "")

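All of the removed handler cases above follow the same serving pattern: decode the request, admission-check it against the peer's flow-control buffer, then serve the individual items on a background task that can be cancelled. A stripped-down sketch of that pattern, with accept, waitOrStop and sendResponse as stand-ins for the real flow-control and servingQueue plumbing:

package lessketch

// serveSketch illustrates the decode/accept/serve/reply pattern used by the
// handleMsg cases above. The callback signatures are simplifications.
func serveSketch(
	reqID uint64,
	items []int,
	accept func(reqID, count, max uint64) bool,
	waitOrStop func() bool,
	sendResponse func(reqID, count uint64),
) {
	const maxFetch = 64 // assumed per-request cap, analogous to MaxProofsFetch
	count := uint64(len(items))
	if !accept(reqID, count, maxFetch) {
		return // mirrors errResp(ErrRequestRejected, "")
	}
	go func() {
		for i := range items {
			// Yield between items; stop serving if the task is cancelled.
			if i != 0 && !waitOrStop() {
				return
			}
			// ... look up the item and build its proof here ...
		}
		sendResponse(reqID, count)
	}()
}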
View File

@ -46,7 +46,6 @@ func expectResponse(r p2p.MsgReader, msgcode, reqID, bv uint64, data interface{}
}
// Tests that block headers can be retrieved from a remote chain based on user queries.
func TestGetBlockHeadersLes1(t *testing.T) { testGetBlockHeaders(t, 1) }
func TestGetBlockHeadersLes2(t *testing.T) { testGetBlockHeaders(t, 2) }
func testGetBlockHeaders(t *testing.T, protocol int) {
@ -174,7 +173,6 @@ func testGetBlockHeaders(t *testing.T, protocol int) {
}
// Tests that block contents can be retrieved from a remote chain based on their hashes.
func TestGetBlockBodiesLes1(t *testing.T) { testGetBlockBodies(t, 1) }
func TestGetBlockBodiesLes2(t *testing.T) { testGetBlockBodies(t, 2) }
func testGetBlockBodies(t *testing.T, protocol int) {
@ -249,7 +247,6 @@ func testGetBlockBodies(t *testing.T, protocol int) {
}
// Tests that the contract codes can be retrieved based on account addresses.
func TestGetCodeLes1(t *testing.T) { testGetCode(t, 1) }
func TestGetCodeLes2(t *testing.T) { testGetCode(t, 2) }
func testGetCode(t *testing.T, protocol int) {
@ -281,7 +278,6 @@ func testGetCode(t *testing.T, protocol int) {
}
// Tests that the transaction receipts can be retrieved based on hashes.
func TestGetReceiptLes1(t *testing.T) { testGetReceipt(t, 1) }
func TestGetReceiptLes2(t *testing.T) { testGetReceipt(t, 2) }
func testGetReceipt(t *testing.T, protocol int) {
@ -307,7 +303,6 @@ func testGetReceipt(t *testing.T, protocol int) {
}
// Tests that trie merkle proofs can be retrieved
func TestGetProofsLes1(t *testing.T) { testGetProofs(t, 1) }
func TestGetProofsLes2(t *testing.T) { testGetProofs(t, 2) }
func testGetProofs(t *testing.T, protocol int) {
@ -316,10 +311,7 @@ func testGetProofs(t *testing.T, protocol int) {
defer tearDown()
bc := server.pm.blockchain.(*core.BlockChain)
var (
proofreqs []ProofReq
proofsV1 [][]rlp.RawValue
)
var proofreqs []ProofReq
proofsV2 := light.NewNodeSet()
accounts := []common.Address{testBankAddress, acc1Addr, acc2Addr, {}}
@ -334,42 +326,24 @@ func testGetProofs(t *testing.T, protocol int) {
Key: crypto.Keccak256(acc[:]),
}
proofreqs = append(proofreqs, req)
switch protocol {
case 1:
var proof light.NodeList
trie.Prove(crypto.Keccak256(acc[:]), 0, &proof)
proofsV1 = append(proofsV1, proof)
case 2:
trie.Prove(crypto.Keccak256(acc[:]), 0, proofsV2)
}
trie.Prove(crypto.Keccak256(acc[:]), 0, proofsV2)
}
}
// Send the proof request and verify the response
switch protocol {
case 1:
cost := server.tPeer.GetRequestCost(GetProofsV1Msg, len(proofreqs))
sendRequest(server.tPeer.app, GetProofsV1Msg, 42, cost, proofreqs)
if err := expectResponse(server.tPeer.app, ProofsV1Msg, 42, testBufLimit, proofsV1); err != nil {
t.Errorf("proofs mismatch: %v", err)
}
case 2:
cost := server.tPeer.GetRequestCost(GetProofsV2Msg, len(proofreqs))
sendRequest(server.tPeer.app, GetProofsV2Msg, 42, cost, proofreqs)
if err := expectResponse(server.tPeer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.NodeList()); err != nil {
t.Errorf("proofs mismatch: %v", err)
}
cost := server.tPeer.GetRequestCost(GetProofsV2Msg, len(proofreqs))
sendRequest(server.tPeer.app, GetProofsV2Msg, 42, cost, proofreqs)
if err := expectResponse(server.tPeer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.NodeList()); err != nil {
t.Errorf("proofs mismatch: %v", err)
}
}
// Tests that CHT proofs can be correctly retrieved.
func TestGetCHTProofsLes1(t *testing.T) { testGetCHTProofs(t, 1) }
func TestGetCHTProofsLes2(t *testing.T) { testGetCHTProofs(t, 2) }
func testGetCHTProofs(t *testing.T, protocol int) {
config := light.TestServerIndexerConfig
frequency := config.ChtSize
if protocol == 2 {
if protocol == 2 {
frequency = config.PairChtSize
}
@ -395,31 +369,13 @@ func testGetCHTProofs(t *testing.T, protocol int) {
key := make([]byte, 8)
binary.BigEndian.PutUint64(key, frequency-1)
proofsV1 := []ChtResp{{
Header: header,
}}
proofsV2 := HelperTrieResps{
AuxData: [][]byte{rlp},
}
switch protocol {
case 1:
root := light.GetChtRoot(server.db, 0, bc.GetHeaderByNumber(frequency-1).Hash())
trie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))
var proof light.NodeList
trie.Prove(key, 0, &proof)
proofsV1[0].Proof = proof
case 2:
root := light.GetChtRoot(server.db, (frequency/config.ChtSize)-1, bc.GetHeaderByNumber(frequency-1).Hash())
trie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))
trie.Prove(key, 0, &proofsV2.Proofs)
}
root := light.GetChtRoot(server.db, (frequency/config.ChtSize)-1, bc.GetHeaderByNumber(frequency-1).Hash())
trie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))
trie.Prove(key, 0, &proofsV2.Proofs)
// Assemble the requests for the different protocols
requestsV1 := []ChtReq{{
ChtNum: frequency / config.ChtSize,
BlockNum: frequency - 1,
}}
requestsV2 := []HelperTrieReq{{
Type: htCanonical,
TrieIdx: frequency/config.PairChtSize - 1,
@ -427,19 +383,10 @@ func testGetCHTProofs(t *testing.T, protocol int) {
AuxReq: auxHeader,
}}
// Send the proof request and verify the response
switch protocol {
case 1:
cost := server.tPeer.GetRequestCost(GetHeaderProofsMsg, len(requestsV1))
sendRequest(server.tPeer.app, GetHeaderProofsMsg, 42, cost, requestsV1)
if err := expectResponse(server.tPeer.app, HeaderProofsMsg, 42, testBufLimit, proofsV1); err != nil {
t.Errorf("proofs mismatch: %v", err)
}
case 2:
cost := server.tPeer.GetRequestCost(GetHelperTrieProofsMsg, len(requestsV2))
sendRequest(server.tPeer.app, GetHelperTrieProofsMsg, 42, cost, requestsV2)
if err := expectResponse(server.tPeer.app, HelperTrieProofsMsg, 42, testBufLimit, proofsV2); err != nil {
t.Errorf("proofs mismatch: %v", err)
}
cost := server.tPeer.GetRequestCost(GetHelperTrieProofsMsg, len(requestsV2))
sendRequest(server.tPeer.app, GetHelperTrieProofsMsg, 42, cost, requestsV2)
if err := expectResponse(server.tPeer.app, HelperTrieProofsMsg, 42, testBufLimit, proofsV2); err != nil {
t.Errorf("proofs mismatch: %v", err)
}
}

View File

@ -84,9 +84,7 @@ const (
MsgBlockBodies = iota
MsgCode
MsgReceipts
MsgProofsV1
MsgProofsV2
MsgHeaderProofs
MsgHelperTrieProofs
)

View File

@ -188,14 +188,7 @@ type TrieRequest light.TrieRequest
// GetCost returns the cost of the given ODR request according to the serving
// peer's cost table (implementation of LesOdrRequest)
func (r *TrieRequest) GetCost(peer *peer) uint64 {
switch peer.version {
case lpv1:
return peer.GetRequestCost(GetProofsV1Msg, 1)
case lpv2:
return peer.GetRequestCost(GetProofsV2Msg, 1)
default:
panic(nil)
}
return peer.GetRequestCost(GetProofsV2Msg, 1)
}
// CanSend tells if a certain peer is suitable for serving the given request
@ -220,38 +213,22 @@ func (r *TrieRequest) Request(reqID uint64, peer *peer) error {
func (r *TrieRequest) Validate(db ethdb.Database, msg *Msg) error {
log.Debug("Validating trie proof", "root", r.Id.Root, "key", r.Key)
switch msg.MsgType {
case MsgProofsV1:
proofs := msg.Obj.([]light.NodeList)
if len(proofs) != 1 {
return errInvalidEntryCount
}
nodeSet := proofs[0].NodeSet()
// Verify the proof and store if checks out
if _, _, err := trie.VerifyProof(r.Id.Root, r.Key, nodeSet); err != nil {
return fmt.Errorf("merkle proof verification failed: %v", err)
}
r.Proof = nodeSet
return nil
case MsgProofsV2:
proofs := msg.Obj.(light.NodeList)
// Verify the proof and store if checks out
nodeSet := proofs.NodeSet()
reads := &readTraceDB{db: nodeSet}
if _, _, err := trie.VerifyProof(r.Id.Root, r.Key, reads); err != nil {
return fmt.Errorf("merkle proof verification failed: %v", err)
}
// check if all nodes have been read by VerifyProof
if len(reads.reads) != nodeSet.KeyCount() {
return errUselessNodes
}
r.Proof = nodeSet
return nil
default:
if msg.MsgType != MsgProofsV2 {
return errInvalidMessageType
}
proofs := msg.Obj.(light.NodeList)
// Verify the proof and store if checks out
nodeSet := proofs.NodeSet()
reads := &readTraceDB{db: nodeSet}
if _, _, err := trie.VerifyProof(r.Id.Root, r.Key, reads); err != nil {
return fmt.Errorf("merkle proof verification failed: %v", err)
}
// check if all nodes have been read by VerifyProof
if len(reads.reads) != nodeSet.KeyCount() {
return errUselessNodes
}
r.Proof = nodeSet
return nil
}
type CodeReq struct {
@ -330,32 +307,13 @@ type HelperTrieResps struct { // describes all responses, not just a single one
AuxData [][]byte
}
// legacy LES/1
type ChtReq struct {
ChtNum, BlockNum uint64
FromLevel uint
}
// legacy LES/1
type ChtResp struct {
Header *types.Header
Proof []rlp.RawValue
}
// ODR request type for requesting headers by Canonical Hash Trie, see LesOdrRequest interface
type ChtRequest light.ChtRequest
// GetCost returns the cost of the given ODR request according to the serving
// peer's cost table (implementation of LesOdrRequest)
func (r *ChtRequest) GetCost(peer *peer) uint64 {
switch peer.version {
case lpv1:
return peer.GetRequestCost(GetHeaderProofsMsg, 1)
case lpv2:
return peer.GetRequestCost(GetHelperTrieProofsMsg, 1)
default:
panic(nil)
}
return peer.GetRequestCost(GetHelperTrieProofsMsg, 1)
}
// CanSend tells if a certain peer is suitable for serving the given request
@ -377,21 +335,7 @@ func (r *ChtRequest) Request(reqID uint64, peer *peer) error {
Key: encNum[:],
AuxReq: auxHeader,
}
switch peer.version {
case lpv1:
var reqsV1 ChtReq
if req.Type != htCanonical || req.AuxReq != auxHeader || len(req.Key) != 8 {
return fmt.Errorf("Request invalid in LES/1 mode")
}
blockNum := binary.BigEndian.Uint64(req.Key)
// convert HelperTrie request to old CHT request
reqsV1 = ChtReq{ChtNum: (req.TrieIdx + 1) * (r.Config.ChtSize / r.Config.PairChtSize), BlockNum: blockNum, FromLevel: req.FromLevel}
return peer.RequestHelperTrieProofs(reqID, r.GetCost(peer), []ChtReq{reqsV1})
case lpv2:
return peer.RequestHelperTrieProofs(reqID, r.GetCost(peer), []HelperTrieReq{req})
default:
panic(nil)
}
return peer.RequestHelperTrieProofs(reqID, r.GetCost(peer), []HelperTrieReq{req})
}
// Valid processes an ODR request reply message from the LES network
@ -400,78 +344,50 @@ func (r *ChtRequest) Request(reqID uint64, peer *peer) error {
func (r *ChtRequest) Validate(db ethdb.Database, msg *Msg) error {
log.Debug("Validating CHT", "cht", r.ChtNum, "block", r.BlockNum)
switch msg.MsgType {
case MsgHeaderProofs: // LES/1 backwards compatibility
proofs := msg.Obj.([]ChtResp)
if len(proofs) != 1 {
return errInvalidEntryCount
}
proof := proofs[0]
// Verify the CHT
var encNumber [8]byte
binary.BigEndian.PutUint64(encNumber[:], r.BlockNum)
value, _, err := trie.VerifyProof(r.ChtRoot, encNumber[:], light.NodeList(proof.Proof).NodeSet())
if err != nil {
return err
}
var node light.ChtNode
if err := rlp.DecodeBytes(value, &node); err != nil {
return err
}
if node.Hash != proof.Header.Hash() {
return errCHTHashMismatch
}
// Verifications passed, store and return
r.Header = proof.Header
r.Proof = light.NodeList(proof.Proof).NodeSet()
r.Td = node.Td
case MsgHelperTrieProofs:
resp := msg.Obj.(HelperTrieResps)
if len(resp.AuxData) != 1 {
return errInvalidEntryCount
}
nodeSet := resp.Proofs.NodeSet()
headerEnc := resp.AuxData[0]
if len(headerEnc) == 0 {
return errHeaderUnavailable
}
header := new(types.Header)
if err := rlp.DecodeBytes(headerEnc, header); err != nil {
return errHeaderUnavailable
}
// Verify the CHT
var encNumber [8]byte
binary.BigEndian.PutUint64(encNumber[:], r.BlockNum)
reads := &readTraceDB{db: nodeSet}
value, _, err := trie.VerifyProof(r.ChtRoot, encNumber[:], reads)
if err != nil {
return fmt.Errorf("merkle proof verification failed: %v", err)
}
if len(reads.reads) != nodeSet.KeyCount() {
return errUselessNodes
}
var node light.ChtNode
if err := rlp.DecodeBytes(value, &node); err != nil {
return err
}
if node.Hash != header.Hash() {
return errCHTHashMismatch
}
if r.BlockNum != header.Number.Uint64() {
return errCHTNumberMismatch
}
// Verifications passed, store and return
r.Header = header
r.Proof = nodeSet
r.Td = node.Td
default:
if msg.MsgType != MsgHelperTrieProofs {
return errInvalidMessageType
}
resp := msg.Obj.(HelperTrieResps)
if len(resp.AuxData) != 1 {
return errInvalidEntryCount
}
nodeSet := resp.Proofs.NodeSet()
headerEnc := resp.AuxData[0]
if len(headerEnc) == 0 {
return errHeaderUnavailable
}
header := new(types.Header)
if err := rlp.DecodeBytes(headerEnc, header); err != nil {
return errHeaderUnavailable
}
// Verify the CHT
var encNumber [8]byte
binary.BigEndian.PutUint64(encNumber[:], r.BlockNum)
reads := &readTraceDB{db: nodeSet}
value, _, err := trie.VerifyProof(r.ChtRoot, encNumber[:], reads)
if err != nil {
return fmt.Errorf("merkle proof verification failed: %v", err)
}
if len(reads.reads) != nodeSet.KeyCount() {
return errUselessNodes
}
var node light.ChtNode
if err := rlp.DecodeBytes(value, &node); err != nil {
return err
}
if node.Hash != header.Hash() {
return errCHTHashMismatch
}
if r.BlockNum != header.Number.Uint64() {
return errCHTNumberMismatch
}
// Verifications passed, store and return
r.Header = header
r.Proof = nodeSet
r.Td = node.Td
return nil
}
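Both Validate methods above wrap the received node set in a readTraceDB before calling trie.VerifyProof, then compare the recorded reads against nodeSet.KeyCount() to reject padded responses (errUselessNodes). A sketch of such a read-tracing wrapper, assuming a plain Get-style key-value reader in place of the real node-set type:

package lessketch

// kvReader is an assumed stand-in for the key-value interface the proof
// verifier reads trie nodes through.
type kvReader interface {
	Get(key []byte) ([]byte, error)
}

// readTraceDB records every key that gets read during proof verification so
// the caller can check that the response contained no unnecessary nodes.
type readTraceDB struct {
	db    kvReader
	reads map[string]struct{}
}

// Get forwards to the underlying reader and remembers the key.
func (db *readTraceDB) Get(k []byte) ([]byte, error) {
	if db.reads == nil {
		db.reads = make(map[string]struct{})
	}
	db.reads[string(k)] = struct{}{}
	return db.db.Get(k)
}

// Has reports whether the key resolves, also recording the access.
func (db *readTraceDB) Has(k []byte) (bool, error) {
	_, err := db.Get(k)
	return err == nil, err
}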

View File

@ -38,8 +38,6 @@ import (
type odrTestFn func(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte
func TestOdrGetBlockLes1(t *testing.T) { testOdr(t, 1, 1, odrGetBlock) }
func TestOdrGetBlockLes2(t *testing.T) { testOdr(t, 2, 1, odrGetBlock) }
func odrGetBlock(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {
@ -56,8 +54,6 @@ func odrGetBlock(ctx context.Context, db ethdb.Database, config *params.ChainCon
return rlp
}
func TestOdrGetReceiptsLes1(t *testing.T) { testOdr(t, 1, 1, odrGetReceipts) }
func TestOdrGetReceiptsLes2(t *testing.T) { testOdr(t, 2, 1, odrGetReceipts) }
func odrGetReceipts(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {
@ -78,8 +74,6 @@ func odrGetReceipts(ctx context.Context, db ethdb.Database, config *params.Chain
return rlp
}
func TestOdrAccountsLes1(t *testing.T) { testOdr(t, 1, 1, odrAccounts) }
func TestOdrAccountsLes2(t *testing.T) { testOdr(t, 2, 1, odrAccounts) }
func odrAccounts(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {
@ -108,8 +102,6 @@ func odrAccounts(ctx context.Context, db ethdb.Database, config *params.ChainCon
return res
}
func TestOdrContractCallLes1(t *testing.T) { testOdr(t, 1, 2, odrContractCall) }
func TestOdrContractCallLes2(t *testing.T) { testOdr(t, 2, 2, odrContractCall) }
type callmsg struct {

View File

@ -34,10 +34,9 @@ import (
)
var (
errClosed = errors.New("peer set is closed")
errAlreadyRegistered = errors.New("peer is already registered")
errNotRegistered = errors.New("peer is not registered")
errInvalidHelpTrieReq = errors.New("invalid help trie request")
errClosed = errors.New("peer set is closed")
errAlreadyRegistered = errors.New("peer is already registered")
errNotRegistered = errors.New("peer is not registered")
)
const maxResponseErrors = 50 // number of invalid responses tolerated (makes the protocol less brittle but still avoids spam)
@ -244,18 +243,8 @@ func (p *peer) GetTxRelayCost(amount, size int) uint64 {
p.lock.RLock()
defer p.lock.RUnlock()
var msgcode uint64
switch p.version {
case lpv1:
msgcode = SendTxMsg
case lpv2:
msgcode = SendTxV2Msg
default:
panic(nil)
}
cost := p.fcCosts[msgcode].baseCost + p.fcCosts[msgcode].reqCost*uint64(amount)
sizeCost := p.fcCosts[msgcode].baseCost + p.fcCosts[msgcode].reqCost*uint64(size)/txSizeCostLimit
cost := p.fcCosts[SendTxV2Msg].baseCost + p.fcCosts[SendTxV2Msg].reqCost*uint64(amount)
sizeCost := p.fcCosts[SendTxV2Msg].baseCost + p.fcCosts[SendTxV2Msg].reqCost*uint64(size)/txSizeCostLimit
if sizeCost > cost {
cost = sizeCost
}
@ -307,24 +296,12 @@ func (p *peer) ReplyReceiptsRLP(reqID uint64, receipts []rlp.RawValue) *reply {
return &reply{p.rw, ReceiptsMsg, reqID, data}
}
// ReplyProofs creates a reply with a batch of legacy LES/1 merkle proofs, corresponding to the ones requested.
func (p *peer) ReplyProofs(reqID uint64, proofs proofsData) *reply {
data, _ := rlp.EncodeToBytes(proofs)
return &reply{p.rw, ProofsV1Msg, reqID, data}
}
// ReplyProofsV2 creates a reply with a batch of merkle proofs, corresponding to the ones requested.
func (p *peer) ReplyProofsV2(reqID uint64, proofs light.NodeList) *reply {
data, _ := rlp.EncodeToBytes(proofs)
return &reply{p.rw, ProofsV2Msg, reqID, data}
}
// ReplyHeaderProofs creates a reply with a batch of legacy LES/1 header proofs, corresponding to the ones requested.
func (p *peer) ReplyHeaderProofs(reqID uint64, proofs []ChtResp) *reply {
data, _ := rlp.EncodeToBytes(proofs)
return &reply{p.rw, HeaderProofsMsg, reqID, data}
}
// ReplyHelperTrieProofs creates a reply with a batch of HelperTrie proofs, corresponding to the ones requested.
func (p *peer) ReplyHelperTrieProofs(reqID uint64, resp HelperTrieResps) *reply {
data, _ := rlp.EncodeToBytes(resp)
@ -374,36 +351,13 @@ func (p *peer) RequestReceipts(reqID, cost uint64, hashes []common.Hash) error {
// RequestProofs fetches a batch of merkle proofs from a remote node.
func (p *peer) RequestProofs(reqID, cost uint64, reqs []ProofReq) error {
p.Log().Debug("Fetching batch of proofs", "count", len(reqs))
switch p.version {
case lpv1:
return sendRequest(p.rw, GetProofsV1Msg, reqID, cost, reqs)
case lpv2:
return sendRequest(p.rw, GetProofsV2Msg, reqID, cost, reqs)
default:
panic(nil)
}
return sendRequest(p.rw, GetProofsV2Msg, reqID, cost, reqs)
}
// RequestHelperTrieProofs fetches a batch of HelperTrie merkle proofs from a remote node.
func (p *peer) RequestHelperTrieProofs(reqID, cost uint64, data interface{}) error {
switch p.version {
case lpv1:
reqs, ok := data.([]ChtReq)
if !ok {
return errInvalidHelpTrieReq
}
p.Log().Debug("Fetching batch of header proofs", "count", len(reqs))
return sendRequest(p.rw, GetHeaderProofsMsg, reqID, cost, reqs)
case lpv2:
reqs, ok := data.([]HelperTrieReq)
if !ok {
return errInvalidHelpTrieReq
}
p.Log().Debug("Fetching batch of HelperTrie proofs", "count", len(reqs))
return sendRequest(p.rw, GetHelperTrieProofsMsg, reqID, cost, reqs)
default:
panic(nil)
}
func (p *peer) RequestHelperTrieProofs(reqID, cost uint64, reqs []HelperTrieReq) error {
p.Log().Debug("Fetching batch of HelperTrie proofs", "count", len(reqs))
return sendRequest(p.rw, GetHelperTrieProofsMsg, reqID, cost, reqs)
}
// RequestTxStatus fetches a batch of transaction status records from a remote node.
@ -415,14 +369,7 @@ func (p *peer) RequestTxStatus(reqID, cost uint64, txHashes []common.Hash) error
// SendTxs sends a batch of transactions to be added to the remote transaction pool.
func (p *peer) SendTxs(reqID, cost uint64, txs rlp.RawValue) error {
p.Log().Debug("Sending batch of transactions", "size", len(txs))
switch p.version {
case lpv1:
return p2p.Send(p.rw, SendTxMsg, txs) // old message format does not include reqID
case lpv2:
return sendRequest(p.rw, SendTxV2Msg, reqID, cost, txs)
default:
panic(nil)
}
return sendRequest(p.rw, SendTxV2Msg, reqID, cost, txs)
}
type keyValueEntry struct {

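The removed SendTxMsg branch was the last message sent without a request ID ("old message format does not include reqID"). Every remaining request goes through sendRequest, which presumably wraps the payload in a (ReqID, Data) envelope; a sketch under that assumption:

package lessketch

import "github.com/ethereum/go-ethereum/p2p"

// sendRequestSketch shows the assumed les/2 request envelope: the request ID
// travels in front of the payload so replies can be matched to requests.
func sendRequestSketch(w p2p.MsgWriter, msgcode, reqID uint64, data interface{}) error {
	type req struct {
		ReqID uint64
		Data  interface{}
	}
	return p2p.Send(w, msgcode, req{reqID, data})
}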
View File

@ -33,19 +33,18 @@ import (
// Constants to match up protocol versions and messages
const (
lpv1 = 1
lpv2 = 2
)
// Supported versions of the les protocol (first is primary)
var (
ClientProtocolVersions = []uint{lpv2, lpv1}
ServerProtocolVersions = []uint{lpv2, lpv1}
ClientProtocolVersions = []uint{lpv2}
ServerProtocolVersions = []uint{lpv2}
AdvertiseProtocolVersions = []uint{lpv2} // clients are searching for the first advertised protocol in the list
)
// Number of implemented messages corresponding to different protocol versions.
var ProtocolLengths = map[uint]uint64{lpv1: 15, lpv2: 22}
var ProtocolLengths = map[uint]uint64{lpv2: 22}
const (
NetworkId = 1
@ -54,7 +53,7 @@ const (
// les protocol message codes
const (
// Protocol messages belonging to LPV1
// Protocol messages inherited from LPV1
StatusMsg = 0x00
AnnounceMsg = 0x01
GetBlockHeadersMsg = 0x02
@ -63,14 +62,9 @@ const (
BlockBodiesMsg = 0x05
GetReceiptsMsg = 0x06
ReceiptsMsg = 0x07
GetProofsV1Msg = 0x08
ProofsV1Msg = 0x09
GetCodeMsg = 0x0a
CodeMsg = 0x0b
SendTxMsg = 0x0c
GetHeaderProofsMsg = 0x0d
HeaderProofsMsg = 0x0e
// Protocol messages belonging to LPV2
// Protocol messages introduced in LPV2
GetProofsV2Msg = 0x0f
ProofsV2Msg = 0x10
GetHelperTrieProofsMsg = 0x11
@ -89,10 +83,7 @@ var requests = map[uint64]requestInfo{
GetBlockHeadersMsg: {"GetBlockHeaders", MaxHeaderFetch},
GetBlockBodiesMsg: {"GetBlockBodies", MaxBodyFetch},
GetReceiptsMsg: {"GetReceipts", MaxReceiptFetch},
GetProofsV1Msg: {"GetProofsV1", MaxProofsFetch},
GetCodeMsg: {"GetCode", MaxCodeFetch},
SendTxMsg: {"SendTx", MaxTxSend},
GetHeaderProofsMsg: {"GetHeaderProofs", MaxHelperTrieProofsFetch},
GetProofsV2Msg: {"GetProofsV2", MaxProofsFetch},
GetHelperTrieProofsMsg: {"GetHelperTrieProofs", MaxHelperTrieProofsFetch},
SendTxV2Msg: {"SendTxV2", MaxTxSend},

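With ClientProtocolVersions and ServerProtocolVersions reduced to lpv2, only one p2p sub-protocol gets registered. A hedged sketch of how version lists and ProtocolLengths are commonly combined into p2p.Protocol entries; the runPeer callback is a placeholder for the real handshake and message loop:

package lessketch

import "github.com/ethereum/go-ethereum/p2p"

// makeProtocolsSketch builds one advertised sub-protocol per supported
// version, using the message-count table to set the protocol length.
func makeProtocolsSketch(
	versions []uint,
	lengths map[uint]uint64,
	runPeer func(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) error,
) []p2p.Protocol {
	protos := make([]p2p.Protocol, 0, len(versions))
	for _, v := range versions {
		v := v // capture the loop variable for the closure below
		protos = append(protos, p2p.Protocol{
			Name:    "les",
			Version: v,
			Length:  lengths[v],
			Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
				return runPeer(v, p, rw)
			},
		})
	}
	return protos
}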
View File

@ -36,24 +36,18 @@ func secAddr(addr common.Address) []byte {
type accessTestFn func(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest
func TestBlockAccessLes1(t *testing.T) { testAccess(t, 1, tfBlockAccess) }
func TestBlockAccessLes2(t *testing.T) { testAccess(t, 2, tfBlockAccess) }
func tfBlockAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest {
return &light.BlockRequest{Hash: bhash, Number: number}
}
func TestReceiptsAccessLes1(t *testing.T) { testAccess(t, 1, tfReceiptsAccess) }
func TestReceiptsAccessLes2(t *testing.T) { testAccess(t, 2, tfReceiptsAccess) }
func tfReceiptsAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest {
return &light.ReceiptsRequest{Hash: bhash, Number: number}
}
func TestTrieEntryAccessLes1(t *testing.T) { testAccess(t, 1, tfTrieEntryAccess) }
func TestTrieEntryAccessLes2(t *testing.T) { testAccess(t, 2, tfTrieEntryAccess) }
func tfTrieEntryAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest {
@ -63,8 +57,6 @@ func tfTrieEntryAccess(db ethdb.Database, bhash common.Hash, number uint64) ligh
return nil
}
func TestCodeAccessLes1(t *testing.T) { testAccess(t, 1, tfCodeAccess) }
func TestCodeAccessLes2(t *testing.T) { testAccess(t, 2, tfCodeAccess) }
func tfCodeAccess(db ethdb.Database, bhash common.Hash, num uint64) light.OdrRequest {

View File

@ -99,7 +99,7 @@ func (odr *testOdr) IndexerConfig() *IndexerConfig {
type odrTestFn func(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) ([]byte, error)
func TestOdrGetBlockLes1(t *testing.T) { testChainOdr(t, 1, odrGetBlock) }
func TestOdrGetBlockLes2(t *testing.T) { testChainOdr(t, 1, odrGetBlock) }
func odrGetBlock(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) ([]byte, error) {
var block *types.Block
@ -115,7 +115,7 @@ func odrGetBlock(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc
return rlp, nil
}
func TestOdrGetReceiptsLes1(t *testing.T) { testChainOdr(t, 1, odrGetReceipts) }
func TestOdrGetReceiptsLes2(t *testing.T) { testChainOdr(t, 1, odrGetReceipts) }
func odrGetReceipts(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) ([]byte, error) {
var receipts types.Receipts
@ -137,7 +137,7 @@ func odrGetReceipts(ctx context.Context, db ethdb.Database, bc *core.BlockChain,
return rlp, nil
}
func TestOdrAccountsLes1(t *testing.T) { testChainOdr(t, 1, odrAccounts) }
func TestOdrAccountsLes2(t *testing.T) { testChainOdr(t, 1, odrAccounts) }
func odrAccounts(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) ([]byte, error) {
dummyAddr := common.HexToAddress("1234567812345678123456781234567812345678")
@ -161,7 +161,7 @@ func odrAccounts(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc
return res, st.Error()
}
func TestOdrContractCallLes1(t *testing.T) { testChainOdr(t, 1, odrContractCall) }
func TestOdrContractCallLes2(t *testing.T) { testChainOdr(t, 1, odrContractCall) }
type callmsg struct {
types.Message