forked from cerc-io/plugeth

Merge pull request #19403 from zsfelfoldi/remove-les1

les: remove support for LES/1

commit 7c08e48141
@@ -119,7 +119,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
 	leth.retriever = newRetrieveManager(peers, leth.reqDist, leth.serverPool)

 	leth.odr = NewLesOdr(chainDb, light.DefaultClientIndexerConfig, leth.retriever)
-	leth.chtIndexer = light.NewChtIndexer(chainDb, leth.odr, params.CHTFrequencyClient, params.HelperTrieConfirmations)
+	leth.chtIndexer = light.NewChtIndexer(chainDb, leth.odr, params.CHTFrequency, params.HelperTrieConfirmations)
 	leth.bloomTrieIndexer = light.NewBloomTrieIndexer(chainDb, leth.odr, params.BloomBitsBlocksClient, params.BloomTrieFrequency)
 	leth.odr.SetIndexers(leth.chtIndexer, leth.bloomTrieIndexer, leth.bloomIndexer)

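Note: with LES/1 gone there is no longer a second, server-only CHT section size, so the separate params.CHTFrequencyClient and params.CHTFrequencyServer constants collapse into a single params.CHTFrequency throughout this commit. A minimal sketch of the relationship, assuming the historical go-ethereum values:

const (
	CHTFrequencyClient = 32768 // old: section size seen by LES/2 clients
	CHTFrequencyServer = 4096  // old: section size indexed by servers, kept for LES/1
	CHTFrequency       = 32768 // new: the single section size used by client and server
)

// All of the conversion code removed below was of this shape:
// clientSections := serverSections / (CHTFrequencyClient / CHTFrequencyServer)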
@@ -179,8 +179,6 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
 func lesTopic(genesisHash common.Hash, protocolVersion uint) discv5.Topic {
 	var name string
 	switch protocolVersion {
-	case lpv1:
-		name = "LES"
 	case lpv2:
 		name = "LES2"
 	default:
@@ -135,8 +135,7 @@ func (b *benchmarkHelperTrie) init(pm *ProtocolManager, count int) error {
 		b.sectionCount, b.headNum, _ = pm.server.bloomTrieIndexer.Sections()
 	} else {
 		b.sectionCount, _, _ = pm.server.chtIndexer.Sections()
-		b.sectionCount /= (params.CHTFrequencyClient / params.CHTFrequencyServer)
-		b.headNum = b.sectionCount*params.CHTFrequencyClient - 1
+		b.headNum = b.sectionCount*params.CHTFrequency - 1
 	}
 	if b.sectionCount == 0 {
 		return fmt.Errorf("no processed sections available")
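Note: the benchmark helper above shows the simplification in miniature: the server's CHT section count no longer has to be scaled down by CHTFrequencyClient/CHTFrequencyServer before computing the head block number.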
@@ -80,28 +80,16 @@ func (c *lesCommons) nodeInfo() interface{} {
 	sections, _, _ := c.chtIndexer.Sections()
 	sections2, _, _ := c.bloomTrieIndexer.Sections()

-	if !c.protocolManager.lightSync {
-		// convert to client section size if running in server mode
-		sections /= c.iConfig.PairChtSize / c.iConfig.ChtSize
-	}
-
 	if sections2 < sections {
 		sections = sections2
 	}
 	if sections > 0 {
 		sectionIndex := sections - 1
 		sectionHead := c.bloomTrieIndexer.SectionHead(sectionIndex)
-		var chtRoot common.Hash
-		if c.protocolManager.lightSync {
-			chtRoot = light.GetChtRoot(c.chainDb, sectionIndex, sectionHead)
-		} else {
-			idxV2 := (sectionIndex+1)*c.iConfig.PairChtSize/c.iConfig.ChtSize - 1
-			chtRoot = light.GetChtRoot(c.chainDb, idxV2, sectionHead)
-		}
 		cht = params.TrustedCheckpoint{
 			SectionIndex: sectionIndex,
 			SectionHead:  sectionHead,
-			CHTRoot:      chtRoot,
+			CHTRoot:      light.GetChtRoot(c.chainDb, sectionIndex, sectionHead),
 			BloomRoot:    light.GetBloomTrieRoot(c.chainDb, sectionIndex, sectionHead),
 		}
 	}
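Note: nodeInfo previously had to rescale the section count and select the CHT root differently depending on whether the node was a server or a syncing light client; with one section size both branches collapse into a direct light.GetChtRoot(chainDb, sectionIndex, sectionHead) lookup, so the temporary chtRoot variable disappears.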
@@ -39,11 +39,8 @@ var (
 	GetBlockBodiesMsg:      {0, 700000},
 	GetReceiptsMsg:         {0, 1000000},
 	GetCodeMsg:             {0, 450000},
-	GetProofsV1Msg:         {0, 600000},
 	GetProofsV2Msg:         {0, 600000},
-	GetHeaderProofsMsg:     {0, 1000000},
 	GetHelperTrieProofsMsg: {0, 1000000},
-	SendTxMsg:              {0, 450000},
 	SendTxV2Msg:            {0, 450000},
 	GetTxStatusMsg:         {0, 250000},
 }
@@ -53,11 +50,8 @@ var (
 	GetBlockBodiesMsg:      {0, 40},
 	GetReceiptsMsg:         {0, 40},
 	GetCodeMsg:             {0, 80},
-	GetProofsV1Msg:         {0, 80},
 	GetProofsV2Msg:         {0, 80},
-	GetHeaderProofsMsg:     {0, 20},
 	GetHelperTrieProofsMsg: {0, 20},
-	SendTxMsg:              {0, 66000},
 	SendTxV2Msg:            {0, 66000},
 	GetTxStatusMsg:         {0, 50},
 }
@@ -67,11 +61,8 @@ var (
 	GetBlockBodiesMsg:      {0, 100000},
 	GetReceiptsMsg:         {0, 200000},
 	GetCodeMsg:             {0, 50000},
-	GetProofsV1Msg:         {0, 4000},
 	GetProofsV2Msg:         {0, 4000},
-	GetHeaderProofsMsg:     {0, 4000},
 	GetHelperTrieProofsMsg: {0, 4000},
-	SendTxMsg:              {0, 0},
 	SendTxV2Msg:            {0, 100},
 	GetTxStatusMsg:         {0, 100},
 }
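Note: the three cost tables above lose only their LES/1 rows (GetProofsV1Msg, GetHeaderProofsMsg, SendTxMsg); the cost entries of the surviving LES/2 messages are unchanged.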
les/handler.go (191 changed lines)
@@ -772,80 +772,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 				Obj:     resp.Receipts,
 			}

-	case GetProofsV1Msg:
-		p.Log().Trace("Received proofs request")
-		// Decode the retrieval message
-		var req struct {
-			ReqID uint64
-			Reqs  []ProofReq
-		}
-		if err := msg.Decode(&req); err != nil {
-			return errResp(ErrDecode, "msg %v: %v", msg, err)
-		}
-		// Gather state data until the fetch or network limits is reached
-		var (
-			bytes  int
-			proofs proofsData
-		)
-		reqCnt := len(req.Reqs)
-		if !accept(req.ReqID, uint64(reqCnt), MaxProofsFetch) {
-			return errResp(ErrRequestRejected, "")
-		}
-		go func() {
-			for i, req := range req.Reqs {
-				if i != 0 && !task.waitOrStop() {
-					return
-				}
-				// Look up the root hash belonging to the request
-				number := rawdb.ReadHeaderNumber(pm.chainDb, req.BHash)
-				if number == nil {
-					p.Log().Warn("Failed to retrieve block num for proof", "hash", req.BHash)
-					continue
-				}
-				header := rawdb.ReadHeader(pm.chainDb, req.BHash, *number)
-				if header == nil {
-					p.Log().Warn("Failed to retrieve header for proof", "block", *number, "hash", req.BHash)
-					continue
-				}
-				// Open the account or storage trie for the request
-				statedb := pm.blockchain.StateCache()
-
-				var trie state.Trie
-				switch len(req.AccKey) {
-				case 0:
-					// No account key specified, open an account trie
-					trie, err = statedb.OpenTrie(header.Root)
-					if trie == nil || err != nil {
-						p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "root", header.Root, "err", err)
-						continue
-					}
-				default:
-					// Account key specified, open a storage trie
-					account, err := pm.getAccount(statedb.TrieDB(), header.Root, common.BytesToHash(req.AccKey))
-					if err != nil {
-						p.Log().Warn("Failed to retrieve account for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(req.AccKey), "err", err)
-						continue
-					}
-					trie, err = statedb.OpenStorageTrie(common.BytesToHash(req.AccKey), account.Root)
-					if trie == nil || err != nil {
-						p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(req.AccKey), "root", account.Root, "err", err)
-						continue
-					}
-				}
-				// Prove the user's request from the account or storage trie
-				var proof light.NodeList
-				if err := trie.Prove(req.Key, 0, &proof); err != nil {
-					p.Log().Warn("Failed to prove state request", "block", header.Number, "hash", header.Hash(), "err", err)
-					continue
-				}
-				proofs = append(proofs, proof)
-				if bytes += proof.DataSize(); bytes >= softResponseLimit {
-					break
-				}
-			}
-			sendResponse(req.ReqID, uint64(reqCnt), p.ReplyProofs(req.ReqID, proofs), task.done())
-		}()
-
 	case GetProofsV2Msg:
 		p.Log().Trace("Received les/2 proofs request")
 		// Decode the retrieval message
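Note: the removed V1 handler built one self-contained proof per request (a []light.NodeList), while the surviving V2 handler accumulates everything into a single light.NodeSet, so trie nodes shared between proofs are sent only once. A rough sketch of the difference, reusing the API exactly as it appears in this diff (tr and key stand in for the trie and request key handled above):

// LES/1 (removed): an independent proof list per key; shared nodes duplicated.
var proofsV1 []light.NodeList
var proof light.NodeList
tr.Prove(key, 0, &proof)
proofsV1 = append(proofsV1, proof)

// LES/2: one merged node set for the whole response.
nodes := light.NewNodeSet()
tr.Prove(key, 0, nodes)
reply := nodes.NodeList()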
@@ -927,27 +853,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			sendResponse(req.ReqID, uint64(reqCnt), p.ReplyProofsV2(req.ReqID, nodes.NodeList()), task.done())
 		}()

-	case ProofsV1Msg:
-		if pm.odr == nil {
-			return errResp(ErrUnexpectedResponse, "")
-		}
-
-		p.Log().Trace("Received proofs response")
-		// A batch of merkle proofs arrived to one of our previous requests
-		var resp struct {
-			ReqID, BV uint64
-			Data      []light.NodeList
-		}
-		if err := msg.Decode(&resp); err != nil {
-			return errResp(ErrDecode, "msg %v: %v", msg, err)
-		}
-		p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
-		deliverMsg = &Msg{
-			MsgType: MsgProofsV1,
-			ReqID:   resp.ReqID,
-			Obj:     resp.Data,
-		}
-
 	case ProofsV2Msg:
 		if pm.odr == nil {
 			return errResp(ErrUnexpectedResponse, "")
@@ -969,54 +874,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			Obj:     resp.Data,
 		}

-	case GetHeaderProofsMsg:
-		p.Log().Trace("Received headers proof request")
-		// Decode the retrieval message
-		var req struct {
-			ReqID uint64
-			Reqs  []ChtReq
-		}
-		if err := msg.Decode(&req); err != nil {
-			return errResp(ErrDecode, "msg %v: %v", msg, err)
-		}
-		// Gather state data until the fetch or network limits is reached
-		var (
-			bytes  int
-			proofs []ChtResp
-		)
-		reqCnt := len(req.Reqs)
-		if !accept(req.ReqID, uint64(reqCnt), MaxHelperTrieProofsFetch) {
-			return errResp(ErrRequestRejected, "")
-		}
-		go func() {
-			trieDb := trie.NewDatabase(rawdb.NewTable(pm.chainDb, light.ChtTablePrefix))
-			for i, req := range req.Reqs {
-				if i != 0 && !task.waitOrStop() {
-					return
-				}
-				if header := pm.blockchain.GetHeaderByNumber(req.BlockNum); header != nil {
-					sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, req.ChtNum*pm.iConfig.ChtSize-1)
-					if root := light.GetChtRoot(pm.chainDb, req.ChtNum-1, sectionHead); root != (common.Hash{}) {
-						trie, err := trie.New(root, trieDb)
-						if err != nil {
-							continue
-						}
-						var encNumber [8]byte
-						binary.BigEndian.PutUint64(encNumber[:], req.BlockNum)
-
-						var proof light.NodeList
-						trie.Prove(encNumber[:], 0, &proof)
-
-						proofs = append(proofs, ChtResp{Header: header, Proof: proof})
-						if bytes += proof.DataSize() + estHeaderRlpSize; bytes >= softResponseLimit {
-							break
-						}
-					}
-				}
-			}
-			sendResponse(req.ReqID, uint64(reqCnt), p.ReplyHeaderProofs(req.ReqID, proofs), task.done())
-		}()
-
 	case GetHelperTrieProofsMsg:
 		p.Log().Trace("Received helper trie proof request")
 		// Decode the retrieval message
@@ -1081,26 +938,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			sendResponse(req.ReqID, uint64(reqCnt), p.ReplyHelperTrieProofs(req.ReqID, HelperTrieResps{Proofs: nodes.NodeList(), AuxData: auxData}), task.done())
 		}()

-	case HeaderProofsMsg:
-		if pm.odr == nil {
-			return errResp(ErrUnexpectedResponse, "")
-		}
-
-		p.Log().Trace("Received headers proof response")
-		var resp struct {
-			ReqID, BV uint64
-			Data      []ChtResp
-		}
-		if err := msg.Decode(&resp); err != nil {
-			return errResp(ErrDecode, "msg %v: %v", msg, err)
-		}
-		p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
-		deliverMsg = &Msg{
-			MsgType: MsgHeaderProofs,
-			ReqID:   resp.ReqID,
-			Obj:     resp.Data,
-		}
-
 	case HelperTrieProofsMsg:
 		if pm.odr == nil {
 			return errResp(ErrUnexpectedResponse, "")
@@ -1122,29 +959,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			Obj:     resp.Data,
 		}

-	case SendTxMsg:
-		if pm.txpool == nil {
-			return errResp(ErrRequestRejected, "")
-		}
-		// Transactions arrived, parse all of them and deliver to the pool
-		var txs []*types.Transaction
-		if err := msg.Decode(&txs); err != nil {
-			return errResp(ErrDecode, "msg %v: %v", msg, err)
-		}
-		reqCnt := len(txs)
-		if !accept(0, uint64(reqCnt), MaxTxSend) {
-			return errResp(ErrRequestRejected, "")
-		}
-		go func() {
-			for i, tx := range txs {
-				if i != 0 && !task.waitOrStop() {
-					return
-				}
-				pm.txpool.AddRemotes([]*types.Transaction{tx})
-			}
-			sendResponse(0, uint64(reqCnt), nil, task.done())
-		}()
-
 	case SendTxV2Msg:
 		if pm.txpool == nil {
 			return errResp(ErrRequestRejected, "")
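Note: SendTxMsg (removed) carried a bare transaction list with no request ID, which is why its handler had to call sendResponse with a hard-coded id of 0; SendTxV2Msg wraps the same batch with a reqID so the reply can participate in flow-control accounting.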
@@ -1261,9 +1075,8 @@ func (pm *ProtocolManager) getAccount(triedb *trie.Database, root, hash common.H
 func (pm *ProtocolManager) getHelperTrie(id uint, idx uint64) (common.Hash, string) {
 	switch id {
 	case htCanonical:
-		idxV1 := (idx+1)*(pm.iConfig.PairChtSize/pm.iConfig.ChtSize) - 1
-		sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, (idxV1+1)*pm.iConfig.ChtSize-1)
-		return light.GetChtRoot(pm.chainDb, idxV1, sectionHead), light.ChtTablePrefix
+		sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, (idx+1)*pm.iConfig.ChtSize-1)
+		return light.GetChtRoot(pm.chainDb, idx, sectionHead), light.ChtTablePrefix
 	case htBloomBits:
 		sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, (idx+1)*pm.iConfig.BloomTrieSize-1)
 		return light.GetBloomTrieRoot(pm.chainDb, idx, sectionHead), light.BloomTrieTablePrefix
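Note: getHelperTrie no longer converts the requested CHT index to a LES/1 index (idxV1) before resolving the section head; with a single section size the index from the request can be used directly.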
@@ -46,7 +46,6 @@ func expectResponse(r p2p.MsgReader, msgcode, reqID, bv uint64, data interface{}
 }

 // Tests that block headers can be retrieved from a remote chain based on user queries.
-func TestGetBlockHeadersLes1(t *testing.T) { testGetBlockHeaders(t, 1) }
 func TestGetBlockHeadersLes2(t *testing.T) { testGetBlockHeaders(t, 2) }

 func testGetBlockHeaders(t *testing.T, protocol int) {
@@ -174,7 +173,6 @@ func testGetBlockHeaders(t *testing.T, protocol int) {
 }

 // Tests that block contents can be retrieved from a remote chain based on their hashes.
-func TestGetBlockBodiesLes1(t *testing.T) { testGetBlockBodies(t, 1) }
 func TestGetBlockBodiesLes2(t *testing.T) { testGetBlockBodies(t, 2) }

 func testGetBlockBodies(t *testing.T, protocol int) {
@@ -249,7 +247,6 @@ func testGetBlockBodies(t *testing.T, protocol int) {
 }

 // Tests that the contract codes can be retrieved based on account addresses.
-func TestGetCodeLes1(t *testing.T) { testGetCode(t, 1) }
 func TestGetCodeLes2(t *testing.T) { testGetCode(t, 2) }

 func testGetCode(t *testing.T, protocol int) {
@@ -281,7 +278,6 @@ func testGetCode(t *testing.T, protocol int) {
 }

 // Tests that the transaction receipts can be retrieved based on hashes.
-func TestGetReceiptLes1(t *testing.T) { testGetReceipt(t, 1) }
 func TestGetReceiptLes2(t *testing.T) { testGetReceipt(t, 2) }

 func testGetReceipt(t *testing.T, protocol int) {
@@ -307,7 +303,6 @@ func testGetReceipt(t *testing.T, protocol int) {
 }

 // Tests that trie merkle proofs can be retrieved
-func TestGetProofsLes1(t *testing.T) { testGetProofs(t, 1) }
 func TestGetProofsLes2(t *testing.T) { testGetProofs(t, 2) }

 func testGetProofs(t *testing.T, protocol int) {
@@ -316,10 +311,7 @@ func testGetProofs(t *testing.T, protocol int) {
 	defer tearDown()
 	bc := server.pm.blockchain.(*core.BlockChain)

-	var (
-		proofreqs []ProofReq
-		proofsV1  [][]rlp.RawValue
-	)
+	var proofreqs []ProofReq
 	proofsV2 := light.NewNodeSet()

 	accounts := []common.Address{testBankAddress, acc1Addr, acc2Addr, {}}
@@ -334,112 +326,61 @@ func testGetProofs(t *testing.T, protocol int) {
 				Key:   crypto.Keccak256(acc[:]),
 			}
 			proofreqs = append(proofreqs, req)
-			switch protocol {
-			case 1:
-				var proof light.NodeList
-				trie.Prove(crypto.Keccak256(acc[:]), 0, &proof)
-				proofsV1 = append(proofsV1, proof)
-			case 2:
-				trie.Prove(crypto.Keccak256(acc[:]), 0, proofsV2)
-			}
+			trie.Prove(crypto.Keccak256(acc[:]), 0, proofsV2)
 		}
 	}
 	// Send the proof request and verify the response
-	switch protocol {
-	case 1:
-		cost := server.tPeer.GetRequestCost(GetProofsV1Msg, len(proofreqs))
-		sendRequest(server.tPeer.app, GetProofsV1Msg, 42, cost, proofreqs)
-		if err := expectResponse(server.tPeer.app, ProofsV1Msg, 42, testBufLimit, proofsV1); err != nil {
-			t.Errorf("proofs mismatch: %v", err)
-		}
-	case 2:
-		cost := server.tPeer.GetRequestCost(GetProofsV2Msg, len(proofreqs))
-		sendRequest(server.tPeer.app, GetProofsV2Msg, 42, cost, proofreqs)
-		if err := expectResponse(server.tPeer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.NodeList()); err != nil {
-			t.Errorf("proofs mismatch: %v", err)
-		}
+	cost := server.tPeer.GetRequestCost(GetProofsV2Msg, len(proofreqs))
+	sendRequest(server.tPeer.app, GetProofsV2Msg, 42, cost, proofreqs)
+	if err := expectResponse(server.tPeer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.NodeList()); err != nil {
+		t.Errorf("proofs mismatch: %v", err)
 	}
 }

 // Tests that CHT proofs can be correctly retrieved.
-func TestGetCHTProofsLes1(t *testing.T) { testGetCHTProofs(t, 1) }
 func TestGetCHTProofsLes2(t *testing.T) { testGetCHTProofs(t, 2) }

 func testGetCHTProofs(t *testing.T, protocol int) {
 	config := light.TestServerIndexerConfig
-	frequency := config.ChtSize
-	if protocol == 2 {
-		frequency = config.PairChtSize
-	}

 	waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
-		expectSections := frequency / config.ChtSize
 		for {
 			cs, _, _ := cIndexer.Sections()
-			bs, _, _ := bIndexer.Sections()
-			if cs >= expectSections && bs >= expectSections {
+			if cs >= 1 {
 				break
 			}
 			time.Sleep(10 * time.Millisecond)
 		}
 	}
-	server, tearDown := newServerEnv(t, int(frequency+config.ChtConfirms), protocol, waitIndexers)
+	server, tearDown := newServerEnv(t, int(config.ChtSize+config.ChtConfirms), protocol, waitIndexers)
 	defer tearDown()
 	bc := server.pm.blockchain.(*core.BlockChain)

 	// Assemble the proofs from the different protocols
-	header := bc.GetHeaderByNumber(frequency - 1)
+	header := bc.GetHeaderByNumber(config.ChtSize - 1)
 	rlp, _ := rlp.EncodeToBytes(header)

 	key := make([]byte, 8)
-	binary.BigEndian.PutUint64(key, frequency-1)
+	binary.BigEndian.PutUint64(key, config.ChtSize-1)

-	proofsV1 := []ChtResp{{
-		Header: header,
-	}}
 	proofsV2 := HelperTrieResps{
 		AuxData: [][]byte{rlp},
 	}
-	switch protocol {
-	case 1:
-		root := light.GetChtRoot(server.db, 0, bc.GetHeaderByNumber(frequency-1).Hash())
-		trie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))
-
-		var proof light.NodeList
-		trie.Prove(key, 0, &proof)
-		proofsV1[0].Proof = proof
-
-	case 2:
-		root := light.GetChtRoot(server.db, (frequency/config.ChtSize)-1, bc.GetHeaderByNumber(frequency-1).Hash())
-		trie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))
-		trie.Prove(key, 0, &proofsV2.Proofs)
-	}
+	root := light.GetChtRoot(server.db, 0, bc.GetHeaderByNumber(config.ChtSize-1).Hash())
+	trie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))
+	trie.Prove(key, 0, &proofsV2.Proofs)
 	// Assemble the requests for the different protocols
-	requestsV1 := []ChtReq{{
-		ChtNum:   frequency / config.ChtSize,
-		BlockNum: frequency - 1,
-	}}
 	requestsV2 := []HelperTrieReq{{
 		Type:    htCanonical,
-		TrieIdx: frequency/config.PairChtSize - 1,
+		TrieIdx: 0,
 		Key:     key,
 		AuxReq:  auxHeader,
 	}}
 	// Send the proof request and verify the response
-	switch protocol {
-	case 1:
-		cost := server.tPeer.GetRequestCost(GetHeaderProofsMsg, len(requestsV1))
-		sendRequest(server.tPeer.app, GetHeaderProofsMsg, 42, cost, requestsV1)
-		if err := expectResponse(server.tPeer.app, HeaderProofsMsg, 42, testBufLimit, proofsV1); err != nil {
-			t.Errorf("proofs mismatch: %v", err)
-		}
-	case 2:
-		cost := server.tPeer.GetRequestCost(GetHelperTrieProofsMsg, len(requestsV2))
-		sendRequest(server.tPeer.app, GetHelperTrieProofsMsg, 42, cost, requestsV2)
-		if err := expectResponse(server.tPeer.app, HelperTrieProofsMsg, 42, testBufLimit, proofsV2); err != nil {
-			t.Errorf("proofs mismatch: %v", err)
-		}
+	cost = server.tPeer.GetRequestCost(GetHelperTrieProofsMsg, len(requestsV2))
+	sendRequest(server.tPeer.app, GetHelperTrieProofsMsg, 42, cost, requestsV2)
+	if err := expectResponse(server.tPeer.app, HelperTrieProofsMsg, 42, testBufLimit, proofsV2); err != nil {
+		t.Errorf("proofs mismatch: %v", err)
 	}
 }

@@ -449,10 +390,8 @@ func TestGetBloombitsProofs(t *testing.T) {

 	waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
 		for {
-			cs, _, _ := cIndexer.Sections()
-			bs, _, _ := bIndexer.Sections()
 			bts, _, _ := btIndexer.Sections()
-			if cs >= 8 && bs >= 8 && bts >= 1 {
+			if bts >= 1 {
 				break
 			}
 			time.Sleep(10 * time.Millisecond)
@@ -84,9 +84,7 @@ const (
 	MsgBlockBodies = iota
 	MsgCode
 	MsgReceipts
-	MsgProofsV1
 	MsgProofsV2
-	MsgHeaderProofs
 	MsgHelperTrieProofs
 )

@@ -188,14 +188,7 @@ type TrieRequest light.TrieRequest
 // GetCost returns the cost of the given ODR request according to the serving
 // peer's cost table (implementation of LesOdrRequest)
 func (r *TrieRequest) GetCost(peer *peer) uint64 {
-	switch peer.version {
-	case lpv1:
-		return peer.GetRequestCost(GetProofsV1Msg, 1)
-	case lpv2:
-		return peer.GetRequestCost(GetProofsV2Msg, 1)
-	default:
-		panic(nil)
-	}
+	return peer.GetRequestCost(GetProofsV2Msg, 1)
 }

 // CanSend tells if a certain peer is suitable for serving the given request
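Note: with only one protocol version left, GetCost no longer dispatches on peer.version, and the unreachable panic(nil) default goes away. The same pattern repeats for ChtRequest below and for several methods in les/peer.go.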
@@ -220,38 +213,22 @@ func (r *TrieRequest) Request(reqID uint64, peer *peer) error {
 func (r *TrieRequest) Validate(db ethdb.Database, msg *Msg) error {
 	log.Debug("Validating trie proof", "root", r.Id.Root, "key", r.Key)

-	switch msg.MsgType {
-	case MsgProofsV1:
-		proofs := msg.Obj.([]light.NodeList)
-		if len(proofs) != 1 {
-			return errInvalidEntryCount
-		}
-		nodeSet := proofs[0].NodeSet()
-		// Verify the proof and store if checks out
-		if _, _, err := trie.VerifyProof(r.Id.Root, r.Key, nodeSet); err != nil {
-			return fmt.Errorf("merkle proof verification failed: %v", err)
-		}
-		r.Proof = nodeSet
-		return nil
-
-	case MsgProofsV2:
-		proofs := msg.Obj.(light.NodeList)
-		// Verify the proof and store if checks out
-		nodeSet := proofs.NodeSet()
-		reads := &readTraceDB{db: nodeSet}
-		if _, _, err := trie.VerifyProof(r.Id.Root, r.Key, reads); err != nil {
-			return fmt.Errorf("merkle proof verification failed: %v", err)
-		}
-		// check if all nodes have been read by VerifyProof
-		if len(reads.reads) != nodeSet.KeyCount() {
-			return errUselessNodes
-		}
-		r.Proof = nodeSet
-		return nil
-
-	default:
+	if msg.MsgType != MsgProofsV2 {
 		return errInvalidMessageType
 	}
+	proofs := msg.Obj.(light.NodeList)
+	// Verify the proof and store if checks out
+	nodeSet := proofs.NodeSet()
+	reads := &readTraceDB{db: nodeSet}
+	if _, _, err := trie.VerifyProof(r.Id.Root, r.Key, reads); err != nil {
+		return fmt.Errorf("merkle proof verification failed: %v", err)
+	}
+	// check if all nodes have been read by VerifyProof
+	if len(reads.reads) != nodeSet.KeyCount() {
+		return errUselessNodes
+	}
+	r.Proof = nodeSet
+	return nil
 }

 type CodeReq struct {
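Note: dropping the V1 branch turns the message-type switch into a guard clause and un-indents the V2 body by one level. The shape of the rewrite, in miniature:

// Before: dispatch on message type.
switch msg.MsgType {
case MsgProofsV1:
	// legacy path (deleted)
case MsgProofsV2:
	// current path
default:
	return errInvalidMessageType
}

// After: reject anything that is not V2, then handle V2 inline.
if msg.MsgType != MsgProofsV2 {
	return errInvalidMessageType
}
// current path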
@@ -330,32 +307,13 @@ type HelperTrieResps struct { // describes all responses, not just a single one
 	AuxData [][]byte
 }

-// legacy LES/1
-type ChtReq struct {
-	ChtNum, BlockNum uint64
-	FromLevel        uint
-}
-
-// legacy LES/1
-type ChtResp struct {
-	Header *types.Header
-	Proof  []rlp.RawValue
-}
-
 // ODR request type for requesting headers by Canonical Hash Trie, see LesOdrRequest interface
 type ChtRequest light.ChtRequest

 // GetCost returns the cost of the given ODR request according to the serving
 // peer's cost table (implementation of LesOdrRequest)
 func (r *ChtRequest) GetCost(peer *peer) uint64 {
-	switch peer.version {
-	case lpv1:
-		return peer.GetRequestCost(GetHeaderProofsMsg, 1)
-	case lpv2:
-		return peer.GetRequestCost(GetHelperTrieProofsMsg, 1)
-	default:
-		panic(nil)
-	}
+	return peer.GetRequestCost(GetHelperTrieProofsMsg, 1)
 }

 // CanSend tells if a certain peer is suitable for serving the given request
@@ -377,21 +335,7 @@ func (r *ChtRequest) Request(reqID uint64, peer *peer) error {
 		Key:    encNum[:],
 		AuxReq: auxHeader,
 	}
-	switch peer.version {
-	case lpv1:
-		var reqsV1 ChtReq
-		if req.Type != htCanonical || req.AuxReq != auxHeader || len(req.Key) != 8 {
-			return fmt.Errorf("Request invalid in LES/1 mode")
-		}
-		blockNum := binary.BigEndian.Uint64(req.Key)
-		// convert HelperTrie request to old CHT request
-		reqsV1 = ChtReq{ChtNum: (req.TrieIdx + 1) * (r.Config.ChtSize / r.Config.PairChtSize), BlockNum: blockNum, FromLevel: req.FromLevel}
-		return peer.RequestHelperTrieProofs(reqID, r.GetCost(peer), []ChtReq{reqsV1})
-	case lpv2:
-		return peer.RequestHelperTrieProofs(reqID, r.GetCost(peer), []HelperTrieReq{req})
-	default:
-		panic(nil)
-	}
+	return peer.RequestHelperTrieProofs(reqID, r.GetCost(peer), []HelperTrieReq{req})
 }

 // Valid processes an ODR request reply message from the LES network
@@ -400,78 +344,50 @@ func (r *ChtRequest) Request(reqID uint64, peer *peer) error {
 func (r *ChtRequest) Validate(db ethdb.Database, msg *Msg) error {
 	log.Debug("Validating CHT", "cht", r.ChtNum, "block", r.BlockNum)

-	switch msg.MsgType {
-	case MsgHeaderProofs: // LES/1 backwards compatibility
-		proofs := msg.Obj.([]ChtResp)
-		if len(proofs) != 1 {
-			return errInvalidEntryCount
-		}
-		proof := proofs[0]
-
-		// Verify the CHT
-		var encNumber [8]byte
-		binary.BigEndian.PutUint64(encNumber[:], r.BlockNum)
-
-		value, _, err := trie.VerifyProof(r.ChtRoot, encNumber[:], light.NodeList(proof.Proof).NodeSet())
-		if err != nil {
-			return err
-		}
-		var node light.ChtNode
-		if err := rlp.DecodeBytes(value, &node); err != nil {
-			return err
-		}
-		if node.Hash != proof.Header.Hash() {
-			return errCHTHashMismatch
-		}
-		// Verifications passed, store and return
-		r.Header = proof.Header
-		r.Proof = light.NodeList(proof.Proof).NodeSet()
-		r.Td = node.Td
-	case MsgHelperTrieProofs:
-		resp := msg.Obj.(HelperTrieResps)
-		if len(resp.AuxData) != 1 {
-			return errInvalidEntryCount
-		}
-		nodeSet := resp.Proofs.NodeSet()
-		headerEnc := resp.AuxData[0]
-		if len(headerEnc) == 0 {
-			return errHeaderUnavailable
-		}
-		header := new(types.Header)
-		if err := rlp.DecodeBytes(headerEnc, header); err != nil {
-			return errHeaderUnavailable
-		}
-
-		// Verify the CHT
-		var encNumber [8]byte
-		binary.BigEndian.PutUint64(encNumber[:], r.BlockNum)
-
-		reads := &readTraceDB{db: nodeSet}
-		value, _, err := trie.VerifyProof(r.ChtRoot, encNumber[:], reads)
-		if err != nil {
-			return fmt.Errorf("merkle proof verification failed: %v", err)
-		}
-		if len(reads.reads) != nodeSet.KeyCount() {
-			return errUselessNodes
-		}
-
-		var node light.ChtNode
-		if err := rlp.DecodeBytes(value, &node); err != nil {
-			return err
-		}
-		if node.Hash != header.Hash() {
-			return errCHTHashMismatch
-		}
-		if r.BlockNum != header.Number.Uint64() {
-			return errCHTNumberMismatch
-		}
-		// Verifications passed, store and return
-		r.Header = header
-		r.Proof = nodeSet
-		r.Td = node.Td
-	default:
+	if msg.MsgType != MsgHelperTrieProofs {
 		return errInvalidMessageType
 	}
+	resp := msg.Obj.(HelperTrieResps)
+	if len(resp.AuxData) != 1 {
+		return errInvalidEntryCount
+	}
+	nodeSet := resp.Proofs.NodeSet()
+	headerEnc := resp.AuxData[0]
+	if len(headerEnc) == 0 {
+		return errHeaderUnavailable
+	}
+	header := new(types.Header)
+	if err := rlp.DecodeBytes(headerEnc, header); err != nil {
+		return errHeaderUnavailable
+	}
+
+	// Verify the CHT
+	var encNumber [8]byte
+	binary.BigEndian.PutUint64(encNumber[:], r.BlockNum)
+
+	reads := &readTraceDB{db: nodeSet}
+	value, _, err := trie.VerifyProof(r.ChtRoot, encNumber[:], reads)
+	if err != nil {
+		return fmt.Errorf("merkle proof verification failed: %v", err)
+	}
+	if len(reads.reads) != nodeSet.KeyCount() {
+		return errUselessNodes
+	}
+
+	var node light.ChtNode
+	if err := rlp.DecodeBytes(value, &node); err != nil {
+		return err
+	}
+	if node.Hash != header.Hash() {
+		return errCHTHashMismatch
+	}
+	if r.BlockNum != header.Number.Uint64() {
+		return errCHTNumberMismatch
+	}
+	// Verifications passed, store and return
+	r.Header = header
+	r.Proof = nodeSet
+	r.Td = node.Td
 	return nil
 }

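Note: ChtRequest.Validate gets the same guard-clause treatment: only the MsgHelperTrieProofs branch survives, and its body (the readTraceDB read tracking, ChtNode decoding, and the hash and block-number checks) is hoisted out of the switch unchanged.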
@@ -38,8 +38,6 @@ import (

 type odrTestFn func(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte

-func TestOdrGetBlockLes1(t *testing.T) { testOdr(t, 1, 1, odrGetBlock) }
-
 func TestOdrGetBlockLes2(t *testing.T) { testOdr(t, 2, 1, odrGetBlock) }

 func odrGetBlock(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {
@@ -56,8 +54,6 @@ func odrGetBlock(ctx context.Context, db ethdb.Database, config *params.ChainCon
 	return rlp
 }

-func TestOdrGetReceiptsLes1(t *testing.T) { testOdr(t, 1, 1, odrGetReceipts) }
-
 func TestOdrGetReceiptsLes2(t *testing.T) { testOdr(t, 2, 1, odrGetReceipts) }

 func odrGetReceipts(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {
@@ -78,8 +74,6 @@ func odrGetReceipts(ctx context.Context, db ethdb.Database, config *params.Chain
 	return rlp
 }

-func TestOdrAccountsLes1(t *testing.T) { testOdr(t, 1, 1, odrAccounts) }
-
 func TestOdrAccountsLes2(t *testing.T) { testOdr(t, 2, 1, odrAccounts) }

 func odrAccounts(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {
@@ -108,8 +102,6 @@ func odrAccounts(ctx context.Context, db ethdb.Database, config *params.ChainCon
 	return res
 }

-func TestOdrContractCallLes1(t *testing.T) { testOdr(t, 1, 2, odrContractCall) }
-
 func TestOdrContractCallLes2(t *testing.T) { testOdr(t, 2, 2, odrContractCall) }

 type callmsg struct {
les/peer.go (73 changed lines)
@@ -34,10 +34,9 @@ import (
 )

 var (
 	errClosed            = errors.New("peer set is closed")
 	errAlreadyRegistered = errors.New("peer is already registered")
 	errNotRegistered     = errors.New("peer is not registered")
-	errInvalidHelpTrieReq = errors.New("invalid help trie request")
 )

 const maxResponseErrors = 50 // number of invalid responses tolerated (makes the protocol less brittle but still avoids spam)
@@ -244,18 +243,8 @@ func (p *peer) GetTxRelayCost(amount, size int) uint64 {
 	p.lock.RLock()
 	defer p.lock.RUnlock()

-	var msgcode uint64
-	switch p.version {
-	case lpv1:
-		msgcode = SendTxMsg
-	case lpv2:
-		msgcode = SendTxV2Msg
-	default:
-		panic(nil)
-	}
-
-	cost := p.fcCosts[msgcode].baseCost + p.fcCosts[msgcode].reqCost*uint64(amount)
-	sizeCost := p.fcCosts[msgcode].baseCost + p.fcCosts[msgcode].reqCost*uint64(size)/txSizeCostLimit
+	cost := p.fcCosts[SendTxV2Msg].baseCost + p.fcCosts[SendTxV2Msg].reqCost*uint64(amount)
+	sizeCost := p.fcCosts[SendTxV2Msg].baseCost + p.fcCosts[SendTxV2Msg].reqCost*uint64(size)/txSizeCostLimit
 	if sizeCost > cost {
 		cost = sizeCost
 	}
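Note: GetTxRelayCost now always prices against the SendTxV2Msg cost entry. Two candidate costs are computed and the larger wins; writing base and perReq as shorthand for p.fcCosts[SendTxV2Msg].baseCost and .reqCost, the formula is:

cost := base + perReq*uint64(amount)                   // per-transaction cost
sizeCost := base + perReq*uint64(size)/txSizeCostLimit // per-byte cost, scaled
if sizeCost > cost {
	cost = sizeCost // charge whichever bound is higher
}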
@@ -307,24 +296,12 @@ func (p *peer) ReplyReceiptsRLP(reqID uint64, receipts []rlp.RawValue) *reply {
 	return &reply{p.rw, ReceiptsMsg, reqID, data}
 }

-// ReplyProofs creates a reply with a batch of legacy LES/1 merkle proofs, corresponding to the ones requested.
-func (p *peer) ReplyProofs(reqID uint64, proofs proofsData) *reply {
-	data, _ := rlp.EncodeToBytes(proofs)
-	return &reply{p.rw, ProofsV1Msg, reqID, data}
-}
-
 // ReplyProofsV2 creates a reply with a batch of merkle proofs, corresponding to the ones requested.
 func (p *peer) ReplyProofsV2(reqID uint64, proofs light.NodeList) *reply {
 	data, _ := rlp.EncodeToBytes(proofs)
 	return &reply{p.rw, ProofsV2Msg, reqID, data}
 }

-// ReplyHeaderProofs creates a reply with a batch of legacy LES/1 header proofs, corresponding to the ones requested.
-func (p *peer) ReplyHeaderProofs(reqID uint64, proofs []ChtResp) *reply {
-	data, _ := rlp.EncodeToBytes(proofs)
-	return &reply{p.rw, HeaderProofsMsg, reqID, data}
-}
-
 // ReplyHelperTrieProofs creates a reply with a batch of HelperTrie proofs, corresponding to the ones requested.
 func (p *peer) ReplyHelperTrieProofs(reqID uint64, resp HelperTrieResps) *reply {
 	data, _ := rlp.EncodeToBytes(resp)
@@ -374,36 +351,13 @@ func (p *peer) RequestReceipts(reqID, cost uint64, hashes []common.Hash) error {
 // RequestProofs fetches a batch of merkle proofs from a remote node.
 func (p *peer) RequestProofs(reqID, cost uint64, reqs []ProofReq) error {
 	p.Log().Debug("Fetching batch of proofs", "count", len(reqs))
-	switch p.version {
-	case lpv1:
-		return sendRequest(p.rw, GetProofsV1Msg, reqID, cost, reqs)
-	case lpv2:
-		return sendRequest(p.rw, GetProofsV2Msg, reqID, cost, reqs)
-	default:
-		panic(nil)
-	}
+	return sendRequest(p.rw, GetProofsV2Msg, reqID, cost, reqs)
 }

 // RequestHelperTrieProofs fetches a batch of HelperTrie merkle proofs from a remote node.
-func (p *peer) RequestHelperTrieProofs(reqID, cost uint64, data interface{}) error {
-	switch p.version {
-	case lpv1:
-		reqs, ok := data.([]ChtReq)
-		if !ok {
-			return errInvalidHelpTrieReq
-		}
-		p.Log().Debug("Fetching batch of header proofs", "count", len(reqs))
-		return sendRequest(p.rw, GetHeaderProofsMsg, reqID, cost, reqs)
-	case lpv2:
-		reqs, ok := data.([]HelperTrieReq)
-		if !ok {
-			return errInvalidHelpTrieReq
-		}
-		p.Log().Debug("Fetching batch of HelperTrie proofs", "count", len(reqs))
-		return sendRequest(p.rw, GetHelperTrieProofsMsg, reqID, cost, reqs)
-	default:
-		panic(nil)
-	}
+func (p *peer) RequestHelperTrieProofs(reqID, cost uint64, reqs []HelperTrieReq) error {
+	p.Log().Debug("Fetching batch of HelperTrie proofs", "count", len(reqs))
+	return sendRequest(p.rw, GetHelperTrieProofsMsg, reqID, cost, reqs)
 }

 // RequestTxStatus fetches a batch of transaction status records from a remote node.
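Note: RequestHelperTrieProofs previously accepted an interface{} and type-switched between []ChtReq (LES/1) and []HelperTrieReq (LES/2); it is now strongly typed as []HelperTrieReq, which is also why errInvalidHelpTrieReq could be deleted from the error list at the top of les/peer.go.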
@@ -415,14 +369,7 @@ func (p *peer) RequestTxStatus(reqID, cost uint64, txHashes []common.Hash) error
 // SendTxStatus creates a reply with a batch of transactions to be added to the remote transaction pool.
 func (p *peer) SendTxs(reqID, cost uint64, txs rlp.RawValue) error {
 	p.Log().Debug("Sending batch of transactions", "size", len(txs))
-	switch p.version {
-	case lpv1:
-		return p2p.Send(p.rw, SendTxMsg, txs) // old message format does not include reqID
-	case lpv2:
-		return sendRequest(p.rw, SendTxV2Msg, reqID, cost, txs)
-	default:
-		panic(nil)
-	}
+	return sendRequest(p.rw, SendTxV2Msg, reqID, cost, txs)
 }

 type keyValueEntry struct {
|
@ -33,19 +33,18 @@ import (
|
|||||||
|
|
||||||
// Constants to match up protocol versions and messages
|
// Constants to match up protocol versions and messages
|
||||||
const (
|
const (
|
||||||
lpv1 = 1
|
|
||||||
lpv2 = 2
|
lpv2 = 2
|
||||||
)
|
)
|
||||||
|
|
||||||
// Supported versions of the les protocol (first is primary)
|
// Supported versions of the les protocol (first is primary)
|
||||||
var (
|
var (
|
||||||
ClientProtocolVersions = []uint{lpv2, lpv1}
|
ClientProtocolVersions = []uint{lpv2}
|
||||||
ServerProtocolVersions = []uint{lpv2, lpv1}
|
ServerProtocolVersions = []uint{lpv2}
|
||||||
AdvertiseProtocolVersions = []uint{lpv2} // clients are searching for the first advertised protocol in the list
|
AdvertiseProtocolVersions = []uint{lpv2} // clients are searching for the first advertised protocol in the list
|
||||||
)
|
)
|
||||||
|
|
||||||
// Number of implemented message corresponding to different protocol versions.
|
// Number of implemented message corresponding to different protocol versions.
|
||||||
var ProtocolLengths = map[uint]uint64{lpv1: 15, lpv2: 22}
|
var ProtocolLengths = map[uint]uint64{lpv2: 22}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
NetworkId = 1
|
NetworkId = 1
|
||||||
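Note: this hunk is the heart of the commit: lpv1 disappears from the version constants, both supported-version lists shrink to {lpv2}, and ProtocolLengths drops its 15-message LES/1 entry, so a peer that only speaks les/1 now fails protocol negotiation. A hedged sketch of how these tables feed the devp2p layer (the real wiring lives in the les protocol manager; field names are from the standard p2p.Protocol struct):

var protos []p2p.Protocol
for _, version := range ClientProtocolVersions { // now just {lpv2}
	protos = append(protos, p2p.Protocol{
		Name:    "les",
		Version: version,
		Length:  ProtocolLengths[version], // 22 message codes for lpv2
	})
}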
@@ -54,7 +53,7 @@ const (

 // les protocol message codes
 const (
-	// Protocol messages belonging to LPV1
+	// Protocol messages inherited from LPV1
 	StatusMsg          = 0x00
 	AnnounceMsg        = 0x01
 	GetBlockHeadersMsg = 0x02
@@ -63,14 +62,9 @@ const (
 	BlockBodiesMsg         = 0x05
 	GetReceiptsMsg         = 0x06
 	ReceiptsMsg            = 0x07
-	GetProofsV1Msg         = 0x08
-	ProofsV1Msg            = 0x09
 	GetCodeMsg             = 0x0a
 	CodeMsg                = 0x0b
-	SendTxMsg              = 0x0c
-	GetHeaderProofsMsg     = 0x0d
-	HeaderProofsMsg        = 0x0e
-	// Protocol messages belonging to LPV2
+	// Protocol messages introduced in LPV2
 	GetProofsV2Msg         = 0x0f
 	ProofsV2Msg            = 0x10
 	GetHelperTrieProofsMsg = 0x11
@@ -89,10 +83,7 @@ var requests = map[uint64]requestInfo{
 	GetBlockHeadersMsg:     {"GetBlockHeaders", MaxHeaderFetch},
 	GetBlockBodiesMsg:      {"GetBlockBodies", MaxBodyFetch},
 	GetReceiptsMsg:         {"GetReceipts", MaxReceiptFetch},
-	GetProofsV1Msg:         {"GetProofsV1", MaxProofsFetch},
 	GetCodeMsg:             {"GetCode", MaxCodeFetch},
-	SendTxMsg:              {"SendTx", MaxTxSend},
-	GetHeaderProofsMsg:     {"GetHeaderProofs", MaxHelperTrieProofsFetch},
 	GetProofsV2Msg:         {"GetProofsV2", MaxProofsFetch},
 	GetHelperTrieProofsMsg: {"GetHelperTrieProofs", MaxHelperTrieProofsFetch},
 	SendTxV2Msg:            {"SendTxV2", MaxTxSend},
@@ -36,24 +36,18 @@ func secAddr(addr common.Address) []byte {

 type accessTestFn func(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest

-func TestBlockAccessLes1(t *testing.T) { testAccess(t, 1, tfBlockAccess) }
-
 func TestBlockAccessLes2(t *testing.T) { testAccess(t, 2, tfBlockAccess) }

 func tfBlockAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest {
 	return &light.BlockRequest{Hash: bhash, Number: number}
 }

-func TestReceiptsAccessLes1(t *testing.T) { testAccess(t, 1, tfReceiptsAccess) }
-
 func TestReceiptsAccessLes2(t *testing.T) { testAccess(t, 2, tfReceiptsAccess) }

 func tfReceiptsAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest {
 	return &light.ReceiptsRequest{Hash: bhash, Number: number}
 }

-func TestTrieEntryAccessLes1(t *testing.T) { testAccess(t, 1, tfTrieEntryAccess) }
-
 func TestTrieEntryAccessLes2(t *testing.T) { testAccess(t, 2, tfTrieEntryAccess) }

 func tfTrieEntryAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest {
@@ -63,8 +57,6 @@ func tfTrieEntryAccess(db ethdb.Database, bhash common.Hash, number uint64) ligh
 	return nil
 }

-func TestCodeAccessLes1(t *testing.T) { testAccess(t, 1, tfCodeAccess) }
-
 func TestCodeAccessLes2(t *testing.T) { testAccess(t, 2, tfCodeAccess) }

 func tfCodeAccess(db ethdb.Database, bhash common.Hash, num uint64) light.OdrRequest {
@@ -89,7 +89,7 @@ func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
 			config:           config,
 			chainDb:          eth.ChainDb(),
 			iConfig:          light.DefaultServerIndexerConfig,
-			chtIndexer:       light.NewChtIndexer(eth.ChainDb(), nil, params.CHTFrequencyServer, params.HelperTrieProcessConfirmations),
+			chtIndexer:       light.NewChtIndexer(eth.ChainDb(), nil, params.CHTFrequency, params.HelperTrieProcessConfirmations),
 			bloomTrieIndexer: light.NewBloomTrieIndexer(eth.ChainDb(), nil, params.BloomBitsBlocks, params.BloomTrieFrequency),
 			protocolManager:  pm,
 		},
@@ -108,15 +108,11 @@ func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
     srv.thcBlockProcessing = config.LightServ/100 + 1
     srv.fcManager = flowcontrol.NewClientManager(nil, &mclock.System{})
 
-    chtV1SectionCount, _, _ := srv.chtIndexer.Sections() // indexer still uses LES/1 4k section size for backwards server compatibility
-    chtV2SectionCount := chtV1SectionCount / (params.CHTFrequencyClient / params.CHTFrequencyServer)
-    if chtV2SectionCount != 0 {
-        // convert to LES/2 section
-        chtLastSection := chtV2SectionCount - 1
-        // convert last LES/2 section index back to LES/1 index for chtIndexer.SectionHead
-        chtLastSectionV1 := (chtLastSection+1)*(params.CHTFrequencyClient/params.CHTFrequencyServer) - 1
-        chtSectionHead := srv.chtIndexer.SectionHead(chtLastSectionV1)
-        chtRoot := light.GetChtRoot(pm.chainDb, chtLastSectionV1, chtSectionHead)
+    chtSectionCount, _, _ := srv.chtIndexer.Sections()
+    if chtSectionCount != 0 {
+        chtLastSection := chtSectionCount - 1
+        chtSectionHead := srv.chtIndexer.SectionHead(chtLastSection)
+        chtRoot := light.GetChtRoot(pm.chainDb, chtLastSection, chtSectionHead)
         logger.Info("Loaded CHT", "section", chtLastSection, "head", chtSectionHead, "root", chtRoot)
     }
     bloomTrieSectionCount, _, _ := srv.bloomTrieIndexer.Sections()
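The deleted block is the core of this change: the server used to index CHTs in 4096-block LES/1 sections and translate to and from the 32768-block LES/2 numbering on every lookup. A runnable illustration of the removed index arithmetic, with the old constants inlined since this commit deletes them from params:

package main

import "fmt"

// The old constants, inlined here because only the unified CHTFrequency
// (32768) survives this change.
const (
	chtFrequencyServer = 4096  // was params.CHTFrequencyServer
	chtFrequencyClient = 32768 // was params.CHTFrequencyClient
)

func main() {
	// Eight 4k server sections made up one 32k client section, so the server
	// had to translate between the two numberings on every lookup.
	const ratio = chtFrequencyClient / chtFrequencyServer // 8

	chtV1SectionCount := uint64(40) // e.g. 40 indexed server sections
	chtV2SectionCount := chtV1SectionCount / ratio
	chtLastSection := chtV2SectionCount - 1
	// Last LES/1 section index backing the last complete LES/2 section:
	chtLastSectionV1 := (chtLastSection+1)*ratio - 1

	fmt.Println(chtV2SectionCount, chtLastSection, chtLastSectionV1) // 5 4 39
}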
@@ -99,7 +99,7 @@ func (odr *testOdr) IndexerConfig() *IndexerConfig {
 
 type odrTestFn func(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) ([]byte, error)
 
-func TestOdrGetBlockLes1(t *testing.T) { testChainOdr(t, 1, odrGetBlock) }
+func TestOdrGetBlockLes2(t *testing.T) { testChainOdr(t, 1, odrGetBlock) }
 
 func odrGetBlock(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) ([]byte, error) {
     var block *types.Block
@@ -115,7 +115,7 @@ func odrGetBlock(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc
     return rlp, nil
 }
 
-func TestOdrGetReceiptsLes1(t *testing.T) { testChainOdr(t, 1, odrGetReceipts) }
+func TestOdrGetReceiptsLes2(t *testing.T) { testChainOdr(t, 1, odrGetReceipts) }
 
 func odrGetReceipts(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) ([]byte, error) {
     var receipts types.Receipts
@@ -137,7 +137,7 @@ func odrGetReceipts(ctx context.Context, db ethdb.Database, bc *core.BlockChain,
     return rlp, nil
 }
 
-func TestOdrAccountsLes1(t *testing.T) { testChainOdr(t, 1, odrAccounts) }
+func TestOdrAccountsLes2(t *testing.T) { testChainOdr(t, 1, odrAccounts) }
 
 func odrAccounts(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) ([]byte, error) {
     dummyAddr := common.HexToAddress("1234567812345678123456781234567812345678")
@@ -161,7 +161,7 @@ func odrAccounts(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc
     return res, st.Error()
 }
 
-func TestOdrContractCallLes1(t *testing.T) { testChainOdr(t, 1, odrContractCall) }
+func TestOdrContractCallLes2(t *testing.T) { testChainOdr(t, 1, odrContractCall) }
 
 type callmsg struct {
     types.Message
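These light-package tests are only renamed; the `odrGet*` helpers themselves are untouched. They all share one shape: each takes both a full `core.BlockChain` and a `LightChain` and reads from whichever is present, so the same test body runs against a full node and against a light node whose misses are fetched on demand via ODR. A toy version of that shape under assumed types (not the real light package API):

package main

import (
	"context"
	"errors"
	"fmt"
)

// Hypothetical stand-ins: a full chain that has everything locally and a
// light chain that would fetch misses on demand (here just a map).
type chain interface {
	blockRLP(ctx context.Context, hash string) ([]byte, error)
}

type mapChain map[string][]byte

func (m mapChain) blockRLP(_ context.Context, hash string) ([]byte, error) {
	if v, ok := m[hash]; ok {
		return v, nil
	}
	return nil, errors.New("not found")
}

// odrGetBlock mirrors the shape of the test helpers above: use the full
// chain when one is present, otherwise go through the light chain.
func odrGetBlock(ctx context.Context, full, light chain, hash string) ([]byte, error) {
	if full != nil {
		return full.blockRLP(ctx, hash)
	}
	return light.blockRLP(ctx, hash)
}

func main() {
	lightChain := mapChain{"h1": []byte("block rlp")}
	v, err := odrGetBlock(context.Background(), nil, lightChain, "h1")
	fmt.Println(string(v), err)
}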
@@ -41,9 +41,6 @@ type IndexerConfig struct {
     // The block frequency for creating CHTs.
     ChtSize uint64
 
-    // A special auxiliary field represents client's chtsize for server config, otherwise represents server's chtsize.
-    PairChtSize uint64
-
     // The number of confirmations needed to generate/accept a canonical hash help trie.
     ChtConfirms uint64
 
@@ -64,8 +61,7 @@ type IndexerConfig struct {
 var (
     // DefaultServerIndexerConfig wraps a set of configs as a default indexer config for server side.
     DefaultServerIndexerConfig = &IndexerConfig{
-        ChtSize:           params.CHTFrequencyServer,
-        PairChtSize:       params.CHTFrequencyClient,
+        ChtSize:           params.CHTFrequency,
         ChtConfirms:       params.HelperTrieProcessConfirmations,
         BloomSize:         params.BloomBitsBlocks,
         BloomConfirms:     params.BloomConfirms,
@@ -74,8 +70,7 @@ var (
     }
     // DefaultClientIndexerConfig wraps a set of configs as a default indexer config for client side.
     DefaultClientIndexerConfig = &IndexerConfig{
-        ChtSize:           params.CHTFrequencyClient,
-        PairChtSize:       params.CHTFrequencyServer,
+        ChtSize:           params.CHTFrequency,
         ChtConfirms:       params.HelperTrieConfirmations,
         BloomSize:         params.BloomBitsBlocksClient,
         BloomConfirms:     params.HelperTrieConfirmations,
@@ -84,8 +79,7 @@ var (
     }
     // TestServerIndexerConfig wraps a set of configs as a test indexer config for server side.
     TestServerIndexerConfig = &IndexerConfig{
-        ChtSize:           64,
-        PairChtSize:       512,
+        ChtSize:           512,
         ChtConfirms:       4,
         BloomSize:         64,
         BloomConfirms:     4,
@@ -95,7 +89,6 @@ var (
     // TestClientIndexerConfig wraps a set of configs as a test indexer config for client side.
     TestClientIndexerConfig = &IndexerConfig{
         ChtSize:           512,
-        PairChtSize:       64,
         ChtConfirms:       32,
         BloomSize:         512,
         BloomConfirms:     32,
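With PairChtSize gone from all four configs, server and client now index identical 32768-block section boundaries, which is exactly what lets the index-conversion code in NewLesServer above disappear. A trimmed-down sketch of the resulting invariant (field set and confirmation values are illustrative placeholders, not the real defaults):

package main

import "fmt"

// A simplified look at IndexerConfig after the change: one shared ChtSize on
// both sides and no PairChtSize to pair them up.
type indexerConfig struct {
	ChtSize     uint64
	ChtConfirms uint64
}

func main() {
	const chtFrequency = 32768 // params.CHTFrequency
	server := indexerConfig{ChtSize: chtFrequency, ChtConfirms: 256}  // placeholder confirms
	client := indexerConfig{ChtSize: chtFrequency, ChtConfirms: 2048} // placeholder confirms
	// Identical section boundaries on both sides: no index conversion when
	// serving client CHT requests.
	fmt.Println(server.ChtSize == client.ChtSize) // true
}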
@@ -116,7 +109,7 @@ var (
     ErrNoTrustedCht       = errors.New("no trusted canonical hash trie")
     ErrNoTrustedBloomTrie = errors.New("no trusted bloom trie")
     ErrNoHeader           = errors.New("header not found")
-    chtPrefix             = []byte("chtRoot-") // chtPrefix + chtNum (uint64 big endian) -> trie root hash
+    chtPrefix             = []byte("chtRootV2-") // chtPrefix + chtNum (uint64 big endian) -> trie root hash
     ChtTablePrefix        = "cht-"
 )
 
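Bumping the prefix to "chtRootV2-" re-keys every stored CHT root, so roots computed under the old 4k sectioning are simply never read again; no database migration is needed. Going by the comment on chtPrefix, the full key is prefix + section number + section head; a small sketch of that layout (the exact concatenation is an assumption drawn from the comment, not copied code):

package main

import (
	"encoding/binary"
	"fmt"
)

// chtKey mirrors the key layout described by the chtPrefix comment:
// prefix + section index (uint64 big endian) + section head hash.
func chtKey(sectionIdx uint64, sectionHead [32]byte) []byte {
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
	key := append([]byte("chtRootV2-"), encNumber[:]...)
	return append(key, sectionHead[:]...)
}

func main() {
	var head [32]byte // zero hash, illustration only
	fmt.Printf("%x\n", chtKey(5, head))
}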
@@ -127,7 +120,6 @@ type ChtNode struct {
 }
 
 // GetChtRoot reads the CHT root associated to the given section from the database
-// Note that sectionIdx is specified according to LES/1 CHT section size.
 func GetChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) common.Hash {
     var encNumber [8]byte
     binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
@@ -136,7 +128,6 @@ func GetChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) c
 }
 
 // StoreChtRoot writes the CHT root associated to the given section into the database
-// Note that sectionIdx is specified according to LES/1 CHT section size.
 func StoreChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead, root common.Hash) {
     var encNumber [8]byte
     binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
@@ -163,7 +154,7 @@ func NewChtIndexer(db ethdb.Database, odr OdrBackend, size, confirms uint64) *co
         triedb:      trie.NewDatabaseWithCache(trieTable, 1), // Use a tiny cache only to keep memory down
         sectionSize: size,
     }
-    return core.NewChainIndexer(db, rawdb.NewTable(db, "chtIndex-"), backend, size, confirms, time.Millisecond*100, "cht")
+    return core.NewChainIndexer(db, rawdb.NewTable(db, "chtIndexV2-"), backend, size, confirms, time.Millisecond*100, "cht")
 }
 
 // fetchMissingNodes tries to retrieve the last entry of the latest trusted CHT from the
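The same trick is applied to the index metadata: rawdb.NewTable namespaces every key of the wrapped database with the given string, so renaming the table to "chtIndexV2-" starts an empty index while the old "chtIndex-" entries are left in place. A toy version of the pattern:

package main

import "fmt"

// A toy version of the table-wrapper pattern used via rawdb.NewTable: every
// key is namespaced by a prefix, so a new prefix means a fresh, empty index.
type kv map[string][]byte

type table struct {
	db     kv
	prefix string
}

func (t table) put(key string, val []byte) { t.db[t.prefix+key] = val }
func (t table) get(key string) []byte      { return t.db[t.prefix+key] }

func main() {
	db := kv{}
	old := table{db, "chtIndex-"}
	old.put("sectionCount", []byte{42})

	v2 := table{db, "chtIndexV2-"}
	fmt.Println(v2.get("sectionCount") == nil) // true: the V2 index starts fresh
}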
@@ -235,9 +226,7 @@ func (c *ChtIndexerBackend) Commit() error {
     }
     c.triedb.Commit(root, false)
 
-    if ((c.section+1)*c.sectionSize)%params.CHTFrequencyClient == 0 {
-        log.Info("Storing CHT", "section", c.section*c.sectionSize/params.CHTFrequencyClient, "head", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root))
-    }
+    log.Info("Storing CHT", "section", c.section, "head", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root))
     StoreChtRoot(c.diskdb, c.section, c.lastHash, root)
     return nil
 }
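The deleted condition existed because the server committed a 4k section eight times per client-visible 32k section and only wanted to log at client-section boundaries. With one section size, every commit is a client-visible section, so the gate and its index rescaling go away. A runnable illustration of the removed gate:

package main

import "fmt"

// Illustration of the removed logging gate: with 4k server sections,
// "Storing CHT" was only logged when a full 32k client section completed.
func main() {
	const sectionSize, chtFrequencyClient = 4096, 32768
	for section := uint64(0); section < 16; section++ {
		if ((section+1)*sectionSize)%chtFrequencyClient == 0 {
			fmt.Println("Storing CHT, client section", section*sectionSize/chtFrequencyClient)
		}
	}
	// Prints client sections 0 and 1 (at server sections 7 and 15).
}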
@@ -32,13 +32,8 @@ const (
     // considered probably final and its rotated bits are calculated.
     BloomConfirms = 256
 
-    // CHTFrequencyClient is the block frequency for creating CHTs on the client side.
-    CHTFrequencyClient = 32768
+    // CHTFrequency is the block frequency for creating CHTs
+    CHTFrequency = 32768
 
-    // CHTFrequencyServer is the block frequency for creating CHTs on the server side.
-    // Eventually this can be merged back with the client version, but that requires a
-    // full database upgrade, so that should be left for a suitable moment.
-    CHTFrequencyServer = 4096
-
     // BloomTrieFrequency is the block frequency for creating BloomTrie on both
     // server/client sides.
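This constants hunk is where the "suitable moment" anticipated by the deleted comment finally arrives: the two frequencies collapse into a single CHTFrequency, and the V2 prefixes above are precisely the "full database upgrade" it called for. A quick sanity check of the resulting block-to-section mapping:

package main

import "fmt"

const chtFrequency = 32768 // params.CHTFrequency after this change

// chtSection maps a block number to the CHT section that covers it, the same
// arithmetic both server and client now share.
func chtSection(blockNum uint64) uint64 { return blockNum / chtFrequency }

func main() {
	fmt.Println(chtSection(32767)) // 0: last block of the first section
	fmt.Println(chtSection(32768)) // 1: first block of the second section
	// The head of section n is its last covered block:
	n := uint64(3)
	fmt.Println((n+1)*chtFrequency - 1) // 131071
}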