eth/downloader: move the pivot in beacon sync mode too (#26453)

In legacy (pre-merge) sync mode, headers were downloaded contiguously from the network, and once no more headers were available, we checked every few seconds whether 64 new blocks had appeared so the pivot could be moved.

In beacon (post-merge) sync mode, we don't need to poll for new skeleton headers non-stop, since those are delivered one by one via the engine API. What the header fetcher was missing, however, was the code to look at the latest head and move the pivot forward if it had fallen more than 2*64-8 blocks behind. This PR adds the missing movement logic.
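
As a rough illustration of the rule being added, here is a minimal, self-contained sketch of the staleness check. The helper names (pivotIsStale, newPivotNumber) are hypothetical, and the local fsMinFullBlocks constant simply mirrors the downloader's value of 64:

package main

import "fmt"

// fsMinFullBlocks mirrors the downloader constant: the number of blocks
// below the chain head that snap sync always executes fully.
const fsMinFullBlocks = 64

// pivotIsStale reports whether the pivot has fallen far enough behind the
// latest skeleton head to warrant moving it; the -8 leaves a bit of wiggle
// room so the pivot is not shuffled on every single new header.
func pivotIsStale(head, pivot uint64) bool {
	return head > pivot+2*fsMinFullBlocks-8
}

// newPivotNumber is the block number a stale pivot is moved to: HEAD-64.
func newPivotNumber(head uint64) uint64 {
	return head - fsMinFullBlocks
}

func main() {
	head, pivot := uint64(1_000_000), uint64(999_870)
	if pivotIsStale(head, pivot) { // 1_000_000 > 999_870+120, so the pivot moves
		fmt.Println("pivot stale, moving to", newPivotNumber(head)) // prints 999936
	} else {
		fmt.Println("pivot still fresh at", pivot)
	}
}

In the actual fetcher, the header for the new pivot number is then looked up in the skeleton chain (falling back to the locally filled headers) and persisted with rawdb.WriteLastPivotNumber, as the diff below shows.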
Péter Szilágyi 2023-01-10 19:16:54 +02:00 committed by GitHub
parent 2c6dda5ad7
commit 452a12aa79

@@ -23,6 +23,7 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/log"
 )
@@ -270,7 +271,8 @@ func (d *Downloader) findBeaconAncestor() (uint64, error) {
 // fetchBeaconHeaders feeds skeleton headers to the downloader queue for scheduling
 // until sync errors or is finished.
 func (d *Downloader) fetchBeaconHeaders(from uint64) error {
-	head, tail, err := d.skeleton.Bounds()
+	var head *types.Header
+	_, tail, err := d.skeleton.Bounds()
 	if err != nil {
 		return err
 	}
@@ -288,6 +290,47 @@ func (d *Downloader) fetchBeaconHeaders(from uint64) error {
 		log.Warn("Retrieved beacon headers from local", "from", from, "count", count)
 	}
 	for {
+		// Some beacon headers might have appeared since the last cycle, make
+		// sure we're always syncing to all available ones
+		head, _, err = d.skeleton.Bounds()
+		if err != nil {
+			return err
+		}
+		// If the pivot became stale (older than 2*64-8 (bit of wiggle room)),
+		// move it ahead to HEAD-64
+		d.pivotLock.Lock()
+		if d.pivotHeader != nil {
+			if head.Number.Uint64() > d.pivotHeader.Number.Uint64()+2*uint64(fsMinFullBlocks)-8 {
+				// Retrieve the next pivot header, either from skeleton chain
+				// or the filled chain
+				number := head.Number.Uint64() - uint64(fsMinFullBlocks)
+
+				log.Warn("Pivot seemingly stale, moving", "old", d.pivotHeader.Number, "new", number)
+				if d.pivotHeader = d.skeleton.Header(number); d.pivotHeader == nil {
+					if number < tail.Number.Uint64() {
+						dist := tail.Number.Uint64() - number
+						if len(localHeaders) >= int(dist) {
+							d.pivotHeader = localHeaders[dist-1]
+							log.Warn("Retrieved pivot header from local", "number", d.pivotHeader.Number, "hash", d.pivotHeader.Hash(), "latest", head.Number, "oldest", tail.Number)
+						}
+					}
+				}
+				// Print an error log and return directly in case the pivot header
+				// is still not found. It means the skeleton chain is not linked
+				// correctly with local chain.
+				if d.pivotHeader == nil {
+					log.Error("Pivot header is not found", "number", number)
+					d.pivotLock.Unlock()
+					return errNoPivotHeader
+				}
+				// Write out the pivot into the database so a rollback beyond
+				// it will reenable snap sync and update the state root that
+				// the state syncer will be downloading
+				rawdb.WriteLastPivotNumber(d.stateDB, d.pivotHeader.Number.Uint64())
+			}
+		}
+		d.pivotLock.Unlock()
+
 		// Retrieve a batch of headers and feed it to the header processor
 		var (
 			headers = make([]*types.Header, 0, maxHeadersProcess)
@@ -343,9 +386,5 @@ func (d *Downloader) fetchBeaconHeaders(from uint64) error {
 		case <-d.cancelCh:
 			return errCanceled
 		}
-		head, _, err = d.skeleton.Bounds()
-		if err != nil {
-			return err
-		}
 	}
 }