Fix Block Cache Range Math for Faster Syncing (#3358)
## Issue Addressed

While working on the deposit snapshot feature, I had my proxy running and noticed the beacon node wasn't syncing the block cache continuously; there were long periods where it did nothing. I believe this was caused by a logical error introduced in #3234, which dealt with an issue that arose while syncing the block cache on Ropsten. The problem is that when the block cache is initially syncing, it triggers the logic that detects the cache being far behind the execution chain in time. That kicks off a batch syncing mechanism intended to sync further ahead than the node normally would, but in this scenario the batch syncing range is actually shorter than the range this function usually estimates, so syncing ends up slower.

## Proposed Changes

I believe I've fixed this function by taking the end of the range to be the maximum of (batch syncing range, usual range). I've also renamed and restructured some things a bit. The logic is equivalent, but I think it's clearer what's going on.
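To make the range math concrete, here is a minimal, standalone sketch of the `end_block` calculation before and after this change. The constants and example numbers below are assumptions chosen purely for illustration; they are not the real values from the codebase.

```rust
// Illustrative constants; the real CATCHUP_* values in the codebase may differ.
const CATCHUP_BATCH_SIZE: u64 = 128;
const CATCHUP_MIN_FOLLOW_DISTANCE: u64 = 64;

/// Old behaviour: during catch-up, the end of the range is capped at
/// `next_required_block + CATCHUP_BATCH_SIZE`, even when the usual
/// follow-distance range would reach much further ahead.
fn end_block_old(remote_highest_block_number: u64, next_required_block: u64) -> u64 {
    std::cmp::min(
        remote_highest_block_number.saturating_sub(CATCHUP_MIN_FOLLOW_DISTANCE),
        next_required_block + CATCHUP_BATCH_SIZE,
    )
}

/// New behaviour: take the maximum of the batch-syncing range and the usual
/// range, so catch-up mode can never yield a shorter range than normal syncing.
fn end_block_new(
    remote_highest_block_number: u64,
    next_required_block: u64,
    cache_follow_distance: u64,
) -> u64 {
    std::cmp::max(
        std::cmp::min(
            remote_highest_block_number.saturating_sub(CATCHUP_MIN_FOLLOW_DISTANCE),
            next_required_block + CATCHUP_BATCH_SIZE,
        ),
        remote_highest_block_number.saturating_sub(cache_follow_distance),
    )
}

fn main() {
    // Initial sync: the cache is far behind the remote head.
    let remote_highest_block_number = 10_000;
    let next_required_block = 1_000;
    let cache_follow_distance = 16; // assumed value for illustration

    // Old: end_block = min(10_000 - 64, 1_000 + 128) = 1_128.
    println!(
        "old end_block = {}",
        end_block_old(remote_highest_block_number, next_required_block)
    );
    // New: end_block = max(1_128, 10_000 - 16) = 9_984, i.e. at least as far
    // as the usual follow-distance range.
    println!(
        "new end_block = {}",
        end_block_new(
            remote_highest_block_number,
            next_required_block,
            cache_follow_distance
        )
    );
}
```

With these example numbers, the old formula keeps the sync loop crawling forward in small batches, while the new formula lets it advance at least as far as the usual follow-distance target.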
parent: 20ebf1f3c1
commit: f7354abe0f
```diff
@@ -908,11 +908,12 @@ impl Service {
     /// Returns the range of new block numbers to be considered for the given head type.
     fn relevant_new_block_numbers(
         &self,
-        remote_highest_block: u64,
+        remote_highest_block_number: u64,
         remote_highest_block_timestamp: Option<u64>,
         head_type: HeadType,
     ) -> Result<Option<RangeInclusive<u64>>, SingleEndpointError> {
         let follow_distance = self.cache_follow_distance();
+        let latest_cached_block = self.latest_cached_block();
         let next_required_block = match head_type {
             HeadType::Deposit => self
                 .deposits()
@@ -920,18 +921,14 @@ impl Service {
                 .last_processed_block
                 .map(|n| n + 1)
                 .unwrap_or_else(|| self.config().deposit_contract_deploy_block),
-            HeadType::BlockCache => self
-                .inner
-                .block_cache
-                .read()
-                .highest_block_number()
-                .map(|n| n + 1)
+            HeadType::BlockCache => latest_cached_block
+                .as_ref()
+                .map(|block| block.number + 1)
                 .unwrap_or_else(|| self.config().lowest_cached_block_number),
         };
-        let latest_cached_block = self.latest_cached_block();
 
         relevant_block_range(
-            remote_highest_block,
+            remote_highest_block_number,
             remote_highest_block_timestamp,
             next_required_block,
             follow_distance,
@@ -1293,9 +1290,12 @@ fn relevant_block_range(
         let lagging = latest_cached_block.timestamp
             + cache_follow_distance * spec.seconds_per_eth1_block
             < remote_highest_block_timestamp;
-        let end_block = std::cmp::min(
-            remote_highest_block_number.saturating_sub(CATCHUP_MIN_FOLLOW_DISTANCE),
-            next_required_block + CATCHUP_BATCH_SIZE,
+        let end_block = std::cmp::max(
+            std::cmp::min(
+                remote_highest_block_number.saturating_sub(CATCHUP_MIN_FOLLOW_DISTANCE),
+                next_required_block + CATCHUP_BATCH_SIZE,
+            ),
+            remote_highest_block_number.saturating_sub(cache_follow_distance),
         );
         if lagging && next_required_block <= end_block {
             return Ok(Some(next_required_block..=end_block));
```