Avoid repeatedly updating blob info for multiple head candidates
This commit is contained in:
parent
d58a30b3de
commit
fb2ce909f6
@ -50,7 +50,6 @@ use crate::persisted_fork_choice::PersistedForkChoice;
|
||||
use crate::pre_finalization_cache::PreFinalizationBlockCache;
|
||||
use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache};
|
||||
use crate::snapshot_cache::{BlockProductionPreState, SnapshotCache};
|
||||
use crate::store::Split;
|
||||
use crate::sync_committee_verification::{
|
||||
Error as SyncCommitteeError, VerifiedSyncCommitteeMessage, VerifiedSyncContribution,
|
||||
};
|
||||
@ -3029,33 +3028,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
}
|
||||
}
|
||||
|
||||
if Some(current_epoch)
|
||||
> self.spec.eip4844_fork_epoch.map(|eip4844_fork_epoch| {
|
||||
eip4844_fork_epoch + *MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS
|
||||
})
|
||||
{
|
||||
let current_epoch_start_slot = current_epoch.start_slot(T::EthSpec::slots_per_epoch());
|
||||
|
||||
// Update db's metadata for blobs pruning.
|
||||
if current_slot == current_epoch_start_slot {
|
||||
if let Some(mut blob_info) = self.store.get_blob_info() {
|
||||
if let Some(data_availability_boundary) = self.data_availability_boundary() {
|
||||
let dab_slot =
|
||||
data_availability_boundary.end_slot(T::EthSpec::slots_per_epoch());
|
||||
if let Some(dab_state_root) = self.state_root_at_slot(dab_slot)? {
|
||||
blob_info.data_availability_boundary =
|
||||
Split::new(dab_slot, dab_state_root);
|
||||
|
||||
self.store.compare_and_set_blob_info_with_write(
|
||||
self.store.get_blob_info(),
|
||||
Some(blob_info),
|
||||
)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let txn_lock = self.store.hot_db.begin_rw_transaction();
|
||||
|
||||
kv_store_ops.extend(self.store.convert_to_kv_batch(ops)?);
|
||||
|
@ -54,8 +54,9 @@ use slog::{crit, debug, error, warn, Logger};
|
||||
use slot_clock::SlotClock;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use store::{iter::StateRootsIterator, KeyValueStoreOp, StoreItem};
|
||||
use store::{iter::StateRootsIterator, KeyValueStoreOp, Split, StoreItem};
|
||||
use task_executor::{JoinHandle, ShutdownReason};
|
||||
use types::consts::eip4844::MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS;
|
||||
use types::*;
|
||||
|
||||
/// Simple wrapper around `RwLock` that uses private visibility to prevent any other modules from
|
||||
@ -794,8 +795,40 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
.is_optimistic_or_invalid();
|
||||
|
||||
if self.store.get_config().prune_blobs {
|
||||
let current_slot = self.slot()?;
|
||||
let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
|
||||
if Some(current_epoch)
|
||||
> self.spec.eip4844_fork_epoch.map(|eip4844_fork_epoch| {
|
||||
eip4844_fork_epoch + *MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS
|
||||
})
|
||||
{
|
||||
let current_epoch_start_slot =
|
||||
current_epoch.start_slot(T::EthSpec::slots_per_epoch());
|
||||
|
||||
// Update db's metadata for blobs pruning.
|
||||
if current_slot == current_epoch_start_slot {
|
||||
if let Some(mut blob_info) = self.store.get_blob_info() {
|
||||
if let Some(data_availability_boundary) = self.data_availability_boundary()
|
||||
{
|
||||
let dab_slot =
|
||||
data_availability_boundary.end_slot(T::EthSpec::slots_per_epoch());
|
||||
if let Some(dab_state_root) = self.state_root_at_slot(dab_slot)? {
|
||||
blob_info.data_availability_boundary =
|
||||
Split::new(dab_slot, dab_state_root);
|
||||
|
||||
self.store.compare_and_set_blob_info_with_write(
|
||||
self.store.get_blob_info(),
|
||||
Some(blob_info),
|
||||
)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let store = self.store.clone();
|
||||
let log = self.log.clone();
|
||||
|
||||
self.task_executor.spawn_blocking(
|
||||
move || {
|
||||
if let Err(e) = store.try_prune_blobs(false) {
|
||||
|
Loading…
Reference in New Issue
Block a user