Fix rebase conflict

This commit is contained in:
Emilia Hane 2023-02-06 11:01:09 +01:00
parent a211e6afee
commit ce2db355de
No known key found for this signature in database
GPG Key ID: E73394F9C09206FA
7 changed files with 54 additions and 27 deletions

View File

@ -1074,24 +1074,12 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
) -> Result<Option<BlobsSidecar<T::EthSpec>>, Error> { ) -> Result<Option<BlobsSidecar<T::EthSpec>>, Error> {
match self.store.get_blobs(block_root)? { match self.store.get_blobs(block_root)? {
Some(blobs) => Ok(Some(blobs)), Some(blobs) => Ok(Some(blobs)),
<<<<<<< HEAD
None => { None => {
// Check for the corresponding block to understand whether we *should* have blobs. // Check for the corresponding block to understand whether we *should* have blobs.
self.get_blinded_block(block_root)? self.get_blinded_block(block_root)?
.map(|block| { .map(|block| {
// If there are no KZG commitments in the block, we know the sidecar should // If there are no KZG commitments in the block, we know the sidecar should
// be empty. // be empty.
=======
None => match self.get_blinded_block(block_root)? {
Some(block) => {
let current_slot = self.slot()?;
let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
if block.slot().epoch(T::EthSpec::slots_per_epoch())
+ *MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS
>= current_epoch
{
>>>>>>> 292426505 (Improve syntax)
let expected_kzg_commitments = let expected_kzg_commitments =
match block.message().body().blob_kzg_commitments() { match block.message().body().blob_kzg_commitments() {
Ok(kzg_commitments) => kzg_commitments, Ok(kzg_commitments) => kzg_commitments,
@ -3027,21 +3015,34 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
ops.push(StoreOp::PutBlock(block_root, signed_block.clone())); ops.push(StoreOp::PutBlock(block_root, signed_block.clone()));
ops.push(StoreOp::PutState(block.state_root(), &state)); ops.push(StoreOp::PutState(block.state_root(), &state));
if let Some(blobs) = blobs { let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch());
// Only store blobs that haven't passed the data availability boundary.
if Some(block_epoch) >= self.data_availability_boundary() {
if let Some(blobs) = blobs? {
if blobs.blobs.len() > 0 { if blobs.blobs.len() > 0 {
//FIXME(sean) using this for debugging for now //FIXME(sean) using this for debugging for now
info!(self.log, "Writing blobs to store"; "block_root" => ?block_root); info!(self.log, "Writing blobs to store"; "block_root" => ?block_root);
ops.push(StoreOp::PutBlobs(block_root, blobs)); ops.push(StoreOp::PutBlobs(block_root, blobs));
} }
}
}
if Some(current_epoch)
>= self.spec.eip4844_fork_epoch.map(|eip4844_fork_epoch| {
eip4844_fork_epoch + *MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS
})
{
let current_epoch_start_slot = current_epoch.start_slot(T::EthSpec::slots_per_epoch());
// Update db's metadata for blobs pruning. // Update db's metadata for blobs pruning.
if current_slot == current_epoch.start_slot(T::EthSpec::slots_per_epoch()) { if current_slot == current_epoch_start_slot {
if let Some(mut blob_info) = self.store.get_blob_info() { if let Some(mut blob_info) = self.store.get_blob_info() {
let next_epoch_to_prune = // Pruning enabled until data availability boundary.
blob_info.last_pruned_epoch + *MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS; if let Some(data_availability_boundary) = self.data_availability_boundary() {
blob_info.data_availability_boundary = self.state_root_at_slot(
if current_epoch > next_epoch_to_prune { data_availability_boundary.start_slot(T::EthSpec::slots_per_epoch()),
blob_info.data_availability_breakpoint = Some(block_root); )?;
self.store.compare_and_set_blob_info_with_write( self.store.compare_and_set_blob_info_with_write(
self.store.get_blob_info(), self.store.get_blob_info(),
Some(blob_info), Some(blob_info),
@ -3049,7 +3050,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
} }
} }
} }
}; }
let txn_lock = self.store.hot_db.begin_rw_transaction(); let txn_lock = self.store.hot_db.begin_rw_transaction();
kv_store_ops.extend(self.store.convert_to_kv_batch(ops)?); kv_store_ops.extend(self.store.convert_to_kv_batch(ops)?);

View File

@ -551,6 +551,14 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
.takes_value(true) .takes_value(true)
.default_value("true") .default_value("true")
) )
.arg(
Arg::with_name("prune-blobs")
.long("prune-blobs")
.help("Prune blobs from Lighthouse's database when they are older than the data \
availability boundary relative to the current head.")
.takes_value(true)
.default_value("true")
)
/* /*
* Misc. * Misc.

View File

@ -411,6 +411,10 @@ pub fn get_config<E: EthSpec>(
client_config.store.prune_payloads = prune_payloads; client_config.store.prune_payloads = prune_payloads;
} }
if let Some(prune_blobs) = clap_utils::parse_optional(cli_args, "prune-blobs")? {
client_config.store.prune_blobs = prune_blobs;
}
/* /*
* Zero-ports * Zero-ports
* *

View File

@ -20,7 +20,7 @@ pub enum Error {
RlpError(String), RlpError(String),
BlockNotFound(Hash256), BlockNotFound(Hash256),
/// The blobs sidecar mapping to this block root is older than the data availability boundary. /// The blobs sidecar mapping to this block root is older than the data availability boundary.
BlobsTooOld(Hash256), BlobsTooOld(Hash256, Slot),
NoContinuationData, NoContinuationData,
SplitPointModified(Slot, Slot), SplitPointModified(Slot, Slot),
ConfigError(StoreConfigError), ConfigError(StoreConfigError),

View File

@ -1705,7 +1705,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
let data_availability_breakpoint: Hash256; let data_availability_breakpoint: Hash256;
match blob_info.data_availability_breakpoint { match blob_info.data_availability_boundary {
Some(breakpoint) => { Some(breakpoint) => {
if breakpoint == blob_info.oldest_blob_parent { if breakpoint == blob_info.oldest_blob_parent {
return Ok(()); return Ok(());

View File

@ -125,7 +125,7 @@ pub struct BlobInfo {
/// The latest epoch that blobs were pruned. /// The latest epoch that blobs were pruned.
pub last_pruned_epoch: Epoch, pub last_pruned_epoch: Epoch,
/// The block root of the next blobs to prune from. /// The block root of the next blobs to prune from.
pub data_availability_breakpoint: Option<Hash256>, pub data_availability_boundary: Option<Hash256>,
/// The block root of the next blob that needs to be added to fill in the history. /// The block root of the next blob that needs to be added to fill in the history.
pub oldest_blob_parent: Hash256, pub oldest_blob_parent: Hash256,
/// The slot before which blobs are available. /// The slot before which blobs are available.

View File

@ -1341,6 +1341,19 @@ fn prune_payloads_on_startup_false() {
.with_config(|config| assert!(!config.store.prune_payloads)); .with_config(|config| assert!(!config.store.prune_payloads));
} }
#[test] #[test]
fn prune_blobs_default() {
CommandLineTest::new()
.run_with_zero_port()
.with_config(|config| assert!(config.store.prune_blobs));
}
#[test]
fn prune_blobs_on_startup_false() {
CommandLineTest::new()
.flag("prune-blobs", Some("false"))
.run_with_zero_port()
.with_config(|config| assert!(!config.store.prune_blobs));
}
#[test]
fn reconstruct_historic_states_flag() { fn reconstruct_historic_states_flag() {
CommandLineTest::new() CommandLineTest::new()
.flag("reconstruct-historic-states", None) .flag("reconstruct-historic-states", None)