775d222299
## Proposed Changes

With proposer boosting implemented (#2822) we have an opportunity to re-org out late blocks.

This PR adds three flags to the BN to control this behaviour:

* `--disable-proposer-reorgs`: turn aggressive re-orging off (it is on by default).
* `--proposer-reorg-threshold N`: attempt to orphan blocks with less than N% of the committee vote. If this parameter isn't set then N defaults to 20% when the feature is enabled.
* `--proposer-reorg-epochs-since-finalization N`: only attempt to re-org late blocks when the number of epochs since finalization is less than or equal to N. The default is 2 epochs, meaning re-orgs will only be attempted when the chain is finalizing optimally.

For safety Lighthouse will only attempt a re-org under very specific conditions (a rough sketch of this decision logic is included at the end of this description):

1. The block being proposed is 1 slot after the canonical head, and the canonical head is 1 slot after its parent. I.e. at slot `n + 1`, rather than building on the block from slot `n`, we build on the block from slot `n - 1`.
2. The current canonical head received less than N% of the committee vote. N should be set depending on the proposer boost fraction itself, the fraction of the network that is believed to be applying it, and the size of the largest entity that could be hoarding votes.
3. The current canonical head arrived after the attestation deadline from our perspective. This condition was only added to support suppression of forkchoiceUpdated messages, but it makes intuitive sense.
4. The block is being proposed in the first 2 seconds of the slot. This gives it time to propagate and receive the proposer boost.

## Additional Info

For the initial idea and background, see: https://github.com/ethereum/consensus-specs/pull/2353#issuecomment-950238004

There is also a specification for this feature here: https://github.com/ethereum/consensus-specs/pull/3034

Co-authored-by: Michael Sproul <micsproul@gmail.com>
Co-authored-by: pawan <pawandhananjay@gmail.com>
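To make the flags and conditions above concrete, here is a minimal, self-contained sketch of how they might combine into a single re-org decision. All type, field, and function names (`ReOrgConfig`, `ProposalContext`, `should_attempt_reorg`) are hypothetical illustrations, not the actual Lighthouse implementation or the code in this commit.

```rust
use std::time::Duration;

/// Hypothetical config mirroring the three flags above; names are illustrative only.
struct ReOrgConfig {
    /// Inverse of `--disable-proposer-reorgs`.
    enabled: bool,
    /// `--proposer-reorg-threshold N`, in percent of committee weight.
    threshold_percent: u64,
    /// `--proposer-reorg-epochs-since-finalization N`.
    max_epochs_since_finalization: u64,
}

impl Default for ReOrgConfig {
    fn default() -> Self {
        // Defaults as described above: enabled, 20%, 2 epochs.
        Self {
            enabled: true,
            threshold_percent: 20,
            max_epochs_since_finalization: 2,
        }
    }
}

/// Hypothetical snapshot of what the proposer needs to make the decision.
struct ProposalContext {
    proposal_slot: u64,
    head_slot: u64,
    head_parent_slot: u64,
    /// Percent of committee weight that voted for the current head.
    head_vote_percent: u64,
    /// Whether the head arrived after the attestation deadline.
    head_arrived_late: bool,
    epochs_since_finalization: u64,
    /// Time elapsed since the start of the proposal slot.
    time_into_slot: Duration,
}

/// Returns `true` only if every safety condition listed above holds.
fn should_attempt_reorg(cfg: &ReOrgConfig, ctx: &ProposalContext) -> bool {
    // Feature flag and finality-distance check (`--proposer-reorg-epochs-since-finalization`).
    cfg.enabled
        && ctx.epochs_since_finalization <= cfg.max_epochs_since_finalization
        // Condition 1: single-slot re-org shape. We propose at slot n + 1, the head is at
        // slot n, its parent is at slot n - 1, and we would build on the parent instead.
        && ctx.proposal_slot == ctx.head_slot + 1
        && ctx.head_slot == ctx.head_parent_slot + 1
        // Condition 2: the head is weak, i.e. below the re-org threshold.
        && ctx.head_vote_percent < cfg.threshold_percent
        // Condition 3: the head arrived after the attestation deadline.
        && ctx.head_arrived_late
        // Condition 4: we are within the first 2 seconds of the slot, leaving time for
        // our block to propagate and receive the proposer boost.
        && ctx.time_into_slot < Duration::from_secs(2)
}

fn main() {
    // A weak, late head one slot behind our proposal: a re-org would be attempted.
    let ctx = ProposalContext {
        proposal_slot: 101,
        head_slot: 100,
        head_parent_slot: 99,
        head_vote_percent: 5,
        head_arrived_late: true,
        epochs_since_finalization: 1,
        time_into_slot: Duration::from_secs(1),
    };
    println!("attempt re-org: {}", should_attempt_reorg(&ReOrgConfig::default(), &ctx));
}
```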
use crate::{BeaconForkChoiceStore, BeaconSnapshot};
use fork_choice::{CountUnrealized, ForkChoice, PayloadVerificationStatus};
use itertools::process_results;
use proto_array::CountUnrealizedFull;
use slog::{info, warn, Logger};
use state_processing::state_advance::complete_state_advance;
use state_processing::{
    per_block_processing, per_block_processing::BlockSignatureStrategy, ConsensusContext,
    VerifyBlockRoot,
};
use std::sync::Arc;
use std::time::Duration;
use store::{iter::ParentRootBlockIterator, HotColdDB, ItemStore};
use types::{BeaconState, ChainSpec, EthSpec, ForkName, Hash256, SignedBeaconBlock, Slot};

const CORRUPT_DB_MESSAGE: &str = "The database could be corrupt. Check its file permissions or \
                                  consider deleting it by running with the --purge-db flag.";

/// Revert the head to the last block before the most recent hard fork.
///
/// This function is destructive and should only be used if there is no viable alternative. It will
/// cause the reverted blocks and states to be completely forgotten, lying dormant in the database
/// forever.
///
/// Return the `(head_block_root, head_block)` that should be used post-reversion.
pub fn revert_to_fork_boundary<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
    current_slot: Slot,
    head_block_root: Hash256,
    store: Arc<HotColdDB<E, Hot, Cold>>,
    spec: &ChainSpec,
    log: &Logger,
) -> Result<(Hash256, SignedBeaconBlock<E>), String> {
    let current_fork = spec.fork_name_at_slot::<E>(current_slot);
    let fork_epoch = spec
        .fork_epoch(current_fork)
        .ok_or_else(|| format!("Current fork '{}' never activates", current_fork))?;

    if current_fork == ForkName::Base {
        return Err(format!(
            "Cannot revert to before phase0 hard fork. {}",
            CORRUPT_DB_MESSAGE
        ));
    }

    warn!(
        log,
        "Reverting invalid head block";
        "target_fork" => %current_fork,
        "fork_epoch" => fork_epoch,
    );
    let block_iter = ParentRootBlockIterator::fork_tolerant(&store, head_block_root);

    let (block_root, blinded_block) = process_results(block_iter, |mut iter| {
        iter.find_map(|(block_root, block)| {
            if block.slot() < fork_epoch.start_slot(E::slots_per_epoch()) {
                Some((block_root, block))
            } else {
                info!(
                    log,
                    "Reverting block";
                    "block_root" => ?block_root,
                    "slot" => block.slot(),
                );
                None
            }
        })
    })
    .map_err(|e| {
        format!(
            "Error fetching blocks to revert: {:?}. {}",
            e, CORRUPT_DB_MESSAGE
        )
    })?
    .ok_or_else(|| format!("No pre-fork blocks found. {}", CORRUPT_DB_MESSAGE))?;

    let block = store
        .make_full_block(&block_root, blinded_block)
        .map_err(|e| format!("Unable to add payload to new head block: {:?}", e))?;

    Ok((block_root, block))
}

/// Reset fork choice to the finalized checkpoint of the supplied head state.
///
/// The supplied `head_block_root` should correspond to the most recently applied block on
/// `head_state`.
///
/// This function avoids quirks of fork choice initialization by replaying all of the blocks from
/// the checkpoint to the head.
///
/// See this issue for details: https://github.com/ethereum/consensus-specs/issues/2566
///
/// It will fail if the finalized state or any of the blocks to replay are unavailable.
///
/// WARNING: this function is destructive and causes fork choice to permanently forget all
/// chains other than the chain leading to `head_block_root`. It should only be used in extreme
/// circumstances when there is no better alternative.
pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
    head_block_root: Hash256,
    head_state: &BeaconState<E>,
    store: Arc<HotColdDB<E, Hot, Cold>>,
    current_slot: Option<Slot>,
    spec: &ChainSpec,
    count_unrealized_config: CountUnrealized,
    count_unrealized_full_config: CountUnrealizedFull,
) -> Result<ForkChoice<BeaconForkChoiceStore<E, Hot, Cold>, E>, String> {
    // Fetch finalized block.
    let finalized_checkpoint = head_state.finalized_checkpoint();
    let finalized_block_root = finalized_checkpoint.root;
    let finalized_block = store
        .get_full_block(&finalized_block_root)
        .map_err(|e| format!("Error loading finalized block: {:?}", e))?
        .ok_or_else(|| {
            format!(
                "Finalized block missing for revert: {:?}",
                finalized_block_root
            )
        })?;

    // Advance finalized state to finalized epoch (to handle skipped slots).
    let finalized_state_root = finalized_block.state_root();
    let mut finalized_state = store
        .get_state(&finalized_state_root, Some(finalized_block.slot()))
        .map_err(|e| format!("Error loading finalized state: {:?}", e))?
        .ok_or_else(|| {
            format!(
                "Finalized block state missing from database: {:?}",
                finalized_state_root
            )
        })?;
    let finalized_slot = finalized_checkpoint.epoch.start_slot(E::slots_per_epoch());
    complete_state_advance(
        &mut finalized_state,
        Some(finalized_state_root),
        finalized_slot,
        spec,
    )
    .map_err(|e| {
        format!(
            "Error advancing finalized state to finalized epoch: {:?}",
            e
        )
    })?;
    let finalized_snapshot = BeaconSnapshot {
        beacon_block_root: finalized_block_root,
        beacon_block: Arc::new(finalized_block),
        beacon_state: finalized_state,
    };

    let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store.clone(), &finalized_snapshot)
        .map_err(|e| format!("Unable to reset fork choice store for revert: {e:?}"))?;

    let mut fork_choice = ForkChoice::from_anchor(
        fc_store,
        finalized_block_root,
        &finalized_snapshot.beacon_block,
        &finalized_snapshot.beacon_state,
        current_slot,
        count_unrealized_full_config,
        spec,
    )
    .map_err(|e| format!("Unable to reset fork choice for revert: {:?}", e))?;

    // Replay blocks from finalized checkpoint back to head.
    // We do not replay attestations presently, relying on the absence of other blocks
    // to guarantee `head_block_root` as the head.
    let blocks = store
        .load_blocks_to_replay(finalized_slot + 1, head_state.slot(), head_block_root)
        .map_err(|e| format!("Error loading blocks to replay for fork choice: {:?}", e))?;

    let mut state = finalized_snapshot.beacon_state;
    let blocks_len = blocks.len();
    for (i, block) in blocks.into_iter().enumerate() {
        complete_state_advance(&mut state, None, block.slot(), spec)
            .map_err(|e| format!("State advance failed: {:?}", e))?;

        let mut ctxt = ConsensusContext::new(block.slot())
            .set_proposer_index(block.message().proposer_index());
        per_block_processing(
            &mut state,
            &block,
            BlockSignatureStrategy::NoVerification,
            VerifyBlockRoot::True,
            &mut ctxt,
            spec,
        )
        .map_err(|e| format!("Error replaying block: {:?}", e))?;

        // Setting this to unverified is the safest solution, since we don't have a way to
        // retro-actively determine if they were valid or not.
        //
        // This scenario is so rare that it seems OK to double-verify some blocks.
        let payload_verification_status = PayloadVerificationStatus::Optimistic;

        // Because we are replaying a single chain of blocks, we only need to calculate unrealized
        // justification for the last block in the chain.
        let is_last_block = i + 1 == blocks_len;
        let count_unrealized = if is_last_block {
            count_unrealized_config
        } else {
            CountUnrealized::False
        };

        fork_choice
            .on_block(
                block.slot(),
                block.message(),
                block.canonical_root(),
                // Reward proposer boost. We are reinforcing the canonical chain.
                Duration::from_secs(0),
                &state,
                payload_verification_status,
                spec,
                count_unrealized,
            )
            .map_err(|e| format!("Error applying replayed block to fork choice: {:?}", e))?;
    }

    Ok(fork_choice)
}