diff --git a/Cargo.lock b/Cargo.lock index 05bb5e0e5..7f00a2807 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -684,7 +684,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "4.2.0" +version = "4.3.0" dependencies = [ "beacon_chain", "clap", @@ -891,7 +891,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "4.2.0" +version = "4.3.0" dependencies = [ "beacon_node", "clap", @@ -4165,7 +4165,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "4.2.0" +version = "4.3.0" dependencies = [ "account_utils", "beacon_chain", @@ -4812,7 +4812,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "4.2.0" +version = "4.3.0" dependencies = [ "account_manager", "account_utils", @@ -9108,6 +9108,7 @@ dependencies = [ "smallvec", "ssz_types", "state_processing", + "strum", "superstruct 0.6.0", "swap_or_not_shuffle", "tempfile", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index d3f43fad7..bd578099a 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "4.2.0" +version = "4.3.0" authors = ["Paul Hauner ", "Age Manning BeaconChain { signature_verified_block.block_root(), signature_verified_block, notify_execution_layer, + || Ok(()), ) .await { @@ -2783,6 +2784,7 @@ impl BeaconChain { block_root: Hash256, unverified_block: B, notify_execution_layer: NotifyExecutionLayer, + publish_fn: impl FnOnce() -> Result<(), BlockError> + Send + 'static, ) -> Result> { // Start the Prometheus timer. let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); @@ -2798,6 +2800,9 @@ impl BeaconChain { notify_execution_layer, )?; + //TODO(sean) error handling? 
+ publish_fn()?; + let executed_block = self .clone() .into_executed_block(execution_pending) @@ -3073,7 +3078,9 @@ impl BeaconChain { block_delay, &state, payload_verification_status, + self.config.progressive_balances_mode, &self.spec, + &self.log, ) .map_err(|e| BlockError::BeaconChainError(e.into()))?; } @@ -6012,13 +6019,9 @@ impl BeaconChain { /// Since we are likely calling this during the slot we are going to propose in, don't take into /// account the current slot when accounting for skips. pub fn is_healthy(&self, parent_root: &Hash256) -> Result { + let cached_head = self.canonical_head.cached_head(); // Check if the merge has been finalized. - if let Some(finalized_hash) = self - .canonical_head - .cached_head() - .forkchoice_update_parameters() - .finalized_hash - { + if let Some(finalized_hash) = cached_head.forkchoice_update_parameters().finalized_hash { if ExecutionBlockHash::zero() == finalized_hash { return Ok(ChainHealth::PreMerge); } @@ -6045,17 +6048,13 @@ impl BeaconChain { // Check slots at the head of the chain. let prev_slot = current_slot.saturating_sub(Slot::new(1)); - let head_skips = prev_slot.saturating_sub(self.canonical_head.cached_head().head_slot()); + let head_skips = prev_slot.saturating_sub(cached_head.head_slot()); let head_skips_check = head_skips.as_usize() <= self.config.builder_fallback_skips; // Check if finalization is advancing. 
let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - let epochs_since_finalization = current_epoch.saturating_sub( - self.canonical_head - .cached_head() - .finalized_checkpoint() - .epoch, - ); + let epochs_since_finalization = + current_epoch.saturating_sub(cached_head.finalized_checkpoint().epoch); let finalization_check = epochs_since_finalization.as_usize() <= self.config.builder_fallback_epochs_since_finalization; diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 21fe10431..0f8f03e5f 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -57,6 +57,7 @@ use crate::execution_payload::{ is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier, }; +use crate::observed_block_producers::SeenBlock; use crate::snapshot_cache::PreProcessingSnapshot; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::validator_pubkey_cache::ValidatorPubkeyCache; @@ -189,13 +190,6 @@ pub enum BlockError { /// /// The block is valid and we have already imported a block with this hash. BlockIsAlreadyKnown, - /// A block for this proposer and slot has already been observed. - /// - /// ## Peer scoring - /// - /// The `proposer` has already proposed a block at this slot. The existing block may or may not - /// be equal to the given block. - RepeatProposal { proposer: u64, slot: Slot }, /// The block slot exceeds the MAXIMUM_BLOCK_SLOT_NUMBER. /// /// ## Peer scoring @@ -291,6 +285,14 @@ pub enum BlockError { /// problems to worry about than losing peers, and we're doing the network a favour by /// disconnecting. ParentExecutionPayloadInvalid { parent_root: Hash256 }, + /// The block is a slashable equivocation from the proposer. 
+ /// + /// ## Peer scoring + /// + /// Honest peers shouldn't forward more than 1 equivocating block from the same proposer, so + /// we penalise them with a mid-tolerance error. + Slashable, + //TODO(sean) peer scoring docs /// A blob alone failed validation. BlobValidation(BlobError), /// The block and blob together failed validation. @@ -892,19 +894,6 @@ impl GossipVerifiedBlock { return Err(BlockError::BlockIsAlreadyKnown); } - // Check that we have not already received a block with a valid signature for this slot. - if chain - .observed_block_producers - .read() - .proposer_has_been_observed(block.message()) - .map_err(|e| BlockError::BeaconChainError(e.into()))? - { - return Err(BlockError::RepeatProposal { - proposer: block.message().proposer_index(), - slot: block.slot(), - }); - } - // Do not process a block that doesn't descend from the finalized root. // // We check this *before* we load the parent so that we can return a more detailed error. @@ -1020,17 +1009,16 @@ impl GossipVerifiedBlock { // // It's important to double-check that the proposer still hasn't been observed so we don't // have a race-condition when verifying two blocks simultaneously. - if chain + match chain .observed_block_producers .write() - .observe_proposer(block.message()) + .observe_proposal(block_root, block.message()) .map_err(|e| BlockError::BeaconChainError(e.into()))? 
{ - return Err(BlockError::RepeatProposal { - proposer: block.message().proposer_index(), - slot: block.slot(), - }); - } + SeenBlock::Slashable => return Err(BlockError::Slashable), + SeenBlock::Duplicate => return Err(BlockError::BlockIsAlreadyKnown), + SeenBlock::UniqueNonSlashable => {} + }; if block.message().proposer_index() != expected_proposer as u64 { return Err(BlockError::IncorrectBlockProposer { @@ -1293,6 +1281,12 @@ impl ExecutionPendingBlock { chain: &Arc>, notify_execution_layer: NotifyExecutionLayer, ) -> Result> { + chain + .observed_block_producers + .write() + .observe_proposal(block_root, block.message()) + .map_err(|e| BlockError::BeaconChainError(e.into()))?; + if let Some(parent) = chain .canonical_head .fork_choice_read_lock() diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 23f7692af..55fc24fea 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -343,7 +343,7 @@ where let beacon_block = genesis_block(&mut beacon_state, &self.spec)?; beacon_state - .build_all_caches(&self.spec) + .build_caches(&self.spec) .map_err(|e| format!("Failed to build genesis state caches: {:?}", e))?; let beacon_state_root = beacon_block.message().state_root(); @@ -433,7 +433,7 @@ where // Prime all caches before storing the state in the database and computing the tree hash // root. 
weak_subj_state - .build_all_caches(&self.spec) + .build_caches(&self.spec) .map_err(|e| format!("Error building caches on checkpoint state: {e:?}"))?; weak_subj_state .update_tree_hash_cache() @@ -701,6 +701,8 @@ where store.clone(), Some(current_slot), &self.spec, + self.chain_config.progressive_balances_mode, + &log, )?; } @@ -714,7 +716,7 @@ where head_snapshot .beacon_state - .build_all_caches(&self.spec) + .build_caches(&self.spec) .map_err(|e| format!("Failed to build state caches: {:?}", e))?; // Perform a check to ensure that the finalization points of the head and fork choice are @@ -840,9 +842,7 @@ where observed_sync_aggregators: <_>::default(), // TODO: allow for persisting and loading the pool from disk. observed_block_producers: <_>::default(), - // TODO: allow for persisting and loading the pool from disk. observed_blob_sidecars: <_>::default(), - // TODO: allow for persisting and loading the pool from disk. observed_voluntary_exits: <_>::default(), observed_proposer_slashings: <_>::default(), observed_attester_slashings: <_>::default(), diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 34a5c9a4e..cc7a957ec 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -1,7 +1,7 @@ pub use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; use serde_derive::{Deserialize, Serialize}; use std::time::Duration; -use types::{Checkpoint, Epoch}; +use types::{Checkpoint, Epoch, ProgressiveBalancesMode}; pub const DEFAULT_RE_ORG_THRESHOLD: ReOrgThreshold = ReOrgThreshold(20); pub const DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION: Epoch = Epoch::new(2); @@ -81,6 +81,8 @@ pub struct ChainConfig { pub always_prepare_payload: bool, /// Whether backfill sync processing should be rate-limited. pub enable_backfill_rate_limiting: bool, + /// Whether to use `ProgressiveBalancesCache` in unrealized FFG progression calculation. 
+ pub progressive_balances_mode: ProgressiveBalancesMode, } impl Default for ChainConfig { @@ -111,6 +113,7 @@ impl Default for ChainConfig { genesis_backfill: false, always_prepare_payload: false, enable_backfill_rate_limiting: true, + progressive_balances_mode: ProgressiveBalancesMode::Checked, } } } diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index e4c4ff251..774fb8618 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -216,6 +216,7 @@ pub enum BeaconChainError { BlsToExecutionConflictsWithPool, InconsistentFork(InconsistentFork), ProposerHeadForkChoiceError(fork_choice::Error), + UnableToPublish, AvailabilityCheckError(AvailabilityCheckError), } diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs index 084ae95e0..dc0e34277 100644 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ b/beacon_node/beacon_chain/src/fork_revert.rs @@ -10,7 +10,10 @@ use state_processing::{ use std::sync::Arc; use std::time::Duration; use store::{iter::ParentRootBlockIterator, HotColdDB, ItemStore}; -use types::{BeaconState, ChainSpec, EthSpec, ForkName, Hash256, SignedBeaconBlock, Slot}; +use types::{ + BeaconState, ChainSpec, EthSpec, ForkName, Hash256, ProgressiveBalancesMode, SignedBeaconBlock, + Slot, +}; const CORRUPT_DB_MESSAGE: &str = "The database could be corrupt. Check its file permissions or \ consider deleting it by running with the --purge-db flag."; @@ -100,6 +103,8 @@ pub fn reset_fork_choice_to_finalization, Cold: It store: Arc>, current_slot: Option, spec: &ChainSpec, + progressive_balances_mode: ProgressiveBalancesMode, + log: &Logger, ) -> Result, E>, String> { // Fetch finalized block. 
let finalized_checkpoint = head_state.finalized_checkpoint(); @@ -197,7 +202,9 @@ pub fn reset_fork_choice_to_finalization, Cold: It Duration::from_secs(0), &state, payload_verification_status, + progressive_balances_mode, spec, + log, ) .map_err(|e| format!("Error applying replayed block to fork choice: {:?}", e))?; } diff --git a/beacon_node/beacon_chain/src/observed_block_producers.rs b/beacon_node/beacon_chain/src/observed_block_producers.rs index b5995121b..f76fc5379 100644 --- a/beacon_node/beacon_chain/src/observed_block_producers.rs +++ b/beacon_node/beacon_chain/src/observed_block_producers.rs @@ -1,9 +1,10 @@ //! Provides the `ObservedBlockProducers` struct which allows for rejecting gossip blocks from //! validators that have already produced a block. +use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; -use types::{BeaconBlockRef, Epoch, EthSpec, Slot, Unsigned}; +use types::{BeaconBlockRef, Epoch, EthSpec, Hash256, Slot, Unsigned}; #[derive(Debug, PartialEq)] pub enum Error { @@ -14,6 +15,12 @@ pub enum Error { ValidatorIndexTooHigh(u64), } +#[derive(Eq, Hash, PartialEq, Debug, Default)] +struct ProposalKey { + slot: Slot, + proposer: u64, +} + /// Maintains a cache of observed `(block.slot, block.proposer)`. /// /// The cache supports pruning based upon the finalized epoch. It does not automatically prune, you @@ -27,7 +34,7 @@ pub enum Error { /// known_distinct_shufflings` which is much smaller. 
pub struct ObservedBlockProducers { finalized_slot: Slot, - items: HashMap>, + items: HashMap>, _phantom: PhantomData, } @@ -42,6 +49,24 @@ impl Default for ObservedBlockProducers { } } +pub enum SeenBlock { + Duplicate, + Slashable, + UniqueNonSlashable, +} + +impl SeenBlock { + pub fn proposer_previously_observed(self) -> bool { + match self { + Self::Duplicate | Self::Slashable => true, + Self::UniqueNonSlashable => false, + } + } + pub fn is_slashable(&self) -> bool { + matches!(self, Self::Slashable) + } +} + impl ObservedBlockProducers { /// Observe that the `block` was produced by `block.proposer_index` at `block.slot`. This will /// update `self` so future calls to it indicate that this block is known. @@ -52,16 +77,44 @@ impl ObservedBlockProducers { /// /// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`. /// - `block.slot` is equal to or less than the latest pruned `finalized_slot`. - pub fn observe_proposer(&mut self, block: BeaconBlockRef<'_, E>) -> Result { + pub fn observe_proposal( + &mut self, + block_root: Hash256, + block: BeaconBlockRef<'_, E>, + ) -> Result { self.sanitize_block(block)?; - let did_not_exist = self - .items - .entry(block.slot()) - .or_insert_with(|| HashSet::with_capacity(E::SlotsPerEpoch::to_usize())) - .insert(block.proposer_index()); + let key = ProposalKey { + slot: block.slot(), + proposer: block.proposer_index(), + }; - Ok(!did_not_exist) + let entry = self.items.entry(key); + + let slashable_proposal = match entry { + Entry::Occupied(mut occupied_entry) => { + let block_roots = occupied_entry.get_mut(); + let newly_inserted = block_roots.insert(block_root); + + let is_equivocation = block_roots.len() > 1; + + if is_equivocation { + SeenBlock::Slashable + } else if !newly_inserted { + SeenBlock::Duplicate + } else { + SeenBlock::UniqueNonSlashable + } + } + Entry::Vacant(vacant_entry) => { + let block_roots = HashSet::from([block_root]); + vacant_entry.insert(block_roots); + + 
SeenBlock::UniqueNonSlashable + } + }; + + Ok(slashable_proposal) } /// Returns `Ok(true)` if the `block` has been observed before, `Ok(false)` if not. Does not @@ -72,15 +125,33 @@ impl ObservedBlockProducers { /// /// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`. /// - `block.slot` is equal to or less than the latest pruned `finalized_slot`. - pub fn proposer_has_been_observed(&self, block: BeaconBlockRef<'_, E>) -> Result { + pub fn proposer_has_been_observed( + &self, + block: BeaconBlockRef<'_, E>, + block_root: Hash256, + ) -> Result { self.sanitize_block(block)?; - let exists = self - .items - .get(&block.slot()) - .map_or(false, |set| set.contains(&block.proposer_index())); + let key = ProposalKey { + slot: block.slot(), + proposer: block.proposer_index(), + }; - Ok(exists) + if let Some(block_roots) = self.items.get(&key) { + let block_already_known = block_roots.contains(&block_root); + let no_prev_known_blocks = + block_roots.difference(&HashSet::from([block_root])).count() == 0; + + if !no_prev_known_blocks { + Ok(SeenBlock::Slashable) + } else if block_already_known { + Ok(SeenBlock::Duplicate) + } else { + Ok(SeenBlock::UniqueNonSlashable) + } + } else { + Ok(SeenBlock::UniqueNonSlashable) + } } /// Returns `Ok(())` if the given `block` is sane. @@ -112,15 +183,15 @@ impl ObservedBlockProducers { } self.finalized_slot = finalized_slot; - self.items.retain(|slot, _set| *slot > finalized_slot); + self.items.retain(|key, _| key.slot > finalized_slot); } /// Returns `true` if the given `validator_index` has been stored in `self` at `epoch`. /// /// This is useful for doppelganger detection. 
pub fn index_seen_at_epoch(&self, validator_index: u64, epoch: Epoch) -> bool { - self.items.iter().any(|(slot, producers)| { - slot.epoch(E::slots_per_epoch()) == epoch && producers.contains(&validator_index) + self.items.iter().any(|(key, _)| { + key.slot.epoch(E::slots_per_epoch()) == epoch && key.proposer == validator_index }) } } @@ -148,9 +219,12 @@ mod tests { // Slot 0, proposer 0 let block_a = get_block(0, 0); + let block_root = block_a.canonical_root(); assert_eq!( - cache.observe_proposer(block_a.to_ref()), + cache + .observe_proposal(block_root, block_a.to_ref()) + .map(SeenBlock::proposer_previously_observed), Ok(false), "can observe proposer, indicates proposer unobserved" ); @@ -164,7 +238,10 @@ mod tests { assert_eq!( cache .items - .get(&Slot::new(0)) + .get(&ProposalKey { + slot: Slot::new(0), + proposer: 0 + }) .expect("slot zero should be present") .len(), 1, @@ -182,7 +259,10 @@ mod tests { assert_eq!( cache .items - .get(&Slot::new(0)) + .get(&ProposalKey { + slot: Slot::new(0), + proposer: 0 + }) .expect("slot zero should be present") .len(), 1, @@ -207,9 +287,12 @@ mod tests { // First slot of finalized epoch, proposer 0 let block_b = get_block(E::slots_per_epoch(), 0); + let block_root_b = block_b.canonical_root(); assert_eq!( - cache.observe_proposer(block_b.to_ref()), + cache + .observe_proposal(block_root_b, block_b.to_ref()) + .map(SeenBlock::proposer_previously_observed), Err(Error::FinalizedBlock { slot: E::slots_per_epoch().into(), finalized_slot: E::slots_per_epoch().into(), @@ -229,7 +312,9 @@ mod tests { let block_b = get_block(three_epochs, 0); assert_eq!( - cache.observe_proposer(block_b.to_ref()), + cache + .observe_proposal(block_root_b, block_b.to_ref()) + .map(SeenBlock::proposer_previously_observed), Ok(false), "can insert non-finalized block" ); @@ -238,7 +323,10 @@ mod tests { assert_eq!( cache .items - .get(&Slot::new(three_epochs)) + .get(&ProposalKey { + slot: Slot::new(three_epochs), + proposer: 0 + }) .expect("the 
three epochs slot should be present") .len(), 1, @@ -262,7 +350,10 @@ mod tests { assert_eq!( cache .items - .get(&Slot::new(three_epochs)) + .get(&ProposalKey { + slot: Slot::new(three_epochs), + proposer: 0 + }) .expect("the three epochs slot should be present") .len(), 1, @@ -276,24 +367,33 @@ mod tests { // Slot 0, proposer 0 let block_a = get_block(0, 0); + let block_root_a = block_a.canonical_root(); assert_eq!( - cache.proposer_has_been_observed(block_a.to_ref()), + cache + .proposer_has_been_observed(block_a.to_ref(), block_a.canonical_root()) + .map(|x| x.proposer_previously_observed()), Ok(false), "no observation in empty cache" ); assert_eq!( - cache.observe_proposer(block_a.to_ref()), + cache + .observe_proposal(block_root_a, block_a.to_ref()) + .map(SeenBlock::proposer_previously_observed), Ok(false), "can observe proposer, indicates proposer unobserved" ); assert_eq!( - cache.proposer_has_been_observed(block_a.to_ref()), + cache + .proposer_has_been_observed(block_a.to_ref(), block_a.canonical_root()) + .map(|x| x.proposer_previously_observed()), Ok(true), "observed block is indicated as true" ); assert_eq!( - cache.observe_proposer(block_a.to_ref()), + cache + .observe_proposal(block_root_a, block_a.to_ref()) + .map(SeenBlock::proposer_previously_observed), Ok(true), "observing again indicates true" ); @@ -303,7 +403,10 @@ mod tests { assert_eq!( cache .items - .get(&Slot::new(0)) + .get(&ProposalKey { + slot: Slot::new(0), + proposer: 0 + }) .expect("slot zero should be present") .len(), 1, @@ -312,24 +415,33 @@ mod tests { // Slot 1, proposer 0 let block_b = get_block(1, 0); + let block_root_b = block_b.canonical_root(); assert_eq!( - cache.proposer_has_been_observed(block_b.to_ref()), + cache + .proposer_has_been_observed(block_b.to_ref(), block_b.canonical_root()) + .map(|x| x.proposer_previously_observed()), Ok(false), "no observation for new slot" ); assert_eq!( - cache.observe_proposer(block_b.to_ref()), + cache + 
.observe_proposal(block_root_b, block_b.to_ref()) + .map(SeenBlock::proposer_previously_observed), Ok(false), "can observe proposer for new slot, indicates proposer unobserved" ); assert_eq!( - cache.proposer_has_been_observed(block_b.to_ref()), + cache + .proposer_has_been_observed(block_b.to_ref(), block_b.canonical_root()) + .map(|x| x.proposer_previously_observed()), Ok(true), "observed block in slot 1 is indicated as true" ); assert_eq!( - cache.observe_proposer(block_b.to_ref()), + cache + .observe_proposal(block_root_b, block_b.to_ref()) + .map(SeenBlock::proposer_previously_observed), Ok(true), "observing slot 1 again indicates true" ); @@ -339,7 +451,10 @@ mod tests { assert_eq!( cache .items - .get(&Slot::new(0)) + .get(&ProposalKey { + slot: Slot::new(0), + proposer: 0 + }) .expect("slot zero should be present") .len(), 1, @@ -348,7 +463,10 @@ mod tests { assert_eq!( cache .items - .get(&Slot::new(1)) + .get(&ProposalKey { + slot: Slot::new(1), + proposer: 0 + }) .expect("slot zero should be present") .len(), 1, @@ -357,45 +475,54 @@ mod tests { // Slot 0, proposer 1 let block_c = get_block(0, 1); + let block_root_c = block_c.canonical_root(); assert_eq!( - cache.proposer_has_been_observed(block_c.to_ref()), + cache + .proposer_has_been_observed(block_c.to_ref(), block_c.canonical_root()) + .map(|x| x.proposer_previously_observed()), Ok(false), "no observation for new proposer" ); assert_eq!( - cache.observe_proposer(block_c.to_ref()), + cache + .observe_proposal(block_root_c, block_c.to_ref()) + .map(SeenBlock::proposer_previously_observed), Ok(false), "can observe new proposer, indicates proposer unobserved" ); assert_eq!( - cache.proposer_has_been_observed(block_c.to_ref()), + cache + .proposer_has_been_observed(block_c.to_ref(), block_c.canonical_root()) + .map(|x| x.proposer_previously_observed()), Ok(true), "observed new proposer block is indicated as true" ); assert_eq!( - cache.observe_proposer(block_c.to_ref()), + cache + 
.observe_proposal(block_root_c, block_c.to_ref()) + .map(SeenBlock::proposer_previously_observed), Ok(true), "observing new proposer again indicates true" ); assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); - assert_eq!(cache.items.len(), 2, "two slots should be present"); + assert_eq!(cache.items.len(), 3, "three slots should be present"); assert_eq!( cache .items - .get(&Slot::new(0)) - .expect("slot zero should be present") - .len(), + .iter() + .filter(|(k, _)| k.slot == cache.finalized_slot) + .count(), 2, "two proposers should be present in slot 0" ); assert_eq!( cache .items - .get(&Slot::new(1)) - .expect("slot zero should be present") - .len(), + .iter() + .filter(|(k, _)| k.slot == Slot::new(1)) + .count(), 1, "only one proposer should be present in slot 1" ); diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 32b5f2139..c34cab1e7 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -808,6 +808,15 @@ where state.get_block_root(slot).unwrap() == state.get_block_root(slot - 1).unwrap() } + pub async fn make_blinded_block( + &self, + state: BeaconState, + slot: Slot, + ) -> (SignedBlindedBeaconBlock, BeaconState) { + let (unblinded, new_state) = self.make_block(state, slot).await; + (unblinded.into(), new_state) + } + /// Returns a newly created block, signed by the proposer for the given slot. 
pub async fn make_block( &self, @@ -820,9 +829,7 @@ where complete_state_advance(&mut state, None, slot, &self.spec) .expect("should be able to advance state to slot"); - state - .build_all_caches(&self.spec) - .expect("should build caches"); + state.build_caches(&self.spec).expect("should build caches"); let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); @@ -906,9 +913,7 @@ where complete_state_advance(&mut state, None, slot, &self.spec) .expect("should be able to advance state to slot"); - state - .build_all_caches(&self.spec) - .expect("should build caches"); + state.build_caches(&self.spec).expect("should build caches"); let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); @@ -1626,6 +1631,36 @@ where .sign(sk, &fork, genesis_validators_root, &self.chain.spec) } + pub fn add_proposer_slashing(&self, validator_index: u64) -> Result<(), String> { + let propposer_slashing = self.make_proposer_slashing(validator_index); + if let ObservationOutcome::New(verified_proposer_slashing) = self + .chain + .verify_proposer_slashing_for_gossip(propposer_slashing) + .expect("should verify proposer slashing for gossip") + { + self.chain + .import_proposer_slashing(verified_proposer_slashing); + Ok(()) + } else { + Err("should observe new proposer slashing".to_string()) + } + } + + pub fn add_attester_slashing(&self, validator_indices: Vec) -> Result<(), String> { + let attester_slashing = self.make_attester_slashing(validator_indices); + if let ObservationOutcome::New(verified_attester_slashing) = self + .chain + .verify_attester_slashing_for_gossip(attester_slashing) + .expect("should verify attester slashing for gossip") + { + self.chain + .import_attester_slashing(verified_attester_slashing); + Ok(()) + } else { + Err("should observe new attester slashing".to_string()) + } + } + pub fn add_bls_to_execution_change( &self, validator_index: u64, @@ -1804,7 +1839,7 @@ where self.set_current_slot(slot); let block_hash: 
SignedBeaconBlockHash = self .chain - .process_block(block_root, block.into(), NotifyExecutionLayer::Yes) + .process_block(block_root, block.into(), NotifyExecutionLayer::Yes,|| Ok(()),) .await? .try_into() .unwrap(); @@ -1823,6 +1858,7 @@ where wrapped_block.canonical_root(), wrapped_block, NotifyExecutionLayer::Yes, + || Ok(()), ) .await? .try_into() diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 5280fd518..48bd8f2dd 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -459,6 +459,7 @@ async fn assert_invalid_signature( chain_segment_blobs[block_index].clone(), ), NotifyExecutionLayer::Yes, + || Ok(()), ) .await; assert!( @@ -526,6 +527,7 @@ async fn invalid_signature_gossip_block() { signed_block.canonical_root(), Arc::new(signed_block), NotifyExecutionLayer::Yes, + || Ok(()), ) .await, Err(BlockError::InvalidSignature) @@ -859,6 +861,7 @@ async fn block_gossip_verification() { gossip_verified.block_root, gossip_verified, NotifyExecutionLayer::Yes, + || Ok(()), ) .await .expect("should import valid gossip verified block"); @@ -1069,11 +1072,7 @@ async fn block_gossip_verification() { assert!( matches!( unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone()).into()).await), - BlockError::RepeatProposal { - proposer, - slot, - } - if proposer == other_proposer && slot == block.message().slot() + BlockError::BlockIsAlreadyKnown, ), "should register any valid signature against the proposer, even if the block failed later verification" ); @@ -1106,11 +1105,7 @@ async fn block_gossip_verification() { .await .err() .expect("should error when processing known block"), - BlockError::RepeatProposal { - proposer, - slot, - } - if proposer == block.message().proposer_index() && slot == block.message().slot() + BlockError::BlockIsAlreadyKnown ), "the second proposal by this validator should be rejected" ); 
@@ -1159,6 +1154,7 @@ async fn verify_block_for_gossip_slashing_detection() { verified_block.block_root, verified_block, NotifyExecutionLayer::Yes, + || Ok(()), ) .await .unwrap(); @@ -1198,6 +1194,7 @@ async fn verify_block_for_gossip_doppelganger_detection() { verified_block.block_root, verified_block, NotifyExecutionLayer::Yes, + || Ok(()), ) .await .unwrap(); @@ -1345,6 +1342,7 @@ async fn add_base_block_to_altair_chain() { base_block.canonical_root(), Arc::new(base_block.clone()), NotifyExecutionLayer::Yes, + || Ok(()), ) .await .err() @@ -1479,6 +1477,7 @@ async fn add_altair_block_to_base_chain() { altair_block.canonical_root(), Arc::new(altair_block.clone()), NotifyExecutionLayer::Yes, + || Ok(()), ) .await .err() diff --git a/beacon_node/beacon_chain/tests/capella.rs b/beacon_node/beacon_chain/tests/capella.rs index e910e8134..f0b799ec9 100644 --- a/beacon_node/beacon_chain/tests/capella.rs +++ b/beacon_node/beacon_chain/tests/capella.rs @@ -133,13 +133,8 @@ async fn base_altair_merge_capella() { for _ in (merge_fork_slot.as_u64() + 3)..capella_fork_slot.as_u64() { harness.extend_slots(1).await; let block = &harness.chain.head_snapshot().beacon_block; - let full_payload: FullPayload = block - .message() - .body() - .execution_payload() - .unwrap() - .clone() - .into(); + let full_payload: FullPayload = + block.message().body().execution_payload().unwrap().into(); // pre-capella shouldn't have withdrawals assert!(full_payload.withdrawals_root().is_err()); execution_payloads.push(full_payload); @@ -151,13 +146,8 @@ async fn base_altair_merge_capella() { for _ in 0..16 { harness.extend_slots(1).await; let block = &harness.chain.head_snapshot().beacon_block; - let full_payload: FullPayload = block - .message() - .body() - .execution_payload() - .unwrap() - .clone() - .into(); + let full_payload: FullPayload = + block.message().body().execution_payload().unwrap().into(); // post-capella should have withdrawals assert!(full_payload.withdrawals_root().is_ok()); 
execution_payloads.push(full_payload); diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 65860d204..60ba8f288 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -698,6 +698,7 @@ async fn invalidates_all_descendants() { fork_block.canonical_root(), Arc::new(fork_block), NotifyExecutionLayer::Yes, + || Ok(()), ) .await .unwrap() @@ -797,6 +798,7 @@ async fn switches_heads() { fork_block.canonical_root(), Arc::new(fork_block), NotifyExecutionLayer::Yes, + || Ok(()), ) .await .unwrap() @@ -1055,7 +1057,9 @@ async fn invalid_parent() { // Ensure the block built atop an invalid payload is invalid for import. assert!(matches!( - rig.harness.chain.process_block(block.canonical_root(), block.clone(), NotifyExecutionLayer::Yes).await, + rig.harness.chain.process_block(block.canonical_root(), block.clone(), NotifyExecutionLayer::Yes, + || Ok(()), + ).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) if invalid_root == parent_root )); @@ -1069,8 +1073,9 @@ async fn invalid_parent() { Duration::from_secs(0), &state, PayloadVerificationStatus::Optimistic, + rig.harness.chain.config.progressive_balances_mode, &rig.harness.chain.spec, - + rig.harness.logger() ), Err(ForkChoiceError::ProtoArrayStringError(message)) if message.contains(&format!( @@ -1341,7 +1346,12 @@ async fn build_optimistic_chain( for block in blocks { rig.harness .chain - .process_block(block.canonical_root(), block, NotifyExecutionLayer::Yes) + .process_block( + block.canonical_root(), + block, + NotifyExecutionLayer::Yes, + || Ok(()), + ) .await .unwrap(); } @@ -1901,6 +1911,7 @@ async fn recover_from_invalid_head_by_importing_blocks() { fork_block.canonical_root(), fork_block.clone(), NotifyExecutionLayer::Yes, + || Ok(()), ) .await .unwrap(); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs 
b/beacon_node/beacon_chain/tests/store_tests.rs index c58234b6e..7a86b5f93 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2178,6 +2178,7 @@ async fn weak_subjectivity_sync() { full_block.canonical_root(), BlockWrapper::new(Arc::new(full_block), blobs), NotifyExecutionLayer::Yes, + || Ok(()), ) .await .unwrap(); diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 163930f39..80e4992ea 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -686,6 +686,7 @@ async fn run_skip_slot_test(skip_slots: u64) { harness_a.chain.head_snapshot().beacon_block_root, harness_a.get_head_block(), NotifyExecutionLayer::Yes, + || Ok(()) ) .await .unwrap(); diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs index 828be8e57..299bc019c 100644 --- a/beacon_node/http_api/src/block_rewards.rs +++ b/beacon_node/http_api/src/block_rewards.rs @@ -49,7 +49,7 @@ pub fn get_block_rewards( .map_err(beacon_chain_error)?; state - .build_all_caches(&chain.spec) + .build_caches(&chain.spec) .map_err(beacon_state_error)?; let mut reward_cache = Default::default(); diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 1a2a14611..c0d8a9615 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -32,7 +32,7 @@ use beacon_chain::{ pub use block_id::BlockId; use directory::DEFAULT_ROOT_DIR; use eth2::types::{ - self as api_types, EndpointVersion, ForkChoice, ForkChoiceNode, SignedBlockContents, + self as api_types, BroadcastValidation, EndpointVersion, ForkChoice, ForkChoiceNode, SignedBlockContents, SkipRandaoVerification, ValidatorId, ValidatorStatus, }; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; @@ -41,7 +41,9 @@ use logging::SSELoggingComponents; use network::{NetworkMessage, 
NetworkSenders, ValidatorSubscriptionMessage}; use operation_pool::ReceivedPreCapella; use parking_lot::RwLock; -use publish_blocks::ProvenancedBlock; +pub use publish_blocks::{ + publish_blinded_block, publish_block, reconstruct_block, ProvenancedBlock, +}; use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; @@ -325,6 +327,7 @@ pub fn serve( }; let eth_v1 = single_version(V1); + let eth_v2 = single_version(V2); // Create a `warp` filter that provides access to the network globals. let inner_network_globals = ctx.network_globals.clone(); @@ -1223,16 +1226,59 @@ pub fn serve( log: Logger| async move { publish_blocks::publish_block( None, - ProvenancedBlock::Local(block_contents), + ProvenancedBlock::local(block_contents), chain, &network_tx, log, + BroadcastValidation::default(), ) .await .map(|()| warp::reply().into_response()) }, ); + let post_beacon_blocks_v2 = eth_v2 + .and(warp::path("beacon")) + .and(warp::path("blocks")) + .and(warp::query::()) + .and(warp::path::end()) + .and(warp::body::json()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .then( + |validation_level: api_types::BroadcastValidationQuery, + block_contents: SignedBlockContents, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| async move { + match publish_blocks::publish_block( + None, + ProvenancedBlock::local(block_contents), + chain, + &network_tx, + log, + validation_level.broadcast_validation, + ) + .await + { + Ok(()) => warp::reply().into_response(), + Err(e) => match warp_utils::reject::handle_rejection(e).await { + Ok(reply) => reply.into_response(), + Err(_) => warp::reply::with_status( + StatusCode::INTERNAL_SERVER_ERROR, + eth2::StatusCode::INTERNAL_SERVER_ERROR, + ) + .into_response(), + }, + } + }, + ); + + /* + * beacon/blocks + */ + // POST beacon/blinded_blocks let post_beacon_blinded_blocks = eth_v1 .and(warp::path("beacon")) @@ -1247,9 +1293,52 @@ pub fn 
serve( chain: Arc>, network_tx: UnboundedSender>, log: Logger| async move { - publish_blocks::publish_blinded_block(block, chain, &network_tx, log) - .await - .map(|()| warp::reply().into_response()) + publish_blocks::publish_blinded_block( + block, + chain, + &network_tx, + log, + BroadcastValidation::default(), + ) + .await + .map(|()| warp::reply().into_response()) + }, + ); + + let post_beacon_blinded_blocks_v2 = eth_v2 + .and(warp::path("beacon")) + .and(warp::path("blinded_blocks")) + .and(warp::query::()) + .and(warp::path::end()) + .and(warp::body::json()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .then( + |validation_level: api_types::BroadcastValidationQuery, + block_contents: SignedBlockContents>, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| async move { + match publish_blocks::publish_blinded_block( + block_contents, + chain, + &network_tx, + log, + validation_level.broadcast_validation, + ) + .await + { + Ok(()) => warp::reply().into_response(), + Err(e) => match warp_utils::reject::handle_rejection(e).await { + Ok(reply) => reply.into_response(), + Err(_) => warp::reply::with_status( + StatusCode::INTERNAL_SERVER_ERROR, + eth2::StatusCode::INTERNAL_SERVER_ERROR, + ) + .into_response(), + }, + } }, ); @@ -2369,24 +2458,41 @@ pub fn serve( .and(warp::path("health")) .and(warp::path::end()) .and(network_globals.clone()) - .and_then(|network_globals: Arc>| { - blocking_response_task(move || match *network_globals.sync_state.read() { - SyncState::SyncingFinalized { .. } - | SyncState::SyncingHead { .. } - | SyncState::SyncTransition - | SyncState::BackFillSyncing { .. 
} => Ok(warp::reply::with_status( - warp::reply(), - warp::http::StatusCode::PARTIAL_CONTENT, - )), - SyncState::Synced => Ok(warp::reply::with_status( - warp::reply(), - warp::http::StatusCode::OK, - )), - SyncState::Stalled => Err(warp_utils::reject::not_synced( - "sync stalled, beacon chain may not yet be initialized.".to_string(), - )), - }) - }); + .and(chain_filter.clone()) + .and_then( + |network_globals: Arc>, chain: Arc>| { + async move { + let el_offline = if let Some(el) = &chain.execution_layer { + el.is_offline_or_erroring().await + } else { + true + }; + + blocking_response_task(move || { + let is_optimistic = chain + .is_optimistic_or_invalid_head() + .map_err(warp_utils::reject::beacon_chain_error)?; + + let is_syncing = !network_globals.sync_state.read().is_synced(); + + if el_offline { + Err(warp_utils::reject::not_synced("execution layer is offline".to_string())) + } else if is_syncing || is_optimistic { + Ok(warp::reply::with_status( + warp::reply(), + warp::http::StatusCode::PARTIAL_CONTENT, + )) + } else { + Ok(warp::reply::with_status( + warp::reply(), + warp::http::StatusCode::OK, + )) + } + }) + .await + } + }, + ); // GET node/peers/{peer_id} let get_node_peers_by_id = eth_v1 @@ -3866,6 +3972,8 @@ pub fn serve( warp::post().and( post_beacon_blocks .uor(post_beacon_blinded_blocks) + .uor(post_beacon_blocks_v2) + .uor(post_beacon_blinded_blocks_v2) .uor(post_beacon_pool_attestations) .uor(post_beacon_pool_attester_slashings) .uor(post_beacon_pool_proposer_slashings) diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index b8d940ff5..3a4875113 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -2,9 +2,12 @@ use crate::metrics; use beacon_chain::blob_verification::{AsBlock, BlockWrapper}; use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; -use beacon_chain::{AvailabilityProcessingStatus, NotifyExecutionLayer}; -use 
beacon_chain::{BeaconChain, BeaconChainTypes, BlockError}; +use beacon_chain::{ + BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, GossipVerifiedBlock, + NotifyExecutionLayer, AvailabilityProcessingStatus +}; use eth2::types::SignedBlockContents; +use eth2::types::BroadcastValidation; use execution_layer::ProvenancedPayload; use lighthouse_network::PubsubMessage; use network::NetworkMessage; @@ -29,6 +32,16 @@ pub enum ProvenancedBlock { Builder(SignedBlockContents>), } +impl ProvenancedBlock { + pub fn local(block: Arc>) -> Self { + Self::Local(block) + } + + pub fn builder(block: Arc>) -> Self { + Self::Builder(block) + } +} + /// Handles a request from the HTTP API for full blocks. pub async fn publish_block( block_root: Option, @@ -36,6 +49,7 @@ pub async fn publish_block( chain: Arc>, network_tx: &UnboundedSender>, log: Logger, + validation_level: BroadcastValidation, ) -> Result<(), Rejection> { let seen_timestamp = timestamp_now(); let (block, maybe_blobs, is_locally_built_block) = match provenanced_block { @@ -48,17 +62,24 @@ pub async fn publish_block( (Arc::new(block), maybe_blobs, false) } }; - let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); - - //FIXME(sean) have to move this to prior to publishing because it's included in the blobs sidecar message. + let beacon_block = block.clone(); + let delay = get_block_delay_ms(seen_timestamp, beacon_block.message(), &chain.slot_clock); + //FIXME(sean) have to move this to prior to publishing because it's included in the blobs sidecar message. 
//this may skew metrics let block_root = block_root.unwrap_or_else(|| block.canonical_root()); - debug!( - log, - "Signed block published to HTTP API"; - "slot" => block.slot() - ); + debug!(log, "Signed block received in HTTP API"; "slot" => beacon_block.slot()); + /* actually publish a block */ + let publish_block = move |block: Arc>, + sender, + log, + seen_timestamp| { + let publish_timestamp = timestamp_now(); + let publish_delay = publish_timestamp + .checked_sub(seen_timestamp) + .unwrap_or_else(|| Duration::from_secs(0)); + + info!(log, "Signed block published to network via HTTP API"; "slot" => block.slot(), "publish_delay" => ?publish_delay); // Send the block, regardless of whether or not it is valid. The API // specification is very clear that this is the desired behaviour. let wrapped_block: BlockWrapper = match block.as_ref() { @@ -88,13 +109,70 @@ pub async fn publish_block( } } }; - // Determine the delay after the start of the slot, register it with metrics. + let message = PubsubMessage::BeaconBlock(block); + crate::publish_pubsub_message(&sender, message) + .map_err(|_| BeaconChainError::UnableToPublish.into()) + }; + /* if we can form a `GossipVerifiedBlock`, we've passed our basic gossip checks */ + let gossip_verified_block = GossipVerifiedBlock::new(block, &chain).map_err(|e| { + warn!(log, "Not publishing block, not gossip verified"; "slot" => beacon_block.slot(), "error" => ?e); + warp_utils::reject::custom_bad_request(e.to_string()) + })?; + + let block_root = block_root.unwrap_or(gossip_verified_block.block_root); + + if let BroadcastValidation::Gossip = validation_level { + publish_block( + beacon_block.clone(), + network_tx.clone(), + log.clone(), + seen_timestamp, + ) + .map_err(|_| warp_utils::reject::custom_server_error("unable to publish".into()))?; + } + + /* only publish if gossip- and consensus-valid and equivocation-free */ + let chain_clone = chain.clone(); + let block_clone = beacon_block.clone(); + let log_clone = 
log.clone(); + let sender_clone = network_tx.clone(); + + let publish_fn = move || match validation_level { + BroadcastValidation::Gossip => Ok(()), + BroadcastValidation::Consensus => { + publish_block(block_clone, sender_clone, log_clone, seen_timestamp) + } + BroadcastValidation::ConsensusAndEquivocation => { + if chain_clone + .observed_block_producers + .read() + .proposer_has_been_observed(block_clone.message(), block_root) + .map_err(|e| BlockError::BeaconChainError(e.into()))? + .is_slashable() + { + warn!( + log_clone, + "Not publishing equivocating block"; + "slot" => block_clone.slot() + ); + Err(BlockError::Slashable) + } else { + publish_block(block_clone, sender_clone, log_clone, seen_timestamp) + } + } + }; let block_clone = wrapped_block.block_cloned(); let slot = block_clone.message().slot(); let proposer_index = block_clone.message().proposer_index(); + match chain - .process_block(block_root, wrapped_block, NotifyExecutionLayer::Yes) + .process_block( + block_root, + gossip_verified_block, + NotifyExecutionLayer::Yes, + publish_fn, + ) .await { Ok(AvailabilityProcessingStatus::Imported(root)) => { @@ -144,35 +222,32 @@ pub async fn publish_block( ); Err(warp_utils::reject::broadcast_without_import(msg)) } - Err(BlockError::BlockIsAlreadyKnown) => { - info!( - log, - "Block from HTTP API already known"; - "block" => ?block_root, - "slot" => slot, - ); - Ok(()) + Err(BlockError::BeaconChainError(BeaconChainError::UnableToPublish)) => { + Err(warp_utils::reject::custom_server_error( + "unable to publish to network channel".to_string(), + )) } - Err(BlockError::RepeatProposal { proposer, slot }) => { - warn!( - log, - "Block ignored due to repeat proposal"; - "msg" => "this can happen when a VC uses fallback BNs. 
\ - whilst this is not necessarily an error, it can indicate issues with a BN \ - or between the VC and BN.", - "slot" => slot, - "proposer" => proposer, - ); + Err(BlockError::Slashable) => Err(warp_utils::reject::custom_bad_request( + "proposal for this slot and proposer has already been seen".to_string(), + )), + Err(BlockError::BlockIsAlreadyKnown) => { + info!(log, "Block from HTTP API already known"; "block" => ?block_root); Ok(()) } Err(e) => { - let msg = format!("{:?}", e); - error!( - log, - "Invalid block provided to HTTP API"; - "reason" => &msg - ); - Err(warp_utils::reject::broadcast_without_import(msg)) + if let BroadcastValidation::Gossip = validation_level { + Err(warp_utils::reject::broadcast_without_import(format!("{e}"))) + } else { + let msg = format!("{:?}", e); + error!( + log, + "Invalid block provided to HTTP API"; + "reason" => &msg + ); + Err(warp_utils::reject::custom_bad_request(format!( + "Invalid block: {e}" + ))) + } } } } @@ -184,21 +259,31 @@ pub async fn publish_blinded_block( chain: Arc>, network_tx: &UnboundedSender>, log: Logger, + validation_level: BroadcastValidation, ) -> Result<(), Rejection> { let block_root = block.canonical_root(); - let full_block = reconstruct_block(chain.clone(), block_root, block, log.clone()).await?; - publish_block::(Some(block_root), full_block, chain, network_tx, log).await + let full_block: ProvenancedBlock = + reconstruct_block(chain.clone(), block_root, block, log.clone()).await?; + publish_block::( + Some(block_root), + full_block, + chain, + network_tx, + log, + validation_level, + ) + .await } /// Deconstruct the given blinded block, and construct a full block. This attempts to use the /// execution layer's payload cache, and if that misses, attempts a blind block proposal to retrieve /// the full payload. 
-async fn reconstruct_block( +pub async fn reconstruct_block( chain: Arc>, block_root: Hash256, block: SignedBeaconBlock>, log: Logger, -) -> Result, Rejection> { +) -> Result, Rejection> { let full_payload_opt = if let Ok(payload_header) = block.message().body().execution_payload() { let el = chain.execution_layer.as_ref().ok_or_else(|| { warp_utils::reject::custom_server_error("Missing execution layer".to_string()) @@ -264,15 +349,15 @@ async fn reconstruct_block( None => block .try_into_full_block(None) .map(SignedBlockContents::Block) - .map(ProvenancedBlock::Local), + .map(ProvenancedBlock::local), Some(ProvenancedPayload::Local(full_payload)) => block .try_into_full_block(Some(full_payload)) .map(SignedBlockContents::Block) - .map(ProvenancedBlock::Local), + .map(ProvenancedBlock::local), Some(ProvenancedPayload::Builder(full_payload)) => block .try_into_full_block(Some(full_payload)) .map(SignedBlockContents::Block) - .map(ProvenancedBlock::Builder), + .map(ProvenancedBlock::builder), } .ok_or_else(|| { warp_utils::reject::custom_server_error("Unable to add payload to block".to_string()) diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs new file mode 100644 index 000000000..4819dd99e --- /dev/null +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -0,0 +1,1270 @@ +use beacon_chain::{ + test_utils::{AttestationStrategy, BlockStrategy}, + GossipVerifiedBlock, +}; +use eth2::types::{BroadcastValidation, SignedBeaconBlock, SignedBlindedBeaconBlock}; +use http_api::test_utils::InteractiveTester; +use http_api::{publish_blinded_block, publish_block, reconstruct_block, ProvenancedBlock}; +use tree_hash::TreeHash; +use types::{Hash256, MainnetEthSpec, Slot}; +use warp::Rejection; +use warp_utils::reject::CustomBadRequest; + +use eth2::reqwest::StatusCode; + +type E = MainnetEthSpec; + +/* + * We have the following test cases, which are duplicated for the blinded variant of 
the route: + * + * - `broadcast_validation=gossip` + * - Invalid (400) + * - Full Pass (200) + * - Partial Pass (202) + * - `broadcast_validation=consensus` + * - Invalid (400) + * - Only gossip (400) + * - Only consensus pass (i.e., equivocates) (200) + * - Full pass (200) + * - `broadcast_validation=consensus_and_equivocation` + * - Invalid (400) + * - Invalid due to early equivocation (400) + * - Only gossip (400) + * - Only consensus (400) + * - Pass (200) + * + */ + +/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=gossip`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn gossip_invalid() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let chain_state_before = tester.harness.get_current_state(); + let slot = chain_state_before.slot() + 1; + + tester.harness.advance_slot(); + + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(chain_state_before, slot, |b| { + *b.state_root_mut() = Hash256::zero(); + *b.parent_root_mut() = Hash256::zero(); + }) + .await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + ); +} + +/// This test checks that a block that is valid from a gossip perspective is accepted when using `broadcast_validation=gossip`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn gossip_partial_pass() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let chain_state_before = tester.harness.get_current_state(); + let slot = chain_state_before.slot() + 1; + + tester.harness.advance_slot(); + + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(chain_state_before, slot, |b| { + *b.state_root_mut() = Hash256::random() + }) + .await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + assert!(response.is_err()); + + let error_response = response.unwrap_err(); + + assert_eq!(error_response.status(), Some(StatusCode::ACCEPTED)); +} + +// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=gossip`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn gossip_full_pass() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBeaconBlock, _) = tester.harness.make_block(state_a, slot_b).await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + + assert!(response.is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + +/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn consensus_invalid() { + let validation_level: Option = Some(BroadcastValidation::Consensus); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let chain_state_before = tester.harness.get_current_state(); + let slot = chain_state_before.slot() + 1; + + tester.harness.advance_slot(); + + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(chain_state_before, slot, |b| { + *b.state_root_mut() = Hash256::zero(); + *b.parent_root_mut() = Hash256::zero(); + }) + .await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + ); +} + +/// This test checks that a block that is only valid from a gossip perspective is rejected when using `broadcast_validation=consensus`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn consensus_gossip() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Consensus); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero()) + .await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()) + ); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective, but nonetheless equivocates, is accepted when using `broadcast_validation=consensus`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn consensus_partial_pass_only_consensus() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Consensus); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + let test_logger = tester.harness.logger().clone(); + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block_a, state_after_a): (SignedBeaconBlock, _) = + tester.harness.make_block(state_a.clone(), slot_b).await; + let (block_b, state_after_b): (SignedBeaconBlock, _) = + tester.harness.make_block(state_a, slot_b).await; + + /* check for `make_block` curios */ + assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); + assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); + assert_ne!(block_a.state_root(), block_b.state_root()); + + let gossip_block_b = GossipVerifiedBlock::new(block_b.clone().into(), &tester.harness.chain); + assert!(gossip_block_b.is_ok()); + let gossip_block_a = GossipVerifiedBlock::new(block_a.clone().into(), &tester.harness.chain); + assert!(gossip_block_a.is_err()); + + /* submit `block_b` which should induce equivocation */ + let channel = tokio::sync::mpsc::unbounded_channel(); + + let publication_result: Result<(), Rejection> = publish_block( + None, + ProvenancedBlock::local(gossip_block_b.unwrap()), + tester.harness.chain.clone(), + &channel.0, + test_logger, + validation_level.unwrap(), + ) + .await; + + assert!(publication_result.is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block_b.canonical_root())); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=consensus`. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn consensus_full_pass() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Consensus); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBeaconBlock, _) = tester.harness.make_block(state_a, slot_b).await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + + assert!(response.is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + +/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus_and_equivocation`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn equivocation_invalid() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let chain_state_before = tester.harness.get_current_state(); + let slot = chain_state_before.slot() + 1; + + tester.harness.advance_slot(); + + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(chain_state_before, slot, |b| { + *b.state_root_mut() = Hash256::zero(); + *b.parent_root_mut() = Hash256::zero(); + }) + .await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + ); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective is rejected when using `broadcast_validation=consensus_and_equivocation`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn equivocation_consensus_early_equivocation() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block_a, state_after_a): (SignedBeaconBlock, _) = + tester.harness.make_block(state_a.clone(), slot_b).await; + let (block_b, state_after_b): (SignedBeaconBlock, _) = + tester.harness.make_block(state_a, slot_b).await; + + /* check for `make_block` curios */ + assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); + assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); + assert_ne!(block_a.state_root(), block_b.state_root()); + + /* submit `block_a` as valid */ + assert!(tester + .client + .post_beacon_blocks_v2(&block_a, validation_level) + .await + .is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block_a.canonical_root())); + + /* submit `block_b` which should induce equivocation */ + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block_b, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Slashable".to_string()) + ); +} + +/// This test checks that a block that is only valid from a gossip perspective is rejected when using `broadcast_validation=consensus_and_equivocation`. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn equivocation_gossip() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero()) + .await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()) + ); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective but that equivocates **late** is rejected when using `broadcast_validation=consensus_and_equivocation`. +/// +/// This test is unique in that we can't actually test the HTTP API directly, but instead have to hook into the `publish_blocks` code manually. 
This is in order to handle the late equivocation case. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn equivocation_consensus_late_equivocation() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + let test_logger = tester.harness.logger().clone(); + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block_a, state_after_a): (SignedBeaconBlock, _) = + tester.harness.make_block(state_a.clone(), slot_b).await; + let (block_b, state_after_b): (SignedBeaconBlock, _) = + tester.harness.make_block(state_a, slot_b).await; + + /* check for `make_block` curios */ + assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); + assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); + assert_ne!(block_a.state_root(), block_b.state_root()); + + let gossip_block_b = GossipVerifiedBlock::new(block_b.clone().into(), &tester.harness.chain); + assert!(gossip_block_b.is_ok()); + let gossip_block_a = GossipVerifiedBlock::new(block_a.clone().into(), &tester.harness.chain); + assert!(gossip_block_a.is_err()); + + let channel = tokio::sync::mpsc::unbounded_channel(); + + let publication_result: Result<(), Rejection> = publish_block( + None, + ProvenancedBlock::local(gossip_block_b.unwrap()), + tester.harness.chain, + &channel.0, + test_logger, + validation_level.unwrap(), + ) + .await; + + 
assert!(publication_result.is_err()); + + let publication_error = publication_result.unwrap_err(); + + assert!(publication_error.find::().is_some()); + + assert_eq!( + *publication_error.find::().unwrap().0, + "proposal for this slot and proposer has already been seen".to_string() + ); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective (and does not equivocate) is accepted when using `broadcast_validation=consensus_and_equivocation`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn equivocation_full_pass() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBeaconBlock, _) = tester.harness.make_block(state_a, slot_b).await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + + assert!(response.is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + +/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=gossip`. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_gossip_invalid() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let chain_state_before = tester.harness.get_current_state(); + let slot = chain_state_before.slot() + 1; + + tester.harness.advance_slot(); + + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(chain_state_before, slot, |b| { + *b.state_root_mut() = Hash256::zero(); + *b.parent_root_mut() = Hash256::zero(); + }) + .await; + + let blinded_block: SignedBlindedBeaconBlock = block.into(); + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + ); +} + +/// This test checks that a block that is valid from a gossip perspective is accepted when using `broadcast_validation=gossip`. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_gossip_partial_pass() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let chain_state_before = tester.harness.get_current_state(); + let slot = chain_state_before.slot() + 1; + + tester.harness.advance_slot(); + + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(chain_state_before, slot, |b| { + *b.state_root_mut() = Hash256::zero() + }) + .await; + + let blinded_block: SignedBlindedBeaconBlock = block.into(); + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .await; + assert!(response.is_err()); + + let error_response = response.unwrap_err(); + + assert_eq!(error_response.status(), Some(StatusCode::ACCEPTED)); +} + +// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=gossip`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_gossip_full_pass() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. 
+ let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBlindedBeaconBlock, _) = + tester.harness.make_blinded_block(state_a, slot_b).await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&block, validation_level) + .await; + + assert!(response.is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + +/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_consensus_invalid() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Consensus); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let chain_state_before = tester.harness.get_current_state(); + let slot = chain_state_before.slot() + 1; + + tester.harness.advance_slot(); + + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(chain_state_before, slot, |b| { + *b.state_root_mut() = Hash256::zero(); + *b.parent_root_mut() = Hash256::zero(); + }) + .await; + + let blinded_block: SignedBlindedBeaconBlock = block.into(); + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + ); +} + +/// This test checks that a block that is only valid from a gossip perspective is rejected when using `broadcast_validation=consensus`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_consensus_gossip() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Consensus); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero()) + .await; + + let blinded_block: SignedBlindedBeaconBlock = block.into(); + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()) + ); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=consensus`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_consensus_full_pass() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Consensus); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBlindedBeaconBlock, _) = + tester.harness.make_blinded_block(state_a, slot_b).await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&block, validation_level) + .await; + + assert!(response.is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + +/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus_and_equivocation`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_equivocation_invalid() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let chain_state_before = tester.harness.get_current_state(); + let slot = chain_state_before.slot() + 1; + + tester.harness.advance_slot(); + + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(chain_state_before, slot, |b| { + *b.state_root_mut() = Hash256::zero(); + *b.parent_root_mut() = Hash256::zero(); + }) + .await; + + let blinded_block: SignedBlindedBeaconBlock = block.into(); + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + ); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective is rejected when using `broadcast_validation=consensus_and_equivocation`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_equivocation_consensus_early_equivocation() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block_a, state_after_a): (SignedBlindedBeaconBlock, _) = tester + .harness + .make_blinded_block(state_a.clone(), slot_b) + .await; + let (block_b, state_after_b): (SignedBlindedBeaconBlock, _) = + tester.harness.make_blinded_block(state_a, slot_b).await; + + /* check for `make_blinded_block` curios */ + assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); + assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); + assert_ne!(block_a.state_root(), block_b.state_root()); + + /* submit `block_a` as valid */ + assert!(tester + .client + .post_beacon_blinded_blocks_v2(&block_a, validation_level) + .await + .is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block_a.canonical_root())); + + /* submit `block_b` which should induce equivocation */ + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&block_b, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Slashable".to_string()) + ); +} + +/// This test checks that a block that is only valid from a gossip perspective is rejected when using `broadcast_validation=consensus_and_equivocation`. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_equivocation_gossip() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero()) + .await; + + let blinded_block: SignedBlindedBeaconBlock = block.into(); + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()) + ); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective but that equivocates **late** is rejected when using `broadcast_validation=consensus_and_equivocation`. 
+/// +/// This test is unique in that we can't actually test the HTTP API directly, but instead have to hook into the `publish_blocks` code manually. This is in order to handle the late equivocation case. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_equivocation_consensus_late_equivocation() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + let test_logger = tester.harness.logger().clone(); + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block_a, state_after_a): (SignedBlindedBeaconBlock, _) = tester + .harness + .make_blinded_block(state_a.clone(), slot_b) + .await; + let (block_b, state_after_b): (SignedBlindedBeaconBlock, _) = + tester.harness.make_blinded_block(state_a, slot_b).await; + + /* check for `make_blinded_block` curios */ + assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); + assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); + assert_ne!(block_a.state_root(), block_b.state_root()); + + let unblinded_block_a = reconstruct_block( + tester.harness.chain.clone(), + block_a.state_root(), + block_a, + test_logger.clone(), + ) + .await + .unwrap(); + let unblinded_block_b = reconstruct_block( + tester.harness.chain.clone(), + block_b.clone().state_root(), + block_b.clone(), + test_logger.clone(), + ) + .await + .unwrap(); + + let 
inner_block_a = match unblinded_block_a { + ProvenancedBlock::Local(a, _) => a, + ProvenancedBlock::Builder(a, _) => a, + }; + let inner_block_b = match unblinded_block_b { + ProvenancedBlock::Local(b, _) => b, + ProvenancedBlock::Builder(b, _) => b, + }; + + let gossip_block_b = GossipVerifiedBlock::new(inner_block_b, &tester.harness.chain); + assert!(gossip_block_b.is_ok()); + let gossip_block_a = GossipVerifiedBlock::new(inner_block_a, &tester.harness.chain); + assert!(gossip_block_a.is_err()); + + let channel = tokio::sync::mpsc::unbounded_channel(); + + let publication_result: Result<(), Rejection> = publish_blinded_block( + block_b, + tester.harness.chain, + &channel.0, + test_logger, + validation_level.unwrap(), + ) + .await; + + assert!(publication_result.is_err()); + + let publication_error: Rejection = publication_result.unwrap_err(); + + assert!(publication_error.find::().is_some()); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective (and does not equivocate) is accepted when using `broadcast_validation=consensus_and_equivocation`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_equivocation_full_pass() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBlindedBeaconBlock, _) = + tester.harness.make_blinded_block(state_a, slot_b).await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&block, validation_level) + .await; + + assert!(response.is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} diff --git a/beacon_node/http_api/tests/main.rs b/beacon_node/http_api/tests/main.rs index f5916d850..e0636424e 100644 --- a/beacon_node/http_api/tests/main.rs +++ b/beacon_node/http_api/tests/main.rs @@ -1,5 +1,6 @@ #![cfg(not(debug_assertions))] // Tests are too slow in debug. +pub mod broadcast_validation_tests; pub mod fork_tests; pub mod interactive_tests; pub mod status_tests; diff --git a/beacon_node/http_api/tests/status_tests.rs b/beacon_node/http_api/tests/status_tests.rs index 04df2379b..664c5f2ea 100644 --- a/beacon_node/http_api/tests/status_tests.rs +++ b/beacon_node/http_api/tests/status_tests.rs @@ -3,6 +3,7 @@ use beacon_chain::{ test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, BlockError, }; +use eth2::StatusCode; use execution_layer::{PayloadStatusV1, PayloadStatusV1Status}; use http_api::test_utils::InteractiveTester; use types::{EthSpec, ExecPayload, ForkName, MinimalEthSpec, Slot}; @@ -149,3 +150,82 @@ async fn el_error_on_new_payload() { assert_eq!(api_response.is_optimistic, Some(false)); assert_eq!(api_response.is_syncing, false); } + +/// Check `node health` endpoint when the EL is offline. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn node_health_el_offline() { + let num_blocks = E::slots_per_epoch() / 2; + let num_validators = E::slots_per_epoch(); + let tester = post_merge_tester(num_blocks, num_validators).await; + let harness = &tester.harness; + let mock_el = harness.mock_execution_layer.as_ref().unwrap(); + + // EL offline + mock_el.server.set_syncing_response(Err("offline".into())); + mock_el.el.upcheck().await; + + let status = tester.client.get_node_health().await; + match status { + Ok(_) => { + panic!("should return 503 error status code"); + } + Err(e) => { + assert_eq!(e.status().unwrap(), 503); + } + } +} + +/// Check `node health` endpoint when the EL is online and synced. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn node_health_el_online_and_synced() { + let num_blocks = E::slots_per_epoch() / 2; + let num_validators = E::slots_per_epoch(); + let tester = post_merge_tester(num_blocks, num_validators).await; + let harness = &tester.harness; + let mock_el = harness.mock_execution_layer.as_ref().unwrap(); + + // EL synced + mock_el.server.set_syncing_response(Ok(false)); + mock_el.el.upcheck().await; + + let status = tester.client.get_node_health().await; + match status { + Ok(response) => { + assert_eq!(response, StatusCode::OK); + } + Err(_) => { + panic!("should return 200 status code"); + } + } +} + +/// Check `node health` endpoint when the EL is online but not synced. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn node_health_el_online_and_not_synced() { + let num_blocks = E::slots_per_epoch() / 2; + let num_validators = E::slots_per_epoch(); + let tester = post_merge_tester(num_blocks, num_validators).await; + let harness = &tester.harness; + let mock_el = harness.mock_execution_layer.as_ref().unwrap(); + + // EL not synced + harness.advance_slot(); + mock_el.server.all_payloads_syncing(true); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let status = tester.client.get_node_health().await; + match status { + Ok(response) => { + assert_eq!(response, StatusCode::PARTIAL_CONTENT); + } + Err(_) => { + panic!("should return 206 status code"); + } + } +} diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 491c55845..9d35c710a 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -8,7 +8,7 @@ use eth2::{ mixin::{RequestAccept, ResponseForkName, ResponseOptional}, reqwest::RequestBuilder, types::{BlockId as CoreBlockId, ForkChoiceNode, StateId as CoreStateId, *}, - BeaconNodeHttpClient, Error, StatusCode, Timeouts, + BeaconNodeHttpClient, Error, Timeouts, }; use execution_layer::test_utils::TestingBuilder; use execution_layer::test_utils::DEFAULT_BUILDER_THRESHOLD_WEI; @@ -160,7 +160,7 @@ impl ApiTester { // `make_block` adds random graffiti, so this will produce an alternate block let (reorg_block, _reorg_state) = harness - .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) + .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap() + 1) .await; let reorg_block = SignedBlockContents::from(reorg_block); @@ -1252,18 +1252,23 @@ impl ApiTester { } pub async fn test_post_beacon_blocks_invalid(mut self) -> Self { - let mut next_block = self.next_block.clone().deconstruct().0; - *next_block.message_mut().proposer_index_mut() += 1; - 
- assert!(self - .client - .post_beacon_blocks(&SignedBlockContents::from(next_block)) + let block = self + .harness + .make_block_with_modifier( + self.harness.get_current_state(), + self.harness.get_current_slot(), + |b| { + *b.state_root_mut() = Hash256::zero(); + }, + ) .await - .is_err()); + .0; + + assert!(self.client.post_beacon_blocks(&SignedBlockContents::from(block)).await.is_err()); assert!( self.network_rx.network_recv.recv().await.is_some(), - "invalid blocks should be sent to network" + "gossip valid blocks should be sent to network" ); self @@ -1761,9 +1766,15 @@ impl ApiTester { } pub async fn test_get_node_health(self) -> Self { - let status = self.client.get_node_health().await.unwrap(); - assert_eq!(status, StatusCode::OK); - + let status = self.client.get_node_health().await; + match status { + Ok(_) => { + panic!("should return 503 error status code"); + } + Err(e) => { + assert_eq!(e.status().unwrap(), 503); + } + } self } @@ -4142,7 +4153,7 @@ impl ApiTester { .unwrap(); let expected_reorg = EventKind::ChainReorg(SseChainReorg { - slot: self.next_block.signed_block().slot(), + slot: self.reorg_block.slot(), depth: 1, old_head_block: self.next_block.signed_block().canonical_root(), old_head_state: self.next_block.signed_block().state_root(), @@ -4156,6 +4167,8 @@ impl ApiTester { execution_optimistic: false, }); + self.harness.advance_slot(); + self.client .post_beacon_blocks(&self.reorg_block) .await diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index c2e43e921..e051b1330 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -836,6 +836,24 @@ impl std::convert::From> for WorkEvent { } } +pub struct BeaconProcessorSend(pub mpsc::Sender>); + +impl BeaconProcessorSend { + pub fn try_send(&self, message: WorkEvent) -> Result<(), Box>>> { + let work_type = message.work_type(); + match self.0.try_send(message) { + Ok(res) 
=> Ok(res), + Err(e) => { + metrics::inc_counter_vec( + &metrics::BEACON_PROCESSOR_SEND_ERROR_PER_WORK_TYPE, + &[work_type], + ); + Err(Box::new(e)) + } + } + } +} + /// A consensus message (or multiple) from the network that requires processing. #[derive(Derivative)] #[derivative(Debug(bound = "T: BeaconChainTypes"))] diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 77418b23c..bd663be96 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -929,6 +929,20 @@ impl Worker { verified_block } + Err(e @ BlockError::Slashable) => { + warn!( + self.log, + "Received equivocating block from peer"; + "error" => ?e + ); + /* punish peer for submitting an equivocation, but not too harshly as honest peers may conceivably forward equivocating blocks to us from time to time */ + self.gossip_penalize_peer( + peer_id, + PeerAction::MidToleranceError, + "gossip_block_mid", + ); + return None; + } Err(BlockError::ParentUnknown(block)) => { debug!( self.log, @@ -950,7 +964,6 @@ impl Worker { Err(e @ BlockError::FutureSlot { .. }) | Err(e @ BlockError::WouldRevertFinalizedSlot { .. }) | Err(e @ BlockError::BlockIsAlreadyKnown) - | Err(e @ BlockError::RepeatProposal { .. }) | Err(e @ BlockError::NotFinalizedDescendant { .. }) => { debug!(self.log, "Could not verify block for gossip. 
Ignoring the block"; "error" => %e); @@ -1103,7 +1116,12 @@ impl Worker { let result = self .chain - .process_block(block_root, verified_block, NotifyExecutionLayer::Yes) + .process_block( + block_root, + verified_block, + NotifyExecutionLayer::Yes, + || Ok(()), + ) .await; match &result { diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 45c3e5168..56630c3bc 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -103,33 +103,21 @@ impl Worker { }); // Checks if a block from this proposer is already known. - let proposal_already_known = || { + let block_equivocates = || { match self .chain .observed_block_producers .read() - .proposer_has_been_observed(block.message()) + .proposer_has_been_observed(block.message(), block.canonical_root()) { - Ok(is_observed) => is_observed, - // Both of these blocks will be rejected, so reject them now rather + Ok(seen_status) => seen_status.is_slashable(), + //Both of these blocks will be rejected, so reject them now rather // than re-queuing them. Err(ObserveError::FinalizedBlock { .. }) | Err(ObserveError::ValidatorIndexTooHigh { .. }) => false, } }; - // Returns `true` if the block is already known to fork choice. Notably, - // this will return `false` for blocks that we've already imported but - // ancestors of the finalized checkpoint. That should not be an issue - // for our use here since finalized blocks will always be late and won't - // be requeued anyway. - let block_is_already_known = || { - self.chain - .canonical_head - .fork_choice_read_lock() - .contains_block(&block_root) - }; - // If we've already seen a block from this proposer *and* the block // arrived before the attestation deadline, requeue it to ensure it is // imported late enough that it won't receive a proposer boost. 
@@ -137,7 +125,7 @@ impl Worker { // Don't requeue blocks if they're already known to fork choice, just // push them through to block processing so they can be handled through // the normal channels. - if !block_is_late && proposal_already_known() && !block_is_already_known() { + if !block_is_late && block_equivocates() { debug!( self.log, "Delaying processing of duplicate RPC block"; @@ -171,7 +159,7 @@ impl Worker { let result = self .chain - .process_block(block_root, block, NotifyExecutionLayer::Yes) + .process_block(block_root, block, NotifyExecutionLayer::Yes, || Ok(())) .await; metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 20a530ce8..62e59d14e 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -301,6 +301,12 @@ lazy_static! { "Gossipsub light_client_optimistic_update errors per error type", &["type"] ); + pub static ref BEACON_PROCESSOR_SEND_ERROR_PER_WORK_TYPE: Result = + try_create_int_counter_vec( + "beacon_processor_send_error_per_work_type", + "Total number of beacon processor send error per work type", + &["type"] + ); /* diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index e9a4d1e3c..aeecb507d 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -6,7 +6,8 @@ #![allow(clippy::unit_arg)] use crate::beacon_processor::{ - BeaconProcessor, InvalidBlockStorage, WorkEvent as BeaconWorkEvent, MAX_WORK_EVENT_QUEUE_LEN, + BeaconProcessor, BeaconProcessorSend, InvalidBlockStorage, WorkEvent as BeaconWorkEvent, + MAX_WORK_EVENT_QUEUE_LEN, }; use crate::error; use crate::service::{NetworkMessage, RequestId}; @@ -14,11 +15,14 @@ use crate::status::status_message; use crate::sync::manager::RequestId as SyncId; use crate::sync::SyncMessage; use beacon_chain::{BeaconChain, BeaconChainTypes}; -use futures::{future, StreamExt}; - -use 
lighthouse_network::{rpc::*, PubsubMessage}; -use lighthouse_network::{MessageId, NetworkGlobals, PeerId, PeerRequestId, Request, Response}; -use slog::{debug, error, o, trace, warn}; +use futures::prelude::*; +use lighthouse_network::rpc::*; +use lighthouse_network::{ + MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, Request, Response, +}; +use logging::TimeLatch; +use slog::{debug, o, trace}; +use slog::{error, warn}; use std::cmp; use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; @@ -37,9 +41,11 @@ pub struct Router { /// A network context to return and handle RPC requests. network: HandlerNetworkContext, /// A multi-threaded, non-blocking processor for applying messages to the beacon chain. - beacon_processor_send: mpsc::Sender>, + beacon_processor_send: BeaconProcessorSend, /// The `Router` logger. log: slog::Logger, + /// Provides de-bounce functionality for logging. + logger_debounce: TimeLatch, } /// Types of messages the router can receive. 
@@ -99,7 +105,7 @@ impl Router { beacon_chain.clone(), network_globals.clone(), network_send.clone(), - beacon_processor_send.clone(), + BeaconProcessorSend(beacon_processor_send.clone()), sync_logger, ); @@ -123,8 +129,9 @@ impl Router { chain: beacon_chain, sync_send, network: HandlerNetworkContext::new(network_send, log.clone()), - beacon_processor_send, + beacon_processor_send: BeaconProcessorSend(beacon_processor_send), log: message_handler_log, + logger_debounce: TimeLatch::default(), }; // spawn handler task and move the message handler instance into the spawned thread @@ -570,12 +577,15 @@ impl Router { self.beacon_processor_send .try_send(work) .unwrap_or_else(|e| { - let work_type = match &e { + let work_type = match &*e { mpsc::error::TrySendError::Closed(work) | mpsc::error::TrySendError::Full(work) => work.work_type(), }; - error!(&self.log, "Unable to send message to the beacon processor"; - "error" => %e, "type" => work_type) + + if self.logger_debounce.elapsed() { + error!(&self.log, "Unable to send message to the beacon processor"; + "error" => %e, "type" => work_type) + } }) } } diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index ea6fee0af..bf7ac2776 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -1,6 +1,7 @@ #![cfg(feature = "spec-minimal")] use std::sync::Arc; +use crate::beacon_processor::BeaconProcessorSend; use crate::service::RequestId; use crate::sync::manager::RequestId as SyncId; use crate::NetworkMessage; @@ -80,7 +81,7 @@ impl TestRig { SyncNetworkContext::new( network_tx, globals, - beacon_processor_tx, + BeaconProcessorSend(beacon_processor_tx), chain, log.new(slog::o!("component" => "network_context")), ) diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index be2ecf850..2965de8e4 100644 --- a/beacon_node/network/src/sync/manager.rs +++ 
b/beacon_node/network/src/sync/manager.rs @@ -38,7 +38,7 @@ use super::block_lookups::{BlockLookups, PeerShouldHave}; use super::network_context::{BlockOrBlob, SyncNetworkContext}; use super::peer_sync_info::{remote_sync_type, PeerSyncType}; use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; -use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent}; +use crate::beacon_processor::{BeaconProcessorSend, ChainSegmentProcessId}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; use crate::sync::block_lookups::delayed_lookup; @@ -238,7 +238,7 @@ pub fn spawn( beacon_chain: Arc>, network_globals: Arc>, network_send: mpsc::UnboundedSender>, - beacon_processor_send: mpsc::Sender>, + beacon_processor_send: BeaconProcessorSend, log: slog::Logger, ) -> mpsc::UnboundedSender> { assert!( diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 5f5c1bcf3..7cdc9a29b 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -4,7 +4,7 @@ use super::block_sidecar_coupling::BlocksAndBlobsRequestInfo; use super::manager::{Id, RequestId as SyncRequestId}; use super::range_sync::{BatchId, ByRangeRequestType, ChainId}; -use crate::beacon_processor::WorkEvent; +use crate::beacon_processor::BeaconProcessorSend; use crate::service::{NetworkMessage, RequestId}; use crate::status::ToStatusMessage; use crate::sync::block_lookups::{BlobRequestId, BlockRequestId}; @@ -60,7 +60,7 @@ pub struct SyncNetworkContext { execution_engine_state: EngineState, /// Channel to send work to the beacon processor. 
- beacon_processor_send: mpsc::Sender>, + beacon_processor_send: BeaconProcessorSend, pub chain: Arc>, @@ -90,7 +90,7 @@ impl SyncNetworkContext { pub fn new( network_send: mpsc::UnboundedSender>, network_globals: Arc>, - beacon_processor_send: mpsc::Sender>, + beacon_processor_send: BeaconProcessorSend, chain: Arc>, log: slog::Logger, ) -> Self { @@ -564,12 +564,12 @@ impl SyncNetworkContext { }) } - pub fn processor_channel_if_enabled(&self) -> Option<&mpsc::Sender>> { + pub fn processor_channel_if_enabled(&self) -> Option<&BeaconProcessorSend> { self.is_execution_engine_online() .then_some(&self.beacon_processor_send) } - pub fn processor_channel(&self) -> &mpsc::Sender> { + pub fn processor_channel(&self) -> &BeaconProcessorSend { &self.beacon_processor_send } diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index b0b99e3ca..186f69264 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -381,7 +381,7 @@ where mod tests { use super::*; - use crate::beacon_processor::WorkEvent as BeaconWorkEvent; + use crate::beacon_processor::{BeaconProcessorSend, WorkEvent as BeaconWorkEvent}; use crate::service::RequestId; use crate::NetworkMessage; use beacon_chain::{ @@ -610,7 +610,7 @@ mod tests { let cx = SyncNetworkContext::new( network_tx, globals.clone(), - beacon_processor_tx, + BeaconProcessorSend(beacon_processor_tx), chain, log.new(o!("component" => "network_context")), ); diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 41f033607..8c8ec666d 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -1,5 +1,6 @@ use clap::{App, Arg}; use strum::VariantNames; +use types::ProgressiveBalancesMode; pub fn cli_app<'a, 'b>() -> App<'a, 'b> { App::new("beacon_node") @@ -1159,4 +1160,17 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { developers. 
This directory is not pruned, users should be careful to avoid \ filling up their disks.") ) + .arg( + Arg::with_name("progressive-balances") + .long("progressive-balances") + .value_name("MODE") + .help("Options to enable or disable the progressive balances cache for \ + unrealized FFG progression calculation. The default `checked` mode compares \ + the progressive balances from the cache against results from the existing \ + method. If there is a mismatch, it falls back to the existing method. The \ + optimized mode (`fast`) is faster but is still experimental, and is \ + not recommended for mainnet usage at this time.") + .takes_value(true) + .possible_values(ProgressiveBalancesMode::VARIANTS) + ) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index b15536031..8d2cb3822 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -837,6 +837,12 @@ pub fn get_config( client_config.network.invalid_block_storage = Some(path); } + if let Some(progressive_balances_mode) = + clap_utils::parse_optional(cli_args, "progressive-balances")? + { + client_config.chain.progressive_balances_mode = progressive_balances_mode; + } + Ok(client_config) } diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 5f864c44a..1fb5751a0 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -395,6 +395,7 @@ macro_rules! impl_try_into_beacon_state { // Caching total_active_balance: <_>::default(), + progressive_balances_cache: <_>::default(), committee_caches: <_>::default(), pubkey_cache: <_>::default(), exit_cache: <_>::default(), diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index cd50babdb..bac5d3cc8 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -63,7 +63,7 @@ where .load_cold_state_by_slot(lower_limit_slot)? 
.ok_or(HotColdDBError::MissingLowerLimitState(lower_limit_slot))?; - state.build_all_caches(&self.spec)?; + state.build_caches(&self.spec)?; process_results(block_root_iter, |iter| -> Result<(), Error> { let mut io_batch = vec![]; diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 5c2dc9b4f..c3dd3bd19 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "4.2.0" +version = "4.3.0" authors = ["Sigma Prime "] edition = "2021" diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 32d1bc2be..ba5f6016b 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -322,6 +322,26 @@ impl BeaconNodeHttpClient { ok_or_error(response).await } + /// Generic POST function supporting arbitrary responses and timeouts. + async fn post_generic_with_consensus_version( + &self, + url: U, + body: &T, + timeout: Option, + fork: ForkName, + ) -> Result { + let mut builder = self.client.post(url); + if let Some(timeout) = timeout { + builder = builder.timeout(timeout); + } + let response = builder + .header(CONSENSUS_VERSION_HEADER, fork.to_string()) + .json(body) + .send() + .await?; + ok_or_error(response).await + } + /// `GET beacon/genesis` /// /// ## Errors @@ -654,6 +674,76 @@ impl BeaconNodeHttpClient { Ok(()) } + pub fn post_beacon_blocks_v2_path( + &self, + validation_level: Option, + ) -> Result { + let mut path = self.eth_path(V2)?; + path.path_segments_mut() + .map_err(|_| Error::InvalidUrl(self.server.clone()))? + .extend(&["beacon", "blocks"]); + + path.set_query( + validation_level + .map(|v| format!("broadcast_validation={}", v)) + .as_deref(), + ); + + Ok(path) + } + + pub fn post_beacon_blinded_blocks_v2_path( + &self, + validation_level: Option, + ) -> Result { + let mut path = self.eth_path(V2)?; + path.path_segments_mut() + .map_err(|_| Error::InvalidUrl(self.server.clone()))? 
+ .extend(&["beacon", "blinded_blocks"]); + + path.set_query( + validation_level + .map(|v| format!("broadcast_validation={}", v)) + .as_deref(), + ); + + Ok(path) + } + + /// `POST v2/beacon/blocks` + pub async fn post_beacon_blocks_v2>( + &self, + block: &SignedBeaconBlock, + validation_level: Option, + ) -> Result<(), Error> { + self.post_generic_with_consensus_version( + self.post_beacon_blocks_v2_path(validation_level)?, + block, + Some(self.timeouts.proposal), + block.message().body().fork_name(), + ) + .await?; + + Ok(()) + } + + /// `POST v2/beacon/blinded_blocks` + pub async fn post_beacon_blinded_blocks_v2( + &self, + block: &SignedBlindedBeaconBlock, + validation_level: Option, + ) -> Result<(), Error> { + self.post_generic_with_consensus_version( + self.post_beacon_blinded_blocks_v2_path(validation_level)?, + block, + Some(self.timeouts.proposal), + block.message().body().fork_name(), + ) + .await?; + + Ok(()) + } + /// Path for `v2/beacon/blocks` pub fn get_beacon_blocks_path(&self, block_id: BlockId) -> Result { let mut path = self.eth_path(V2)?; diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 6e22f1b0c..b7fc67304 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -7,7 +7,7 @@ use mediatype::{names, MediaType, MediaTypeList}; use serde::{Deserialize, Serialize}; use ssz_derive::Encode; use std::convert::TryFrom; -use std::fmt; +use std::fmt::{self, Display}; use std::str::{from_utf8, FromStr}; use std::time::Duration; pub use types::*; @@ -1261,6 +1261,50 @@ pub struct ForkChoiceNode { pub execution_block_hash: Option, } +#[derive(Copy, Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum BroadcastValidation { + Gossip, + Consensus, + ConsensusAndEquivocation, +} + +impl Default for BroadcastValidation { + fn default() -> Self { + Self::Gossip + } +} + +impl Display for BroadcastValidation { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match 
self { + Self::Gossip => write!(f, "gossip"), + Self::Consensus => write!(f, "consensus"), + Self::ConsensusAndEquivocation => write!(f, "consensus_and_equivocation"), + } + } +} + +impl FromStr for BroadcastValidation { + type Err = &'static str; + + fn from_str(s: &str) -> Result { + match s { + "gossip" => Ok(Self::Gossip), + "consensus" => Ok(Self::Consensus), + "consensus_and_equivocation" => Ok(Self::ConsensusAndEquivocation), + _ => Err("Invalid broadcast validation level"), + } + } +} + +#[derive(Default, Deserialize, Serialize)] +#[serde(rename_all = "snake_case")] +pub struct BroadcastValidationQuery { + #[serde(default)] + pub broadcast_validation: BroadcastValidation, +} + #[cfg(test)] mod tests { use super::*; diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml index 196629cb8..428a082cc 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml @@ -1,6 +1,8 @@ # Lighthouse Team (Sigma Prime) -- enr:-Jq4QItoFUuug_n_qbYbU0OY04-np2wT8rUCauOOXNi0H3BWbDj-zbfZb7otA7jZ6flbBpx1LNZK2TDebZ9dEKx84LYBhGV0aDKQtTA_KgEAAAD__________4JpZIJ2NIJpcISsaa0ZiXNlY3AyNTZrMaEDHAD2JKYevx89W0CcFJFiskdcEzkH_Wdv9iW42qLK79ODdWRwgiMo -- enr:-Jq4QN_YBsUOqQsty1OGvYv48PMaiEt1AzGD1NkYQHaxZoTyVGqMYXg0K9c0LPNWC9pkXmggApp8nygYLsQwScwAgfgBhGV0aDKQtTA_KgEAAAD__________4JpZIJ2NIJpcISLosQxiXNlY3AyNTZrMaEDBJj7_dLFACaxBfaI8KZTh_SSJUjhyAyfshimvSqo22WDdWRwgiMo +- enr:-Le4QPUXJS2BTORXxyx2Ia-9ae4YqA_JWX3ssj4E_J-3z1A-HmFGrU8BpvpqhNabayXeOZ2Nq_sbeDgtzMJpLLnXFgAChGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISsaa0Zg2lwNpAkAIkHAAAAAPA8kv_-awoTiXNlY3AyNTZrMaEDHAD2JKYevx89W0CcFJFiskdcEzkH_Wdv9iW42qLK79ODdWRwgiMohHVkcDaCI4I +- 
enr:-Le4QLHZDSvkLfqgEo8IWGG96h6mxwe_PsggC20CL3neLBjfXLGAQFOPSltZ7oP6ol54OvaNqO02Rnvb8YmDR274uq8ChGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISLosQxg2lwNpAqAX4AAAAAAPA8kv_-ax65iXNlY3AyNTZrMaEDBJj7_dLFACaxBfaI8KZTh_SSJUjhyAyfshimvSqo22WDdWRwgiMohHVkcDaCI4I +- enr:-Le4QH6LQrusDbAHPjU_HcKOuMeXfdEB5NJyXgHWFadfHgiySqeDyusQMvfphdYWOzuSZO9Uq2AMRJR5O4ip7OvVma8BhGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISLY9ncg2lwNpAkAh8AgQIBAAAAAAAAAAmXiXNlY3AyNTZrMaECDYCZTZEksF-kmgPholqgVt8IXr-8L7Nu7YrZ7HUpgxmDdWRwgiMohHVkcDaCI4I +- enr:-Le4QIqLuWybHNONr933Lk0dcMmAB5WgvGKRyDihy1wHDIVlNuuztX62W51voT4I8qD34GcTEOTmag1bcdZ_8aaT4NUBhGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISLY04ng2lwNpAkAh8AgAIBAAAAAAAAAA-fiXNlY3AyNTZrMaEDscnRV6n1m-D9ID5UsURk0jsoKNXt1TIrj8uKOGW6iluDdWRwgiMohHVkcDaCI4I # EF Team - enr:-Ku4QHqVeJ8PPICcWk1vSn_XcSkjOkNiTg6Fmii5j6vUQgvzMc9L1goFnLKgXqBJspJjIsB91LTOleFmyWWrFVATGngBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAMRHkWJc2VjcDI1NmsxoQKLVXFOhp2uX6jeT0DvvDpPcU8FWMjQdR4wMuORMhpX24N1ZHCCIyg - enr:-Ku4QG-2_Md3sZIAUebGYT6g0SMskIml77l6yR-M_JXc-UdNHCmHQeOiMLbylPejyJsdAPsTHJyjJB2sYGDLe0dn8uYBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhBLY-NyJc2VjcDI1NmsxoQORcM6e19T1T9gi7jxEZjk_sjVLGFscUNqAY9obgZaxbIN1ZHCCIyg @@ -15,4 +17,4 @@ - enr:-Ku4QPp9z1W4tAO8Ber_NQierYaOStqhDqQdOPY3bB3jDgkjcbk6YrEnVYIiCBbTxuar3CzS528d2iE7TdJsrL-dEKoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMw5fqqkw2hHC4F5HZZDPsNmPdB1Gi8JPQK7pRc9XHh-oN1ZHCCKvg # Nimbus team - enr:-LK4QA8FfhaAjlb_BXsXxSfiysR7R52Nhi9JBt4F8SPssu8hdE1BXQQEtVDC3qStCW60LSO7hEsVHv5zm8_6Vnjhcn0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAN4aBKJc2VjcDI1NmsxoQJerDhsJ-KxZ8sHySMOCmTO6sHM3iCFQ6VMvLTe948MyYN0Y3CCI4yDdWRwgiOM -- 
enr:-LK4QKWrXTpV9T78hNG6s8AM6IO4XH9kFT91uZtFg1GcsJ6dKovDOr1jtAAFPnS2lvNltkOGA9k29BUN7lFh_sjuc9QBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhANAdd-Jc2VjcDI1NmsxoQLQa6ai7y9PMN5hpLe5HmiJSlYzMuzP7ZhwRiwHvqNXdoN0Y3CCI4yDdWRwgiOM +- enr:-LK4QKWrXTpV9T78hNG6s8AM6IO4XH9kFT91uZtFg1GcsJ6dKovDOr1jtAAFPnS2lvNltkOGA9k29BUN7lFh_sjuc9QBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhANAdd-Jc2VjcDI1NmsxoQLQa6ai7y9PMN5hpLe5HmiJSlYzMuzP7ZhwRiwHvqNXdoN0Y3CCI4yDdWRwgiOM \ No newline at end of file diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 3f2745bf9..e874432fb 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v4.2.0-", - fallback = "Lighthouse/v4.2.0" + prefix = "Lighthouse/v4.3.0-", + fallback = "Lighthouse/v4.3.0" ); /// Returns `VERSION`, but with platform information appended to the end. 
diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 1026f996f..8ed312923 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1,10 +1,15 @@ use crate::{ForkChoiceStore, InvalidationOperation}; +use per_epoch_processing::altair::participation_cache::Error as ParticipationCacheError; use proto_array::{ Block as ProtoBlock, DisallowedReOrgOffsets, ExecutionStatus, ProposerHeadError, ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, }; -use slog::{crit, debug, warn, Logger}; +use slog::{crit, debug, error, warn, Logger}; use ssz_derive::{Decode, Encode}; +use state_processing::per_epoch_processing::altair::ParticipationCache; +use state_processing::per_epoch_processing::{ + weigh_justification_and_finalization, JustificationAndFinalizationState, +}; use state_processing::{ per_block_processing::errors::AttesterSlashingValidationError, per_epoch_processing, }; @@ -18,6 +23,7 @@ use types::{ EthSpec, ExecPayload, ExecutionBlockHash, Hash256, IndexedAttestation, RelativeEpoch, SignedBeaconBlock, Slot, }; +use types::{ProgressiveBalancesCache, ProgressiveBalancesMode}; #[derive(Debug)] pub enum Error { @@ -72,7 +78,9 @@ pub enum Error { }, UnrealizedVoteProcessing(state_processing::EpochProcessingError), ParticipationCacheBuild(BeaconStateError), + ParticipationCacheError(ParticipationCacheError), ValidatorStatuses(BeaconStateError), + ProgressiveBalancesCacheCheckFailed(String), } impl From for Error { @@ -93,6 +101,18 @@ impl From for Error { } } +impl From for Error { + fn from(e: BeaconStateError) -> Self { + Error::BeaconStateError(e) + } +} + +impl From for Error { + fn from(e: ParticipationCacheError) -> Self { + Error::ParticipationCacheError(e) + } +} + #[derive(Debug, Clone, Copy)] /// Controls how fork choice should behave when restoring from a persisted fork choice. 
pub enum ResetPayloadStatuses { @@ -645,7 +665,9 @@ where block_delay: Duration, state: &BeaconState, payload_verification_status: PayloadVerificationStatus, + progressive_balances_mode: ProgressiveBalancesMode, spec: &ChainSpec, + log: &Logger, ) -> Result<(), Error> { // If this block has already been processed we do not need to reprocess it. // We check this immediately in case re-processing the block mutates some property of the @@ -739,46 +761,84 @@ where parent_justified.epoch == block_epoch && parent_finalized.epoch + 1 >= block_epoch }); - let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = - if let Some((parent_justified, parent_finalized)) = parent_checkpoints { - (parent_justified, parent_finalized) - } else { - let justification_and_finalization_state = match block { - // TODO(deneb): Ensure that the final specification - // does not substantially modify per epoch processing. - BeaconBlockRef::Deneb(_) - | BeaconBlockRef::Capella(_) - | BeaconBlockRef::Merge(_) - | BeaconBlockRef::Altair(_) => { - let participation_cache = - per_epoch_processing::altair::ParticipationCache::new(state, spec) - .map_err(Error::ParticipationCacheBuild)?; + let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = if let Some(( + parent_justified, + parent_finalized, + )) = + parent_checkpoints + { + (parent_justified, parent_finalized) + } else { + let justification_and_finalization_state = match block { + BeaconBlockRef::Deneb(_)| BeaconBlockRef::Capella(_) + | BeaconBlockRef::Merge(_) + | BeaconBlockRef::Altair(_) => match progressive_balances_mode { + ProgressiveBalancesMode::Disabled => { + let participation_cache = ParticipationCache::new(state, spec) + .map_err(Error::ParticipationCacheBuild)?; per_epoch_processing::altair::process_justification_and_finalization( state, &participation_cache, )? 
} - BeaconBlockRef::Base(_) => { - let mut validator_statuses = - per_epoch_processing::base::ValidatorStatuses::new(state, spec) - .map_err(Error::ValidatorStatuses)?; - validator_statuses - .process_attestations(state) - .map_err(Error::ValidatorStatuses)?; - per_epoch_processing::base::process_justification_and_finalization( - state, - &validator_statuses.total_balances, - spec, - )? - } - }; + ProgressiveBalancesMode::Fast + | ProgressiveBalancesMode::Checked + | ProgressiveBalancesMode::Strict => { + let maybe_participation_cache = progressive_balances_mode + .perform_comparative_checks() + .then(|| { + ParticipationCache::new(state, spec) + .map_err(Error::ParticipationCacheBuild) + }) + .transpose()?; - ( - justification_and_finalization_state.current_justified_checkpoint(), - justification_and_finalization_state.finalized_checkpoint(), - ) + process_justification_and_finalization_from_progressive_cache::( + state, + maybe_participation_cache.as_ref(), + ) + .or_else(|e| { + if progressive_balances_mode != ProgressiveBalancesMode::Strict { + error!( + log, + "Processing with progressive balances cache failed"; + "info" => "falling back to the non-optimized processing method", + "error" => ?e, + ); + let participation_cache = maybe_participation_cache + .map(Ok) + .unwrap_or_else(|| ParticipationCache::new(state, spec)) + .map_err(Error::ParticipationCacheBuild)?; + per_epoch_processing::altair::process_justification_and_finalization( + state, + &participation_cache, + ).map_err(Error::from) + } else { + Err(e) + } + })? + } + }, + BeaconBlockRef::Base(_) => { + let mut validator_statuses = + per_epoch_processing::base::ValidatorStatuses::new(state, spec) + .map_err(Error::ValidatorStatuses)?; + validator_statuses + .process_attestations(state) + .map_err(Error::ValidatorStatuses)?; + per_epoch_processing::base::process_justification_and_finalization( + state, + &validator_statuses.total_balances, + spec, + )? 
+ } }; + ( + justification_and_finalization_state.current_justified_checkpoint(), + justification_and_finalization_state.finalized_checkpoint(), + ) + }; + // Update best known unrealized justified & finalized checkpoints if unrealized_justified_checkpoint.epoch > self.fc_store.unrealized_justified_checkpoint().epoch @@ -1504,6 +1564,92 @@ where } } +/// Process justification and finalization using progressive cache. Also performs a comparative +/// check against the `ParticipationCache` if it is supplied. +/// +/// Returns an error if the cache is not initialized or if there is a mismatch on the comparative check. +fn process_justification_and_finalization_from_progressive_cache( + state: &BeaconState, + maybe_participation_cache: Option<&ParticipationCache>, +) -> Result, Error> +where + E: EthSpec, + T: ForkChoiceStore, +{ + let justification_and_finalization_state = JustificationAndFinalizationState::new(state); + if state.current_epoch() <= E::genesis_epoch() + 1 { + return Ok(justification_and_finalization_state); + } + + // Load cached balances + let progressive_balances_cache: &ProgressiveBalancesCache = state.progressive_balances_cache(); + let previous_target_balance = + progressive_balances_cache.previous_epoch_target_attesting_balance()?; + let current_target_balance = + progressive_balances_cache.current_epoch_target_attesting_balance()?; + let total_active_balance = state.get_total_active_balance()?; + + if let Some(participation_cache) = maybe_participation_cache { + check_progressive_balances::( + state, + participation_cache, + previous_target_balance, + current_target_balance, + total_active_balance, + )?; + } + + weigh_justification_and_finalization( + justification_and_finalization_state, + total_active_balance, + previous_target_balance, + current_target_balance, + ) + .map_err(Error::from) +} + +/// Perform comparative checks against `ParticipationCache`, will return error if there's a mismatch. 
+fn check_progressive_balances( + state: &BeaconState, + participation_cache: &ParticipationCache, + cached_previous_target_balance: u64, + cached_current_target_balance: u64, + cached_total_active_balance: u64, +) -> Result<(), Error> +where + E: EthSpec, + T: ForkChoiceStore, +{ + let slot = state.slot(); + let epoch = state.current_epoch(); + + // Check previous epoch target balances + let previous_target_balance = participation_cache.previous_epoch_target_attesting_balance()?; + if previous_target_balance != cached_previous_target_balance { + return Err(Error::ProgressiveBalancesCacheCheckFailed( + format!("Previous epoch target attesting balance mismatch, slot: {}, epoch: {}, actual: {}, cached: {}", slot, epoch, previous_target_balance, cached_previous_target_balance) + )); + } + + // Check current epoch target balances + let current_target_balance = participation_cache.current_epoch_target_attesting_balance()?; + if current_target_balance != cached_current_target_balance { + return Err(Error::ProgressiveBalancesCacheCheckFailed( + format!("Current epoch target attesting balance mismatch, slot: {}, epoch: {}, actual: {}, cached: {}", slot, epoch, current_target_balance, cached_current_target_balance) + )); + } + + // Check current epoch total balances + let total_active_balance = participation_cache.current_epoch_total_active_balance(); + if total_active_balance != cached_total_active_balance { + return Err(Error::ProgressiveBalancesCacheCheckFailed( + format!("Current epoch total active balance mismatch, slot: {}, epoch: {}, actual: {}, cached: {}", slot, epoch, total_active_balance, cached_total_active_balance) + )); + } + + Ok(()) +} + /// Helper struct that is used to encode/decode the state of the `ForkChoice` as SSZ bytes. /// /// This is used when persisting the state of the fork choice to disk. 
diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 9062cb550..06fad8727 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -17,12 +17,13 @@ use fork_choice::{ use store::MemoryStore; use types::{ test_utils::generate_deterministic_keypair, BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, - Epoch, EthSpec, Hash256, IndexedAttestation, MainnetEthSpec, SignedBeaconBlock, Slot, SubnetId, + Epoch, EthSpec, ForkName, Hash256, IndexedAttestation, MainnetEthSpec, ProgressiveBalancesMode, + RelativeEpoch, SignedBeaconBlock, Slot, SubnetId, }; pub type E = MainnetEthSpec; -pub const VALIDATOR_COUNT: usize = 32; +pub const VALIDATOR_COUNT: usize = 64; /// Defines some delay between when an attestation is created and when it is mutated. pub enum MutationDelay { @@ -68,6 +69,24 @@ impl ForkChoiceTest { Self { harness } } + /// Creates a new tester with the specified `ProgressiveBalancesMode` and genesis from latest fork. + fn new_with_progressive_balances_mode(mode: ProgressiveBalancesMode) -> ForkChoiceTest { + // genesis with latest fork (at least altair required to test the cache) + let spec = ForkName::latest().make_genesis_spec(ChainSpec::default()); + let harness = BeaconChainHarness::builder(MainnetEthSpec) + .spec(spec) + .chain_config(ChainConfig { + progressive_balances_mode: mode, + ..ChainConfig::default() + }) + .deterministic_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + Self { harness } + } + /// Get a value from the `ForkChoice` instantiation. fn get(&self, func: T) -> U where @@ -212,6 +231,39 @@ impl ForkChoiceTest { self } + /// Slash a validator from the previous epoch committee. 
+ pub async fn add_previous_epoch_attester_slashing(self) -> Self { + let state = self.harness.get_current_state(); + let previous_epoch_shuffling = state.get_shuffling(RelativeEpoch::Previous).unwrap(); + let validator_indices = previous_epoch_shuffling + .iter() + .map(|idx| *idx as u64) + .take(1) + .collect(); + + self.harness + .add_attester_slashing(validator_indices) + .unwrap(); + + self + } + + /// Slash the proposer of a block in the previous epoch. + pub async fn add_previous_epoch_proposer_slashing(self, slots_per_epoch: u64) -> Self { + let previous_epoch_slot = self.harness.get_current_slot() - slots_per_epoch; + let previous_epoch_block = self + .harness + .chain + .block_at_slot(previous_epoch_slot, WhenSlotSkipped::None) + .unwrap() + .unwrap(); + let proposer_index: u64 = previous_epoch_block.message().proposer_index(); + + self.harness.add_proposer_slashing(proposer_index).unwrap(); + + self + } + /// Apply `count` blocks to the chain (without attestations). pub async fn apply_blocks_without_new_attestations(self, count: usize) -> Self { self.harness.advance_slot(); @@ -286,7 +338,9 @@ impl ForkChoiceTest { Duration::from_secs(0), &state, PayloadVerificationStatus::Verified, + self.harness.chain.config.progressive_balances_mode, &self.harness.chain.spec, + self.harness.logger(), ) .unwrap(); self @@ -328,7 +382,9 @@ impl ForkChoiceTest { Duration::from_secs(0), &state, PayloadVerificationStatus::Verified, + self.harness.chain.config.progressive_balances_mode, &self.harness.chain.spec, + self.harness.logger(), ) .err() .expect("on_block did not return an error"); @@ -1287,3 +1343,49 @@ async fn weak_subjectivity_check_epoch_boundary_is_skip_slot_failure() { .assert_finalized_epoch_is_less_than(checkpoint.epoch) .assert_shutdown_signal_sent(); } + +/// Checks that `ProgressiveBalancesCache` is updated correctly after an attester slashing event, +/// where the slashed validator is a target attester in previous / current epoch. 
+#[tokio::test] +async fn progressive_balances_cache_attester_slashing() { + ForkChoiceTest::new_with_progressive_balances_mode(ProgressiveBalancesMode::Strict) + // first two epochs + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await + .unwrap() + .add_previous_epoch_attester_slashing() + .await + // expect fork choice to import blocks successfully after a previous epoch attester is + // slashed, i.e. the slashed attester's balance is correctly excluded from + // the previous epoch total balance in `ProgressiveBalancesCache`. + .apply_blocks(1) + .await + // expect fork choice to import another epoch of blocks successfully - the slashed + // attester's balance should be excluded from the current epoch total balance in + // `ProgressiveBalancesCache` as well. + .apply_blocks(MainnetEthSpec::slots_per_epoch() as usize) + .await; +} + +/// Checks that `ProgressiveBalancesCache` is updated correctly after a proposer slashing event, +/// where the slashed validator is a target attester in previous / current epoch. +#[tokio::test] +async fn progressive_balances_cache_proposer_slashing() { + ForkChoiceTest::new_with_progressive_balances_mode(ProgressiveBalancesMode::Strict) + // first two epochs + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await + .unwrap() + .add_previous_epoch_proposer_slashing(MainnetEthSpec::slots_per_epoch()) + .await + // expect fork choice to import blocks successfully after a previous epoch proposer is + // slashed, i.e. the slashed proposer's balance is correctly excluded from + // the previous epoch total balance in `ProgressiveBalancesCache`. + .apply_blocks(1) + .await + // expect fork choice to import another epoch of blocks successfully - the slashed + // proposer's balance should be excluded from the current epoch total balance in + // `ProgressiveBalancesCache` as well. 
+ .apply_blocks(MainnetEthSpec::slots_per_epoch() as usize) + .await; +} diff --git a/consensus/state_processing/src/common/mod.rs b/consensus/state_processing/src/common/mod.rs index 8a2e2439b..ffe8be3a0 100644 --- a/consensus/state_processing/src/common/mod.rs +++ b/consensus/state_processing/src/common/mod.rs @@ -7,6 +7,7 @@ mod slash_validator; pub mod altair; pub mod base; +pub mod update_progressive_balances_cache; pub use deposit_data_tree::DepositDataTree; pub use get_attestation_participation::get_attestation_participation_flag_indices; diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index 94148ff6c..d8b1c1a10 100644 --- a/consensus/state_processing/src/common/slash_validator.rs +++ b/consensus/state_processing/src/common/slash_validator.rs @@ -1,3 +1,4 @@ +use crate::common::update_progressive_balances_cache::update_progressive_balances_on_slashing; use crate::{ common::{decrease_balance, increase_balance, initiate_validator_exit}, per_block_processing::errors::BlockProcessingError, @@ -43,6 +44,8 @@ pub fn slash_validator( .safe_div(spec.min_slashing_penalty_quotient_for_state(state))?, )?; + update_progressive_balances_on_slashing(state, slashed_index)?; + // Apply proposer and whistleblower rewards let proposer_index = ctxt.get_proposer_index(state, spec)? as usize; let whistleblower_index = opt_whistleblower_index.unwrap_or(proposer_index); diff --git a/consensus/state_processing/src/common/update_progressive_balances_cache.rs b/consensus/state_processing/src/common/update_progressive_balances_cache.rs new file mode 100644 index 000000000..45b5d657a --- /dev/null +++ b/consensus/state_processing/src/common/update_progressive_balances_cache.rs @@ -0,0 +1,142 @@ +/// A collection of all functions that mutates the `ProgressiveBalancesCache`. 
+use crate::metrics::{ + PARTICIPATION_CURR_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL, + PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL, +}; +use crate::per_epoch_processing::altair::ParticipationCache; +use crate::{BlockProcessingError, EpochProcessingError}; +use lighthouse_metrics::set_gauge; +use ssz_types::VariableList; +use std::borrow::Cow; +use types::consts::altair::TIMELY_TARGET_FLAG_INDEX; +use types::{ + is_progressive_balances_enabled, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, + ParticipationFlags, ProgressiveBalancesCache, +}; + +/// Initializes the `ProgressiveBalancesCache` cache using balance values from the +/// `ParticipationCache`. If the optional `&ParticipationCache` is not supplied, it will be computed +/// from the `BeaconState`. +pub fn initialize_progressive_balances_cache( + state: &mut BeaconState, + maybe_participation_cache: Option<&ParticipationCache>, + spec: &ChainSpec, +) -> Result<(), BeaconStateError> { + if !is_progressive_balances_enabled(state) + || state.progressive_balances_cache().is_initialized() + { + return Ok(()); + } + + let participation_cache = match maybe_participation_cache { + Some(cache) => Cow::Borrowed(cache), + None => Cow::Owned(ParticipationCache::new(state, spec)?), + }; + + let previous_epoch_target_attesting_balance = participation_cache + .previous_epoch_target_attesting_balance_raw() + .map_err(|e| BeaconStateError::ParticipationCacheError(format!("{e:?}")))?; + + let current_epoch_target_attesting_balance = participation_cache + .current_epoch_target_attesting_balance_raw() + .map_err(|e| BeaconStateError::ParticipationCacheError(format!("{e:?}")))?; + + let current_epoch = state.current_epoch(); + state.progressive_balances_cache_mut().initialize( + current_epoch, + previous_epoch_target_attesting_balance, + current_epoch_target_attesting_balance, + ); + + update_progressive_balances_metrics(state.progressive_balances_cache())?; + + Ok(()) +} + +/// Updates the 
`ProgressiveBalancesCache` when a new target attestation has been processed. +pub fn update_progressive_balances_on_attestation( + state: &mut BeaconState, + epoch: Epoch, + validator_index: usize, +) -> Result<(), BlockProcessingError> { + if is_progressive_balances_enabled(state) { + let validator = state.get_validator(validator_index)?; + if !validator.slashed { + let validator_effective_balance = validator.effective_balance; + state + .progressive_balances_cache_mut() + .on_new_target_attestation(epoch, validator_effective_balance)?; + } + } + Ok(()) +} + +/// Updates the `ProgressiveBalancesCache` when a target attester has been slashed. +pub fn update_progressive_balances_on_slashing( + state: &mut BeaconState, + validator_index: usize, +) -> Result<(), BlockProcessingError> { + if is_progressive_balances_enabled(state) { + let previous_epoch_participation = state.previous_epoch_participation()?; + let is_previous_epoch_target_attester = + is_target_attester_in_epoch::(previous_epoch_participation, validator_index)?; + + let current_epoch_participation = state.current_epoch_participation()?; + let is_current_epoch_target_attester = + is_target_attester_in_epoch::(current_epoch_participation, validator_index)?; + + let validator_effective_balance = state.get_effective_balance(validator_index)?; + + state.progressive_balances_cache_mut().on_slashing( + is_previous_epoch_target_attester, + is_current_epoch_target_attester, + validator_effective_balance, + )?; + } + + Ok(()) +} + +/// Updates the `ProgressiveBalancesCache` on epoch transition. 
+pub fn update_progressive_balances_on_epoch_transition( + state: &mut BeaconState, + spec: &ChainSpec, +) -> Result<(), EpochProcessingError> { + if is_progressive_balances_enabled(state) { + state + .progressive_balances_cache_mut() + .on_epoch_transition(spec)?; + + update_progressive_balances_metrics(state.progressive_balances_cache())?; + } + + Ok(()) +} + +pub fn update_progressive_balances_metrics( + cache: &ProgressiveBalancesCache, +) -> Result<(), BeaconStateError> { + set_gauge( + &PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL, + cache.previous_epoch_target_attesting_balance()? as i64, + ); + + set_gauge( + &PARTICIPATION_CURR_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL, + cache.current_epoch_target_attesting_balance()? as i64, + ); + + Ok(()) +} + +fn is_target_attester_in_epoch( + epoch_participation: &VariableList, + validator_index: usize, +) -> Result { + let participation_flags = epoch_participation + .get(validator_index) + .ok_or(BeaconStateError::UnknownValidator(validator_index))?; + participation_flags + .has_flag(TIMELY_TARGET_FLAG_INDEX) + .map_err(|e| e.into()) +} diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index 244cdc588..284a7019f 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -111,7 +111,7 @@ pub fn initialize_beacon_state_from_eth1( } // Now that we have our validators, initialize the caches (including the committees) - state.build_all_caches(spec)?; + state.build_caches(spec)?; // Set genesis validators root for domain separation and chain versioning *state.genesis_validators_root_mut() = state.update_validators_tree_hash_cache()?; @@ -134,7 +134,7 @@ pub fn process_activations( state: &mut BeaconState, spec: &ChainSpec, ) -> Result<(), Error> { - let (validators, balances) = state.validators_and_balances_mut(); + let (validators, balances, _) = 
state.validators_and_balances_and_progressive_balances_mut(); for (index, validator) in validators.iter_mut().enumerate() { let balance = balances .get(index) diff --git a/consensus/state_processing/src/metrics.rs b/consensus/state_processing/src/metrics.rs index ddfaae564..360b00767 100644 --- a/consensus/state_processing/src/metrics.rs +++ b/consensus/state_processing/src/metrics.rs @@ -23,4 +23,15 @@ lazy_static! { "beacon_participation_prev_epoch_active_gwei_total", "Total effective balance (gwei) of validators active in the previous epoch" ); + /* + * Participation Metrics (progressive balances) + */ + pub static ref PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL: Result = try_create_int_gauge( + "beacon_participation_prev_epoch_target_attesting_gwei_progressive_total", + "Progressive total effective balance (gwei) of validators who attested to the target in the previous epoch" + ); + pub static ref PARTICIPATION_CURR_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL: Result = try_create_int_gauge( + "beacon_participation_curr_epoch_target_attesting_gwei_progressive_total", + "Progressive total effective balance (gwei) of validators who attested to the target in the current epoch" + ); } diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index aee43c906..b9a147a5a 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -42,6 +42,9 @@ mod verify_proposer_slashing; use crate::common::decrease_balance; use crate::StateProcessingStrategy; +use crate::common::update_progressive_balances_cache::{ + initialize_progressive_balances_cache, update_progressive_balances_metrics, +}; #[cfg(feature = "arbitrary-fuzz")] use arbitrary::Arbitrary; @@ -115,6 +118,8 @@ pub fn per_block_processing>( .fork_name(spec) .map_err(BlockProcessingError::InconsistentStateFork)?; + initialize_progressive_balances_cache(state, 
None, spec)?; + let verify_signatures = match block_signature_strategy { BlockSignatureStrategy::VerifyBulk => { // Verify all signatures in the block at once. @@ -183,6 +188,10 @@ pub fn per_block_processing>( )?; } + if is_progressive_balances_enabled(state) { + update_progressive_balances_metrics(state.progressive_balances_cache())?; + } + Ok(()) } diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index ab6db7e47..016460080 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -1,6 +1,8 @@ use super::signature_sets::Error as SignatureSetError; +use crate::per_epoch_processing::altair::participation_cache; use crate::ContextError; use merkle_proof::MerkleTreeError; +use participation_cache::Error as ParticipationCacheError; use safe_arith::ArithError; use ssz::DecodeError; use types::*; @@ -99,6 +101,7 @@ pub enum BlockProcessingError { length: usize, }, WithdrawalCredentialsInvalid, + ParticipationCacheError(ParticipationCacheError), } impl From for BlockProcessingError { @@ -156,6 +159,12 @@ impl From> for BlockProcessingError { } } +impl From for BlockProcessingError { + fn from(e: ParticipationCacheError) -> Self { + BlockProcessingError::ParticipationCacheError(e) + } +} + /// A conversion that consumes `self` and adds an `index` variable to resulting struct. 
/// /// Used here to allow converting an error into an upstream error that points to the object that diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 091f961db..4e60c4161 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -97,6 +97,8 @@ pub mod base { pub mod altair { use super::*; + use crate::common::update_progressive_balances_cache::update_progressive_balances_on_attestation; + use types::consts::altair::TIMELY_TARGET_FLAG_INDEX; pub fn process_attestations( state: &mut BeaconState, @@ -163,6 +165,14 @@ pub mod altair { get_base_reward(state, index, base_reward_per_increment, spec)? .safe_mul(weight)?, )?; + + if flag_index == TIMELY_TARGET_FLAG_INDEX { + update_progressive_balances_on_attestation( + state, + data.target.epoch, + index, + )?; + } } } } @@ -235,6 +245,7 @@ pub fn process_attester_slashings( Ok(()) } + /// Wrapper function to handle calling the correct version of `process_attestations` based on /// the fork. 
pub fn process_attestations>( diff --git a/consensus/state_processing/src/per_epoch_processing/altair.rs b/consensus/state_processing/src/per_epoch_processing/altair.rs index d5df2fc97..0abbd16a9 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair.rs @@ -1,4 +1,7 @@ use super::{process_registry_updates, process_slashings, EpochProcessingSummary, Error}; +use crate::common::update_progressive_balances_cache::{ + initialize_progressive_balances_cache, update_progressive_balances_on_epoch_transition, +}; use crate::per_epoch_processing::{ effective_balance_updates::process_effective_balance_updates, historical_roots_update::process_historical_roots_update, @@ -31,6 +34,7 @@ pub fn process_epoch( // Pre-compute participating indices and total balances. let participation_cache = ParticipationCache::new(state, spec)?; let sync_committee = state.current_sync_committee()?.clone(); + initialize_progressive_balances_cache::(state, Some(&participation_cache), spec)?; // Justification and finalization. let justification_and_finalization_state = @@ -56,7 +60,7 @@ pub fn process_epoch( process_eth1_data_reset(state)?; // Update effective balances with hysteresis (lag). - process_effective_balance_updates(state, spec)?; + process_effective_balance_updates(state, Some(&participation_cache), spec)?; // Reset slashings process_slashings_reset(state)?; @@ -75,6 +79,8 @@ pub fn process_epoch( // Rotate the epoch caches to suit the epoch transition. 
state.advance_caches(spec)?; + update_progressive_balances_on_epoch_transition(state, spec)?; + Ok(EpochProcessingSummary::Altair { participation_cache, sync_committee, diff --git a/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs b/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs index 004726923..a5caddd04 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs @@ -11,49 +11,23 @@ //! Additionally, this cache is returned from the `altair::process_epoch` function and can be used //! to get useful summaries about the validator participation in an epoch. -use safe_arith::{ArithError, SafeArith}; use types::{ consts::altair::{ NUM_FLAG_INDICES, TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, }, - BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ParticipationFlags, RelativeEpoch, + Balance, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ParticipationFlags, + RelativeEpoch, }; -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Clone)] pub enum Error { InvalidFlagIndex(usize), InvalidValidatorIndex(usize), } -/// A balance which will never be below the specified `minimum`. -/// -/// This is an effort to ensure the `EFFECTIVE_BALANCE_INCREMENT` minimum is always respected. -#[derive(PartialEq, Debug, Clone, Copy)] -struct Balance { - raw: u64, - minimum: u64, -} - -impl Balance { - /// Initialize the balance to `0`, or the given `minimum`. - pub fn zero(minimum: u64) -> Self { - Self { raw: 0, minimum } - } - - /// Returns the balance with respect to the initialization `minimum`. - pub fn get(&self) -> u64 { - std::cmp::max(self.raw, self.minimum) - } - - /// Add-assign to the balance. 
- pub fn safe_add_assign(&mut self, other: u64) -> Result<(), ArithError> { - self.raw.safe_add_assign(other) - } -} - /// Caches the participation values for one epoch (either the previous or current). -#[derive(PartialEq, Debug)] +#[derive(PartialEq, Debug, Clone)] struct SingleEpochParticipationCache { /// Maps an active validator index to their participation flags. /// @@ -95,6 +69,14 @@ impl SingleEpochParticipationCache { .ok_or(Error::InvalidFlagIndex(flag_index)) } + /// Returns the raw total balance of attesters who have `flag_index` set. + fn total_flag_balance_raw(&self, flag_index: usize) -> Result { + self.total_flag_balances + .get(flag_index) + .copied() + .ok_or(Error::InvalidFlagIndex(flag_index)) + } + /// Returns `true` if `val_index` is active, unslashed and has `flag_index` set. /// /// ## Errors @@ -173,7 +155,7 @@ impl SingleEpochParticipationCache { } /// Maintains a cache to be used during `altair::process_epoch`. -#[derive(PartialEq, Debug)] +#[derive(PartialEq, Debug, Clone)] pub struct ParticipationCache { current_epoch: Epoch, /// Caches information about active validators pertaining to `self.current_epoch`. 
@@ -291,6 +273,11 @@ impl ParticipationCache { .total_flag_balance(TIMELY_TARGET_FLAG_INDEX) } + pub fn current_epoch_target_attesting_balance_raw(&self) -> Result { + self.current_epoch_participation + .total_flag_balance_raw(TIMELY_TARGET_FLAG_INDEX) + } + pub fn previous_epoch_total_active_balance(&self) -> u64 { self.previous_epoch_participation.total_active_balance.get() } @@ -300,6 +287,11 @@ impl ParticipationCache { .total_flag_balance(TIMELY_TARGET_FLAG_INDEX) } + pub fn previous_epoch_target_attesting_balance_raw(&self) -> Result { + self.previous_epoch_participation + .total_flag_balance_raw(TIMELY_TARGET_FLAG_INDEX) + } + pub fn previous_epoch_source_attesting_balance(&self) -> Result { self.previous_epoch_participation .total_flag_balance(TIMELY_SOURCE_FLAG_INDEX) diff --git a/consensus/state_processing/src/per_epoch_processing/base.rs b/consensus/state_processing/src/per_epoch_processing/base.rs index cb7e7d4b3..680563ce7 100644 --- a/consensus/state_processing/src/per_epoch_processing/base.rs +++ b/consensus/state_processing/src/per_epoch_processing/base.rs @@ -52,7 +52,7 @@ pub fn process_epoch( process_eth1_data_reset(state)?; // Update effective balances with hysteresis (lag). 
- process_effective_balance_updates(state, spec)?; + process_effective_balance_updates(state, None, spec)?; // Reset slashings process_slashings_reset(state)?; diff --git a/consensus/state_processing/src/per_epoch_processing/capella.rs b/consensus/state_processing/src/per_epoch_processing/capella.rs index aaf301f29..911510ed0 100644 --- a/consensus/state_processing/src/per_epoch_processing/capella.rs +++ b/consensus/state_processing/src/per_epoch_processing/capella.rs @@ -11,6 +11,9 @@ use crate::per_epoch_processing::{ }; use types::{BeaconState, ChainSpec, EthSpec, RelativeEpoch}; +use crate::common::update_progressive_balances_cache::{ + initialize_progressive_balances_cache, update_progressive_balances_on_epoch_transition, +}; pub use historical_summaries_update::process_historical_summaries_update; mod historical_summaries_update; @@ -27,6 +30,7 @@ pub fn process_epoch( // Pre-compute participating indices and total balances. let participation_cache = ParticipationCache::new(state, spec)?; let sync_committee = state.current_sync_committee()?.clone(); + initialize_progressive_balances_cache(state, Some(&participation_cache), spec)?; // Justification and finalization. let justification_and_finalization_state = @@ -52,7 +56,7 @@ pub fn process_epoch( process_eth1_data_reset(state)?; // Update effective balances with hysteresis (lag). - process_effective_balance_updates(state, spec)?; + process_effective_balance_updates(state, Some(&participation_cache), spec)?; // Reset slashings process_slashings_reset(state)?; @@ -71,6 +75,8 @@ pub fn process_epoch( // Rotate the epoch caches to suit the epoch transition. 
state.advance_caches(spec)?; + update_progressive_balances_on_epoch_transition(state, spec)?; + Ok(EpochProcessingSummary::Altair { participation_cache, sync_committee, diff --git a/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs b/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs index c166667b5..1759f7e14 100644 --- a/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs @@ -1,11 +1,13 @@ use super::errors::EpochProcessingError; +use crate::per_epoch_processing::altair::ParticipationCache; use safe_arith::SafeArith; use types::beacon_state::BeaconState; use types::chain_spec::ChainSpec; -use types::{BeaconStateError, EthSpec}; +use types::{BeaconStateError, EthSpec, ProgressiveBalancesCache}; pub fn process_effective_balance_updates( state: &mut BeaconState, + maybe_participation_cache: Option<&ParticipationCache>, spec: &ChainSpec, ) -> Result<(), EpochProcessingError> { let hysteresis_increment = spec @@ -13,7 +15,8 @@ pub fn process_effective_balance_updates( .safe_div(spec.hysteresis_quotient)?; let downward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_downward_multiplier)?; let upward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_upward_multiplier)?; - let (validators, balances) = state.validators_and_balances_mut(); + let (validators, balances, progressive_balances_cache) = + state.validators_and_balances_and_progressive_balances_mut(); for (index, validator) in validators.iter_mut().enumerate() { let balance = balances .get(index) @@ -23,11 +26,43 @@ pub fn process_effective_balance_updates( if balance.safe_add(downward_threshold)? < validator.effective_balance || validator.effective_balance.safe_add(upward_threshold)? 
< balance { - validator.effective_balance = std::cmp::min( + let old_effective_balance = validator.effective_balance; + let new_effective_balance = std::cmp::min( balance.safe_sub(balance.safe_rem(spec.effective_balance_increment)?)?, spec.max_effective_balance, ); + + if let Some(participation_cache) = maybe_participation_cache { + update_progressive_balances( + participation_cache, + progressive_balances_cache, + index, + old_effective_balance, + new_effective_balance, + )?; + } + + validator.effective_balance = new_effective_balance; } } Ok(()) } + +fn update_progressive_balances( + participation_cache: &ParticipationCache, + progressive_balances_cache: &mut ProgressiveBalancesCache, + index: usize, + old_effective_balance: u64, + new_effective_balance: u64, +) -> Result<(), EpochProcessingError> { + if old_effective_balance != new_effective_balance { + let is_current_epoch_target_attester = + participation_cache.is_current_epoch_timely_target_attester(index)?; + progressive_balances_cache.on_effective_balance_change( + is_current_epoch_target_attester, + old_effective_balance, + new_effective_balance, + )?; + } + Ok(()) +} diff --git a/consensus/state_processing/src/per_epoch_processing/slashings.rs b/consensus/state_processing/src/per_epoch_processing/slashings.rs index 6d5342cd3..2d595491c 100644 --- a/consensus/state_processing/src/per_epoch_processing/slashings.rs +++ b/consensus/state_processing/src/per_epoch_processing/slashings.rs @@ -16,7 +16,7 @@ pub fn process_slashings( total_balance, ); - let (validators, balances) = state.validators_and_balances_mut(); + let (validators, balances, _) = state.validators_and_balances_and_progressive_balances_mut(); for (index, validator) in validators.iter().enumerate() { if validator.slashed && epoch.safe_add(T::EpochsPerSlashingsVector::to_u64().safe_div(2)?)? 
diff --git a/consensus/state_processing/src/upgrade/altair.rs b/consensus/state_processing/src/upgrade/altair.rs index 176f1af15..26b1192bc 100644 --- a/consensus/state_processing/src/upgrade/altair.rs +++ b/consensus/state_processing/src/upgrade/altair.rs @@ -1,3 +1,4 @@ +use crate::common::update_progressive_balances_cache::initialize_progressive_balances_cache; use crate::common::{get_attestation_participation_flag_indices, get_attesting_indices}; use std::mem; use std::sync::Arc; @@ -101,6 +102,7 @@ pub fn upgrade_to_altair( next_sync_committee: temp_sync_committee, // not read // Caches total_active_balance: pre.total_active_balance, + progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache), committee_caches: mem::take(&mut pre.committee_caches), pubkey_cache: mem::take(&mut pre.pubkey_cache), exit_cache: mem::take(&mut pre.exit_cache), @@ -110,6 +112,8 @@ pub fn upgrade_to_altair( // Fill in previous epoch participation from the pre state's pending attestations. 
translate_participation(&mut post, &pre.previous_epoch_attestations, spec)?; + initialize_progressive_balances_cache(&mut post, None, spec)?; + // Fill in sync committees // Note: A duplicate committee is assigned for the current and next committee at the fork // boundary diff --git a/consensus/state_processing/src/upgrade/capella.rs b/consensus/state_processing/src/upgrade/capella.rs index 3b933fac3..5153e35f4 100644 --- a/consensus/state_processing/src/upgrade/capella.rs +++ b/consensus/state_processing/src/upgrade/capella.rs @@ -62,6 +62,7 @@ pub fn upgrade_to_capella( historical_summaries: VariableList::default(), // Caches total_active_balance: pre.total_active_balance, + progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache), committee_caches: mem::take(&mut pre.committee_caches), pubkey_cache: mem::take(&mut pre.pubkey_cache), exit_cache: mem::take(&mut pre.exit_cache), diff --git a/consensus/state_processing/src/upgrade/merge.rs b/consensus/state_processing/src/upgrade/merge.rs index c17246624..eb7445010 100644 --- a/consensus/state_processing/src/upgrade/merge.rs +++ b/consensus/state_processing/src/upgrade/merge.rs @@ -60,6 +60,7 @@ pub fn upgrade_to_bellatrix( latest_execution_payload_header: >::default(), // Caches total_active_balance: pre.total_active_balance, + progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache), committee_caches: mem::take(&mut pre.committee_caches), pubkey_cache: mem::take(&mut pre.pubkey_cache), exit_cache: mem::take(&mut pre.exit_cache), diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 8db70dc85..883d5fe05 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -53,6 +53,7 @@ serde_json = "1.0.74" smallvec = "1.8.0" serde_with = "1.13.0" maplit = "1.0.2" +strum = { version = "0.24.0", features = ["derive"] } [dev-dependencies] criterion = "0.3.3" diff --git a/consensus/types/benches/benches.rs b/consensus/types/benches/benches.rs index 
28f57e708..bb2b52710 100644 --- a/consensus/types/benches/benches.rs +++ b/consensus/types/benches/benches.rs @@ -51,7 +51,7 @@ fn all_benches(c: &mut Criterion) { let spec = Arc::new(MainnetEthSpec::default_spec()); let mut state = get_state::(validator_count); - state.build_all_caches(&spec).expect("should build caches"); + state.build_caches(&spec).expect("should build caches"); let state_bytes = state.as_ssz_bytes(); let inner_state = state.clone(); diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index b88821715..61400a8b4 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -98,7 +98,7 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, T, } } -impl<'a, T: EthSpec> BeaconBlockBodyRef<'a, T> { +impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, T, Payload> { /// Get the fork_name of this object pub fn fork_name(self) -> ForkName { match self { diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index ca333048a..b09f2aa50 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -26,6 +26,8 @@ pub use self::committee_cache::{ compute_committee_index_in_epoch, compute_committee_range_in_epoch, epoch_committee_count, CommitteeCache, }; +pub use crate::beacon_state::balance::Balance; +pub use crate::beacon_state::progressive_balances_cache::*; use crate::historical_summary::HistoricalSummary; pub use clone_config::CloneConfig; pub use eth_spec::*; @@ -34,9 +36,11 @@ pub use tree_hash_cache::BeaconTreeHashCache; #[macro_use] mod committee_cache; +mod balance; mod clone_config; mod exit_cache; mod iter; +mod progressive_balances_cache; mod pubkey_cache; mod tests; mod tree_hash_cache; @@ -101,6 +105,9 @@ pub enum Error { SszTypesError(ssz_types::Error), TreeHashCacheNotInitialized, NonLinearTreeHashCacheHistory, + ParticipationCacheError(String), + 
ProgressiveBalancesCacheNotInitialized, + ProgressiveBalancesCacheInconsistent, TreeHashCacheSkippedSlot { cache: Slot, state: Slot, @@ -322,6 +329,12 @@ where #[tree_hash(skip_hashing)] #[test_random(default)] #[derivative(Clone(clone_with = "clone_default"))] + pub progressive_balances_cache: ProgressiveBalancesCache, + #[serde(skip_serializing, skip_deserializing)] + #[ssz(skip_serializing, skip_deserializing)] + #[tree_hash(skip_hashing)] + #[test_random(default)] + #[derivative(Clone(clone_with = "clone_default"))] pub committee_caches: [CommitteeCache; CACHED_EPOCHS], #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)] @@ -398,6 +411,7 @@ impl BeaconState { // Caching (not in spec) total_active_balance: None, + progressive_balances_cache: <_>::default(), committee_caches: [ CommitteeCache::default(), CommitteeCache::default(), @@ -776,7 +790,7 @@ impl BeaconState { Ok(signature_hash_int.safe_rem(modulo)? == 0) } - /// Returns the beacon proposer index for the `slot` in the given `relative_epoch`. + /// Returns the beacon proposer index for the `slot` in `self.current_epoch()`. /// /// Spec v0.12.1 pub fn get_beacon_proposer_index(&self, slot: Slot, spec: &ChainSpec) -> Result { @@ -1169,13 +1183,35 @@ impl BeaconState { } /// Convenience accessor for validators and balances simultaneously. 
- pub fn validators_and_balances_mut(&mut self) -> (&mut [Validator], &mut [u64]) { + pub fn validators_and_balances_and_progressive_balances_mut( + &mut self, + ) -> (&mut [Validator], &mut [u64], &mut ProgressiveBalancesCache) { match self { - BeaconState::Base(state) => (&mut state.validators, &mut state.balances), - BeaconState::Altair(state) => (&mut state.validators, &mut state.balances), - BeaconState::Merge(state) => (&mut state.validators, &mut state.balances), - BeaconState::Capella(state) => (&mut state.validators, &mut state.balances), - BeaconState::Deneb(state) => (&mut state.validators, &mut state.balances), + BeaconState::Base(state) => ( + &mut state.validators, + &mut state.balances, + &mut state.progressive_balances_cache, + ), + BeaconState::Altair(state) => ( + &mut state.validators, + &mut state.balances, + &mut state.progressive_balances_cache, + ), + BeaconState::Merge(state) => ( + &mut state.validators, + &mut state.balances, + &mut state.progressive_balances_cache, + ), + BeaconState::Capella(state) => ( + &mut state.validators, + &mut state.balances, + &mut state.progressive_balances_cache, + ), + BeaconState::Deneb(state) => ( + &mut state.validators, + &mut state.balances, + &mut state.progressive_balances_cache, + ), } } @@ -1402,7 +1438,7 @@ impl BeaconState { } /// Build all caches (except the tree hash cache), if they need to be built. 
- pub fn build_all_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> { + pub fn build_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> { self.build_all_committee_caches(spec)?; self.update_pubkey_cache()?; self.build_exit_cache(spec)?; @@ -1434,6 +1470,7 @@ impl BeaconState { self.drop_committee_cache(RelativeEpoch::Next)?; self.drop_pubkey_cache(); self.drop_tree_hash_cache(); + self.drop_progressive_balances_cache(); *self.exit_cache_mut() = ExitCache::default(); Ok(()) } @@ -1630,6 +1667,11 @@ impl BeaconState { *self.pubkey_cache_mut() = PubkeyCache::default() } + /// Completely drops the `progressive_balances_cache` cache, replacing it with a new, empty cache. + fn drop_progressive_balances_cache(&mut self) { + *self.progressive_balances_cache_mut() = ProgressiveBalancesCache::default(); + } + /// Initialize but don't fill the tree hash cache, if it isn't already initialized. pub fn initialize_tree_hash_cache(&mut self) { if !self.tree_hash_cache().is_initialized() { @@ -1702,6 +1744,9 @@ impl BeaconState { if config.tree_hash_cache { *res.tree_hash_cache_mut() = self.tree_hash_cache().clone(); } + if config.progressive_balances_cache { + *res.progressive_balances_cache_mut() = self.progressive_balances_cache().clone(); + } res } diff --git a/consensus/types/src/beacon_state/balance.rs b/consensus/types/src/beacon_state/balance.rs new file mode 100644 index 000000000..e537a5b98 --- /dev/null +++ b/consensus/types/src/beacon_state/balance.rs @@ -0,0 +1,33 @@ +use arbitrary::Arbitrary; +use safe_arith::{ArithError, SafeArith}; + +/// A balance which will never be below the specified `minimum`. +/// +/// This is an effort to ensure the `EFFECTIVE_BALANCE_INCREMENT` minimum is always respected. +#[derive(PartialEq, Debug, Clone, Copy, Arbitrary)] +pub struct Balance { + raw: u64, + minimum: u64, +} + +impl Balance { + /// Initialize the balance to `0`, or the given `minimum`. 
+ pub fn zero(minimum: u64) -> Self { + Self { raw: 0, minimum } + } + + /// Returns the balance with respect to the initialization `minimum`. + pub fn get(&self) -> u64 { + std::cmp::max(self.raw, self.minimum) + } + + /// Add-assign to the balance. + pub fn safe_add_assign(&mut self, other: u64) -> Result<(), ArithError> { + self.raw.safe_add_assign(other) + } + + /// Sub-assign to the balance. + pub fn safe_sub_assign(&mut self, other: u64) -> Result<(), ArithError> { + self.raw.safe_sub_assign(other) + } +} diff --git a/consensus/types/src/beacon_state/clone_config.rs b/consensus/types/src/beacon_state/clone_config.rs index e5f050aee..c6e7f4742 100644 --- a/consensus/types/src/beacon_state/clone_config.rs +++ b/consensus/types/src/beacon_state/clone_config.rs @@ -5,6 +5,7 @@ pub struct CloneConfig { pub pubkey_cache: bool, pub exit_cache: bool, pub tree_hash_cache: bool, + pub progressive_balances_cache: bool, } impl CloneConfig { @@ -14,6 +15,7 @@ impl CloneConfig { pubkey_cache: true, exit_cache: true, tree_hash_cache: true, + progressive_balances_cache: true, } } diff --git a/consensus/types/src/beacon_state/progressive_balances_cache.rs b/consensus/types/src/beacon_state/progressive_balances_cache.rs new file mode 100644 index 000000000..9f5c223d5 --- /dev/null +++ b/consensus/types/src/beacon_state/progressive_balances_cache.rs @@ -0,0 +1,184 @@ +use crate::beacon_state::balance::Balance; +use crate::{BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec}; +use arbitrary::Arbitrary; +use safe_arith::SafeArith; +use serde_derive::{Deserialize, Serialize}; +use strum::{Display, EnumString, EnumVariantNames}; + +/// This cache keeps track of the accumulated target attestation balance for the current & previous +/// epochs. The cached values can be utilised by fork choice to calculate unrealized justification +/// and finalization instead of converting epoch participation arrays to balances for each block we +/// process. 
+#[derive(Default, Debug, PartialEq, Arbitrary, Clone)] +pub struct ProgressiveBalancesCache { + inner: Option, +} + +#[derive(Debug, PartialEq, Arbitrary, Clone)] +struct Inner { + pub current_epoch: Epoch, + pub previous_epoch_target_attesting_balance: Balance, + pub current_epoch_target_attesting_balance: Balance, +} + +impl ProgressiveBalancesCache { + pub fn initialize( + &mut self, + current_epoch: Epoch, + previous_epoch_target_attesting_balance: Balance, + current_epoch_target_attesting_balance: Balance, + ) { + self.inner = Some(Inner { + current_epoch, + previous_epoch_target_attesting_balance, + current_epoch_target_attesting_balance, + }); + } + + pub fn is_initialized(&self) -> bool { + self.inner.is_some() + } + + /// When a new target attestation has been processed, we update the cached + /// `current_epoch_target_attesting_balance` to include the validator effective balance. + /// If the epoch is neither the current epoch nor the previous epoch, an error is returned. + pub fn on_new_target_attestation( + &mut self, + epoch: Epoch, + validator_effective_balance: u64, + ) -> Result<(), BeaconStateError> { + let cache = self.get_inner_mut()?; + + if epoch == cache.current_epoch { + cache + .current_epoch_target_attesting_balance + .safe_add_assign(validator_effective_balance)?; + } else if epoch.safe_add(1)? == cache.current_epoch { + cache + .previous_epoch_target_attesting_balance + .safe_add_assign(validator_effective_balance)?; + } else { + return Err(BeaconStateError::ProgressiveBalancesCacheInconsistent); + } + + Ok(()) + } + + /// When a validator is slashed, we reduce the `current_epoch_target_attesting_balance` by the + /// validator's effective balance to exclude the validator weight. 
+ pub fn on_slashing( + &mut self, + is_previous_epoch_target_attester: bool, + is_current_epoch_target_attester: bool, + effective_balance: u64, + ) -> Result<(), BeaconStateError> { + let cache = self.get_inner_mut()?; + if is_previous_epoch_target_attester { + cache + .previous_epoch_target_attesting_balance + .safe_sub_assign(effective_balance)?; + } + if is_current_epoch_target_attester { + cache + .current_epoch_target_attesting_balance + .safe_sub_assign(effective_balance)?; + } + Ok(()) + } + + /// When a current epoch target attester has its effective balance changed, we adjust + /// its share of the target attesting balance in the cache. + pub fn on_effective_balance_change( + &mut self, + is_current_epoch_target_attester: bool, + old_effective_balance: u64, + new_effective_balance: u64, + ) -> Result<(), BeaconStateError> { + let cache = self.get_inner_mut()?; + if is_current_epoch_target_attester { + if new_effective_balance > old_effective_balance { + cache + .current_epoch_target_attesting_balance + .safe_add_assign(new_effective_balance.safe_sub(old_effective_balance)?)?; + } else { + cache + .current_epoch_target_attesting_balance + .safe_sub_assign(old_effective_balance.safe_sub(new_effective_balance)?)?; + } + } + Ok(()) + } + + /// On epoch transition, the balance from current epoch is shifted to previous epoch, and the + /// current epoch balance is reset to 0. + pub fn on_epoch_transition(&mut self, spec: &ChainSpec) -> Result<(), BeaconStateError> { + let cache = self.get_inner_mut()?; + cache.current_epoch.safe_add_assign(1)?; + cache.previous_epoch_target_attesting_balance = + cache.current_epoch_target_attesting_balance; + cache.current_epoch_target_attesting_balance = + Balance::zero(spec.effective_balance_increment); + Ok(()) + } + + pub fn previous_epoch_target_attesting_balance(&self) -> Result { + Ok(self + .get_inner()?
+ .previous_epoch_target_attesting_balance + .get()) + } + + pub fn current_epoch_target_attesting_balance(&self) -> Result { + Ok(self + .get_inner()? + .current_epoch_target_attesting_balance + .get()) + } + + fn get_inner_mut(&mut self) -> Result<&mut Inner, BeaconStateError> { + self.inner + .as_mut() + .ok_or(BeaconStateError::ProgressiveBalancesCacheNotInitialized) + } + + fn get_inner(&self) -> Result<&Inner, BeaconStateError> { + self.inner + .as_ref() + .ok_or(BeaconStateError::ProgressiveBalancesCacheNotInitialized) + } +} + +#[derive( + Debug, PartialEq, Eq, Clone, Copy, Deserialize, Serialize, Display, EnumString, EnumVariantNames, +)] +#[strum(serialize_all = "lowercase")] +pub enum ProgressiveBalancesMode { + /// Disable the usage of progressive cache, and use the existing `ParticipationCache` calculation. + Disabled, + /// Enable the usage of progressive cache, with checks against the `ParticipationCache` and falls + /// back to the existing calculation if there is a balance mismatch. + Checked, + /// Enable the usage of progressive cache, with checks against the `ParticipationCache`. Errors + /// if there is a balance mismatch. Used in testing only. + Strict, + /// Enable the usage of progressive cache, with no comparative checks against the + /// `ParticipationCache`. This is a fast but experimental mode, use with caution. + Fast, +} + +impl ProgressiveBalancesMode { + pub fn perform_comparative_checks(&self) -> bool { + match self { + Self::Disabled | Self::Fast => false, + Self::Checked | Self::Strict => true, + } + } +} + +/// `ProgressiveBalancesCache` is only enabled from `Altair` as it requires `ParticipationCache`.
+pub fn is_progressive_balances_enabled(state: &BeaconState) -> bool { + match state { + BeaconState::Base(_) => false, + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => true, + } +} diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index d63eaafc4..6cd9c1dbf 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -219,17 +219,18 @@ async fn clone_config() { let mut state = build_state::(16).await; - state.build_all_caches(&spec).unwrap(); + state.build_caches(&spec).unwrap(); state .update_tree_hash_cache() .expect("should update tree hash cache"); - let num_caches = 4; + let num_caches = 5; let all_configs = (0..2u8.pow(num_caches)).map(|i| CloneConfig { committee_caches: (i & 1) != 0, pubkey_cache: ((i >> 1) & 1) != 0, exit_cache: ((i >> 2) & 1) != 0, tree_hash_cache: ((i >> 3) & 1) != 0, + progressive_balances_cache: ((i >> 4) & 1) != 0, }); for config in all_configs { diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 185da02d0..3e0754240 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "4.2.0" +version = "4.3.0" authors = ["Paul Hauner "] edition = "2021" diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index eaf963a8c..3c5e0fe2b 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -329,7 +329,7 @@ fn initialize_state_with_validators( } // Now that we have our validators, initialize the caches (including the committees) - state.build_all_caches(spec).unwrap(); + state.build_caches(spec).unwrap(); // Set genesis validators root for domain separation and chain versioning *state.genesis_validators_root_mut() = state.update_validators_tree_hash_cache().unwrap(); diff --git a/lcli/src/skip_slots.rs b/lcli/src/skip_slots.rs index 49d1dd424..e3b2a5acb 100644 --- a/lcli/src/skip_slots.rs +++ 
b/lcli/src/skip_slots.rs @@ -109,7 +109,7 @@ pub fn run(env: Environment, matches: &ArgMatches) -> Result<(), let target_slot = initial_slot + slots; state - .build_all_caches(spec) + .build_caches(spec) .map_err(|e| format!("Unable to build caches: {:?}", e))?; let state_root = if let Some(root) = cli_state_root.or(state_root) { diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index cf971c69f..34a456076 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -205,7 +205,7 @@ pub fn run(env: Environment, matches: &ArgMatches) -> Result<(), if config.exclude_cache_builds { pre_state - .build_all_caches(spec) + .build_caches(spec) .map_err(|e| format!("Unable to build caches: {:?}", e))?; let state_root = pre_state .update_tree_hash_cache() @@ -303,7 +303,7 @@ fn do_transition( if !config.exclude_cache_builds { let t = Instant::now(); pre_state - .build_all_caches(spec) + .build_caches(spec) .map_err(|e| format!("Unable to build caches: {:?}", e))?; debug!("Build caches: {:?}", t.elapsed()); @@ -335,7 +335,7 @@ fn do_transition( let t = Instant::now(); pre_state - .build_all_caches(spec) + .build_caches(spec) .map_err(|e| format!("Unable to build caches: {:?}", e))?; debug!("Build all caches (again): {:?}", t.elapsed()); diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index ae79972e9..050ea2841 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "4.2.0" +version = "4.3.0" authors = ["Sigma Prime "] edition = "2021" autotests = false diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 84bc2966c..bc5b69054 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -16,7 +16,10 @@ use std::str::FromStr; use std::string::ToString; use std::time::Duration; use tempfile::TempDir; -use types::{Address, Checkpoint, Epoch, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec}; +use 
types::{ + Address, Checkpoint, Epoch, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec, + ProgressiveBalancesMode, +}; use unused_port::{unused_tcp4_port, unused_tcp6_port, unused_udp4_port, unused_udp6_port}; const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/"; @@ -1125,48 +1128,13 @@ fn default_backfill_rate_limiting_flag() { } #[test] fn default_boot_nodes() { - let mainnet = vec![ - // Lighthouse Team (Sigma Prime) - "enr:-Jq4QItoFUuug_n_qbYbU0OY04-np2wT8rUCauOOXNi0H3BWbDj-zbfZb7otA7jZ6flbBpx1LNZK2TDebZ9dEKx84LYBhGV0aDKQtTA_KgEAAAD__________4JpZIJ2NIJpcISsaa0ZiXNlY3AyNTZrMaEDHAD2JKYevx89W0CcFJFiskdcEzkH_Wdv9iW42qLK79ODdWRwgiMo", - "enr:-Jq4QN_YBsUOqQsty1OGvYv48PMaiEt1AzGD1NkYQHaxZoTyVGqMYXg0K9c0LPNWC9pkXmggApp8nygYLsQwScwAgfgBhGV0aDKQtTA_KgEAAAD__________4JpZIJ2NIJpcISLosQxiXNlY3AyNTZrMaEDBJj7_dLFACaxBfaI8KZTh_SSJUjhyAyfshimvSqo22WDdWRwgiMo", - // EF Team - "enr:-Ku4QHqVeJ8PPICcWk1vSn_XcSkjOkNiTg6Fmii5j6vUQgvzMc9L1goFnLKgXqBJspJjIsB91LTOleFmyWWrFVATGngBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAMRHkWJc2VjcDI1NmsxoQKLVXFOhp2uX6jeT0DvvDpPcU8FWMjQdR4wMuORMhpX24N1ZHCCIyg", - "enr:-Ku4QG-2_Md3sZIAUebGYT6g0SMskIml77l6yR-M_JXc-UdNHCmHQeOiMLbylPejyJsdAPsTHJyjJB2sYGDLe0dn8uYBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhBLY-NyJc2VjcDI1NmsxoQORcM6e19T1T9gi7jxEZjk_sjVLGFscUNqAY9obgZaxbIN1ZHCCIyg", - "enr:-Ku4QPn5eVhcoF1opaFEvg1b6JNFD2rqVkHQ8HApOKK61OIcIXD127bKWgAtbwI7pnxx6cDyk_nI88TrZKQaGMZj0q0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDayLMaJc2VjcDI1NmsxoQK2sBOLGcUb4AwuYzFuAVCaNHA-dy24UuEKkeFNgCVCsIN1ZHCCIyg", - "enr:-Ku4QEWzdnVtXc2Q0ZVigfCGggOVB2Vc1ZCPEc6j21NIFLODSJbvNaef1g4PxhPwl_3kax86YPheFUSLXPRs98vvYsoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDZBrP2Jc2VjcDI1NmsxoQM6jr8Rb1ktLEsVcKAPa08wCsKUmvoQ8khiOl_SLozf9IN1ZHCCIyg", - // Teku team (Consensys) - 
"enr:-KG4QOtcP9X1FbIMOe17QNMKqDxCpm14jcX5tiOE4_TyMrFqbmhPZHK_ZPG2Gxb1GE2xdtodOfx9-cgvNtxnRyHEmC0ghGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQDE8KdiXNlY3AyNTZrMaEDhpehBDbZjM_L9ek699Y7vhUJ-eAdMyQW_Fil522Y0fODdGNwgiMog3VkcIIjKA", - "enr:-KG4QDyytgmE4f7AnvW-ZaUOIi9i79qX4JwjRAiXBZCU65wOfBu-3Nb5I7b_Rmg3KCOcZM_C3y5pg7EBU5XGrcLTduQEhGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQ2_DUbiXNlY3AyNTZrMaEDKnz_-ps3UUOfHWVYaskI5kWYO_vtYMGYCQRAR3gHDouDdGNwgiMog3VkcIIjKA", - // Prysm team (Prysmatic Labs) - "enr:-Ku4QImhMc1z8yCiNJ1TyUxdcfNucje3BGwEHzodEZUan8PherEo4sF7pPHPSIB1NNuSg5fZy7qFsjmUKs2ea1Whi0EBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQOVphkDqal4QzPMksc5wnpuC3gvSC8AfbFOnZY_On34wIN1ZHCCIyg", - "enr:-Ku4QP2xDnEtUXIjzJ_DhlCRN9SN99RYQPJL92TMlSv7U5C1YnYLjwOQHgZIUXw6c-BvRg2Yc2QsZxxoS_pPRVe0yK8Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMeFF5GrS7UZpAH2Ly84aLK-TyvH-dRo0JM1i8yygH50YN1ZHCCJxA", - "enr:-Ku4QPp9z1W4tAO8Ber_NQierYaOStqhDqQdOPY3bB3jDgkjcbk6YrEnVYIiCBbTxuar3CzS528d2iE7TdJsrL-dEKoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMw5fqqkw2hHC4F5HZZDPsNmPdB1Gi8JPQK7pRc9XHh-oN1ZHCCKvg", - // Nimbus team - "enr:-LK4QA8FfhaAjlb_BXsXxSfiysR7R52Nhi9JBt4F8SPssu8hdE1BXQQEtVDC3qStCW60LSO7hEsVHv5zm8_6Vnjhcn0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAN4aBKJc2VjcDI1NmsxoQJerDhsJ-KxZ8sHySMOCmTO6sHM3iCFQ6VMvLTe948MyYN0Y3CCI4yDdWRwgiOM", - "enr:-LK4QKWrXTpV9T78hNG6s8AM6IO4XH9kFT91uZtFg1GcsJ6dKovDOr1jtAAFPnS2lvNltkOGA9k29BUN7lFh_sjuc9QBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhANAdd-Jc2VjcDI1NmsxoQLQa6ai7y9PMN5hpLe5HmiJSlYzMuzP7ZhwRiwHvqNXdoN0Y3CCI4yDdWRwgiOM" - ]; + let number_of_boot_nodes = 15; CommandLineTest::new() .run_with_zero_port() .with_config(|config| { // Lighthouse Team (Sigma Prime) - assert_eq!(config.network.boot_nodes_enr[0].to_base64(), mainnet[0]); - assert_eq!(config.network.boot_nodes_enr[1].to_base64(), 
mainnet[1]); - // EF Team - assert_eq!(config.network.boot_nodes_enr[2].to_base64(), mainnet[2]); - assert_eq!(config.network.boot_nodes_enr[3].to_base64(), mainnet[3]); - assert_eq!(config.network.boot_nodes_enr[4].to_base64(), mainnet[4]); - assert_eq!(config.network.boot_nodes_enr[5].to_base64(), mainnet[5]); - // Teku team (Consensys) - assert_eq!(config.network.boot_nodes_enr[6].to_base64(), mainnet[6]); - assert_eq!(config.network.boot_nodes_enr[7].to_base64(), mainnet[7]); - // Prysm team (Prysmatic Labs) - assert_eq!(config.network.boot_nodes_enr[8].to_base64(), mainnet[8]); - assert_eq!(config.network.boot_nodes_enr[9].to_base64(), mainnet[9]); - assert_eq!(config.network.boot_nodes_enr[10].to_base64(), mainnet[10]); - // Nimbus team - assert_eq!(config.network.boot_nodes_enr[11].to_base64(), mainnet[11]); - assert_eq!(config.network.boot_nodes_enr[12].to_base64(), mainnet[12]); + assert_eq!(config.network.boot_nodes_enr.len(), number_of_boot_nodes); }); } #[test] @@ -2323,3 +2291,28 @@ fn invalid_gossip_verified_blocks_path() { ) }); } + +#[test] +fn progressive_balances_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.progressive_balances_mode, + ProgressiveBalancesMode::Checked + ) + }); +} + +#[test] +fn progressive_balances_fast() { + CommandLineTest::new() + .flag("progressive-balances", Some("fast")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.progressive_balances_mode, + ProgressiveBalancesMode::Fast + ) + }); +} diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index 34c4a74a9..5f2b8403f 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -6,9 +6,9 @@ use crate::type_name; use crate::type_name::TypeName; use serde_derive::Deserialize; use state_processing::per_epoch_processing::capella::process_historical_summaries_update; 
+use state_processing::per_epoch_processing::effective_balance_updates::process_effective_balance_updates; use state_processing::per_epoch_processing::{ altair, base, - effective_balance_updates::process_effective_balance_updates, historical_roots_update::process_historical_roots_update, process_registry_updates, process_slashings, resets::{process_eth1_data_reset, process_randao_mixes_reset, process_slashings_reset}, @@ -180,7 +180,7 @@ impl EpochTransition for Eth1DataReset { impl EpochTransition for EffectiveBalanceUpdates { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { - process_effective_balance_updates(state, spec) + process_effective_balance_updates(state, None, spec) } } diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index e4cf9f1ee..8c4a90044 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -18,7 +18,8 @@ use std::sync::Arc; use std::time::Duration; use types::{ Attestation, AttesterSlashing, BeaconBlock, BeaconState, Checkpoint, EthSpec, - ExecutionBlockHash, ForkName, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, Uint256, + ExecutionBlockHash, ForkName, Hash256, IndexedAttestation, ProgressiveBalancesMode, + SignedBeaconBlock, Slot, Uint256, }; #[derive(Default, Debug, PartialEq, Clone, Deserialize, Decode)] @@ -382,6 +383,7 @@ impl Tester { block_root, block.clone(), NotifyExecutionLayer::Yes, + || Ok(()), ))?; if result.is_ok() != valid { return Err(Error::DidntFail(format!( @@ -439,7 +441,9 @@ impl Tester { block_delay, &state, PayloadVerificationStatus::Irrelevant, + ProgressiveBalancesMode::Strict, &self.harness.chain.spec, + self.harness.logger(), ); if result.is_ok() { diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 002558d6b..59aa51228 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs 
@@ -4,6 +4,7 @@ use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use crate::testing_spec; use serde_derive::Deserialize; +use state_processing::common::update_progressive_balances_cache::initialize_progressive_balances_cache; use ssz::Decode; use state_processing::{ per_block_processing::{ @@ -97,10 +98,8 @@ impl Operation for Attestation { &mut ctxt, spec, ), - BeaconState::Altair(_) - | BeaconState::Merge(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) => { + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_)| BeaconState::Deneb(_) => { + initialize_progressive_balances_cache(state, None, spec)?; altair::process_attestation(state, self, 0, &mut ctxt, VerifySignatures::True, spec) } } @@ -123,6 +122,7 @@ impl Operation for AttesterSlashing { _: &Operations, ) -> Result<(), BlockProcessingError> { let mut ctxt = ConsensusContext::new(state.slot()); + initialize_progressive_balances_cache(state, None, spec)?; process_attester_slashings( state, &[self.clone()], @@ -173,6 +173,7 @@ impl Operation for ProposerSlashing { _: &Operations, ) -> Result<(), BlockProcessingError> { let mut ctxt = ConsensusContext::new(state.slot()); + initialize_progressive_balances_cache(state, None, spec)?; process_proposer_slashings( state, &[self.clone()], diff --git a/testing/ef_tests/src/cases/sanity_blocks.rs b/testing/ef_tests/src/cases/sanity_blocks.rs index e51fed190..191b45c33 100644 --- a/testing/ef_tests/src/cases/sanity_blocks.rs +++ b/testing/ef_tests/src/cases/sanity_blocks.rs @@ -67,7 +67,7 @@ impl Case for SanityBlocks { let spec = &testing_spec::(fork_name); // Processing requires the epoch cache. - bulk_state.build_all_caches(spec).unwrap(); + bulk_state.build_caches(spec).unwrap(); // Spawning a second state to call the VerifyIndiviual strategy to avoid bitrot. // See https://github.com/sigp/lighthouse/issues/742. 
diff --git a/testing/ef_tests/src/cases/sanity_slots.rs b/testing/ef_tests/src/cases/sanity_slots.rs index a38a8930a..dd385d13f 100644 --- a/testing/ef_tests/src/cases/sanity_slots.rs +++ b/testing/ef_tests/src/cases/sanity_slots.rs @@ -61,7 +61,7 @@ impl Case for SanitySlots { let spec = &testing_spec::(fork_name); // Processing requires the epoch cache. - state.build_all_caches(spec).unwrap(); + state.build_caches(spec).unwrap(); let mut result = (0..self.slots) .try_for_each(|_| per_slot_processing(&mut state, None, spec).map(|_| ()))