From ffbf70e2d9fb1fabc5768453d19878b05e69405d Mon Sep 17 00:00:00 2001 From: Divma Date: Fri, 16 Dec 2022 04:04:00 +0000 Subject: [PATCH 01/14] Clippy lints for rust 1.66 (#3810) ## Issue Addressed Fixes the new clippy lints for rust 1.66 ## Proposed Changes Most of the changes come from: - [unnecessary_cast](https://rust-lang.github.io/rust-clippy/master/index.html#unnecessary_cast) - [iter_kv_map](https://rust-lang.github.io/rust-clippy/master/index.html#iter_kv_map) - [needless_borrow](https://rust-lang.github.io/rust-clippy/master/index.html#needless_borrow) ## Additional Info na --- account_manager/src/wallet/list.rs | 2 +- .../src/naive_aggregation_pool.rs | 8 ++------ beacon_node/beacon_chain/src/test_utils.rs | 2 +- .../beacon_chain/src/validator_monitor.rs | 5 +---- beacon_node/eth1/src/deposit_cache.rs | 6 +++--- .../execution_layer/src/engine_api/auth.rs | 2 +- beacon_node/http_api/src/lib.rs | 4 ++-- .../src/peer_manager/peerdb/score.rs | 9 +-------- .../src/rpc/codec/ssz_snappy.rs | 2 +- .../service/gossipsub_scoring_parameters.rs | 4 ++-- .../lighthouse_network/src/service/utils.rs | 2 +- beacon_node/operation_pool/src/attestation.rs | 2 +- beacon_node/store/src/chunked_vector.rs | 2 +- .../src/validator_definitions.rs | 2 +- common/validator_dir/src/builder.rs | 2 +- consensus/serde_utils/src/hex.rs | 6 +++--- consensus/serde_utils/src/u64_hex_be.rs | 2 +- .../altair/sync_committee.rs | 4 ++-- .../altair/rewards_and_penalties.rs | 6 +++--- .../base/rewards_and_penalties.rs | 2 +- consensus/types/src/beacon_state.rs | 2 +- .../types/src/beacon_state/committee_cache.rs | 2 +- consensus/types/src/beacon_state/tests.rs | 20 +++++-------------- consensus/types/src/preset.rs | 2 +- slasher/tests/attester_slashings.rs | 20 +++++++++---------- .../src/nethermind.rs | 2 +- testing/node_test_rig/src/lib.rs | 2 +- validator_client/src/doppelganger_service.rs | 10 +++++----- validator_client/src/http_api/mod.rs | 2 +- .../src/initialized_validators.rs | 2 +- 
validator_client/src/key_cache.rs | 2 +- 31 files changed, 58 insertions(+), 82 deletions(-) diff --git a/account_manager/src/wallet/list.rs b/account_manager/src/wallet/list.rs index 5b671b1dc..9190de391 100644 --- a/account_manager/src/wallet/list.rs +++ b/account_manager/src/wallet/list.rs @@ -10,7 +10,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { } pub fn cli_run(wallet_base_dir: PathBuf) -> Result<(), String> { - let mgr = WalletManager::open(&wallet_base_dir) + let mgr = WalletManager::open(wallet_base_dir) .map_err(|e| format!("Unable to open --{}: {:?}", WALLETS_DIR_FLAG, e))?; for (name, _uuid) in mgr diff --git a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs index 252e9915d..7eeb9bb56 100644 --- a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs +++ b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs @@ -402,7 +402,7 @@ impl NaiveAggregationPool { /// Returns the total number of items stored in `self`. pub fn num_items(&self) -> usize { - self.maps.iter().map(|(_, map)| map.len()).sum() + self.maps.values().map(T::len).sum() } /// Returns an aggregated `T::Value` with the given `T::Data`, if any. @@ -448,11 +448,7 @@ impl NaiveAggregationPool { // If we have too many maps, remove the lowest amount to ensure we only have // `SLOTS_RETAINED` left. if self.maps.len() > SLOTS_RETAINED { - let mut slots = self - .maps - .iter() - .map(|(slot, _map)| *slot) - .collect::>(); + let mut slots = self.maps.keys().copied().collect::>(); // Sort is generally pretty slow, however `SLOTS_RETAINED` is quite low so it should be // negligible. 
slots.sort_unstable(); diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index d6e8787f4..9183583fb 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1459,7 +1459,7 @@ where let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); let signed_block = block.sign( - &self.validator_keypairs[proposer_index as usize].sk, + &self.validator_keypairs[proposer_index].sk, &state.fork(), state.genesis_validators_root(), &self.spec, diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index 2d093ff88..e95394bb7 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -631,10 +631,7 @@ impl ValidatorMonitor { // Return the `id`'s of all monitored validators. pub fn get_all_monitored_validators(&self) -> Vec { - self.validators - .iter() - .map(|(_, val)| val.id.clone()) - .collect() + self.validators.values().map(|val| val.id.clone()).collect() } /// If `self.auto_register == true`, add the `validator_index` to `self.monitored_validators`. 
diff --git a/beacon_node/eth1/src/deposit_cache.rs b/beacon_node/eth1/src/deposit_cache.rs index ab07b380d..75391e58a 100644 --- a/beacon_node/eth1/src/deposit_cache.rs +++ b/beacon_node/eth1/src/deposit_cache.rs @@ -675,7 +675,7 @@ pub mod tests { #[test] fn test_finalization_boundaries() { let n = 8; - let half = (n / 2) as usize; + let half = n / 2; let mut deposit_cache = get_cache_with_deposits(n as u64); @@ -828,9 +828,9 @@ pub mod tests { // get_log(half+quarter) should return log with index `half+quarter` assert_eq!( q3_log_before_finalization.index, - (half + quarter) as u64, + half + quarter, "log index should be {}", - (half + quarter), + half + quarter, ); // get lower quarter of deposits with max deposit count diff --git a/beacon_node/execution_layer/src/engine_api/auth.rs b/beacon_node/execution_layer/src/engine_api/auth.rs index 8fcdb2543..2f4c0cd1e 100644 --- a/beacon_node/execution_layer/src/engine_api/auth.rs +++ b/beacon_node/execution_layer/src/engine_api/auth.rs @@ -27,7 +27,7 @@ impl From for Error { /// Provides wrapper around `[u8; JWT_SECRET_LENGTH]` that implements `Zeroize`. #[derive(Zeroize, Clone)] #[zeroize(drop)] -pub struct JwtKey([u8; JWT_SECRET_LENGTH as usize]); +pub struct JwtKey([u8; JWT_SECRET_LENGTH]); impl JwtKey { /// Wrap given slice in `Self`. Returns an error if slice.len() != `JWT_SECRET_LENGTH`. 
diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index b018f9c73..6cfdaf5db 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2840,7 +2840,7 @@ pub fn serve( let is_live = chain.validator_seen_at_epoch(index as usize, request_data.epoch); api_types::LivenessResponseData { - index: index as u64, + index, epoch: request_data.epoch, is_live, } @@ -2876,7 +2876,7 @@ pub fn serve( .and_then( |sysinfo, app_start: std::time::Instant, data_dir, network_globals| { blocking_json_task(move || { - let app_uptime = app_start.elapsed().as_secs() as u64; + let app_uptime = app_start.elapsed().as_secs(); Ok(api_types::GenericResponse::from(observe_system_health_bn( sysinfo, data_dir, diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs index fca665db9..bafa355d6 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs @@ -186,14 +186,7 @@ impl RealScore { /// Add an f64 to the score abiding by the limits. fn add(&mut self, score: f64) { - let mut new_score = self.lighthouse_score + score; - if new_score > MAX_SCORE { - new_score = MAX_SCORE; - } - if new_score < MIN_SCORE { - new_score = MIN_SCORE; - } - + let new_score = (self.lighthouse_score + score).clamp(MIN_SCORE, MAX_SCORE); self.set_lighthouse_score(new_score); } diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index a4dd602b3..eccbf0dd6 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -443,7 +443,7 @@ fn handle_length( // Note: length-prefix of > 10 bytes(uint64) would be a decoding error match uvi_codec.decode(bytes).map_err(RPCError::from)? 
{ Some(length) => { - *len = Some(length as usize); + *len = Some(length); Ok(Some(length)) } None => Ok(None), // need more bytes to decode length diff --git a/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs index 71a3953ec..88becd686 100644 --- a/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs +++ b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs @@ -270,11 +270,11 @@ impl PeerScoreSettings { let modulo_smaller = max( 1, - smaller_committee_size / self.target_aggregators_per_committee as usize, + smaller_committee_size / self.target_aggregators_per_committee, ); let modulo_larger = max( 1, - (smaller_committee_size + 1) / self.target_aggregators_per_committee as usize, + (smaller_committee_size + 1) / self.target_aggregators_per_committee, ); Ok(( diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 09a8d1a86..3f02e73c0 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -88,7 +88,7 @@ fn keypair_from_hex(hex_bytes: &str) -> error::Result { hex_bytes.to_string() }; - hex::decode(&hex_bytes) + hex::decode(hex_bytes) .map_err(|e| format!("Failed to parse p2p secret key bytes: {:?}", e).into()) .and_then(keypair_from_bytes) } diff --git a/beacon_node/operation_pool/src/attestation.rs b/beacon_node/operation_pool/src/attestation.rs index 4af4edc0e..fbbd5d7dd 100644 --- a/beacon_node/operation_pool/src/attestation.rs +++ b/beacon_node/operation_pool/src/attestation.rs @@ -49,7 +49,7 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> { let indices = get_attesting_indices::(committee.committee, &fresh_validators).ok()?; let fresh_validators_rewards: HashMap = indices .iter() - .map(|i| *i as u64) + .copied() .flat_map(|validator_index| { let reward = base::get_base_reward( state, 
diff --git a/beacon_node/store/src/chunked_vector.rs b/beacon_node/store/src/chunked_vector.rs index 25169b479..8c64d4bcc 100644 --- a/beacon_node/store/src/chunked_vector.rs +++ b/beacon_node/store/src/chunked_vector.rs @@ -801,7 +801,7 @@ mod test { fn needs_genesis_value_test_randao>(_: F) { let spec = &TestSpec::default_spec(); - let max = TestSpec::slots_per_epoch() as u64 * (F::Length::to_u64() - 1); + let max = TestSpec::slots_per_epoch() * (F::Length::to_u64() - 1); for i in 0..max { assert!( F::slot_needs_genesis_value(Slot::new(i), spec), diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index 66e3b7354..6ce2517fb 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -189,7 +189,7 @@ impl ValidatorDefinitions { .write(true) .read(true) .create_new(false) - .open(&config_path) + .open(config_path) .map_err(Error::UnableToOpenFile)?; serde_yaml::from_reader(file).map_err(Error::UnableToParseFile) } diff --git a/common/validator_dir/src/builder.rs b/common/validator_dir/src/builder.rs index 596c918b3..2b3f670c7 100644 --- a/common/validator_dir/src/builder.rs +++ b/common/validator_dir/src/builder.rs @@ -196,7 +196,7 @@ impl<'a> Builder<'a> { if path.exists() { return Err(Error::DepositDataAlreadyExists(path)); } else { - let hex = format!("0x{}", hex::encode(&deposit_data)); + let hex = format!("0x{}", hex::encode(deposit_data)); File::options() .write(true) .read(true) diff --git a/consensus/serde_utils/src/hex.rs b/consensus/serde_utils/src/hex.rs index 1e6c02427..9a2cd65c7 100644 --- a/consensus/serde_utils/src/hex.rs +++ b/consensus/serde_utils/src/hex.rs @@ -63,15 +63,15 @@ mod test { #[test] fn encoding() { let bytes = vec![0, 255]; - let hex = encode(&bytes); + let hex = encode(bytes); assert_eq!(hex.as_str(), "0x00ff"); let bytes = vec![]; - let hex = encode(&bytes); + let hex = encode(bytes); assert_eq!(hex.as_str(), 
"0x"); let bytes = vec![1, 2, 3]; - let hex = encode(&bytes); + let hex = encode(bytes); assert_eq!(hex.as_str(), "0x010203"); } } diff --git a/consensus/serde_utils/src/u64_hex_be.rs b/consensus/serde_utils/src/u64_hex_be.rs index 6af8a7589..e3364a2d2 100644 --- a/consensus/serde_utils/src/u64_hex_be.rs +++ b/consensus/serde_utils/src/u64_hex_be.rs @@ -36,7 +36,7 @@ impl<'de> Visitor<'de> for QuantityVisitor { } else if stripped.starts_with('0') { Err(de::Error::custom("cannot have leading zero")) } else if stripped.len() % 2 != 0 { - hex::decode(&format!("0{}", stripped)) + hex::decode(format!("0{}", stripped)) .map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e))) } else { hex::decode(stripped).map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e))) diff --git a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs index 306e86714..a5dcd6e0b 100644 --- a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs +++ b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs @@ -52,10 +52,10 @@ pub fn process_sync_aggregate( .zip(aggregate.sync_committee_bits.iter()) { if participation_bit { - increase_balance(state, participant_index as usize, participant_reward)?; + increase_balance(state, participant_index, participant_reward)?; increase_balance(state, proposer_index as usize, proposer_reward)?; } else { - decrease_balance(state, participant_index as usize, participant_reward)?; + decrease_balance(state, participant_index, participant_reward)?; } } diff --git a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs index ccebbcb3a..e2aa67a61 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs +++ 
b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs @@ -76,7 +76,7 @@ pub fn get_flag_index_deltas( let base_reward = get_base_reward(state, index, base_reward_per_increment, spec)?; let mut delta = Delta::default(); - if unslashed_participating_indices.contains(index as usize)? { + if unslashed_participating_indices.contains(index)? { if !state.is_in_inactivity_leak(previous_epoch, spec) { let reward_numerator = base_reward .safe_mul(weight)? @@ -89,8 +89,8 @@ pub fn get_flag_index_deltas( delta.penalize(base_reward.safe_mul(weight)?.safe_div(WEIGHT_DENOMINATOR)?)?; } deltas - .get_mut(index as usize) - .ok_or(Error::DeltaOutOfBounds(index as usize))? + .get_mut(index) + .ok_or(Error::DeltaOutOfBounds(index))? .combine(delta)?; } Ok(()) diff --git a/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs index 87e4261e0..e7a4d9c4d 100644 --- a/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs +++ b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs @@ -235,7 +235,7 @@ fn get_inclusion_delay_delta( let max_attester_reward = base_reward.safe_sub(proposer_reward)?; delta.reward(max_attester_reward.safe_div(inclusion_info.delay)?)?; - let proposer_index = inclusion_info.proposer_index as usize; + let proposer_index = inclusion_info.proposer_index; Ok((delta, Some((proposer_index, proposer_delta)))) } else { Ok((Delta::default(), None)) diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 79625c12e..12d44741f 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -482,7 +482,7 @@ impl BeaconState { /// Spec v0.12.1 pub fn get_committee_count_at_slot(&self, slot: Slot) -> Result { let cache = self.committee_cache_at_slot(slot)?; - Ok(cache.committees_per_slot() as u64) + 
Ok(cache.committees_per_slot()) } /// Compute the number of committees in an entire epoch. diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index 7a526acc5..03adaf3d4 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -144,7 +144,7 @@ impl CommitteeCache { self.committees_per_slot as usize, index as usize, ); - let committee = self.compute_committee(committee_index as usize)?; + let committee = self.compute_committee(committee_index)?; Some(BeaconCommittee { slot, diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index 5898bfe21..abca10e37 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -344,12 +344,7 @@ mod committees { let cache_epoch = cache_epoch.into_epoch(state_epoch); - execute_committee_consistency_test( - new_head_state, - cache_epoch, - validator_count as usize, - spec, - ); + execute_committee_consistency_test(new_head_state, cache_epoch, validator_count, spec); } async fn committee_consistency_test_suite(cached_epoch: RelativeEpoch) { @@ -361,18 +356,13 @@ mod committees { .mul(spec.target_committee_size) .add(1); - committee_consistency_test::(validator_count as usize, Epoch::new(0), cached_epoch) + committee_consistency_test::(validator_count, Epoch::new(0), cached_epoch).await; + + committee_consistency_test::(validator_count, T::genesis_epoch() + 4, cached_epoch) .await; committee_consistency_test::( - validator_count as usize, - T::genesis_epoch() + 4, - cached_epoch, - ) - .await; - - committee_consistency_test::( - validator_count as usize, + validator_count, T::genesis_epoch() + (T::slots_per_historical_root() as u64) .mul(T::slots_per_epoch()) diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index 8ee38e46a..fc5aa8730 100644 --- a/consensus/types/src/preset.rs +++ 
b/consensus/types/src/preset.rs @@ -202,7 +202,7 @@ mod test { } fn preset_from_file(preset_name: &str, filename: &str) -> T { - let f = File::open(&presets_base_path().join(preset_name).join(filename)) + let f = File::open(presets_base_path().join(preset_name).join(filename)) .expect("preset file exists"); serde_yaml::from_reader(f).unwrap() } diff --git a/slasher/tests/attester_slashings.rs b/slasher/tests/attester_slashings.rs index 5cf3fe6c2..40d9fa511 100644 --- a/slasher/tests/attester_slashings.rs +++ b/slasher/tests/attester_slashings.rs @@ -39,8 +39,8 @@ fn double_vote_multi_vals() { fn double_vote_some_vals() { let v1 = vec![0, 1, 2, 3, 4, 5, 6]; let v2 = vec![0, 2, 4, 6]; - let att1 = indexed_att(&v1, 0, 1, 0); - let att2 = indexed_att(&v2, 0, 1, 1); + let att1 = indexed_att(v1, 0, 1, 0); + let att2 = indexed_att(v2, 0, 1, 1); let slashings = hashset![att_slashing(&att1, &att2)]; let attestations = vec![att1, att2]; slasher_test_indiv(&attestations, &slashings, 1); @@ -53,9 +53,9 @@ fn double_vote_some_vals_repeat() { let v1 = vec![0, 1, 2, 3, 4, 5, 6]; let v2 = vec![0, 2, 4, 6]; let v3 = vec![1, 3, 5]; - let att1 = indexed_att(&v1, 0, 1, 0); - let att2 = indexed_att(&v2, 0, 1, 1); - let att3 = indexed_att(&v3, 0, 1, 0); + let att1 = indexed_att(v1, 0, 1, 0); + let att2 = indexed_att(v2, 0, 1, 1); + let att3 = indexed_att(v3, 0, 1, 0); let slashings = hashset![att_slashing(&att1, &att2)]; let attestations = vec![att1, att2, att3]; slasher_test_indiv(&attestations, &slashings, 1); @@ -67,8 +67,8 @@ fn double_vote_some_vals_repeat() { fn no_double_vote_same_target() { let v1 = vec![0, 1, 2, 3, 4, 5, 6]; let v2 = vec![0, 1, 2, 3, 4, 5, 7, 8]; - let att1 = indexed_att(&v1, 0, 1, 0); - let att2 = indexed_att(&v2, 0, 1, 0); + let att1 = indexed_att(v1, 0, 1, 0); + let att2 = indexed_att(v2, 0, 1, 0); let attestations = vec![att1, att2]; slasher_test_indiv(&attestations, &hashset! {}, 1); slasher_test_indiv(&attestations, &hashset! 
{}, 1000); @@ -79,8 +79,8 @@ fn no_double_vote_same_target() { fn no_double_vote_distinct_vals() { let v1 = vec![0, 1, 2, 3]; let v2 = vec![4, 5, 6, 7]; - let att1 = indexed_att(&v1, 0, 1, 0); - let att2 = indexed_att(&v2, 0, 1, 1); + let att1 = indexed_att(v1, 0, 1, 0); + let att2 = indexed_att(v2, 0, 1, 1); let attestations = vec![att1, att2]; slasher_test_indiv(&attestations, &hashset! {}, 1); slasher_test_indiv(&attestations, &hashset! {}, 1000); @@ -89,7 +89,7 @@ fn no_double_vote_distinct_vals() { #[test] fn no_double_vote_repeated() { let v = vec![0, 1, 2, 3, 4]; - let att1 = indexed_att(&v, 0, 1, 0); + let att1 = indexed_att(v, 0, 1, 0); let att2 = att1.clone(); let attestations = vec![att1, att2]; slasher_test_indiv(&attestations, &hashset! {}, 1); diff --git a/testing/execution_engine_integration/src/nethermind.rs b/testing/execution_engine_integration/src/nethermind.rs index f643fbd5f..740d87ab8 100644 --- a/testing/execution_engine_integration/src/nethermind.rs +++ b/testing/execution_engine_integration/src/nethermind.rs @@ -76,7 +76,7 @@ impl GenericExecutionEngine for NethermindEngine { fn init_datadir() -> TempDir { let datadir = TempDir::new().unwrap(); let genesis_json_path = datadir.path().join("genesis.json"); - let mut file = File::create(&genesis_json_path).unwrap(); + let mut file = File::create(genesis_json_path).unwrap(); let json = nethermind_genesis_json(); serde_json::to_writer(&mut file, &json).unwrap(); datadir diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index d0a4ef949..82a60cda2 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -231,7 +231,7 @@ impl LocalExecutionNode { .tempdir() .expect("should create temp directory for client datadir"); let jwt_file_path = datadir.path().join("jwt.hex"); - if let Err(e) = std::fs::write(&jwt_file_path, config.jwt_key.hex_string()) { + if let Err(e) = std::fs::write(jwt_file_path, config.jwt_key.hex_string()) { panic!("Failed 
to write jwt file {}", e); } Self { diff --git a/validator_client/src/doppelganger_service.rs b/validator_client/src/doppelganger_service.rs index e6934ed48..558b9e199 100644 --- a/validator_client/src/doppelganger_service.rs +++ b/validator_client/src/doppelganger_service.rs @@ -441,7 +441,7 @@ impl DoppelgangerService { } // Get a list of indices to provide to the BN API. - let indices_only = indices_map.iter().map(|(index, _)| *index).collect(); + let indices_only = indices_map.keys().copied().collect(); // Pull the liveness responses from the BN. let request_epoch = request_slot.epoch(E::slots_per_epoch()); @@ -971,16 +971,16 @@ mod test { LivenessResponses { current_epoch_responses: detection_indices .iter() - .map(|i| LivenessResponseData { - index: *i as u64, + .map(|&index| LivenessResponseData { + index, epoch: current_epoch, is_live: false, }) .collect(), previous_epoch_responses: detection_indices .iter() - .map(|i| LivenessResponseData { - index: *i as u64, + .map(|&index| LivenessResponseData { + index, epoch: current_epoch - 1, is_live: false, }) diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index 600e7a4c6..b87bb0838 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -331,7 +331,7 @@ pub fn serve( .and(signer.clone()) .and_then(|sysinfo, app_start: std::time::Instant, val_dir, signer| { blocking_signed_json_task(signer, move || { - let app_uptime = app_start.elapsed().as_secs() as u64; + let app_uptime = app_start.elapsed().as_secs(); Ok(api_types::GenericResponse::from(observe_system_health_vc( sysinfo, val_dir, app_uptime, ))) diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index e8fe6ff2f..d3f6b1eb6 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -472,7 +472,7 @@ impl InitializedValidators { /// Iterate through all voting public keys 
in `self` that should be used when querying for duties. pub fn iter_voting_pubkeys(&self) -> impl Iterator { - self.validators.iter().map(|(pubkey, _)| pubkey) + self.validators.keys() } /// Returns the voting `Keypair` for a given voting `PublicKey`, if all are true: diff --git a/validator_client/src/key_cache.rs b/validator_client/src/key_cache.rs index 2088aa683..b7abaaed0 100644 --- a/validator_client/src/key_cache.rs +++ b/validator_client/src/key_cache.rs @@ -104,7 +104,7 @@ impl KeyCache { let file = File::options() .read(true) .create_new(false) - .open(&cache_path) + .open(cache_path) .map_err(Error::UnableToOpenFile)?; serde_json::from_reader(file).map_err(Error::UnableToParseFile) } From 53aad18da3c1e0ab9ac6d6572eedab3419db0535 Mon Sep 17 00:00:00 2001 From: John Adler Date: Tue, 20 Dec 2022 01:34:51 +0000 Subject: [PATCH 02/14] docs: remove mention of phases in voluntary exits (#3776) The notion of "phases" doesn't exist anymore in the Ethereum roadmap. Also fix dead link to roadmap. Co-authored-by: Michael Sproul --- book/src/voluntary-exit.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/book/src/voluntary-exit.md b/book/src/voluntary-exit.md index 69c2d7598..5056040e4 100644 --- a/book/src/voluntary-exit.md +++ b/book/src/voluntary-exit.md @@ -12,10 +12,10 @@ This number can be much higher depending on how many other validators are queued ## Withdrawal of exited funds -Even though users can perform a voluntary exit in phase 0, they **cannot withdraw their exited funds at this point in time**. -This implies that the staked funds are effectively **frozen** until withdrawals are enabled in future phases. +Even though users can currently perform a voluntary exit, they **cannot withdraw their exited funds at this point in time**. +This implies that the staked funds are effectively **frozen** until withdrawals are enabled in a future hard fork (Capella). 
-To understand the phased rollout strategy for Ethereum upgrades, please visit . +To understand the rollout strategy for Ethereum upgrades, please visit . From 59a7a4703c04a9cffb4402bf2aa3bdb04462b2b5 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 20 Dec 2022 01:34:52 +0000 Subject: [PATCH 03/14] Various CI fixes (#3813) ## Issue Addressed Closes #3812 Closes #3750 Closes #3705 --- .github/workflows/local-testnet.yml | 2 +- .github/workflows/release.yml | 27 +++++++++++----------- .github/workflows/test-suite.yml | 36 +++++++++++++++-------------- testing/ef_tests/Makefile | 6 +++-- testing/web3signer_tests/build.rs | 17 +++++++++++--- 5 files changed, 51 insertions(+), 37 deletions(-) diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index b916ffee6..a522f2efb 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -21,7 +21,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 957d016dc..8ca6ab0f9 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -8,8 +8,8 @@ on: env: DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} - REPO_NAME: sigp/lighthouse - IMAGE_NAME: sigp/lighthouse + REPO_NAME: ${{ github.repository_owner }}/lighthouse + IMAGE_NAME: ${{ github.repository_owner }}/lighthouse jobs: extract-version: @@ -63,12 +63,8 @@ jobs: steps: - name: Checkout sources uses: actions/checkout@v3 - - name: Build toolchain - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - profile: minimal - override: true + - name: Get latest version of stable Rust + run: rustup update stable # 
============================== # Windows dependencies @@ -88,7 +84,7 @@ jobs: # ============================== - name: Install Protoc if: contains(matrix.arch, 'darwin') || contains(matrix.arch, 'windows') - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} @@ -179,13 +175,13 @@ jobs: # ======================================================================= - name: Upload artifact - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz - name: Upload signature - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz.asc path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz.asc @@ -208,7 +204,7 @@ jobs: # ============================== - name: Download artifacts - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 # ============================== # Create release draft @@ -216,11 +212,14 @@ jobs: - name: Generate Full Changelog id: changelog - run: echo "CHANGELOG=$(git log --pretty=format:"- %s" $(git describe --tags --abbrev=0 ${{ env.VERSION }}^)..${{ env.VERSION }})" >> $GITHUB_OUTPUT + run: | + echo "CHANGELOG<> $GITHUB_OUTPUT + echo "$(git log --pretty=format:"- %s" $(git describe --tags --abbrev=0 ${{ env.VERSION }}^)..${{ env.VERSION }})" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT - name: Create Release Draft env: - GITHUB_USER: sigp + GITHUB_USER: ${{ github.repository_owner }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # The formatting here is borrowed from OpenEthereum: https://github.com/openethereum/openethereum/blob/main/.github/workflows/build.yml diff --git a/.github/workflows/test-suite.yml 
b/.github/workflows/test-suite.yml index d536869e4..d95ac6116 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -13,6 +13,8 @@ env: RUSTFLAGS: "-D warnings" # The Nightly version used for cargo-udeps, might need updating from time to time. PINNED_NIGHTLY: nightly-2022-05-20 + # Prevent Github API rate limiting. + LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} jobs: target-branch-check: name: target-branch-check @@ -51,7 +53,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache @@ -95,7 +97,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run beacon_chain tests for all known forks @@ -109,7 +111,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run operation_pool tests for all known forks @@ -133,7 +135,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache @@ -149,7 +151,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run state_transition_vectors in release. 
@@ -163,7 +165,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run consensus-spec-tests with blst, milagro and fake_crypto @@ -189,7 +191,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache @@ -205,7 +207,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache @@ -221,7 +223,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache @@ -237,7 +239,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache @@ -253,7 +255,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache @@ -285,7 +287,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 
with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run exec engine integration tests in release @@ -299,7 +301,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Typecheck benchmark code without running it @@ -323,7 +325,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Lint code for quality and style with Clippy @@ -346,7 +348,7 @@ jobs: cargo build --release --bin cargo-clippy --bin clippy-driver cargo build --release --bin cargo-clippy --bin clippy-driver -Zunstable-options --out-dir $(rustc --print=sysroot)/bin - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run Clippy with the disallowed-from-async lint @@ -360,7 +362,7 @@ jobs: - name: Install Rust @ MSRV (${{ needs.extract-msrv.outputs.MSRV }}) run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }} - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run cargo check @@ -404,7 +406,7 @@ jobs: # NOTE: cargo-udeps version is pinned until this issue is resolved: # https://github.com/est31/cargo-udeps/issues/135 - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install cargo-udeps diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index e05ef0b06..b2af490dd 100644 --- a/testing/ef_tests/Makefile +++ 
b/testing/ef_tests/Makefile @@ -13,6 +13,8 @@ BLS_TARBALL = $(patsubst %,%-$(BLS_TEST_TAG).tar.gz,$(BLS_TEST)) BLS_OUTPUT_DIR := $(OUTPUT_DIR)/$(BLS_TEST_REPO_NAME) BLS_BASE_URL := https://github.com/ethereum/$(BLS_TEST_REPO_NAME)/releases/download/$(BLS_TEST_TAG) +WGET := $(if $(LIGHTHOUSE_GITHUB_TOKEN),wget --header="Authorization: $(LIGHTHOUSE_GITHUB_TOKEN)",wget) + all: make $(OUTPUT_DIR) make $(BLS_OUTPUT_DIR) @@ -25,11 +27,11 @@ $(OUTPUT_DIR): $(TARBALLS) $(BLS_OUTPUT_DIR): mkdir $(BLS_OUTPUT_DIR) - wget $(BLS_BASE_URL)/$(BLS_TEST).tar.gz -O $(BLS_TARBALL) + $(WGET) $(BLS_BASE_URL)/$(BLS_TEST).tar.gz -O $(BLS_TARBALL) tar -xzf $(BLS_TARBALL) -C $(BLS_OUTPUT_DIR) %-$(TESTS_TAG).tar.gz: - wget $(BASE_URL)/$*.tar.gz -O $@ + $(WGET) $(BASE_URL)/$*.tar.gz -O $@ clean-test-files: rm -rf $(OUTPUT_DIR) $(BLS_OUTPUT_DIR) diff --git a/testing/web3signer_tests/build.rs b/testing/web3signer_tests/build.rs index f62dff0b6..a55c39376 100644 --- a/testing/web3signer_tests/build.rs +++ b/testing/web3signer_tests/build.rs @@ -1,7 +1,10 @@ //! This build script downloads the latest Web3Signer release and places it in the `OUT_DIR` so it //! can be used for integration testing. -use reqwest::Client; +use reqwest::{ + header::{self, HeaderValue}, + Client, +}; use serde_json::Value; use std::env; use std::fs; @@ -15,10 +18,15 @@ const FIXED_VERSION_STRING: Option<&str> = None; #[tokio::main] async fn main() { let out_dir = env::var("OUT_DIR").unwrap(); - download_binary(out_dir.into()).await; + + // Read a Github API token from the environment. This is intended to prevent rate-limits on CI. + // We use a name that is unlikely to accidentally collide with anything the user has configured. 
+ let github_token = env::var("LIGHTHOUSE_GITHUB_TOKEN"); + + download_binary(out_dir.into(), github_token.as_deref().unwrap_or("")).await; } -pub async fn download_binary(dest_dir: PathBuf) { +pub async fn download_binary(dest_dir: PathBuf, github_token: &str) { let version_file = dest_dir.join("version"); let client = Client::builder() @@ -33,8 +41,11 @@ pub async fn download_binary(dest_dir: PathBuf) { env_version } else { // Get the latest release of the web3 signer repo. + let mut token_header_value = HeaderValue::from_str(github_token).unwrap(); + token_header_value.set_sensitive(true); let latest_response: Value = client .get("https://api.github.com/repos/ConsenSys/web3signer/releases/latest") + .header(header::AUTHORIZATION, token_header_value) .send() .await .unwrap() From 986ae4360a4a2d4635a7373be957e927c62fe2bf Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Wed, 28 Dec 2022 14:46:53 -0600 Subject: [PATCH 04/14] Fix clippy complaints --- beacon_node/beacon_chain/src/beacon_chain.rs | 4 +--- .../src/per_block_processing/eip4844/eip4844.rs | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5c0311736..69889014a 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4766,9 +4766,7 @@ impl BeaconChain { .ok_or(Error::InvalidSlot(prepare_slot))? 
.as_secs(), pre_payload_attributes.prev_randao, - execution_layer - .get_suggested_fee_recipient(proposer as u64) - .await, + execution_layer.get_suggested_fee_recipient(proposer).await, withdrawals, ); diff --git a/consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs b/consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs index 55b1ab967..7826057a4 100644 --- a/consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs +++ b/consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs @@ -109,7 +109,7 @@ fn tx_peek_blob_versioned_hashes( .get(next_version_hash_index..next_version_hash_index.safe_add(32)?) .ok_or(BlockProcessingError::BlobVersionHashIndexOutOfBounds { length: tx_len, - index: (next_version_hash_index as usize).safe_add(32)?, + index: (next_version_hash_index).safe_add(32)?, })?; Ok(VersionedHash::from_slice(bytes)) })) From d8f7277bebac17f6b56bee0aab4ab3f7eb98981a Mon Sep 17 00:00:00 2001 From: realbigsean Date: Fri, 30 Dec 2022 11:00:14 -0500 Subject: [PATCH 05/14] cleanup --- beacon_node/beacon_chain/src/beacon_chain.rs | 19 --- .../beacon_chain/src/execution_payload.rs | 4 +- .../src/engine_api/json_structures.rs | 6 +- beacon_node/execution_layer/src/lib.rs | 31 ++-- beacon_node/http_api/src/publish_blocks.rs | 5 + .../lighthouse_network/src/rpc/protocol.rs | 2 +- .../network/src/beacon_processor/mod.rs | 40 +---- .../beacon_processor/worker/gossip_methods.rs | 18 +-- .../beacon_processor/worker/rpc_methods.rs | 149 ------------------ beacon_node/network/src/sync/manager.rs | 7 +- common/eth2/src/lib.rs | 21 --- consensus/types/src/eth_spec.rs | 5 - consensus/types/src/payload.rs | 24 ++- lcli/src/create_payload_header.rs | 32 +++- lcli/src/main.rs | 13 +- lcli/src/new_testnet.rs | 28 +++- validator_client/src/signing_method.rs | 3 - .../src/signing_method/web3signer.rs | 4 - 18 files changed, 109 insertions(+), 302 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs 
b/beacon_node/beacon_chain/src/beacon_chain.rs index 69889014a..edf0e149c 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -6,7 +6,6 @@ use crate::attestation_verification::{ use crate::attester_cache::{AttesterCache, AttesterCacheKey}; use crate::beacon_proposer_cache::compute_proposer_duties_from_head; use crate::beacon_proposer_cache::BeaconProposerCache; -use crate::blob_verification::{BlobError, VerifiedBlobsSidecar}; use crate::block_times_cache::BlockTimesCache; use crate::block_verification::{ check_block_is_finalized_descendant, check_block_relevancy, get_block_root, @@ -1818,23 +1817,6 @@ impl BeaconChain { }) } - /// Accepts some `BlobsSidecar` received over from the network and attempts to verify it, - /// returning `Ok(_)` if it is valid to be (re)broadcast on the gossip network. - pub fn verify_blobs_sidecar_for_gossip<'a>( - &self, - blobs_sidecar: &'a BlobsSidecar, - ) -> Result, BlobError> { - metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_REQUESTS); - let _timer = metrics::start_timer(&metrics::BLOBS_SIDECAR_GOSSIP_VERIFICATION_TIMES); - VerifiedBlobsSidecar::verify(blobs_sidecar, self).map(|v| { - if let Some(_event_handler) = self.event_handler.as_ref() { - // TODO: Handle sse events - } - metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_SUCCESSES); - v - }) - } - /// Accepts some 'LightClientFinalityUpdate' from the network and attempts to verify it pub fn verify_finality_update_for_gossip( self: &Arc, @@ -4479,7 +4461,6 @@ impl BeaconChain { .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, bls_to_execution_changes: bls_to_execution_changes.into(), - //FIXME(sean) get blobs blob_kzg_commitments: VariableList::from(kzg_commitments), }, }), diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index d52df4853..619b713a3 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ 
b/beacon_node/beacon_chain/src/execution_payload.rs @@ -460,7 +460,7 @@ where if is_terminal_block_hash_set && !is_activation_epoch_reached { // Use the "empty" payload if there's a terminal block hash, but we haven't reached the // terminal block epoch yet. - return Ok(BlockProposalContents::default_at_fork(fork)); + return BlockProposalContents::default_at_fork(fork).map_err(Into::into); } let terminal_pow_block_hash = execution_layer @@ -473,7 +473,7 @@ } else { // If the merge transition hasn't occurred yet and the EL hasn't found the terminal // block, return an "empty" payload. - return Ok(BlockProposalContents::default_at_fork(fork)); + return BlockProposalContents::default_at_fork(fork).map_err(Into::into); } } else { latest_execution_payload_header_block_hash diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 13948affb..c09541f3b 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -350,12 +350,14 @@ impl From for JsonWithdrawal { impl From for Withdrawal { fn from(jw: JsonWithdrawal) -> Self { + // This comparison is to avoid a scenario where the EE gives us too large a number; this
+ // panics when it attempts to cast to a `u64`. + let amount = std::cmp::min(jw.amount / 1000000000, Uint256::from(u64::MAX)); Self { index: jw.index, validator_index: jw.validator_index, address: jw.address, - //FIXME(sean) if EE gives us too large a number this panics - amount: (jw.amount / 1000000000).as_u64(), + amount: amount.as_u64(), } } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index e22da42a7..d79ac0c36 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -35,7 +35,7 @@ use tokio::{ time::sleep, }; use tokio_stream::wrappers::WatchStream; -use types::{AbstractExecPayload, Blob, ExecPayload, KzgCommitment}; +use types::{AbstractExecPayload, BeaconStateError, Blob, ExecPayload, KzgCommitment}; use types::{ BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ForkName, ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, Slot, Uint256, @@ -95,6 +95,13 @@ pub enum Error { FeeRecipientUnspecified, MissingLatestValidHash, InvalidJWTSecret(String), + BeaconStateError(BeaconStateError), +} + +impl From for Error { + fn from(e: BeaconStateError) -> Self { + Error::BeaconStateError(e) + } } impl From for Error { @@ -153,17 +160,17 @@ impl> BlockProposalContents Some(blobs), } } - pub fn default_at_fork(fork_name: ForkName) -> Self { - match fork_name { + pub fn default_at_fork(fork_name: ForkName) -> Result { + Ok(match fork_name { ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { - BlockProposalContents::Payload(Payload::default_at_fork(fork_name)) + BlockProposalContents::Payload(Payload::default_at_fork(fork_name)?)
} ForkName::Eip4844 => BlockProposalContents::PayloadAndBlobs { - payload: Payload::default_at_fork(fork_name), + payload: Payload::default_at_fork(fork_name)?, blobs: vec![], kzg_commitments: vec![], }, - } + }) } } @@ -803,10 +810,6 @@ impl ExecutionLayer { spec, ) { Ok(()) => Ok(ProvenancedPayload::Builder( - //FIXME(sean) the builder API needs to be updated - // NOTE the comment above was removed in the - // rebase with unstable.. I think it goes - // here now? BlockProposalContents::Payload(relay.data.message.header), )), Err(reason) if !reason.payload_invalid() => { @@ -858,19 +861,11 @@ impl ExecutionLayer { spec, ) { Ok(()) => Ok(ProvenancedPayload::Builder( - //FIXME(sean) the builder API needs to be updated - // NOTE the comment above was removed in the - // rebase with unstable.. I think it goes - // here now? BlockProposalContents::Payload(relay.data.message.header), )), // If the payload is valid then use it. The local EE failed // to produce a payload so we have no alternative. Err(e) if !e.payload_invalid() => Ok(ProvenancedPayload::Builder( - //FIXME(sean) the builder API needs to be updated - // NOTE the comment above was removed in the - // rebase with unstable.. I think it goes - // here now? BlockProposalContents::Payload(relay.data.message.header), )), Err(reason) => { diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 9e85a8b5c..83ab8ceee 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -189,6 +189,11 @@ async fn reconstruct_block( .spec .fork_name_at_epoch(block.slot().epoch(T::EthSpec::slots_per_epoch())), ) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Default payload construction error: {e:?}" + )) + })? .into() // If we already have an execution payload with this transactions root cached, use it. 
} else if let Some(cached_payload) = diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 8bf728346..691b16e41 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -119,8 +119,8 @@ lazy_static! { pub(crate) const MAX_RPC_SIZE: usize = 1_048_576; // 1M /// The maximum bytes that can be sent across the RPC post-merge. pub(crate) const MAX_RPC_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M - //FIXME(sean) should these be the same? pub(crate) const MAX_RPC_SIZE_POST_CAPELLA: usize = 10 * 1_048_576; // 10M + // FIXME(sean) should this be increased to account for blobs? pub(crate) const MAX_RPC_SIZE_POST_EIP4844: usize = 10 * 1_048_576; // 10M /// The protocol prefix the RPC protocol id. const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 445d144ac..158379b7e 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -115,7 +115,8 @@ const MAX_AGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN: usize = 1_024; /// before we start dropping them. const MAX_GOSSIP_BLOCK_QUEUE_LEN: usize = 1_024; -//FIXME(sean) verify +/// The maximum number of queued `SignedBeaconBlockAndBlobsSidecar` objects received on gossip that +/// will be stored before we start dropping them. const MAX_GOSSIP_BLOCK_AND_BLOB_QUEUE_LEN: usize = 1_024; /// The maximum number of queued `SignedBeaconBlock` objects received prior to their slot (but @@ -1186,7 +1187,6 @@ impl BeaconProcessor { // required to verify some attestations. 
} else if let Some(item) = gossip_block_queue.pop() { self.spawn_worker(item, toolbox); - //FIXME(sean) } else if let Some(item) = gossip_block_and_blobs_sidecar_queue.pop() { self.spawn_worker(item, toolbox); // Check the aggregates, *then* the unaggregates since we assume that @@ -1675,23 +1675,9 @@ impl BeaconProcessor { /* * Verification for blobs sidecars received on gossip. */ - Work::GossipBlockAndBlobsSidecar { - message_id, - peer_id, - peer_client, - block_and_blobs, - seen_timestamp, - } => task_spawner.spawn_async(async move { - worker - .process_gossip_block_and_blobs_sidecar( - message_id, - peer_id, - peer_client, - block_and_blobs, - seen_timestamp, - ) - .await - }), + Work::GossipBlockAndBlobsSidecar { .. } => { + warn!(self.log, "Unexpected block and blobs on gossip") + } /* * Import for blocks that we received earlier than their intended slot. */ @@ -1892,19 +1878,9 @@ impl BeaconProcessor { request, ) }), - Work::BlobsByRangeRequest { - peer_id, - request_id, - request, - } => task_spawner.spawn_blocking_with_manual_send_idle(move |send_idle_on_drop| { - worker.handle_blobs_by_range_request( - sub_executor, - send_idle_on_drop, - peer_id, - request_id, - request, - ) - }), + Work::BlobsByRangeRequest { .. } => { + warn!(self.log.clone(), "Unexpected BlobsByRange Request") + } /* * Processing of lightclient bootstrap requests from other peers. 
*/ diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 14d69898f..589d7e9b4 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -11,10 +11,7 @@ use beacon_chain::{ BeaconChainError, BeaconChainTypes, BlockError, CountUnrealized, ForkChoiceError, GossipVerifiedBlock, NotifyExecutionLayer, }; -use lighthouse_network::{ - Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource, - SignedBeaconBlockAndBlobsSidecar, -}; +use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use ssz::Encode; @@ -699,19 +696,6 @@ impl Worker { } } - #[allow(clippy::too_many_arguments)] - pub async fn process_gossip_block_and_blobs_sidecar( - self, - _message_id: MessageId, - _peer_id: PeerId, - _peer_client: Client, - _block_and_blob: Arc>, - _seen_timestamp: Duration, - ) { - //FIXME - unimplemented!() - } - /// Process the beacon block received from the gossip network and /// if it passes gossip propagation criteria, tell the network thread to forward it. 
/// diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index c3a452acd..bfa0ea516 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -4,7 +4,6 @@ use crate::status::ToStatusMessage; use crate::sync::SyncMessage; use beacon_chain::{BeaconChainError, BeaconChainTypes, HistoricalBlockError, WhenSlotSkipped}; use itertools::process_results; -use lighthouse_network::rpc::methods::{BlobsByRangeRequest, MAX_REQUEST_BLOBS_SIDECARS}; use lighthouse_network::rpc::StatusMessage; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; @@ -455,152 +454,4 @@ impl Worker { "load_blocks_by_range_blocks", ); } - - /// Handle a `BlobsByRange` request from the peer. - pub fn handle_blobs_by_range_request( - self, - _executor: TaskExecutor, - _send_on_drop: SendOnDrop, - peer_id: PeerId, - _request_id: PeerRequestId, - mut req: BlobsByRangeRequest, - ) { - debug!(self.log, "Received BlobsByRange Request"; - "peer_id" => %peer_id, - "count" => req.count, - "start_slot" => req.start_slot, - ); - - // Should not send more than max request blocks - if req.count > MAX_REQUEST_BLOBS_SIDECARS { - req.count = MAX_REQUEST_BLOBS_SIDECARS; - } - - //FIXME(sean) create the blobs iter - - // let forwards_block_root_iter = match self - // .chain - // .forwards_iter_block_roots(Slot::from(req.start_slot)) - // { - // Ok(iter) => iter, - // Err(BeaconChainError::HistoricalBlockError( - // HistoricalBlockError::BlockOutOfRange { - // slot, - // oldest_block_slot, - // }, - // )) => { - // debug!(self.log, "Range request failed during backfill"; "requested_slot" => slot, "oldest_known_slot" => oldest_block_slot); - // return self.send_error_response( - // peer_id, - // RPCResponseErrorCode::ResourceUnavailable, - // "Backfilling".into(), - // request_id, - // 
); - // } - // Err(e) => return error!(self.log, "Unable to obtain root iter"; "error" => ?e), - // }; - // - // // Pick out the required blocks, ignoring skip-slots. - // let mut last_block_root = None; - // let maybe_block_roots = process_results(forwards_block_root_iter, |iter| { - // iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count)) - // // map skip slots to None - // .map(|(root, _)| { - // let result = if Some(root) == last_block_root { - // None - // } else { - // Some(root) - // }; - // last_block_root = Some(root); - // result - // }) - // .collect::>>() - // }); - // - // let block_roots = match maybe_block_roots { - // Ok(block_roots) => block_roots, - // Err(e) => return error!(self.log, "Error during iteration over blocks"; "error" => ?e), - // }; - // - // // remove all skip slots - // let block_roots = block_roots.into_iter().flatten().collect::>(); - // - // // Fetching blocks is async because it may have to hit the execution layer for payloads. 
- // executor.spawn( - // async move { - // let mut blocks_sent = 0; - // let mut send_response = true; - // - // for root in block_roots { - // match self.chain.store.get_blobs(&root) { - // Ok(Some(blob)) => { - // blocks_sent += 1; - // self.send_network_message(NetworkMessage::SendResponse { - // peer_id, - // response: Response::BlobsByRange(Some(Arc::new(VariableList::new(vec![blob.message]).unwrap()))), - // id: request_id, - // }); - // } - // Ok(None) => { - // error!( - // self.log, - // "Blob in the chain is not in the store"; - // "request_root" => ?root - // ); - // break; - // } - // Err(e) => { - // error!( - // self.log, - // "Error fetching block for peer"; - // "block_root" => ?root, - // "error" => ?e - // ); - // break; - // } - // } - // } - // - // let current_slot = self - // .chain - // .slot() - // .unwrap_or_else(|_| self.chain.slot_clock.genesis_slot()); - // - // if blocks_sent < (req.count as usize) { - // debug!( - // self.log, - // "BlocksByRange Response processed"; - // "peer" => %peer_id, - // "msg" => "Failed to return all requested blocks", - // "start_slot" => req.start_slot, - // "current_slot" => current_slot, - // "requested" => req.count, - // "returned" => blocks_sent - // ); - // } else { - // debug!( - // self.log, - // "BlocksByRange Response processed"; - // "peer" => %peer_id, - // "start_slot" => req.start_slot, - // "current_slot" => current_slot, - // "requested" => req.count, - // "returned" => blocks_sent - // ); - // } - // - // if send_response { - // // send the stream terminator - // self.send_network_message(NetworkMessage::SendResponse { - // peer_id, - // response: Response::BlobsByRange(None), - // id: request_id, - // }); - // } - // - // drop(send_on_drop); - // }, - // "load_blocks_by_range_blocks", - // ); - } } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index a1eeda84e..0548b0906 100644 --- a/beacon_node/network/src/sync/manager.rs +++ 
b/beacon_node/network/src/sync/manager.rs @@ -47,7 +47,7 @@ use lighthouse_network::rpc::methods::MAX_REQUEST_BLOCKS; use lighthouse_network::types::{NetworkGlobals, SyncState}; use lighthouse_network::SyncInfo; use lighthouse_network::{PeerAction, PeerId}; -use slog::{crit, debug, error, info, trace, Logger}; +use slog::{crit, debug, error, info, trace, warn, Logger}; use std::boxed::Box; use std::ops::Sub; use std::sync::Arc; @@ -592,8 +592,9 @@ impl SyncManager { .block_lookups .parent_chain_processed(chain_hash, result, &mut self.network), }, - //FIXME(sean) - SyncMessage::RpcBlob { .. } => todo!(), + SyncMessage::RpcBlob { .. } => { + warn!(self.log, "Unexpected blob message received"); + } } } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index fcfff7284..752e472e2 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -628,27 +628,6 @@ impl BeaconNodeHttpClient { Ok(()) } - /// `POST beacon/blobs` - /// - /// Returns `Ok(None)` on a 404 error. - pub async fn post_beacon_blobs( - &self, - block: &BlobsSidecar, - ) -> Result<(), Error> { - let mut path = self.eth_path(V1)?; - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? - .push("beacon") - .push("blobs"); - - //FIXME(sean) should we re-use the proposal timeout? seems reasonable to.. - self.post_with_timeout(path, block, self.timeouts.proposal) - .await?; - - Ok(()) - } - /// `POST beacon/blinded_blocks` /// /// Returns `Ok(None)` on a 404 error. diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 661484fde..5ed5307ff 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -247,11 +247,6 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + fn max_blobs_per_block() -> usize { Self::MaxBlobsPerBlock::to_usize() } - - /// FIXME: why is this called chunks_per_blob?? 
- fn chunks_per_blob() -> usize { - Self::FieldElementsPerBlob::to_usize() - } } /// Macro to inherit some type values from another EthSpec. diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 8bba00b46..f56b88fc9 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -92,7 +92,7 @@ pub trait AbstractExecPayload: + From> + TryFrom>; - fn default_at_fork(fork_name: ForkName) -> Self; + fn default_at_fork(fork_name: ForkName) -> Result; } #[superstruct( @@ -372,13 +372,12 @@ impl AbstractExecPayload for FullPayload { type Capella = FullPayloadCapella; type Eip4844 = FullPayloadEip4844; - fn default_at_fork(fork_name: ForkName) -> Self { + fn default_at_fork(fork_name: ForkName) -> Result { match fork_name { - //FIXME(sean) error handling - ForkName::Base | ForkName::Altair => panic!(), - ForkName::Merge => FullPayloadMerge::default().into(), - ForkName::Capella => FullPayloadCapella::default().into(), - ForkName::Eip4844 => FullPayloadEip4844::default().into(), + ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant), + ForkName::Merge => Ok(FullPayloadMerge::default().into()), + ForkName::Capella => Ok(FullPayloadCapella::default().into()), + ForkName::Eip4844 => Ok(FullPayloadEip4844::default().into()), } } } @@ -882,13 +881,12 @@ impl AbstractExecPayload for BlindedPayload { type Capella = BlindedPayloadCapella; type Eip4844 = BlindedPayloadEip4844; - fn default_at_fork(fork_name: ForkName) -> Self { + fn default_at_fork(fork_name: ForkName) -> Result { match fork_name { - //FIXME(sean) error handling - ForkName::Base | ForkName::Altair => panic!(), - ForkName::Merge => BlindedPayloadMerge::default().into(), - ForkName::Capella => BlindedPayloadCapella::default().into(), - ForkName::Eip4844 => BlindedPayloadEip4844::default().into(), + ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant), + ForkName::Merge => Ok(BlindedPayloadMerge::default().into()), + ForkName::Capella 
=> Ok(BlindedPayloadCapella::default().into()), + ForkName::Eip4844 => Ok(BlindedPayloadEip4844::default().into()), } } } diff --git a/lcli/src/create_payload_header.rs b/lcli/src/create_payload_header.rs index ebda93616..7700f23d9 100644 --- a/lcli/src/create_payload_header.rs +++ b/lcli/src/create_payload_header.rs @@ -4,7 +4,10 @@ use ssz::Encode; use std::fs::File; use std::io::Write; use std::time::{SystemTime, UNIX_EPOCH}; -use types::{EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderMerge}; +use types::{ + EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderEip4844, + ExecutionPayloadHeaderMerge, ForkName, +}; pub fn run(matches: &ArgMatches) -> Result<(), String> { let eth1_block_hash = parse_required(matches, "execution-block-hash")?; @@ -17,17 +20,36 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { let base_fee_per_gas = parse_required(matches, "base-fee-per-gas")?; let gas_limit = parse_required(matches, "gas-limit")?; let file_name = matches.value_of("file").ok_or("No file supplied")?; + let fork_name: ForkName = parse_optional(matches, "fork")?.unwrap_or(ForkName::Merge); - //FIXME(sean) - let execution_payload_header: ExecutionPayloadHeader = - ExecutionPayloadHeader::Merge(ExecutionPayloadHeaderMerge { + let execution_payload_header: ExecutionPayloadHeader = match fork_name { + ForkName::Base | ForkName::Altair => return Err("invalid fork name".to_string()), + ForkName::Merge => ExecutionPayloadHeader::Merge(ExecutionPayloadHeaderMerge { gas_limit, base_fee_per_gas, timestamp: genesis_time, block_hash: eth1_block_hash, prev_randao: eth1_block_hash.into_root(), ..ExecutionPayloadHeaderMerge::default() - }); + }), + ForkName::Capella => ExecutionPayloadHeader::Capella(ExecutionPayloadHeaderCapella { + gas_limit, + base_fee_per_gas, + timestamp: genesis_time, + block_hash: eth1_block_hash, + prev_randao: eth1_block_hash.into_root(), + ..ExecutionPayloadHeaderCapella::default() + }), + ForkName::Eip4844 
=> ExecutionPayloadHeader::Eip4844(ExecutionPayloadHeaderEip4844 { + gas_limit, + base_fee_per_gas, + timestamp: genesis_time, + block_hash: eth1_block_hash, + prev_randao: eth1_block_hash.into_root(), + ..ExecutionPayloadHeaderEip4844::default() + }), + }; + let mut file = File::create(file_name).map_err(|_| "Unable to create file".to_string())?; let bytes = execution_payload_header.as_ssz_bytes(); file.write_all(bytes.as_slice()) diff --git a/lcli/src/main.rs b/lcli/src/main.rs index de6039f35..238c7e9f1 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -371,7 +371,8 @@ fn main() { .subcommand( SubCommand::with_name("create-payload-header") .about("Generates an SSZ file containing bytes for an `ExecutionPayloadHeader`. \ - Useful as input for `lcli new-testnet --execution-payload-header FILE`. ") + Useful as input for `lcli new-testnet --execution-payload-header FILE`. If `--fork` \ + is not provided, a payload header for the `Bellatrix` fork will be created.") .arg( Arg::with_name("execution-block-hash") .long("execution-block-hash") @@ -417,7 +418,15 @@ fn main() { .takes_value(true) .required(true) .help("Output file"), - ) + ).arg( + Arg::with_name("fork") + .long("fork") + .value_name("FORK") + .takes_value(true) + .default_value("bellatrix") + .help("The fork for which the execution payload header should be created.") + .possible_values(&["merge", "bellatrix", "capella", "eip4844"]) + ) ) .subcommand( SubCommand::with_name("new-testnet") diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index 650addc18..4d194ff10 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -9,8 +9,9 @@ use std::io::Read; use std::path::PathBuf; use std::time::{SystemTime, UNIX_EPOCH}; use types::{ - test_utils::generate_deterministic_keypairs, Address, Config, EthSpec, ExecutionPayloadHeader, - ExecutionPayloadHeaderMerge, + test_utils::generate_deterministic_keypairs, Address, Config, Epoch, EthSpec, + ExecutionPayloadHeader, 
ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderEip4844, + ExecutionPayloadHeaderMerge, ForkName, }; pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> { @@ -80,10 +81,25 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul .map_err(|e| format!("Unable to open {}: {}", filename, e))?; file.read_to_end(&mut bytes) .map_err(|e| format!("Unable to read {}: {}", filename, e))?; - //FIXME(sean) - ExecutionPayloadHeaderMerge::::from_ssz_bytes(bytes.as_slice()) - .map(ExecutionPayloadHeader::Merge) - .map_err(|e| format!("SSZ decode failed: {:?}", e)) + let fork_name = spec.fork_name_at_epoch(Epoch::new(0)); + match fork_name { + ForkName::Base | ForkName::Altair => Err(ssz::DecodeError::BytesInvalid( + "genesis fork must be post-merge".to_string(), + )), + ForkName::Merge => { + ExecutionPayloadHeaderMerge::::from_ssz_bytes(bytes.as_slice()) + .map(ExecutionPayloadHeader::Merge) + } + ForkName::Capella => { + ExecutionPayloadHeaderCapella::::from_ssz_bytes(bytes.as_slice()) + .map(ExecutionPayloadHeader::Capella) + } + ForkName::Eip4844 => { + ExecutionPayloadHeaderEip4844::::from_ssz_bytes(bytes.as_slice()) + .map(ExecutionPayloadHeader::Eip4844) + } + } + .map_err(|e| format!("SSZ decode failed: {:?}", e)) }) .transpose()?; diff --git a/validator_client/src/signing_method.rs b/validator_client/src/signing_method.rs index 2ebca2dfb..ae9df0809 100644 --- a/validator_client/src/signing_method.rs +++ b/validator_client/src/signing_method.rs @@ -37,7 +37,6 @@ pub enum Error { pub enum SignableMessage<'a, T: EthSpec, Payload: AbstractExecPayload = FullPayload> { RandaoReveal(Epoch), BeaconBlock(&'a BeaconBlock), - BlobsSidecar(&'a BlobsSidecar), AttestationData(&'a AttestationData), SignedAggregateAndProof(&'a AggregateAndProof), SelectionProof(Slot), @@ -59,7 +58,6 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload> SignableMessage<'a, T, Pay match self { SignableMessage::RandaoReveal(epoch) => 
epoch.signing_root(domain), SignableMessage::BeaconBlock(b) => b.signing_root(domain), - SignableMessage::BlobsSidecar(b) => b.signing_root(domain), SignableMessage::AttestationData(a) => a.signing_root(domain), SignableMessage::SignedAggregateAndProof(a) => a.signing_root(domain), SignableMessage::SelectionProof(slot) => slot.signing_root(domain), @@ -182,7 +180,6 @@ impl SigningMethod { Web3SignerObject::RandaoReveal { epoch } } SignableMessage::BeaconBlock(block) => Web3SignerObject::beacon_block(block)?, - SignableMessage::BlobsSidecar(blob) => Web3SignerObject::BlobsSidecar(blob), SignableMessage::AttestationData(a) => Web3SignerObject::Attestation(a), SignableMessage::SignedAggregateAndProof(a) => { Web3SignerObject::AggregateAndProof(a) diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs index 5daa42fa3..512cbc7d0 100644 --- a/validator_client/src/signing_method/web3signer.rs +++ b/validator_client/src/signing_method/web3signer.rs @@ -11,7 +11,6 @@ pub enum MessageType { AggregateAndProof, Attestation, BlockV2, - BlobsSidecar, Deposit, RandaoReveal, VoluntaryExit, @@ -52,8 +51,6 @@ pub enum Web3SignerObject<'a, T: EthSpec, Payload: AbstractExecPayload> { #[serde(skip_serializing_if = "Option::is_none")] block_header: Option, }, - //FIXME(sean) just guessing here - BlobsSidecar(&'a BlobsSidecar), #[allow(dead_code)] Deposit { pubkey: PublicKeyBytes, @@ -114,7 +111,6 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload> Web3SignerObject<'a, T, Pa Web3SignerObject::AggregateAndProof(_) => MessageType::AggregateAndProof, Web3SignerObject::Attestation(_) => MessageType::Attestation, Web3SignerObject::BeaconBlock { .. } => MessageType::BlockV2, - Web3SignerObject::BlobsSidecar(_) => MessageType::BlobsSidecar, Web3SignerObject::Deposit { .. } => MessageType::Deposit, Web3SignerObject::RandaoReveal { .. 
} => MessageType::RandaoReveal, Web3SignerObject::VoluntaryExit(_) => MessageType::VoluntaryExit, From 4353c49855dc016928eb6a255bfb12fda044249a Mon Sep 17 00:00:00 2001 From: realbigsean Date: Tue, 3 Jan 2023 08:55:19 -0500 Subject: [PATCH 06/14] Update beacon_node/execution_layer/src/engine_api/json_structures.rs --- beacon_node/execution_layer/src/engine_api/json_structures.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index c09541f3b..728150a20 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -350,8 +350,8 @@ impl From for JsonWithdrawal { impl From for Withdrawal { fn from(jw: JsonWithdrawal) -> Self { - // This comparison is to avoid a scenarion where the EE gives us too large a number this - // panics when it attempts to case to a `u64`. + // This comparison is done to avoid a scenario where the EE gives us too large a number and we + // panic when attempting to cast to a `u64`. 
let amount = std::cmp::max(jw.amount / 1000000000, Uint256::from(u64::MAX)); Self { index: jw.index, From be232c4587075b1be6c36dc4272b4fb03e15f0fc Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Tue, 3 Jan 2023 16:42:34 -0600 Subject: [PATCH 07/14] Update Execution Layer Tests for Capella --- Makefile | 2 +- beacon_node/beacon_chain/src/test_utils.rs | 20 ++- beacon_node/beacon_chain/tests/capella.rs | 170 ++++++++++++++++++ beacon_node/beacon_chain/tests/main.rs | 1 + beacon_node/beacon_chain/tests/merge.rs | 9 +- .../test_utils/execution_block_generator.rs | 104 +++++++++-- .../src/test_utils/handle_rpc.rs | 122 +++++++++++-- .../src/test_utils/mock_execution_layer.rs | 7 + .../execution_layer/src/test_utils/mod.rs | 21 ++- 9 files changed, 408 insertions(+), 48 deletions(-) create mode 100644 beacon_node/beacon_chain/tests/capella.rs diff --git a/Makefile b/Makefile index 15d09c586..40db61de7 100644 --- a/Makefile +++ b/Makefile @@ -28,7 +28,7 @@ PROFILE ?= release # List of all hard forks. This list is used to set env variables for several tests so that # they run for different forks. -FORKS=phase0 altair merge +FORKS=phase0 altair merge capella # Builds the Lighthouse binary in release (optimized). 
# diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 9183583fb..3c4ab1ca1 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -11,11 +11,11 @@ use crate::{ StateSkipConfig, }; use bls::get_withdrawal_credentials; -use execution_layer::test_utils::DEFAULT_JWT_SECRET; use execution_layer::{ auth::JwtKey, test_utils::{ - ExecutionBlockGenerator, MockExecutionLayer, TestingBuilder, DEFAULT_TERMINAL_BLOCK, + ExecutionBlockGenerator, MockExecutionLayer, TestingBuilder, DEFAULT_JWT_SECRET, + DEFAULT_TERMINAL_BLOCK, }, ExecutionLayer, }; @@ -385,12 +385,20 @@ where pub fn mock_execution_layer(mut self) -> Self { let spec = self.spec.clone().expect("cannot build without spec"); + let shanghai_time = spec.capella_fork_epoch.map(|epoch| { + HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); + let eip4844_time = spec.eip4844_fork_epoch.map(|epoch| { + HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); let mock = MockExecutionLayer::new( self.runtime.task_executor.clone(), spec.terminal_total_difficulty, DEFAULT_TERMINAL_BLOCK, spec.terminal_block_hash, spec.terminal_block_hash_activation_epoch, + shanghai_time, + eip4844_time, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), None, ); @@ -405,12 +413,20 @@ where let builder_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(); let spec = self.spec.clone().expect("cannot build without spec"); + let shanghai_time = spec.capella_fork_epoch.map(|epoch| { + HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); + let eip4844_time = spec.eip4844_fork_epoch.map(|epoch| { + HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); let mock_el = MockExecutionLayer::new( self.runtime.task_executor.clone(), spec.terminal_total_difficulty, 
DEFAULT_TERMINAL_BLOCK, spec.terminal_block_hash, spec.terminal_block_hash_activation_epoch, + shanghai_time, + eip4844_time, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), Some(builder_url.clone()), ) diff --git a/beacon_node/beacon_chain/tests/capella.rs b/beacon_node/beacon_chain/tests/capella.rs new file mode 100644 index 000000000..1e39d075d --- /dev/null +++ b/beacon_node/beacon_chain/tests/capella.rs @@ -0,0 +1,170 @@ +#![cfg(not(debug_assertions))] // Tests run too slow in debug. + +use beacon_chain::test_utils::BeaconChainHarness; +use execution_layer::test_utils::Block; +use types::*; + +const VALIDATOR_COUNT: usize = 32; +type E = MainnetEthSpec; + +fn verify_execution_payload_chain(chain: &[FullPayload]) { + let mut prev_ep: Option> = None; + + for ep in chain { + assert!(!ep.is_default_with_empty_roots()); + assert!(ep.block_hash() != ExecutionBlockHash::zero()); + + // Check against previous `ExecutionPayload`. + if let Some(prev_ep) = prev_ep { + assert_eq!(prev_ep.block_hash(), ep.execution_payload().parent_hash()); + assert_eq!( + prev_ep.execution_payload().block_number() + 1, + ep.execution_payload().block_number() + ); + assert!(ep.execution_payload().timestamp() > prev_ep.execution_payload().timestamp()); + } + prev_ep = Some(ep.clone()); + } +} + +#[tokio::test] +async fn base_altair_merge_capella() { + let altair_fork_epoch = Epoch::new(4); + let altair_fork_slot = altair_fork_epoch.start_slot(E::slots_per_epoch()); + let bellatrix_fork_epoch = Epoch::new(8); + let merge_fork_slot = bellatrix_fork_epoch.start_slot(E::slots_per_epoch()); + let capella_fork_epoch = Epoch::new(12); + let capella_fork_slot = capella_fork_epoch.start_slot(E::slots_per_epoch()); + + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(altair_fork_epoch); + spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); + spec.capella_fork_epoch = Some(capella_fork_epoch); + + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec) + 
.logger(logging::test_logger()) + .deterministic_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + /* + * Start with the base fork. + */ + assert!(harness.chain.head_snapshot().beacon_block.as_base().is_ok()); + + /* + * Do the Altair fork. + */ + harness.extend_to_slot(altair_fork_slot).await; + + let altair_head = &harness.chain.head_snapshot().beacon_block; + assert!(altair_head.as_altair().is_ok()); + assert_eq!(altair_head.slot(), altair_fork_slot); + + /* + * Do the merge fork, without a terminal PoW block. + */ + harness.extend_to_slot(merge_fork_slot).await; + + let merge_head = &harness.chain.head_snapshot().beacon_block; + assert!(merge_head.as_merge().is_ok()); + assert_eq!(merge_head.slot(), merge_fork_slot); + assert!( + merge_head + .message() + .body() + .execution_payload() + .unwrap() + .is_default_with_empty_roots(), + "Merge head is default payload" + ); + + /* + * Next merge block shouldn't include an exec payload. + */ + harness.extend_slots(1).await; + + let one_after_merge_head = &harness.chain.head_snapshot().beacon_block; + assert!( + one_after_merge_head + .message() + .body() + .execution_payload() + .unwrap() + .is_default_with_empty_roots(), + "One after merge head is default payload" + ); + assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 1); + + /* + * Trigger the terminal PoW block. 
+ */ + harness + .execution_block_generator() + .move_to_terminal_block() + .unwrap(); + + // Add a slot duration to get to the next slot + let timestamp = harness.get_timestamp_at_slot() + harness.spec.seconds_per_slot; + harness + .execution_block_generator() + .modify_last_block(|block| { + if let Block::PoW(terminal_block) = block { + terminal_block.timestamp = timestamp; + } + }); + harness.extend_slots(1).await; + + let two_after_merge_head = &harness.chain.head_snapshot().beacon_block; + assert!( + two_after_merge_head + .message() + .body() + .execution_payload() + .unwrap() + .is_default_with_empty_roots(), + "Two after merge head is default payload" + ); + assert_eq!(two_after_merge_head.slot(), merge_fork_slot + 2); + + /* + * Next merge block should include an exec payload. + */ + let mut execution_payloads = vec![]; + for _ in (merge_fork_slot.as_u64() + 3)..capella_fork_slot.as_u64() { + harness.extend_slots(1).await; + let block = &harness.chain.head_snapshot().beacon_block; + let full_payload: FullPayload = block + .message() + .body() + .execution_payload() + .unwrap() + .clone() + .into(); + // pre-capella shouldn't have withdrawals + assert!(full_payload.withdrawals_root().is_err()); + execution_payloads.push(full_payload); + } + + /* + * Should enter capella fork now. 
+ */ + for _ in 0..16 { + harness.extend_slots(1).await; + let block = &harness.chain.head_snapshot().beacon_block; + let full_payload: FullPayload = block + .message() + .body() + .execution_payload() + .unwrap() + .clone() + .into(); + // post-capella should have withdrawals + assert!(full_payload.withdrawals_root().is_ok()); + execution_payloads.push(full_payload); + } + + verify_execution_payload_chain(execution_payloads.as_slice()); +} diff --git a/beacon_node/beacon_chain/tests/main.rs b/beacon_node/beacon_chain/tests/main.rs index 1c61e9927..3b8c83594 100644 --- a/beacon_node/beacon_chain/tests/main.rs +++ b/beacon_node/beacon_chain/tests/main.rs @@ -1,6 +1,7 @@ mod attestation_production; mod attestation_verification; mod block_verification; +mod capella; mod merge; mod op_verification; mod payload_invalidation; diff --git a/beacon_node/beacon_chain/tests/merge.rs b/beacon_node/beacon_chain/tests/merge.rs index 80ffc57be..cd6e0e2ba 100644 --- a/beacon_node/beacon_chain/tests/merge.rs +++ b/beacon_node/beacon_chain/tests/merge.rs @@ -191,18 +191,17 @@ async fn base_altair_merge_with_terminal_block_after_fork() { harness.extend_slots(1).await; - let one_after_merge_head = &harness.chain.head_snapshot().beacon_block; - // FIXME: why is this being tested twice? + let two_after_merge_head = &harness.chain.head_snapshot().beacon_block; assert!( - one_after_merge_head + two_after_merge_head .message() .body() .execution_payload() .unwrap() .is_default_with_empty_roots(), - "One after merge head is default payload" + "Two after merge head is default payload" ); - assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 2); + assert_eq!(two_after_merge_head.slot(), merge_fork_slot + 2); /* * Next merge block should include an exec payload. 
diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index b7206cbf8..7790dcbed 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -13,7 +13,8 @@ use std::collections::HashMap; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; use types::{ - EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadMerge, Hash256, Uint256, + EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella, + ExecutionPayloadEip4844, ExecutionPayloadMerge, ForkName, Hash256, Uint256, }; const GAS_LIMIT: u64 = 16384; @@ -113,6 +114,11 @@ pub struct ExecutionBlockGenerator { pub pending_payloads: HashMap>, pub next_payload_id: u64, pub payload_ids: HashMap>, + /* + * Post-merge fork triggers + */ + pub shanghai_time: Option, // withdrawals + pub eip4844_time: Option, // 4844 } impl ExecutionBlockGenerator { @@ -120,6 +126,8 @@ impl ExecutionBlockGenerator { terminal_total_difficulty: Uint256, terminal_block_number: u64, terminal_block_hash: ExecutionBlockHash, + shanghai_time: Option, + eip4844_time: Option, ) -> Self { let mut gen = Self { head_block: <_>::default(), @@ -132,6 +140,8 @@ impl ExecutionBlockGenerator { pending_payloads: <_>::default(), next_payload_id: 0, payload_ids: <_>::default(), + shanghai_time, + eip4844_time, }; gen.insert_pow_block(0).unwrap(); @@ -163,6 +173,16 @@ impl ExecutionBlockGenerator { } } + pub fn get_fork_at_timestamp(&self, timestamp: u64) -> ForkName { + match self.eip4844_time { + Some(fork_time) if timestamp >= fork_time => ForkName::Eip4844, + _ => match self.shanghai_time { + Some(fork_time) if timestamp >= fork_time => ForkName::Capella, + _ => ForkName::Merge, + }, + } + } + pub fn execution_block_by_number(&self, number: u64) -> Option { self.block_by_number(number) .map(|block| 
block.as_execution_block(self.terminal_total_difficulty)) @@ -395,7 +415,9 @@ impl ExecutionBlockGenerator { } } - pub fn forkchoice_updated_v1( + // This function expects payload_attributes to already be validated with respect to + // the current fork [obtained by self.get_fork_at_timestamp(payload_attributes.timestamp)] + pub fn forkchoice_updated( &mut self, forkchoice_state: ForkchoiceState, payload_attributes: Option, @@ -469,23 +491,65 @@ impl ExecutionBlockGenerator { transactions: vec![].into(), }), PayloadAttributes::V2(pa) => { - // FIXME: think about how to test different forks - ExecutionPayload::Merge(ExecutionPayloadMerge { - parent_hash: forkchoice_state.head_block_hash, - fee_recipient: pa.suggested_fee_recipient, - receipts_root: Hash256::repeat_byte(42), - state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), - prev_randao: pa.prev_randao, - block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, - gas_used: GAS_USED, - timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), - base_fee_per_gas: Uint256::one(), - block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), - }) + match self.get_fork_at_timestamp(pa.timestamp) { + ForkName::Merge => ExecutionPayload::Merge(ExecutionPayloadMerge { + parent_hash: forkchoice_state.head_block_hash, + fee_recipient: pa.suggested_fee_recipient, + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: pa.prev_randao, + block_number: parent.block_number() + 1, + gas_limit: GAS_LIMIT, + gas_used: GAS_USED, + timestamp: pa.timestamp, + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::one(), + block_hash: ExecutionBlockHash::zero(), + transactions: vec![].into(), + }), + ForkName::Capella => { + ExecutionPayload::Capella(ExecutionPayloadCapella { + parent_hash: forkchoice_state.head_block_hash, + fee_recipient: 
pa.suggested_fee_recipient, + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: pa.prev_randao, + block_number: parent.block_number() + 1, + gas_limit: GAS_LIMIT, + gas_used: GAS_USED, + timestamp: pa.timestamp, + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::one(), + block_hash: ExecutionBlockHash::zero(), + transactions: vec![].into(), + withdrawals: pa.withdrawals.as_ref().unwrap().clone().into(), + }) + } + ForkName::Eip4844 => { + ExecutionPayload::Eip4844(ExecutionPayloadEip4844 { + parent_hash: forkchoice_state.head_block_hash, + fee_recipient: pa.suggested_fee_recipient, + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: pa.prev_randao, + block_number: parent.block_number() + 1, + gas_limit: GAS_LIMIT, + gas_used: GAS_USED, + timestamp: pa.timestamp, + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::one(), + // FIXME(4844): maybe this should be set to something? + excess_data_gas: Uint256::one(), + block_hash: ExecutionBlockHash::zero(), + transactions: vec![].into(), + withdrawals: pa.withdrawals.as_ref().unwrap().clone().into(), + }) + } + _ => unreachable!(), + } } }; @@ -576,6 +640,8 @@ mod test { TERMINAL_DIFFICULTY.into(), TERMINAL_BLOCK, ExecutionBlockHash::zero(), + None, + None, ); for i in 0..=TERMINAL_BLOCK { diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index e8ece97f3..f01ae00e8 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -82,17 +82,40 @@ pub async fn handle_rpc( ENGINE_NEW_PAYLOAD_V2 => { JsonExecutionPayload::V2(get_param::>(params, 0)?) } + // TODO(4844) add that here.. 
_ => unreachable!(), }; - let fork = match request { - JsonExecutionPayload::V1(_) => ForkName::Merge, - JsonExecutionPayload::V2(ref payload) => { - if payload.withdrawals.is_none() { - ForkName::Merge - } else { - ForkName::Capella + + let fork = ctx + .execution_block_generator + .read() + .get_fork_at_timestamp(*request.timestamp()); + // validate method called correctly according to shanghai fork time + match fork { + ForkName::Merge => { + if request.withdrawals().is_ok() && request.withdrawals().unwrap().is_some() { + return Err(format!( + "{} called with `withdrawals` before capella fork!", + method + )); } } + ForkName::Capella => { + if method == ENGINE_NEW_PAYLOAD_V1 { + return Err(format!("{} called after capella fork!", method)); + } + if request.withdrawals().is_err() + || (request.withdrawals().is_ok() + && request.withdrawals().unwrap().is_none()) + { + return Err(format!( + "{} called without `withdrawals` after capella fork!", + method + )); + } + } + // TODO(4844) add 4844 error checking here + _ => unreachable!(), }; // Canned responses set by block hash take priority. 
@@ -125,7 +148,7 @@ pub async fn handle_rpc( Ok(serde_json::to_value(JsonPayloadStatusV1::from(response)).unwrap()) } - ENGINE_GET_PAYLOAD_V1 => { + ENGINE_GET_PAYLOAD_V1 | ENGINE_GET_PAYLOAD_V2 => { let request: JsonPayloadIdRequest = get_param(params, 0)?; let id = request.into(); @@ -135,12 +158,76 @@ pub async fn handle_rpc( .get_payload(&id) .ok_or_else(|| format!("no payload for id {:?}", id))?; - Ok(serde_json::to_value(JsonExecutionPayloadV1::try_from(response).unwrap()).unwrap()) + // validate method called correctly according to shanghai fork time + if ctx + .execution_block_generator + .read() + .get_fork_at_timestamp(response.timestamp()) + == ForkName::Capella + && method == ENGINE_GET_PAYLOAD_V1 + { + return Err(format!("{} called after capella fork!", method)); + } + // TODO(4844) add 4844 error checking here + + match method { + ENGINE_GET_PAYLOAD_V1 => Ok(serde_json::to_value( + JsonExecutionPayloadV1::try_from(response).unwrap(), + ) + .unwrap()), + ENGINE_GET_PAYLOAD_V2 => Ok(serde_json::to_value(JsonGetPayloadResponse { + execution_payload: JsonExecutionPayloadV2::try_from(response).unwrap(), + }) + .unwrap()), + _ => unreachable!(), + } } - // FIXME(capella): handle fcu version 2 - ENGINE_FORKCHOICE_UPDATED_V1 => { + ENGINE_FORKCHOICE_UPDATED_V1 | ENGINE_FORKCHOICE_UPDATED_V2 => { let forkchoice_state: JsonForkchoiceStateV1 = get_param(params, 0)?; - let payload_attributes: Option = get_param(params, 1)?; + let payload_attributes = match method { + ENGINE_FORKCHOICE_UPDATED_V1 => { + let jpa1: Option = get_param(params, 1)?; + jpa1.map(JsonPayloadAttributes::V1) + } + ENGINE_FORKCHOICE_UPDATED_V2 => { + let jpa2: Option = get_param(params, 1)?; + jpa2.map(JsonPayloadAttributes::V2) + } + _ => unreachable!(), + }; + + // validate method called correctly according to shanghai fork time + if let Some(pa) = payload_attributes.as_ref() { + match ctx + .execution_block_generator + .read() + .get_fork_at_timestamp(*pa.timestamp()) + { + 
ForkName::Merge => { + if pa.withdrawals().is_ok() && pa.withdrawals().unwrap().is_some() { + return Err(format!( + "{} called with `withdrawals` before capella fork!", + method + )); + } + } + ForkName::Capella => { + if method == ENGINE_FORKCHOICE_UPDATED_V1 { + return Err(format!("{} called after capella fork!", method)); + } + if pa.withdrawals().is_err() + || (pa.withdrawals().is_ok() && pa.withdrawals().unwrap().is_none()) + { + return Err(format!( + "{} called without `withdrawals` after capella fork!", + method + )); + } + } + // TODO(4844) add 4844 error checking here + _ => unreachable!(), + }; + } if let Some(hook_response) = ctx .hook @@ -161,13 +248,10 @@ pub async fn handle_rpc( return Ok(serde_json::to_value(response).unwrap()); } - let mut response = ctx - .execution_block_generator - .write() - .forkchoice_updated_v1( - forkchoice_state.into(), - payload_attributes.map(|json| json.into()), - )?; + let mut response = ctx.execution_block_generator.write().forkchoice_updated( + forkchoice_state.into(), + payload_attributes.map(|json| json.into()), + )?; if let Some(mut status) = ctx.static_forkchoice_updated_response.lock().clone() { if status.status == PayloadStatusV1Status::Valid { diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index e552b7ca7..89e0344d9 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -26,17 +26,22 @@ impl MockExecutionLayer { DEFAULT_TERMINAL_BLOCK, ExecutionBlockHash::zero(), Epoch::new(0), + None, + None, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), None, ) } + #[allow(clippy::too_many_arguments)] pub fn new( executor: TaskExecutor, terminal_total_difficulty: Uint256, terminal_block: u64, terminal_block_hash: ExecutionBlockHash, terminal_block_hash_activation_epoch: Epoch, + shanghai_time: Option, + eip4844_time: 
Option, jwt_key: Option, builder_url: Option, ) -> Self { @@ -54,6 +59,8 @@ impl MockExecutionLayer { terminal_total_difficulty, terminal_block, terminal_block_hash, + shanghai_time, + eip4844_time, ); let url = SensitiveUrl::parse(&server.url()).unwrap(); diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index f18ecbe62..bad02e369 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -45,6 +45,8 @@ pub struct MockExecutionConfig { pub terminal_difficulty: Uint256, pub terminal_block: u64, pub terminal_block_hash: ExecutionBlockHash, + pub shanghai_time: Option, + pub eip4844_time: Option, } impl Default for MockExecutionConfig { @@ -55,6 +57,8 @@ impl Default for MockExecutionConfig { terminal_block: DEFAULT_TERMINAL_BLOCK, terminal_block_hash: ExecutionBlockHash::zero(), server_config: Config::default(), + shanghai_time: None, + eip4844_time: None, } } } @@ -74,6 +78,8 @@ impl MockServer { DEFAULT_TERMINAL_DIFFICULTY.into(), DEFAULT_TERMINAL_BLOCK, ExecutionBlockHash::zero(), + None, // FIXME(capella): should this be the default? + None, // FIXME(eip4844): should this be the default? 
) } @@ -84,11 +90,18 @@ impl MockServer { terminal_block, terminal_block_hash, server_config, + shanghai_time, + eip4844_time, } = config; let last_echo_request = Arc::new(RwLock::new(None)); let preloaded_responses = Arc::new(Mutex::new(vec![])); - let execution_block_generator = - ExecutionBlockGenerator::new(terminal_difficulty, terminal_block, terminal_block_hash); + let execution_block_generator = ExecutionBlockGenerator::new( + terminal_difficulty, + terminal_block, + terminal_block_hash, + shanghai_time, + eip4844_time, + ); let ctx: Arc> = Arc::new(Context { config: server_config, @@ -140,6 +153,8 @@ impl MockServer { terminal_difficulty: Uint256, terminal_block: u64, terminal_block_hash: ExecutionBlockHash, + shanghai_time: Option, + eip4844_time: Option, ) -> Self { Self::new_with_config( handle, @@ -149,6 +164,8 @@ impl MockServer { terminal_difficulty, terminal_block, terminal_block_hash, + shanghai_time, + eip4844_time, }, ) } From 933772dd0678380383fbad4f4953ba26622b5035 Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Tue, 3 Jan 2023 18:40:35 -0600 Subject: [PATCH 08/14] Fixed Operation Pool Tests --- beacon_node/beacon_chain/src/test_utils.rs | 18 ++++++++----- beacon_node/operation_pool/src/lib.rs | 31 +++++++++++++++++----- 2 files changed, 35 insertions(+), 14 deletions(-) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 3c4ab1ca1..fec04bd54 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -120,19 +120,23 @@ fn make_rng() -> Mutex { Mutex::new(StdRng::seed_from_u64(0x0DDB1A5E5BAD5EEDu64)) } +pub fn get_fork_from_env() -> ForkName { + let fork_string = std::env::var(FORK_NAME_ENV_VAR).unwrap_or_else(|e| { + panic!( + "{} env var must be defined when using fork_from_env: {:?}", + FORK_NAME_ENV_VAR, e + ) + }); + ForkName::from_str(fork_string.as_str()).unwrap() +} + /// Return a `ChainSpec` suitable for test usage. 
/// /// If the `fork_from_env` feature is enabled, read the fork to use from the FORK_NAME environment /// variable. Otherwise use the default spec. pub fn test_spec() -> ChainSpec { let mut spec = if cfg!(feature = "fork_from_env") { - let fork_name = std::env::var(FORK_NAME_ENV_VAR).unwrap_or_else(|e| { - panic!( - "{} env var must be defined when using fork_from_env: {:?}", - FORK_NAME_ENV_VAR, e - ) - }); - let fork = ForkName::from_str(fork_name.as_str()).unwrap(); + let fork = get_fork_from_env(); fork.make_genesis_spec(E::default_spec()) } else { E::default_spec() diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 37fa68938..4a895391f 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -8,19 +8,18 @@ mod persistence; mod reward_cache; mod sync_aggregate_id; +use crate::attestation_storage::{AttestationMap, CheckpointKey}; +use crate::sync_aggregate_id::SyncAggregateId; pub use attestation::AttMaxCover; pub use attestation_storage::{AttestationRef, SplitAttestation}; +use attester_slashing::AttesterSlashingMaxCover; +use max_cover::maximum_cover; pub use max_cover::MaxCover; +use parking_lot::{RwLock, RwLockWriteGuard}; pub use persistence::{ PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV5, }; pub use reward_cache::RewardCache; - -use crate::attestation_storage::{AttestationMap, CheckpointKey}; -use crate::sync_aggregate_id::SyncAggregateId; -use attester_slashing::AttesterSlashingMaxCover; -use max_cover::maximum_cover; -use parking_lot::{RwLock, RwLockWriteGuard}; use state_processing::per_block_processing::errors::AttestationValidationError; use state_processing::per_block_processing::{ get_slashable_indices_modular, verify_exit, VerifySignatures, @@ -767,7 +766,8 @@ mod release_tests { use super::attestation::earliest_attestation_validators; use super::*; use beacon_chain::test_utils::{ - test_spec, BeaconChainHarness, 
EphemeralHarnessType, RelativeSyncCommittee, + get_fork_from_env, test_spec, BeaconChainHarness, EphemeralHarnessType, + RelativeSyncCommittee, }; use lazy_static::lazy_static; use maplit::hashset; @@ -1789,9 +1789,26 @@ mod release_tests { { let mut spec = test_spec::(); + if cfg!(feature = "fork_from_env") { + let fork = get_fork_from_env(); + match fork { + ForkName::Altair | ForkName::Merge | ForkName::Capella | ForkName::Eip4844 => {} + _ => panic!( + "Unknown fork {}, add it above AND below so this test doesn't panic", + fork + ), + } + } + // Give some room to sign surround slashings. + // It appears we need to set _every_ fork to some non-zero value + // here. Otherwise if we set FORK_NAME_ENV_VAR to some fork that + // isn't listed here, tests that use this function will panic in + // non-trivial ways spec.altair_fork_epoch = Some(Epoch::new(3)); spec.bellatrix_fork_epoch = Some(Epoch::new(6)); + spec.capella_fork_epoch = Some(Epoch::new(9)); + spec.eip4844_fork_epoch = Some(Epoch::new(12)); // To make exits immediately valid. 
spec.shard_committee_period = 0; From 8711db2f3b7dbe27756d93f3853c36c3780495ce Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Wed, 4 Jan 2023 15:14:43 -0600 Subject: [PATCH 09/14] Fix EF Tests --- beacon_node/beacon_chain/src/test_utils.rs | 21 +++++++++++++++++++++ testing/ef_tests/src/cases/fork_choice.rs | 1 + 2 files changed, 22 insertions(+) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index fec04bd54..53b333c5f 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -387,6 +387,27 @@ where self } + pub fn recalculate_fork_times_with_genesis(mut self, genesis_time: u64) -> Self { + let mock = self + .mock_execution_layer + .as_mut() + .expect("must have mock execution layer to recalculate fork times"); + let spec = self + .spec + .clone() + .expect("cannot recalculate fork times without spec"); + mock.server.execution_block_generator().shanghai_time = + spec.capella_fork_epoch.map(|epoch| { + genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); + mock.server.execution_block_generator().eip4844_time = + spec.eip4844_fork_epoch.map(|epoch| { + genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); + + self + } + pub fn mock_execution_layer(mut self) -> Self { let spec = self.spec.clone().expect("cannot build without spec"); let shanghai_time = spec.capella_fork_epoch.map(|epoch| { diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 039efb368..31165d632 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -311,6 +311,7 @@ impl Tester { .keypairs(vec![]) .genesis_state_ephemeral_store(case.anchor_state.clone()) .mock_execution_layer() + .recalculate_fork_times_with_genesis(0) .mock_execution_layer_all_payloads_valid() .build(); From 2ac609b64e20a98b2d8e1368771ed4fa82f6df2e Mon Sep 17 00:00:00 
2001 From: Mark Mackey Date: Thu, 5 Jan 2023 13:00:44 -0600 Subject: [PATCH 10/14] Fixing Moar Failing Tests --- Makefile | 2 +- beacon_node/beacon_chain/src/test_utils.rs | 18 ++++------ .../execution_layer/src/engine_api/http.rs | 6 ++-- beacon_node/operation_pool/src/lib.rs | 33 +++++-------------- 4 files changed, 19 insertions(+), 40 deletions(-) diff --git a/Makefile b/Makefile index 40db61de7..e1fba5b42 100644 --- a/Makefile +++ b/Makefile @@ -120,7 +120,7 @@ run-ef-tests: test-beacon-chain: $(patsubst %,test-beacon-chain-%,$(FORKS)) test-beacon-chain-%: - env FORK_NAME=$* cargo test --release --features fork_from_env -p beacon_chain + env FORK_NAME=$* cargo test --release --features fork_from_env,withdrawals-processing -p beacon_chain # Run the tests in the `operation_pool` crate for all known forks. test-op-pool: $(patsubst %,test-op-pool-%,$(FORKS)) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 53b333c5f..c4bb9f3d8 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -120,23 +120,19 @@ fn make_rng() -> Mutex { Mutex::new(StdRng::seed_from_u64(0x0DDB1A5E5BAD5EEDu64)) } -pub fn get_fork_from_env() -> ForkName { - let fork_string = std::env::var(FORK_NAME_ENV_VAR).unwrap_or_else(|e| { - panic!( - "{} env var must be defined when using fork_from_env: {:?}", - FORK_NAME_ENV_VAR, e - ) - }); - ForkName::from_str(fork_string.as_str()).unwrap() -} - /// Return a `ChainSpec` suitable for test usage. /// /// If the `fork_from_env` feature is enabled, read the fork to use from the FORK_NAME environment /// variable. Otherwise use the default spec. 
pub fn test_spec() -> ChainSpec { let mut spec = if cfg!(feature = "fork_from_env") { - let fork = get_fork_from_env(); + let fork_name = std::env::var(FORK_NAME_ENV_VAR).unwrap_or_else(|e| { + panic!( + "{} env var must be defined when using fork_from_env: {:?}", + FORK_NAME_ENV_VAR, e + ) + }); + let fork = ForkName::from_str(fork_name.as_str()).unwrap(); fork.make_genesis_spec(E::default_spec()) } else { E::default_spec() diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index bf1da078e..06abe7274 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -852,11 +852,11 @@ impl HttpJsonRpc { pub async fn supported_apis_v1(&self) -> Result { Ok(SupportedApis { new_payload_v1: true, - new_payload_v2: cfg!(feature = "withdrawals-processing"), + new_payload_v2: cfg!(any(feature = "withdrawals-processing", test)), forkchoice_updated_v1: true, - forkchoice_updated_v2: cfg!(feature = "withdrawals-processing"), + forkchoice_updated_v2: cfg!(any(feature = "withdrawals-processing", test)), get_payload_v1: true, - get_payload_v2: cfg!(feature = "withdrawals-processing"), + get_payload_v2: cfg!(any(feature = "withdrawals-processing", test)), exchange_transition_configuration_v1: true, }) } diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 4a895391f..1f4660fc2 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -8,18 +8,19 @@ mod persistence; mod reward_cache; mod sync_aggregate_id; -use crate::attestation_storage::{AttestationMap, CheckpointKey}; -use crate::sync_aggregate_id::SyncAggregateId; pub use attestation::AttMaxCover; pub use attestation_storage::{AttestationRef, SplitAttestation}; -use attester_slashing::AttesterSlashingMaxCover; -use max_cover::maximum_cover; pub use max_cover::MaxCover; -use parking_lot::{RwLock, RwLockWriteGuard}; pub use 
persistence::{ PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV5, }; pub use reward_cache::RewardCache; + +use crate::attestation_storage::{AttestationMap, CheckpointKey}; +use crate::sync_aggregate_id::SyncAggregateId; +use attester_slashing::AttesterSlashingMaxCover; +use max_cover::maximum_cover; +use parking_lot::{RwLock, RwLockWriteGuard}; use state_processing::per_block_processing::errors::AttestationValidationError; use state_processing::per_block_processing::{ get_slashable_indices_modular, verify_exit, VerifySignatures, @@ -766,8 +767,7 @@ mod release_tests { use super::attestation::earliest_attestation_validators; use super::*; use beacon_chain::test_utils::{ - get_fork_from_env, test_spec, BeaconChainHarness, EphemeralHarnessType, - RelativeSyncCommittee, + test_spec, BeaconChainHarness, EphemeralHarnessType, RelativeSyncCommittee, }; use lazy_static::lazy_static; use maplit::hashset; @@ -1787,28 +1787,11 @@ mod release_tests { fn cross_fork_harness() -> (BeaconChainHarness>, ChainSpec) { - let mut spec = test_spec::(); - - if cfg!(feature = "fork_from_env") { - let fork = get_fork_from_env(); - match fork { - ForkName::Altair | ForkName::Merge | ForkName::Capella | ForkName::Eip4844 => {} - _ => panic!( - "Unknown fork {}, add it above AND below so this test doesn't panic", - fork - ), - } - } + let mut spec = E::default_spec(); // Give some room to sign surround slashings. - // It appears we need to set _every_ fork to some non-zero value - // here. Otherwise if we set FORK_NAME_ENV_VAR to some fork that - // isn't listed here, tests that use this function will panic in - // non-trivial ways spec.altair_fork_epoch = Some(Epoch::new(3)); spec.bellatrix_fork_epoch = Some(Epoch::new(6)); - spec.capella_fork_epoch = Some(Epoch::new(9)); - spec.eip4844_fork_epoch = Some(Epoch::new(12)); // To make exits immediately valid. 
spec.shard_committee_period = 0; From cb94f639b00b168c49cc60f216f1bbede1ee5f9f Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Sun, 8 Jan 2023 18:05:28 -0600 Subject: [PATCH 11/14] Isolate withdrawals-processing Feature (#3854) --- Makefile | 4 +- beacon_node/Cargo.toml | 1 - beacon_node/beacon_chain/Cargo.toml | 2 - beacon_node/beacon_chain/src/beacon_chain.rs | 36 ++---- beacon_node/beacon_chain/src/builder.rs | 1 - .../beacon_chain/src/observed_operations.rs | 6 +- beacon_node/execution_layer/Cargo.toml | 2 +- beacon_node/http_api/Cargo.toml | 3 - beacon_node/http_api/src/lib.rs | 16 +-- beacon_node/operation_pool/Cargo.toml | 3 - beacon_node/operation_pool/src/lib.rs | 113 ++++++------------ beacon_node/operation_pool/src/persistence.rs | 1 - common/eth2/Cargo.toml | 1 - .../src/per_block_processing.rs | 8 +- .../process_operations.rs | 5 +- .../state_processing/src/verify_operation.rs | 17 +-- testing/ef_tests/src/cases/operations.rs | 19 +-- testing/ef_tests/src/lib.rs | 1 - testing/ef_tests/tests/tests.rs | 2 - 19 files changed, 74 insertions(+), 167 deletions(-) diff --git a/Makefile b/Makefile index e1fba5b42..41721c2d6 100644 --- a/Makefile +++ b/Makefile @@ -89,12 +89,12 @@ build-release-tarballs: # Runs the full workspace tests in **release**, without downloading any additional # test vectors. test-release: - cargo test --workspace --release --exclude ef_tests --exclude beacon_chain --exclude slasher + cargo test --workspace --features withdrawals-processing --release --exclude ef_tests --exclude beacon_chain --exclude slasher # Runs the full workspace tests in **debug**, without downloading any additional test # vectors. test-debug: - cargo test --workspace --exclude ef_tests --exclude beacon_chain + cargo test --workspace --features withdrawals-processing --exclude ef_tests --exclude beacon_chain # Runs cargo-fmt (linter). 
cargo-fmt: diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index bed32011f..d6b0b643a 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -17,7 +17,6 @@ withdrawals-processing = [ "beacon_chain/withdrawals-processing", "store/withdrawals-processing", "execution_layer/withdrawals-processing", - "http_api/withdrawals-processing", ] [dependencies] diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index a6ac66037..c89f1650e 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -12,9 +12,7 @@ participation_metrics = [] # Exposes validator participation metrics to Prometh fork_from_env = [] # Initialise the harness chain spec from the FORK_NAME env variable withdrawals-processing = [ "state_processing/withdrawals-processing", - "store/withdrawals-processing", "execution_layer/withdrawals-processing", - "operation_pool/withdrawals-processing" ] [dev-dependencies] diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index edf0e149c..798a9b808 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -362,7 +362,6 @@ pub struct BeaconChain { pub(crate) observed_attester_slashings: Mutex, T::EthSpec>>, /// Maintains a record of which validators we've seen BLS to execution changes for. - #[cfg(feature = "withdrawals-processing")] pub(crate) observed_bls_to_execution_changes: Mutex>, /// The most recently validated light client finality update received on gossip. @@ -2232,29 +2231,18 @@ impl BeaconChain { &self, bls_to_execution_change: SignedBlsToExecutionChange, ) -> Result, Error> { - #[cfg(feature = "withdrawals-processing")] - { - let current_fork = self.spec.fork_name_at_slot::(self.slot()?); - if let ForkName::Base | ForkName::Altair | ForkName::Merge = current_fork { - // Disallow BLS to execution changes prior to the Capella fork. 
- return Err(Error::BlsToExecutionChangeBadFork(current_fork)); - } - - let wall_clock_state = self.wall_clock_state()?; - - Ok(self - .observed_bls_to_execution_changes - .lock() - .verify_and_observe(bls_to_execution_change, &wall_clock_state, &self.spec)?) + let current_fork = self.spec.fork_name_at_slot::(self.slot()?); + if let ForkName::Base | ForkName::Altair | ForkName::Merge = current_fork { + // Disallow BLS to execution changes prior to the Capella fork. + return Err(Error::BlsToExecutionChangeBadFork(current_fork)); } - // TODO: remove this whole block once withdrawals-processing is removed - #[cfg(not(feature = "withdrawals-processing"))] - { - #[allow(clippy::drop_non_drop)] - drop(bls_to_execution_change); - Ok(ObservationOutcome::AlreadyKnown) - } + let wall_clock_state = self.wall_clock_state()?; + + Ok(self + .observed_bls_to_execution_changes + .lock() + .verify_and_observe(bls_to_execution_change, &wall_clock_state, &self.spec)?) } /// Import a BLS to execution change to the op pool. 
@@ -2263,12 +2251,8 @@ impl BeaconChain { bls_to_execution_change: SigVerifiedOp, ) { if self.eth1_chain.is_some() { - #[cfg(feature = "withdrawals-processing")] self.op_pool .insert_bls_to_execution_change(bls_to_execution_change); - - #[cfg(not(feature = "withdrawals-processing"))] - drop(bls_to_execution_change); } } diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 3073de9ca..b73ad2f25 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -798,7 +798,6 @@ where observed_voluntary_exits: <_>::default(), observed_proposer_slashings: <_>::default(), observed_attester_slashings: <_>::default(), - #[cfg(feature = "withdrawals-processing")] observed_bls_to_execution_changes: <_>::default(), latest_seen_finality_update: <_>::default(), latest_seen_optimistic_update: <_>::default(), diff --git a/beacon_node/beacon_chain/src/observed_operations.rs b/beacon_node/beacon_chain/src/observed_operations.rs index 5781f9b5b..6e5337393 100644 --- a/beacon_node/beacon_chain/src/observed_operations.rs +++ b/beacon_node/beacon_chain/src/observed_operations.rs @@ -6,12 +6,9 @@ use std::collections::HashSet; use std::marker::PhantomData; use types::{ AttesterSlashing, BeaconState, ChainSpec, EthSpec, ForkName, ProposerSlashing, - SignedVoluntaryExit, Slot, + SignedBlsToExecutionChange, SignedVoluntaryExit, Slot, }; -#[cfg(feature = "withdrawals-processing")] -use types::SignedBlsToExecutionChange; - /// Number of validator indices to store on the stack in `observed_validators`. 
pub const SMALL_VEC_SIZE: usize = 8; @@ -83,7 +80,6 @@ impl ObservableOperation for AttesterSlashing { } } -#[cfg(feature = "withdrawals-processing")] impl ObservableOperation for SignedBlsToExecutionChange { fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> { smallvec![self.message.validator_index] diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 47c1e0341..30312939d 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [features] -withdrawals-processing = ["state_processing/withdrawals-processing", "eth2/withdrawals-processing"] +withdrawals-processing = ["state_processing/withdrawals-processing"] [dependencies] types = { path = "../../consensus/types"} diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index e8a97fd0b..077e3aa7c 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -5,9 +5,6 @@ authors = ["Paul Hauner "] edition = "2021" autotests = false # using a single test binary compiles faster -[features] -withdrawals-processing = [] - [dependencies] warp = { version = "0.3.2", features = ["tls"] } serde = { version = "1.0.116", features = ["derive"] } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 2d6c44680..9d04938ad 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1687,16 +1687,12 @@ pub fn serve( match chain.verify_bls_to_execution_change_for_gossip(address_change) { Ok(ObservationOutcome::New(verified_address_change)) => { - #[cfg(feature = "withdrawals-processing")] - { - publish_pubsub_message( - &network_tx, - PubsubMessage::BlsToExecutionChange(Box::new( - verified_address_change.as_inner().clone(), - )), - )?; - } - + publish_pubsub_message( + &network_tx, + 
PubsubMessage::BlsToExecutionChange(Box::new( + verified_address_change.as_inner().clone(), + )), + )?; chain.import_bls_to_execution_change(verified_address_change); } Ok(ObservationOutcome::AlreadyKnown) => { diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index d75235443..848323358 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -4,9 +4,6 @@ version = "0.2.0" authors = ["Michael Sproul "] edition = "2021" -[features] -withdrawals-processing = [] - [dependencies] derivative = "2.1.1" itertools = "0.10.0" diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 1f4660fc2..a69e0a750 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -51,7 +51,6 @@ pub struct OperationPool { /// Map from exiting validator to their exit data. voluntary_exits: RwLock>>, /// Map from credential changing validator to their execution change data. - #[cfg(feature = "withdrawals-processing")] bls_to_execution_changes: RwLock>>, /// Reward cache for accelerating attestation packing. reward_cache: RwLock, @@ -518,17 +517,10 @@ impl OperationPool { &self, verified_change: SigVerifiedOp, ) { - #[cfg(feature = "withdrawals-processing")] - { - self.bls_to_execution_changes.write().insert( - verified_change.as_inner().message.validator_index, - verified_change, - ); - } - #[cfg(not(feature = "withdrawals-processing"))] - { - drop(verified_change); - } + self.bls_to_execution_changes.write().insert( + verified_change.as_inner().message.validator_index, + verified_change, + ); } /// Get a list of execution changes for inclusion in a block. 
@@ -539,32 +531,19 @@ impl OperationPool { state: &BeaconState, spec: &ChainSpec, ) -> Vec { - #[cfg(feature = "withdrawals-processing")] - { - filter_limit_operations( - self.bls_to_execution_changes.read().values(), - |address_change| { - address_change.signature_is_still_valid(&state.fork()) - && state - .get_validator( - address_change.as_inner().message.validator_index as usize, - ) - .map_or(false, |validator| { - !validator.has_eth1_withdrawal_credential(spec) - }) - }, - |address_change| address_change.as_inner().clone(), - T::MaxBlsToExecutionChanges::to_usize(), - ) - } - - // TODO: remove this whole block once withdrwals-processing is removed - #[cfg(not(feature = "withdrawals-processing"))] - { - #[allow(clippy::drop_copy)] - drop((state, spec)); - vec![] - } + filter_limit_operations( + self.bls_to_execution_changes.read().values(), + |address_change| { + address_change.signature_is_still_valid(&state.fork()) + && state + .get_validator(address_change.as_inner().message.validator_index as usize) + .map_or(false, |validator| { + !validator.has_eth1_withdrawal_credential(spec) + }) + }, + |address_change| address_change.as_inner().clone(), + T::MaxBlsToExecutionChanges::to_usize(), + ) } /// Prune BLS to execution changes that have been applied to the state more than 1 block ago. 
@@ -579,32 +558,22 @@ impl OperationPool { head_state: &BeaconState, spec: &ChainSpec, ) { - #[cfg(feature = "withdrawals-processing")] - { - prune_validator_hash_map( - &mut self.bls_to_execution_changes.write(), - |validator_index, validator| { - validator.has_eth1_withdrawal_credential(spec) - && head_block - .message() - .body() - .bls_to_execution_changes() - .map_or(true, |recent_changes| { - !recent_changes - .iter() - .any(|c| c.message.validator_index == validator_index) - }) - }, - head_state, - ); - } - - // TODO: remove this whole block once withdrwals-processing is removed - #[cfg(not(feature = "withdrawals-processing"))] - { - #[allow(clippy::drop_copy)] - drop((head_block, head_state, spec)); - } + prune_validator_hash_map( + &mut self.bls_to_execution_changes.write(), + |validator_index, validator| { + validator.has_eth1_withdrawal_credential(spec) + && head_block + .message() + .body() + .bls_to_execution_changes() + .map_or(true, |recent_changes| { + !recent_changes + .iter() + .any(|c| c.message.validator_index == validator_index) + }) + }, + head_state, + ); } /// Prune all types of transactions given the latest head state and head fork. @@ -691,17 +660,11 @@ impl OperationPool { /// /// This method may return objects that are invalid for block inclusion. 
pub fn get_all_bls_to_execution_changes(&self) -> Vec { - #[cfg(feature = "withdrawals-processing")] - { - self.bls_to_execution_changes - .read() - .iter() - .map(|(_, address_change)| address_change.as_inner().clone()) - .collect() - } - - #[cfg(not(feature = "withdrawals-processing"))] - vec![] + self.bls_to_execution_changes + .read() + .iter() + .map(|(_, address_change)| address_change.as_inner().clone()) + .collect() } } diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index 184b967db..b232e5a55 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -143,7 +143,6 @@ impl PersistedOperationPool { proposer_slashings, voluntary_exits, // FIXME(capella): implement schema migration for address changes in op pool - #[cfg(feature = "withdrawals-processing")] bls_to_execution_changes: Default::default(), reward_cache: Default::default(), _phantom: Default::default(), diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index fc5eba98e..eca086d83 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -35,4 +35,3 @@ procinfo = { version = "0.4.2", optional = true } [features] default = ["lighthouse"] lighthouse = ["proto_array", "psutil", "procinfo", "store", "slashing_protection"] -withdrawals-processing = ["store/withdrawals-processing"] \ No newline at end of file diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index f1a544099..0192fe0ce 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -19,7 +19,6 @@ pub use process_operations::process_operations; pub use verify_attestation::{ verify_attestation_for_block_inclusion, verify_attestation_for_state, }; -#[cfg(feature = "withdrawals-processing")] pub use verify_bls_to_execution_change::verify_bls_to_execution_change; pub use 
verify_deposit::{ get_existing_validator_index, verify_deposit_merkle_proof, verify_deposit_signature, @@ -36,13 +35,11 @@ pub mod signature_sets; pub mod tests; mod verify_attestation; mod verify_attester_slashing; -#[cfg(feature = "withdrawals-processing")] mod verify_bls_to_execution_change; mod verify_deposit; mod verify_exit; mod verify_proposer_slashing; -#[cfg(feature = "withdrawals-processing")] use crate::common::decrease_balance; #[cfg(feature = "arbitrary-fuzz")] @@ -165,7 +162,6 @@ pub fn per_block_processing>( // previous block. if is_execution_enabled(state, block.body()) { let payload = block.body().execution_payload()?; - #[cfg(feature = "withdrawals-processing")] process_withdrawals::(state, payload, spec)?; process_execution_payload::(state, payload, spec)?; } @@ -524,12 +520,14 @@ pub fn get_expected_withdrawals( } /// Apply withdrawals to the state. -#[cfg(feature = "withdrawals-processing")] pub fn process_withdrawals<'payload, T: EthSpec, Payload: AbstractExecPayload>( state: &mut BeaconState, payload: Payload::Ref<'payload>, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { + if cfg!(not(feature = "withdrawals-processing")) { + return Ok(()); + } match state { BeaconState::Merge(_) => Ok(()), BeaconState::Capella(_) | BeaconState::Eip4844(_) => { diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index f27fd48b4..eacb7617c 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -34,7 +34,6 @@ pub fn process_operations<'a, T: EthSpec, Payload: AbstractExecPayload>( process_deposits(state, block_body.deposits(), spec)?; process_exits(state, block_body.voluntary_exits(), verify_signatures, spec)?; - #[cfg(feature = "withdrawals-processing")] if let Ok(bls_to_execution_changes) = block_body.bls_to_execution_changes() 
{ process_bls_to_execution_changes(state, bls_to_execution_changes, verify_signatures, spec)?; } @@ -295,13 +294,15 @@ pub fn process_exits( /// /// Returns `Ok(())` if the validation and state updates completed successfully. Otherwise returns /// an `Err` describing the invalid object or cause of failure. -#[cfg(feature = "withdrawals-processing")] pub fn process_bls_to_execution_changes( state: &mut BeaconState, bls_to_execution_changes: &[SignedBlsToExecutionChange], verify_signatures: VerifySignatures, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { + if cfg!(not(feature = "withdrawals-processing")) { + return Ok(()); + } for (i, signed_address_change) in bls_to_execution_changes.iter().enumerate() { verify_bls_to_execution_change(state, signed_address_change, verify_signatures, spec) .map_err(|e| e.into_with_index(i))?; diff --git a/consensus/state_processing/src/verify_operation.rs b/consensus/state_processing/src/verify_operation.rs index e2e434417..efd356462 100644 --- a/consensus/state_processing/src/verify_operation.rs +++ b/consensus/state_processing/src/verify_operation.rs @@ -1,8 +1,10 @@ use crate::per_block_processing::{ errors::{ - AttesterSlashingValidationError, ExitValidationError, ProposerSlashingValidationError, + AttesterSlashingValidationError, BlsExecutionChangeValidationError, ExitValidationError, + ProposerSlashingValidationError, }, - verify_attester_slashing, verify_exit, verify_proposer_slashing, + verify_attester_slashing, verify_bls_to_execution_change, verify_exit, + verify_proposer_slashing, }; use crate::VerifySignatures; use derivative::Derivative; @@ -12,15 +14,7 @@ use ssz_derive::{Decode, Encode}; use std::marker::PhantomData; use types::{ AttesterSlashing, BeaconState, ChainSpec, Epoch, EthSpec, Fork, ForkVersion, ProposerSlashing, - SignedVoluntaryExit, -}; - -#[cfg(feature = "withdrawals-processing")] -use { - crate::per_block_processing::{ - errors::BlsExecutionChangeValidationError, 
verify_bls_to_execution_change, - }, - types::SignedBlsToExecutionChange, + SignedBlsToExecutionChange, SignedVoluntaryExit, }; const MAX_FORKS_VERIFIED_AGAINST: usize = 2; @@ -202,7 +196,6 @@ impl VerifyOperation for ProposerSlashing { } } -#[cfg(feature = "withdrawals-processing")] impl VerifyOperation for SignedBlsToExecutionChange { type Error = BlsExecutionChangeValidationError; diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index a08ee1996..71954405c 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -4,30 +4,24 @@ use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use crate::testing_spec; use serde_derive::Deserialize; -#[cfg(feature = "withdrawals-processing")] -use state_processing::per_block_processing::process_operations::{ - process_bls_to_execution_changes, process_bls_to_execution_changes, -}; use state_processing::{ per_block_processing::{ errors::BlockProcessingError, process_block_header, process_execution_payload, process_operations::{ - altair, base, process_attester_slashings, process_deposits, process_exits, - process_proposer_slashings, + altair, base, process_attester_slashings, process_bls_to_execution_changes, + process_deposits, process_exits, process_proposer_slashings, }, - process_sync_aggregate, VerifyBlockRoot, VerifySignatures, + process_sync_aggregate, process_withdrawals, VerifyBlockRoot, VerifySignatures, }, ConsensusContext, }; use std::fmt::Debug; use std::path::Path; -#[cfg(feature = "withdrawals-processing")] -use types::SignedBlsToExecutionChange; use types::{ Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlindedPayload, ChainSpec, Deposit, - EthSpec, ExecutionPayload, ForkName, FullPayload, ProposerSlashing, SignedVoluntaryExit, - SyncAggregate, + EthSpec, ExecutionPayload, ForkName, FullPayload, 
ProposerSlashing, SignedBlsToExecutionChange, + SignedVoluntaryExit, SyncAggregate, }; #[derive(Debug, Clone, Default, Deserialize)] @@ -42,7 +36,6 @@ struct ExecutionMetadata { } /// Newtype for testing withdrawals. -#[cfg(feature = "withdrawals-processing")] #[derive(Debug, Clone, Deserialize)] pub struct WithdrawalsPayload { payload: FullPayload, @@ -341,7 +334,6 @@ impl Operation for BlindedPayload { } } -#[cfg(feature = "withdrawals-processing")] impl Operation for WithdrawalsPayload { fn handler_name() -> String { "withdrawals".into() @@ -374,7 +366,6 @@ impl Operation for WithdrawalsPayload { } } -#[cfg(feature = "withdrawals-processing")] impl Operation for SignedBlsToExecutionChange { fn handler_name() -> String { "bls_to_execution_change".into() diff --git a/testing/ef_tests/src/lib.rs b/testing/ef_tests/src/lib.rs index a4d4f2d52..f52b7bca1 100644 --- a/testing/ef_tests/src/lib.rs +++ b/testing/ef_tests/src/lib.rs @@ -1,5 +1,4 @@ pub use case_result::CaseResult; -#[cfg(feature = "withdrawals-processing")] pub use cases::WithdrawalsPayload; pub use cases::{ Case, EffectiveBalanceUpdates, Eth1DataReset, HistoricalRootsUpdate, InactivityUpdates, diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 66c4f83ec..f84be64da 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -82,14 +82,12 @@ fn operations_execution_payload_blinded() { OperationsHandler::>::default().run(); } -#[cfg(feature = "withdrawals-processing")] #[test] fn operations_withdrawals() { OperationsHandler::>::default().run(); OperationsHandler::>::default().run(); } -#[cfg(feature = "withdrawals-processing")] #[test] fn operations_bls_to_execution_change() { OperationsHandler::::default().run(); From 11f4784ae6bbcfa00cd4c8ffd59266385a337add Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Sun, 8 Jan 2023 19:38:02 -0600 Subject: [PATCH 12/14] Added bls_to_execution_changes to 
PersistedOpPool (#3857) * Added bls_to_execution_changes to PersistedOpPool --- beacon_node/beacon_chain/src/schema_change.rs | 9 +++ .../src/schema_change/migration_schema_v12.rs | 8 +- .../src/schema_change/migration_schema_v14.rs | 75 +++++++++++++++++++ beacon_node/operation_pool/src/lib.rs | 3 +- beacon_node/operation_pool/src/persistence.rs | 69 +++++++++++++---- beacon_node/store/src/metadata.rs | 2 +- 6 files changed, 146 insertions(+), 20 deletions(-) create mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 73906b1b5..8684bafe2 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,6 +1,7 @@ //! Utilities for managing database schema changes. mod migration_schema_v12; mod migration_schema_v13; +mod migration_schema_v14; use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY}; use crate::eth1_chain::SszEth1; @@ -114,6 +115,14 @@ pub fn migrate_schema( Ok(()) } + (SchemaVersion(13), SchemaVersion(14)) => { + let ops = migration_schema_v14::upgrade_to_v14::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(14), SchemaVersion(13)) => { + let ops = migration_schema_v14::downgrade_from_v14::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } // Anything else is an error. (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs index bb72b28c0..c9aa2097f 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs @@ -168,16 +168,14 @@ pub fn downgrade_from_v12( log: Logger, ) -> Result, Error> { // Load a V12 op pool and transform it to V5. 
- let PersistedOperationPoolV12 { + let PersistedOperationPoolV12:: { attestations, sync_contributions, attester_slashings, proposer_slashings, voluntary_exits, - } = if let Some(PersistedOperationPool::::V12(op_pool)) = - db.get_item(&OP_POOL_DB_KEY)? - { - op_pool + } = if let Some(op_pool_v12) = db.get_item(&OP_POOL_DB_KEY)? { + op_pool_v12 } else { debug!(log, "Nothing to do, no operation pool stored"); return Ok(vec![]); diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs new file mode 100644 index 000000000..02422a403 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs @@ -0,0 +1,75 @@ +use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY}; +use operation_pool::{ + PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV14, +}; +use slog::{debug, info, Logger}; +use std::sync::Arc; +use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; + +pub fn upgrade_to_v14( + db: Arc>, + log: Logger, +) -> Result, Error> { + // Load a V12 op pool and transform it to V14. + let PersistedOperationPoolV12:: { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + } = if let Some(op_pool_v12) = db.get_item(&OP_POOL_DB_KEY)? { + op_pool_v12 + } else { + debug!(log, "Nothing to do, no operation pool stored"); + return Ok(vec![]); + }; + + // initialize with empty vector + let bls_to_execution_changes = vec![]; + let v14 = PersistedOperationPool::V14(PersistedOperationPoolV14 { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + }); + Ok(vec![v14.as_kv_store_op(OP_POOL_DB_KEY)]) +} + +pub fn downgrade_from_v14( + db: Arc>, + log: Logger, +) -> Result, Error> { + // Load a V14 op pool and transform it to V12. 
+ let PersistedOperationPoolV14 { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + } = if let Some(PersistedOperationPool::::V14(op_pool)) = + db.get_item(&OP_POOL_DB_KEY)? + { + op_pool + } else { + debug!(log, "Nothing to do, no operation pool stored"); + return Ok(vec![]); + }; + + info!( + log, + "Dropping bls_to_execution_changes from pool"; + "count" => bls_to_execution_changes.len(), + ); + + let v12 = PersistedOperationPoolV12 { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + }; + Ok(vec![v12.as_kv_store_op(OP_POOL_DB_KEY)]) +} diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index a69e0a750..70e0d56bc 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -12,7 +12,8 @@ pub use attestation::AttMaxCover; pub use attestation_storage::{AttestationRef, SplitAttestation}; pub use max_cover::MaxCover; pub use persistence::{ - PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV5, + PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV14, + PersistedOperationPoolV5, }; pub use reward_cache::RewardCache; diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index b232e5a55..043e6fb7f 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -18,7 +18,7 @@ type PersistedSyncContributions = Vec<(SyncAggregateId, Vec { #[superstruct(only(V5))] pub attestations_v5: Vec<(AttestationId, Vec>)>, /// Attestations and their attesting indices. - #[superstruct(only(V12))] + #[superstruct(only(V12, V14))] pub attestations: Vec<(Attestation, Vec)>, /// Mapping from sync contribution ID to sync contributions and aggregate. 
pub sync_contributions: PersistedSyncContributions, @@ -40,20 +40,23 @@ pub struct PersistedOperationPool { #[superstruct(only(V5))] pub attester_slashings_v5: Vec<(AttesterSlashing, ForkVersion)>, /// Attester slashings. - #[superstruct(only(V12))] + #[superstruct(only(V12, V14))] pub attester_slashings: Vec, T>>, /// [DEPRECATED] Proposer slashings. #[superstruct(only(V5))] pub proposer_slashings_v5: Vec, /// Proposer slashings with fork information. - #[superstruct(only(V12))] + #[superstruct(only(V12, V14))] pub proposer_slashings: Vec>, /// [DEPRECATED] Voluntary exits. #[superstruct(only(V5))] pub voluntary_exits_v5: Vec, /// Voluntary exits with fork information. - #[superstruct(only(V12))] + #[superstruct(only(V12, V14))] pub voluntary_exits: Vec>, + /// BLS to Execution Changes + #[superstruct(only(V14))] + pub bls_to_execution_changes: Vec>, } impl PersistedOperationPool { @@ -99,12 +102,20 @@ impl PersistedOperationPool { .map(|(_, exit)| exit.clone()) .collect(); - PersistedOperationPool::V12(PersistedOperationPoolV12 { + let bls_to_execution_changes = operation_pool + .bls_to_execution_changes + .read() + .iter() + .map(|(_, bls_to_execution_change)| bls_to_execution_change.clone()) + .collect(); + + PersistedOperationPool::V14(PersistedOperationPoolV14 { attestations, sync_contributions, attester_slashings, proposer_slashings, voluntary_exits, + bls_to_execution_changes, }) } @@ -127,23 +138,41 @@ impl PersistedOperationPool { ); let sync_contributions = RwLock::new(self.sync_contributions().iter().cloned().collect()); let attestations = match self { - PersistedOperationPool::V5(_) => return Err(OpPoolError::IncorrectOpPoolVariant), - PersistedOperationPool::V12(pool) => { + PersistedOperationPool::V5(_) | PersistedOperationPool::V12(_) => { + return Err(OpPoolError::IncorrectOpPoolVariant) + } + PersistedOperationPool::V14(ref pool) => { let mut map = AttestationMap::default(); - for (att, attesting_indices) in pool.attestations { + for (att, 
attesting_indices) in pool.attestations.clone() { map.insert(att, attesting_indices); } RwLock::new(map) } }; + let bls_to_execution_changes = match self { + PersistedOperationPool::V5(_) | PersistedOperationPool::V12(_) => { + return Err(OpPoolError::IncorrectOpPoolVariant) + } + PersistedOperationPool::V14(pool) => RwLock::new( + pool.bls_to_execution_changes + .iter() + .cloned() + .map(|bls_to_execution_change| { + ( + bls_to_execution_change.as_inner().message.validator_index, + bls_to_execution_change, + ) + }) + .collect(), + ), + }; let op_pool = OperationPool { attestations, sync_contributions, attester_slashings, proposer_slashings, voluntary_exits, - // FIXME(capella): implement schema migration for address changes in op pool - bls_to_execution_changes: Default::default(), + bls_to_execution_changes, reward_cache: Default::default(), _phantom: Default::default(), }; @@ -165,6 +194,20 @@ impl StoreItem for PersistedOperationPoolV5 { } } +impl StoreItem for PersistedOperationPoolV12 { + fn db_column() -> DBColumn { + DBColumn::OpPool + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + PersistedOperationPoolV12::from_ssz_bytes(bytes).map_err(Into::into) + } +} + /// Deserialization for `PersistedOperationPool` defaults to `PersistedOperationPool::V12`. impl StoreItem for PersistedOperationPool { fn db_column() -> DBColumn { @@ -177,8 +220,8 @@ impl StoreItem for PersistedOperationPool { fn from_store_bytes(bytes: &[u8]) -> Result { // Default deserialization to the latest variant. 
- PersistedOperationPoolV12::from_ssz_bytes(bytes) - .map(Self::V12) + PersistedOperationPoolV14::from_ssz_bytes(bytes) + .map(Self::V14) .map_err(Into::into) } } diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 5cb3f1220..fb5769635 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{Checkpoint, Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(13); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(14); // All the keys that get stored under the `BeaconMeta` column. // From 87c44697d090347d32a093d5bd0be88c38519290 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 9 Jan 2023 14:40:09 +1100 Subject: [PATCH 13/14] Bump MSRV to 1.65 (#3860) --- lighthouse/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 2db42d6ec..be022e311 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -4,7 +4,7 @@ version = "3.3.0" authors = ["Sigma Prime "] edition = "2021" autotests = false -rust-version = "1.62" +rust-version = "1.65" [features] default = ["slasher-mdbx"] From 98b11bbd3fec8b78602a80243eb889d75e1f7d67 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Tue, 10 Jan 2023 20:40:21 -0500 Subject: [PATCH 14/14] add historical summaries (#3865) * add historical summaries * fix tree hash caching, disable the sanity slots test with fake crypto * add ssz static HistoricalSummary * only store historical summaries after capella * Teach `UpdatePattern` about Capella * Tidy EF tests * Clippy Co-authored-by: Michael Sproul --- beacon_node/store/src/chunked_vector.rs | 78 ++++++++++++++-- beacon_node/store/src/hot_cold_store.rs | 4 +- beacon_node/store/src/lib.rs | 2 + beacon_node/store/src/partial_beacon_state.rs | 72 +++++++++++---- .../src/per_epoch_processing.rs | 12 +-- 
.../src/per_epoch_processing/capella.rs | 78 ++++++++++++++++ .../capella/historical_summaries_update.rs | 23 +++++ .../state_processing/src/upgrade/capella.rs | 4 +- .../state_processing/src/upgrade/eip4844.rs | 3 +- consensus/types/src/beacon_state.rs | 8 +- .../types/src/beacon_state/tree_hash_cache.rs | 38 ++++++-- consensus/types/src/historical_summary.rs | 88 +++++++++++++++++++ consensus/types/src/lib.rs | 1 + lcli/src/main.rs | 1 - testing/ef_tests/Makefile | 2 +- testing/ef_tests/check_all_files_accessed.py | 2 - .../ef_tests/src/cases/epoch_processing.rs | 30 ++++++- testing/ef_tests/src/handler.rs | 5 ++ testing/ef_tests/src/lib.rs | 8 +- testing/ef_tests/src/type_name.rs | 2 + testing/ef_tests/tests/tests.rs | 13 +++ 21 files changed, 424 insertions(+), 50 deletions(-) create mode 100644 consensus/state_processing/src/per_epoch_processing/capella.rs create mode 100644 consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs create mode 100644 consensus/types/src/historical_summary.rs diff --git a/beacon_node/store/src/chunked_vector.rs b/beacon_node/store/src/chunked_vector.rs index 8c64d4bcc..73edfbb07 100644 --- a/beacon_node/store/src/chunked_vector.rs +++ b/beacon_node/store/src/chunked_vector.rs @@ -18,6 +18,7 @@ use self::UpdatePattern::*; use crate::*; use ssz::{Decode, Encode}; use typenum::Unsigned; +use types::historical_summary::HistoricalSummary; /// Description of how a `BeaconState` field is updated during state processing. /// @@ -26,7 +27,18 @@ use typenum::Unsigned; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum UpdatePattern { /// The value is updated once per `n` slots. - OncePerNSlots { n: u64 }, + OncePerNSlots { + n: u64, + /// The slot at which the field begins to accumulate values. + /// + /// The field should not be read or written until `activation_slot` is reached, and the + /// activation slot should act as an offset when converting slots to vector indices. 
+ activation_slot: Option, + /// The slot at which the field ceases to accumulate values. + /// + /// If this is `None` then the field is continually updated. + deactivation_slot: Option, + }, /// The value is updated once per epoch, for the epoch `current_epoch - lag`. OncePerEpoch { lag: u64 }, } @@ -98,12 +110,30 @@ pub trait Field: Copy { fn start_and_end_vindex(current_slot: Slot, spec: &ChainSpec) -> (usize, usize) { // We take advantage of saturating subtraction on slots and epochs match Self::update_pattern(spec) { - OncePerNSlots { n } => { + OncePerNSlots { + n, + activation_slot, + deactivation_slot, + } => { // Per-slot changes exclude the index for the current slot, because // it won't be set until the slot completes (think of `state_roots`, `block_roots`). // This also works for the `historical_roots` because at the `n`th slot, the 0th // entry of the list is created, and before that the list is empty. - let end_vindex = current_slot / n; + // + // To account for the switch from historical roots to historical summaries at + // Capella we also modify the current slot by the activation and deactivation slots. + // The activation slot acts as an offset (subtraction) while the deactivation slot + // acts as a clamp (min). + let slot_with_clamp = deactivation_slot.map_or(current_slot, |deactivation_slot| { + std::cmp::min(current_slot, deactivation_slot) + }); + let slot_with_clamp_and_offset = if let Some(activation_slot) = activation_slot { + slot_with_clamp - activation_slot + } else { + // Return (0, 0) to indicate that the field should not be read/written. 
+ return (0, 0); + }; + let end_vindex = slot_with_clamp_and_offset / n; let start_vindex = end_vindex - Self::Length::to_u64(); (start_vindex.as_usize(), end_vindex.as_usize()) } @@ -295,7 +325,11 @@ field!( Hash256, T::SlotsPerHistoricalRoot, DBColumn::BeaconBlockRoots, - |_| OncePerNSlots { n: 1 }, + |_| OncePerNSlots { + n: 1, + activation_slot: Some(Slot::new(0)), + deactivation_slot: None + }, |state: &BeaconState<_>, index, _| safe_modulo_index(state.block_roots(), index) ); @@ -305,7 +339,11 @@ field!( Hash256, T::SlotsPerHistoricalRoot, DBColumn::BeaconStateRoots, - |_| OncePerNSlots { n: 1 }, + |_| OncePerNSlots { + n: 1, + activation_slot: Some(Slot::new(0)), + deactivation_slot: None, + }, |state: &BeaconState<_>, index, _| safe_modulo_index(state.state_roots(), index) ); @@ -315,8 +353,12 @@ field!( Hash256, T::HistoricalRootsLimit, DBColumn::BeaconHistoricalRoots, - |_| OncePerNSlots { - n: T::SlotsPerHistoricalRoot::to_u64() + |spec: &ChainSpec| OncePerNSlots { + n: T::SlotsPerHistoricalRoot::to_u64(), + activation_slot: Some(Slot::new(0)), + deactivation_slot: spec + .capella_fork_epoch + .map(|fork_epoch| fork_epoch.start_slot(T::slots_per_epoch())), }, |state: &BeaconState<_>, index, _| safe_modulo_index(state.historical_roots(), index) ); @@ -331,6 +373,27 @@ field!( |state: &BeaconState<_>, index, _| safe_modulo_index(state.randao_mixes(), index) ); +field!( + HistoricalSummaries, + VariableLengthField, + HistoricalSummary, + T::HistoricalRootsLimit, + DBColumn::BeaconHistoricalSummaries, + |spec: &ChainSpec| OncePerNSlots { + n: T::SlotsPerHistoricalRoot::to_u64(), + activation_slot: spec + .capella_fork_epoch + .map(|fork_epoch| fork_epoch.start_slot(T::slots_per_epoch())), + deactivation_slot: None, + }, + |state: &BeaconState<_>, index, _| safe_modulo_index( + state + .historical_summaries() + .map_err(|_| ChunkError::InvalidFork)?, + index + ) +); + pub fn store_updated_vector, E: EthSpec, S: KeyValueStore>( field: F, store: &S, @@ -679,6 
+742,7 @@ pub enum ChunkError { end_vindex: usize, length: usize, }, + InvalidFork, } #[cfg(test)] diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 4600e5e82..93af0ca18 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1,5 +1,5 @@ use crate::chunked_vector::{ - store_updated_vector, BlockRoots, HistoricalRoots, RandaoMixes, StateRoots, + store_updated_vector, BlockRoots, HistoricalRoots, HistoricalSummaries, RandaoMixes, StateRoots, }; use crate::config::{ OnDiskStoreConfig, StoreConfig, DEFAULT_SLOTS_PER_RESTORE_POINT, @@ -948,6 +948,7 @@ impl, Cold: ItemStore> HotColdDB store_updated_vector(StateRoots, db, state, &self.spec, ops)?; store_updated_vector(HistoricalRoots, db, state, &self.spec, ops)?; store_updated_vector(RandaoMixes, db, state, &self.spec, ops)?; + store_updated_vector(HistoricalSummaries, db, state, &self.spec, ops)?; // 3. Store restore point. let restore_point_index = state.slot().as_u64() / self.config.slots_per_restore_point; @@ -1002,6 +1003,7 @@ impl, Cold: ItemStore> HotColdDB partial_state.load_state_roots(&self.cold_db, &self.spec)?; partial_state.load_historical_roots(&self.cold_db, &self.spec)?; partial_state.load_randao_mixes(&self.cold_db, &self.spec)?; + partial_state.load_historical_summaries(&self.cold_db, &self.spec)?; partial_state.try_into() } diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index d9041dd63..9db5f45ec 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -214,6 +214,8 @@ pub enum DBColumn { /// For Optimistically Imported Merge Transition Blocks #[strum(serialize = "otb")] OptimisticTransitionBlock, + #[strum(serialize = "bhs")] + BeaconHistoricalSummaries, } /// A block from the database, which might have an execution payload or not. 
diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index ca35bc0b2..55697bd31 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -1,12 +1,13 @@ use crate::chunked_vector::{ - load_variable_list_from_db, load_vector_from_db, BlockRoots, HistoricalRoots, RandaoMixes, - StateRoots, + load_variable_list_from_db, load_vector_from_db, BlockRoots, HistoricalRoots, + HistoricalSummaries, RandaoMixes, StateRoots, }; use crate::{get_key_for_col, DBColumn, Error, KeyValueStore, KeyValueStoreOp}; use ssz::{Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use std::convert::TryInto; use std::sync::Arc; +use types::historical_summary::HistoricalSummary; use types::superstruct; use types::*; @@ -104,16 +105,20 @@ where )] pub latest_execution_payload_header: ExecutionPayloadHeaderEip4844, - // Withdrawals + // Capella #[superstruct(only(Capella, Eip4844))] pub next_withdrawal_index: u64, #[superstruct(only(Capella, Eip4844))] pub next_withdrawal_validator_index: u64, + + #[ssz(skip_serializing, skip_deserializing)] + #[superstruct(only(Capella, Eip4844))] + pub historical_summaries: Option>, } /// Implement the conversion function from BeaconState -> PartialBeaconState. macro_rules! impl_from_state_forgetful { - ($s:ident, $outer:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*]) => { + ($s:ident, $outer:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*], [$($extra_fields_opt:ident),*]) => { PartialBeaconState::$variant_name($struct_name { // Versioning genesis_time: $s.genesis_time, @@ -154,6 +159,11 @@ macro_rules! 
impl_from_state_forgetful { // Variant-specific fields $( $extra_fields: $s.$extra_fields.clone() + ),*, + + // Variant-specific optional + $( + $extra_fields_opt: None ),* }) } @@ -168,7 +178,8 @@ impl PartialBeaconState { outer, Base, PartialBeaconStateBase, - [previous_epoch_attestations, current_epoch_attestations] + [previous_epoch_attestations, current_epoch_attestations], + [] ), BeaconState::Altair(s) => impl_from_state_forgetful!( s, @@ -181,7 +192,8 @@ impl PartialBeaconState { current_sync_committee, next_sync_committee, inactivity_scores - ] + ], + [] ), BeaconState::Merge(s) => impl_from_state_forgetful!( s, @@ -195,7 +207,8 @@ impl PartialBeaconState { next_sync_committee, inactivity_scores, latest_execution_payload_header - ] + ], + [] ), BeaconState::Capella(s) => impl_from_state_forgetful!( s, @@ -211,7 +224,8 @@ impl PartialBeaconState { latest_execution_payload_header, next_withdrawal_index, next_withdrawal_validator_index - ] + ], + [historical_summaries] ), BeaconState::Eip4844(s) => impl_from_state_forgetful!( s, @@ -227,7 +241,8 @@ impl PartialBeaconState { latest_execution_payload_header, next_withdrawal_index, next_withdrawal_validator_index - ] + ], + [historical_summaries] ), } } @@ -303,6 +318,23 @@ impl PartialBeaconState { Ok(()) } + pub fn load_historical_summaries>( + &mut self, + store: &S, + spec: &ChainSpec, + ) -> Result<(), Error> { + let slot = self.slot(); + if let Ok(historical_summaries) = self.historical_summaries_mut() { + if historical_summaries.is_none() { + *historical_summaries = + Some(load_variable_list_from_db::( + store, slot, spec, + )?); + } + } + Ok(()) + } + pub fn load_randao_mixes>( &mut self, store: &S, @@ -326,7 +358,7 @@ impl PartialBeaconState { /// Implement the conversion from PartialBeaconState -> BeaconState. macro_rules! 
impl_try_into_beacon_state { - ($inner:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*]) => { + ($inner:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*], [$($extra_opt_fields:ident),*]) => { BeaconState::$variant_name($struct_name { // Versioning genesis_time: $inner.genesis_time, @@ -371,6 +403,11 @@ macro_rules! impl_try_into_beacon_state { // Variant-specific fields $( $extra_fields: $inner.$extra_fields + ),*, + + // Variant-specific optional fields + $( + $extra_opt_fields: unpack_field($inner.$extra_opt_fields)? ),* }) } @@ -389,7 +426,8 @@ impl TryInto> for PartialBeaconState { inner, Base, BeaconStateBase, - [previous_epoch_attestations, current_epoch_attestations] + [previous_epoch_attestations, current_epoch_attestations], + [] ), PartialBeaconState::Altair(inner) => impl_try_into_beacon_state!( inner, @@ -401,7 +439,8 @@ impl TryInto> for PartialBeaconState { current_sync_committee, next_sync_committee, inactivity_scores - ] + ], + [] ), PartialBeaconState::Merge(inner) => impl_try_into_beacon_state!( inner, @@ -414,7 +453,8 @@ impl TryInto> for PartialBeaconState { next_sync_committee, inactivity_scores, latest_execution_payload_header - ] + ], + [] ), PartialBeaconState::Capella(inner) => impl_try_into_beacon_state!( inner, @@ -429,7 +469,8 @@ impl TryInto> for PartialBeaconState { latest_execution_payload_header, next_withdrawal_index, next_withdrawal_validator_index - ] + ], + [historical_summaries] ), PartialBeaconState::Eip4844(inner) => impl_try_into_beacon_state!( inner, @@ -444,7 +485,8 @@ impl TryInto> for PartialBeaconState { latest_execution_payload_header, next_withdrawal_index, next_withdrawal_validator_index - ] + ], + [historical_summaries] ), }; Ok(state) diff --git a/consensus/state_processing/src/per_epoch_processing.rs b/consensus/state_processing/src/per_epoch_processing.rs index f227b8286..996e39c27 100644 --- a/consensus/state_processing/src/per_epoch_processing.rs +++ 
b/consensus/state_processing/src/per_epoch_processing.rs @@ -3,14 +3,16 @@ pub use epoch_processing_summary::EpochProcessingSummary; use errors::EpochProcessingError as Error; pub use justification_and_finalization_state::JustificationAndFinalizationState; -pub use registry_updates::process_registry_updates; use safe_arith::SafeArith; -pub use slashings::process_slashings; use types::{BeaconState, ChainSpec, EthSpec}; + +pub use registry_updates::process_registry_updates; +pub use slashings::process_slashings; pub use weigh_justification_and_finalization::weigh_justification_and_finalization; pub mod altair; pub mod base; +pub mod capella; pub mod effective_balance_updates; pub mod epoch_processing_summary; pub mod errors; @@ -37,10 +39,8 @@ pub fn process_epoch( match state { BeaconState::Base(_) => base::process_epoch(state, spec), - BeaconState::Altair(_) - | BeaconState::Merge(_) - | BeaconState::Capella(_) - | BeaconState::Eip4844(_) => altair::process_epoch(state, spec), + BeaconState::Altair(_) | BeaconState::Merge(_) => altair::process_epoch(state, spec), + BeaconState::Capella(_) | BeaconState::Eip4844(_) => capella::process_epoch(state, spec), } } diff --git a/consensus/state_processing/src/per_epoch_processing/capella.rs b/consensus/state_processing/src/per_epoch_processing/capella.rs new file mode 100644 index 000000000..aaf301f29 --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/capella.rs @@ -0,0 +1,78 @@ +use super::altair::inactivity_updates::process_inactivity_updates; +use super::altair::justification_and_finalization::process_justification_and_finalization; +use super::altair::participation_cache::ParticipationCache; +use super::altair::participation_flag_updates::process_participation_flag_updates; +use super::altair::rewards_and_penalties::process_rewards_and_penalties; +use super::altair::sync_committee_updates::process_sync_committee_updates; +use super::{process_registry_updates, process_slashings, 
EpochProcessingSummary, Error}; +use crate::per_epoch_processing::{ + effective_balance_updates::process_effective_balance_updates, + resets::{process_eth1_data_reset, process_randao_mixes_reset, process_slashings_reset}, +}; +use types::{BeaconState, ChainSpec, EthSpec, RelativeEpoch}; + +pub use historical_summaries_update::process_historical_summaries_update; + +mod historical_summaries_update; + +pub fn process_epoch( + state: &mut BeaconState, + spec: &ChainSpec, +) -> Result, Error> { + // Ensure the committee caches are built. + state.build_committee_cache(RelativeEpoch::Previous, spec)?; + state.build_committee_cache(RelativeEpoch::Current, spec)?; + state.build_committee_cache(RelativeEpoch::Next, spec)?; + + // Pre-compute participating indices and total balances. + let participation_cache = ParticipationCache::new(state, spec)?; + let sync_committee = state.current_sync_committee()?.clone(); + + // Justification and finalization. + let justification_and_finalization_state = + process_justification_and_finalization(state, &participation_cache)?; + justification_and_finalization_state.apply_changes_to_state(state); + + process_inactivity_updates(state, &participation_cache, spec)?; + + // Rewards and Penalties. + process_rewards_and_penalties(state, &participation_cache, spec)?; + + // Registry Updates. + process_registry_updates(state, spec)?; + + // Slashings. + process_slashings( + state, + participation_cache.current_epoch_total_active_balance(), + spec, + )?; + + // Reset eth1 data votes. + process_eth1_data_reset(state)?; + + // Update effective balances with hysteresis (lag). 
+ process_effective_balance_updates(state, spec)?; + + // Reset slashings + process_slashings_reset(state)?; + + // Set randao mix + process_randao_mixes_reset(state)?; + + // Set historical summaries accumulator + process_historical_summaries_update(state)?; + + // Rotate current/previous epoch participation + process_participation_flag_updates(state)?; + + process_sync_committee_updates(state, spec)?; + + // Rotate the epoch caches to suit the epoch transition. + state.advance_caches(spec)?; + + Ok(EpochProcessingSummary::Altair { + participation_cache, + sync_committee, + }) +} diff --git a/consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs b/consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs new file mode 100644 index 000000000..9a87ceb60 --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs @@ -0,0 +1,23 @@ +use crate::EpochProcessingError; +use safe_arith::SafeArith; +use types::historical_summary::HistoricalSummary; +use types::{BeaconState, EthSpec}; + +pub fn process_historical_summaries_update( + state: &mut BeaconState, +) -> Result<(), EpochProcessingError> { + // Set historical block root accumulator. + let next_epoch = state.next_epoch()?; + if next_epoch + .as_u64() + .safe_rem((T::slots_per_historical_root() as u64).safe_div(T::slots_per_epoch())?)? + == 0 + { + let summary = HistoricalSummary::new(state); + return state + .historical_summaries_mut()? 
+ .push(summary) + .map_err(Into::into); + } + Ok(()) +} diff --git a/consensus/state_processing/src/upgrade/capella.rs b/consensus/state_processing/src/upgrade/capella.rs index dc759b384..3b933fac3 100644 --- a/consensus/state_processing/src/upgrade/capella.rs +++ b/consensus/state_processing/src/upgrade/capella.rs @@ -1,3 +1,4 @@ +use ssz_types::VariableList; use std::mem; use types::{BeaconState, BeaconStateCapella, BeaconStateError as Error, ChainSpec, EthSpec, Fork}; @@ -55,9 +56,10 @@ pub fn upgrade_to_capella( next_sync_committee: pre.next_sync_committee.clone(), // Execution latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_capella(), - // Withdrawals + // Capella next_withdrawal_index: 0, next_withdrawal_validator_index: 0, + historical_summaries: VariableList::default(), // Caches total_active_balance: pre.total_active_balance, committee_caches: mem::take(&mut pre.committee_caches), diff --git a/consensus/state_processing/src/upgrade/eip4844.rs b/consensus/state_processing/src/upgrade/eip4844.rs index e829c01e7..4f6ff9d19 100644 --- a/consensus/state_processing/src/upgrade/eip4844.rs +++ b/consensus/state_processing/src/upgrade/eip4844.rs @@ -57,9 +57,10 @@ pub fn upgrade_to_eip4844( next_sync_committee: pre.next_sync_committee.clone(), // Execution latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_eip4844(), - // Withdrawals + // Capella next_withdrawal_index: pre.next_withdrawal_index, next_withdrawal_validator_index: pre.next_withdrawal_validator_index, + historical_summaries: pre.historical_summaries.clone(), // Caches total_active_balance: pre.total_active_balance, committee_caches: mem::take(&mut pre.committee_caches), diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 625ff3d17..f51a7bf9f 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -14,6 +14,7 @@ use ssz::{ssz_encode, Decode, DecodeError, Encode}; 
use ssz_derive::{Decode, Encode}; use ssz_types::{typenum::Unsigned, BitVector, FixedVector}; use std::convert::TryInto; +use std::hash::Hash; use std::{fmt, mem, sync::Arc}; use superstruct::superstruct; use swap_or_not_shuffle::compute_shuffled_index; @@ -25,6 +26,7 @@ pub use self::committee_cache::{ compute_committee_index_in_epoch, compute_committee_range_in_epoch, epoch_committee_count, CommitteeCache, }; +use crate::historical_summary::HistoricalSummary; pub use clone_config::CloneConfig; pub use eth_spec::*; pub use iter::BlockRootsIter; @@ -223,6 +225,7 @@ where pub block_roots: FixedVector, #[compare_fields(as_slice)] pub state_roots: FixedVector, + // Frozen in Capella, replaced by historical_summaries pub historical_roots: VariableList, // Ethereum 1.0 chain data @@ -296,11 +299,14 @@ where )] pub latest_execution_payload_header: ExecutionPayloadHeaderEip4844, - // Withdrawals + // Capella #[superstruct(only(Capella, Eip4844), partial_getter(copy))] pub next_withdrawal_index: u64, #[superstruct(only(Capella, Eip4844), partial_getter(copy))] pub next_withdrawal_validator_index: u64, + // Deep history valid from Capella onwards. 
+ #[superstruct(only(Capella, Eip4844))] + pub historical_summaries: VariableList, // Caching (not in the spec) #[serde(skip_serializing, skip_deserializing)] diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs index 4cfc684f4..5515fb753 100644 --- a/consensus/types/src/beacon_state/tree_hash_cache.rs +++ b/consensus/types/src/beacon_state/tree_hash_cache.rs @@ -3,6 +3,7 @@ #![allow(clippy::indexing_slicing)] use super::Error; +use crate::historical_summary::HistoricalSummaryCache; use crate::{BeaconState, EthSpec, Hash256, ParticipationList, Slot, Unsigned, Validator}; use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, TreeHashCache}; use rayon::prelude::*; @@ -142,6 +143,7 @@ pub struct BeaconTreeHashCacheInner { block_roots: TreeHashCache, state_roots: TreeHashCache, historical_roots: TreeHashCache, + historical_summaries: OptionalTreeHashCache, balances: TreeHashCache, randao_mixes: TreeHashCache, slashings: TreeHashCache, @@ -164,6 +166,14 @@ impl BeaconTreeHashCacheInner { let historical_roots = state .historical_roots() .new_tree_hash_cache(&mut fixed_arena); + let historical_summaries = OptionalTreeHashCache::new( + state + .historical_summaries() + .ok() + .map(HistoricalSummaryCache::new) + .as_ref(), + ); + let randao_mixes = state.randao_mixes().new_tree_hash_cache(&mut fixed_arena); let validators = ValidatorsListTreeHashCache::new::(state.validators()); @@ -200,6 +210,7 @@ impl BeaconTreeHashCacheInner { block_roots, state_roots, historical_roots, + historical_summaries, balances, randao_mixes, slashings, @@ -249,6 +260,7 @@ impl BeaconTreeHashCacheInner { .slashings() .recalculate_tree_hash_root(&mut self.slashings_arena, &mut self.slashings)?, ]; + // Participation if let BeaconState::Base(state) = state { leaves.push(state.previous_epoch_attestations.tree_hash_root()); @@ -291,6 +303,24 @@ impl BeaconTreeHashCacheInner { if let Ok(payload_header) = 
state.latest_execution_payload_header() { leaves.push(payload_header.tree_hash_root()); } + + // Withdrawal indices (Capella and later). + if let Ok(next_withdrawal_index) = state.next_withdrawal_index() { + leaves.push(next_withdrawal_index.tree_hash_root()); + } + if let Ok(next_withdrawal_validator_index) = state.next_withdrawal_validator_index() { + leaves.push(next_withdrawal_validator_index.tree_hash_root()); + } + + // Historical roots/summaries (Capella and later). + if let Ok(historical_summaries) = state.historical_summaries() { + leaves.push( + self.historical_summaries.recalculate_tree_hash_root( + &HistoricalSummaryCache::new(historical_summaries), + )?, + ); + } + Ok(leaves) } @@ -335,14 +365,6 @@ impl BeaconTreeHashCacheInner { hasher.write(leaf.as_bytes())?; } - // Withdrawal indices (Capella and later). - if let Ok(next_withdrawal_index) = state.next_withdrawal_index() { - hasher.write(next_withdrawal_index.tree_hash_root().as_bytes())?; - } - if let Ok(next_withdrawal_validator_index) = state.next_withdrawal_validator_index() { - hasher.write(next_withdrawal_validator_index.tree_hash_root().as_bytes())?; - } - let root = hasher.finish()?; self.previous_state = Some((root, state.slot())); diff --git a/consensus/types/src/historical_summary.rs b/consensus/types/src/historical_summary.rs new file mode 100644 index 000000000..1f79ee49f --- /dev/null +++ b/consensus/types/src/historical_summary.rs @@ -0,0 +1,88 @@ +use crate::test_utils::TestRandom; +use crate::Unsigned; +use crate::{BeaconState, EthSpec, Hash256}; +use cached_tree_hash::Error; +use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, TreeHashCache}; +use compare_fields_derive::CompareFields; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; +use test_random_derive::TestRandom; +use tree_hash::{mix_in_length, TreeHash, BYTES_PER_CHUNK}; +use tree_hash_derive::TreeHash; + +/// `HistoricalSummary` matches the components of 
the phase0 `HistoricalBatch` +/// making the two hash_tree_root-compatible. This struct is introduced into the beacon state +/// in the Capella hard fork. +/// +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#historicalsummary +#[derive( + Debug, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + CompareFields, + Clone, + Copy, + Default, +)] +pub struct HistoricalSummary { + block_summary_root: Hash256, + state_summary_root: Hash256, +} + +impl HistoricalSummary { + pub fn new(state: &BeaconState) -> Self { + Self { + block_summary_root: state.block_roots().tree_hash_root(), + state_summary_root: state.state_roots().tree_hash_root(), + } + } +} + +/// Wrapper type allowing the implementation of `CachedTreeHash`. +#[derive(Debug)] +pub struct HistoricalSummaryCache<'a, N: Unsigned> { + pub inner: &'a VariableList, +} + +impl<'a, N: Unsigned> HistoricalSummaryCache<'a, N> { + pub fn new(inner: &'a VariableList) -> Self { + Self { inner } + } + + #[allow(clippy::len_without_is_empty)] + pub fn len(&self) -> usize { + self.inner.len() + } +} + +impl<'a, N: Unsigned> CachedTreeHash for HistoricalSummaryCache<'a, N> { + fn new_tree_hash_cache(&self, arena: &mut CacheArena) -> TreeHashCache { + TreeHashCache::new(arena, int_log(N::to_usize()), self.len()) + } + + fn recalculate_tree_hash_root( + &self, + arena: &mut CacheArena, + cache: &mut TreeHashCache, + ) -> Result { + Ok(mix_in_length( + &cache.recalculate_merkle_root(arena, leaf_iter(self.inner))?, + self.len(), + )) + } +} + +pub fn leaf_iter( + values: &[HistoricalSummary], +) -> impl Iterator + ExactSizeIterator + '_ { + values + .iter() + .map(|value| value.tree_hash_root()) + .map(Hash256::to_fixed_bytes) +} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 6cbb9568d..f0806e35a 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -49,6 +49,7 @@ pub mod fork_name; pub mod 
free_attestation; pub mod graffiti; pub mod historical_batch; +pub mod historical_summary; pub mod indexed_attestation; pub mod light_client_bootstrap; pub mod light_client_finality_update; diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 238c7e9f1..3af485be1 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -741,7 +741,6 @@ fn main() { .value_name("PATH") .takes_value(true) .conflicts_with("beacon-url") - .requires("pre-state-path") .help("Path to load a SignedBeaconBlock from file as SSZ."), ) .arg( diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 69b18ee74..354631e5c 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.3.0-alpha.2 +TESTS_TAG := v1.3.0-rc.0 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 04f45ac5d..b09824136 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -41,8 +41,6 @@ excluded_paths = [ "tests/.*/.*/ssz_static/LightClientFinalityUpdate", # Eip4844 tests are disabled for now. "tests/.*/eip4844", - # Capella tests are disabled for now. - "tests/.*/capella", # One of the EF researchers likes to pack the tarballs on a Mac ".*\.DS_Store.*", # More Mac weirdness. 
diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index b0e16e12c..59a8ebd41 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -5,6 +5,7 @@ use crate::decode::{ssz_decode_state, yaml_decode_file}; use crate::type_name; use crate::type_name::TypeName; use serde_derive::Deserialize; +use state_processing::per_epoch_processing::capella::process_historical_summaries_update; use state_processing::per_epoch_processing::{ altair, base, effective_balance_updates::process_effective_balance_updates, @@ -57,6 +58,8 @@ pub struct RandaoMixesReset; #[derive(Debug)] pub struct HistoricalRootsUpdate; #[derive(Debug)] +pub struct HistoricalSummariesUpdate; +#[derive(Debug)] pub struct ParticipationRecordUpdates; #[derive(Debug)] pub struct SyncCommitteeUpdates; @@ -77,6 +80,7 @@ type_name!(EffectiveBalanceUpdates, "effective_balance_updates"); type_name!(SlashingsReset, "slashings_reset"); type_name!(RandaoMixesReset, "randao_mixes_reset"); type_name!(HistoricalRootsUpdate, "historical_roots_update"); +type_name!(HistoricalSummariesUpdate, "historical_summaries_update"); type_name!(ParticipationRecordUpdates, "participation_record_updates"); type_name!(SyncCommitteeUpdates, "sync_committee_updates"); type_name!(InactivityUpdates, "inactivity_updates"); @@ -194,7 +198,23 @@ impl EpochTransition for RandaoMixesReset { impl EpochTransition for HistoricalRootsUpdate { fn run(state: &mut BeaconState, _spec: &ChainSpec) -> Result<(), EpochProcessingError> { - process_historical_roots_update(state) + match state { + BeaconState::Base(_) | BeaconState::Altair(_) | BeaconState::Merge(_) => { + process_historical_roots_update(state) + } + _ => Ok(()), + } + } +} + +impl EpochTransition for HistoricalSummariesUpdate { + fn run(state: &mut BeaconState, _spec: &ChainSpec) -> Result<(), EpochProcessingError> { + match state { + BeaconState::Capella(_) | 
BeaconState::Eip4844(_) => { + process_historical_summaries_update(state) + } + _ => Ok(()), + } } } @@ -287,10 +307,16 @@ impl> Case for EpochProcessing { T::name() != "sync_committee_updates" && T::name() != "inactivity_updates" && T::name() != "participation_flag_updates" + && T::name() != "historical_summaries_update" } // No phase0 tests for Altair and later. - ForkName::Altair | ForkName::Merge | ForkName::Capella => { + ForkName::Altair | ForkName::Merge => { T::name() != "participation_record_updates" + && T::name() != "historical_summaries_update" + } + ForkName::Capella => { + T::name() != "participation_record_updates" + && T::name() != "historical_roots_update" } ForkName::Eip4844 => false, // TODO: revisit when tests are out } diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 66f81616b..07db7cd2a 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -374,6 +374,11 @@ impl Handler for SanitySlotsHandler { fn handler_name(&self) -> String { "slots".into() } + + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + // Some sanity tests compute sync committees, which requires real crypto. 
+ fork_name == ForkName::Base || cfg!(not(feature = "fake_crypto")) + } } #[derive(Derivative)] diff --git a/testing/ef_tests/src/lib.rs b/testing/ef_tests/src/lib.rs index f52b7bca1..5ab2b4b7b 100644 --- a/testing/ef_tests/src/lib.rs +++ b/testing/ef_tests/src/lib.rs @@ -1,10 +1,10 @@ pub use case_result::CaseResult; pub use cases::WithdrawalsPayload; pub use cases::{ - Case, EffectiveBalanceUpdates, Eth1DataReset, HistoricalRootsUpdate, InactivityUpdates, - JustificationAndFinalization, ParticipationFlagUpdates, ParticipationRecordUpdates, - RandaoMixesReset, RegistryUpdates, RewardsAndPenalties, Slashings, SlashingsReset, - SyncCommitteeUpdates, + Case, EffectiveBalanceUpdates, Eth1DataReset, HistoricalRootsUpdate, HistoricalSummariesUpdate, + InactivityUpdates, JustificationAndFinalization, ParticipationFlagUpdates, + ParticipationRecordUpdates, RandaoMixesReset, RegistryUpdates, RewardsAndPenalties, Slashings, + SlashingsReset, SyncCommitteeUpdates, }; pub use decode::log_file_access; pub use error::Error; diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index bee2d9b03..0239293e0 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -1,4 +1,5 @@ //! Mapping from types to canonical string identifiers used in testing. +use types::historical_summary::HistoricalSummary; use types::*; pub trait TypeName { @@ -87,3 +88,4 @@ type_name!(VoluntaryExit); type_name!(Withdrawal); type_name!(BlsToExecutionChange, "BLSToExecutionChange"); type_name!(SignedBlsToExecutionChange, "SignedBLSToExecutionChange"); +type_name!(HistoricalSummary); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index f84be64da..8a7209b89 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -215,6 +215,7 @@ macro_rules! 
ssz_static_test_no_run { #[cfg(feature = "fake_crypto")] mod ssz_static { use ef_tests::{Handler, SszStaticHandler, SszStaticTHCHandler, SszStaticWithSpecHandler}; + use types::historical_summary::HistoricalSummary; use types::*; ssz_static_test!(aggregate_and_proof, AggregateAndProof<_>); @@ -357,6 +358,12 @@ mod ssz_static { SszStaticHandler::::capella_only().run(); SszStaticHandler::::capella_only().run(); } + + #[test] + fn historical_summary() { + SszStaticHandler::::capella_only().run(); + SszStaticHandler::::capella_only().run(); + } } #[test] @@ -423,6 +430,12 @@ fn epoch_processing_historical_roots_update() { EpochProcessingHandler::::default().run(); } +#[test] +fn epoch_processing_historical_summaries_update() { + EpochProcessingHandler::::default().run(); + EpochProcessingHandler::::default().run(); +} + #[test] fn epoch_processing_participation_record_updates() { EpochProcessingHandler::::default().run();