diff --git a/Makefile b/Makefile index 33077a6c9..56e05fffc 100644 --- a/Makefile +++ b/Makefile @@ -20,6 +20,9 @@ CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx # Cargo profile for Cross builds. Default is for local builds, CI uses an override. CROSS_PROFILE ?= release +# List of features to use when running EF tests. +EF_TEST_FEATURES ?= beacon_chain/withdrawals,beacon_chain/withdrawals-processing + # Cargo profile for regular builds. PROFILE ?= release @@ -108,9 +111,9 @@ check-consensus: # Runs only the ef-test vectors. run-ef-tests: rm -rf $(EF_TESTS)/.accessed_file_log.txt - cargo test --release -p ef_tests --features "ef_tests" - cargo test --release -p ef_tests --features "ef_tests,fake_crypto" - cargo test --release -p ef_tests --features "ef_tests,milagro" + cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES)" + cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),fake_crypto" + cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),milagro" ./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests # Run the tests in the `beacon_chain` crate for all known forks. diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 49a96cbb4..40f5e990e 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -77,6 +77,8 @@ use slasher::Slasher; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; +#[cfg(feature = "withdrawals")] +use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::{ common::{get_attesting_indices_from_state, get_indexed_attestation}, per_block_processing, @@ -261,6 +263,8 @@ struct PartialBeaconBlock> { voluntary_exits: Vec, sync_aggregate: Option>, prepare_payload_handle: Option>, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes: Vec, } pub type BeaconForkChoice = ForkChoice< @@ -3407,12 +3411,14 @@ impl BeaconChain { // Wait for the execution layer to return an execution payload (if one is required). let prepare_payload_handle = partial_beacon_block.prepare_payload_handle.take(); let block_contents = if let Some(prepare_payload_handle) = prepare_payload_handle { - prepare_payload_handle - .await - .map_err(BlockProductionError::TokioJoin)? - .ok_or(BlockProductionError::ShuttingDown)?? + Some( + prepare_payload_handle + .await + .map_err(BlockProductionError::TokioJoin)? + .ok_or(BlockProductionError::ShuttingDown)??, + ) } else { - return Err(BlockProductionError::MissingExecutionPayload); + None }; // Part 3/3 (blocking) @@ -3507,6 +3513,9 @@ impl BeaconChain { let eth1_data = eth1_chain.eth1_data_for_block_production(&state, &self.spec)?; let deposits = eth1_chain.deposits_for_block_inclusion(&state, ð1_data, &self.spec)?; + let bls_to_execution_changes = self + .op_pool + .get_bls_to_execution_changes(&state, &self.spec); // Iterate through the naive aggregation pool and ensure all the attestations from there // are included in the operation pool. 
@@ -3664,13 +3673,15 @@ impl BeaconChain { voluntary_exits, sync_aggregate, prepare_payload_handle, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes, }) } fn complete_partial_beacon_block>( &self, partial_beacon_block: PartialBeaconBlock, - block_contents: BlockProposalContents, + block_contents: Option>, verification: ProduceBlockVerification, ) -> Result, BlockProductionError> { let PartialBeaconBlock { @@ -3691,6 +3702,8 @@ impl BeaconChain { // this function. We can assume that the handle has already been consumed in order to // produce said `execution_payload`. prepare_payload_handle: _, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes, } = partial_beacon_block; let (payload, kzg_commitments_opt, blobs) = block_contents.deconstruct(); @@ -3749,6 +3762,7 @@ impl BeaconChain { sync_aggregate: sync_aggregate .ok_or(BlockProductionError::MissingSyncAggregate)?, execution_payload: payload + .ok_or(BlockProductionError::MissingExecutionPayload)? .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, }, @@ -3770,8 +3784,11 @@ impl BeaconChain { sync_aggregate: sync_aggregate .ok_or(BlockProductionError::MissingSyncAggregate)?, execution_payload: payload + .ok_or(BlockProductionError::MissingExecutionPayload)? .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes: bls_to_execution_changes.into(), }, }), BeaconState::Eip4844(_) => { @@ -3794,8 +3811,11 @@ impl BeaconChain { sync_aggregate: sync_aggregate .ok_or(BlockProductionError::MissingSyncAggregate)?, execution_payload: payload + .ok_or(BlockProductionError::MissingExecutionPayload)? .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes: bls_to_execution_changes.into(), blob_kzg_commitments: VariableList::from(kzg_commitments), }, }) @@ -4134,35 +4154,52 @@ impl BeaconChain { return Ok(()); } - let payload_attributes = match self.spec.fork_name_at_epoch(prepare_epoch) { + #[cfg(feature = "withdrawals")] + let head_state = &self.canonical_head.cached_head().snapshot.beacon_state; + #[cfg(feature = "withdrawals")] + let withdrawals = match self.spec.fork_name_at_epoch(prepare_epoch) { ForkName::Base | ForkName::Altair | ForkName::Merge => { - PayloadAttributes::V1(PayloadAttributesV1 { - timestamp: self - .slot_clock - .start_of(prepare_slot) - .ok_or(Error::InvalidSlot(prepare_slot))? - .as_secs(), - prev_randao: head_random, - suggested_fee_recipient: execution_layer - .get_suggested_fee_recipient(proposer as u64) - .await, - }) - } - ForkName::Capella | ForkName::Eip4844 => PayloadAttributes::V2(PayloadAttributesV2 { - timestamp: self - .slot_clock - .start_of(prepare_slot) - .ok_or(Error::InvalidSlot(prepare_slot))? - .as_secs(), - prev_randao: head_random, - suggested_fee_recipient: execution_layer - .get_suggested_fee_recipient(proposer as u64) - .await, - //FIXME(sean) - #[cfg(feature = "withdrawals")] - withdrawals: vec![], - }), - }; + None + }, + ForkName::Capella | ForkName::Eip4844 => match &head_state { + &BeaconState::Capella(_) | &BeaconState::Eip4844(_) => { + // The head_state is already BeaconState::Capella or later + // FIXME(mark) + // Might implement caching here in the future.. 
+ Some(get_expected_withdrawals(head_state, &self.spec)) + } + &BeaconState::Base(_) | &BeaconState::Altair(_) | &BeaconState::Merge(_) => { + // We are the Capella transition block proposer, need advanced state + let mut prepare_state = self + .state_at_slot(prepare_slot, StateSkipConfig::WithoutStateRoots) + .or_else(|e| { + error!(self.log, "Capella Transition Proposer"; "Error Advancing State: " => ?e); + Err(e) + })?; + // FIXME(mark) + // Might implement caching here in the future.. + Some(get_expected_withdrawals(&prepare_state, &self.spec)) + } + }, + }.transpose().or_else(|e| { + error!(self.log, "Error preparing beacon proposer"; "while calculating expected withdrawals" => ?e); + Err(e) + }).map(|withdrawals_opt| withdrawals_opt.map(|w| w.into())) + .map_err(Error::PrepareProposerFailed)?; + + let payload_attributes = PayloadAttributes::V2(PayloadAttributesV2 { + timestamp: self + .slot_clock + .start_of(prepare_slot) + .ok_or(Error::InvalidSlot(prepare_slot))? + .as_secs(), + prev_randao: head_random, + suggested_fee_recipient: execution_layer + .get_suggested_fee_recipient(proposer as u64) + .await, + #[cfg(feature = "withdrawals")] + withdrawals, + }); debug!( self.log, diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index da944c102..e4d00d9ca 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -153,7 +153,7 @@ pub enum BeaconChainError { }, AddPayloadLogicError, ExecutionForkChoiceUpdateFailed(execution_layer::Error), - PrepareProposerBlockingFailed(execution_layer::Error), + PrepareProposerFailed(BlockProcessingError), ExecutionForkChoiceUpdateInvalid { status: PayloadStatus, }, diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 134e51e79..bf920a6da 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -17,6 +17,8 @@ use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Block as ProtoBlock, ExecutionStatus}; use slog::debug; use slot_clock::SlotClock; +#[cfg(feature = "withdrawals")] +use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::per_block_processing::{ compute_timestamp_at_slot, is_execution_enabled, is_merge_transition_complete, partially_verify_execution_payload, @@ -362,6 +364,15 @@ pub fn get_execution_payload< let random = *state.get_randao_mix(current_epoch)?; let latest_execution_payload_header_block_hash = state.latest_execution_payload_header()?.block_hash(); + #[cfg(feature = "withdrawals")] + let withdrawals = match state { + &BeaconState::Capella(_) | &BeaconState::Eip4844(_) => { + Some(get_expected_withdrawals(state, spec)?.into()) + } + &BeaconState::Merge(_) => None, + // These shouldn't happen but they're here to make the pattern irrefutable + &BeaconState::Base(_) | &BeaconState::Altair(_) => None, + }; // Spawn a task to obtain the execution payload from the EL via a series of async calls. The // `join_handle` can be used to await the result of the function. 
@@ -378,6 +389,8 @@ pub fn get_execution_payload< proposer_index, latest_execution_payload_header_block_hash, builder_params, + #[cfg(feature = "withdrawals")] + withdrawals, ) .await }, @@ -411,6 +424,7 @@ pub async fn prepare_execution_payload( proposer_index: u64, latest_execution_payload_header_block_hash: ExecutionBlockHash, builder_params: BuilderParams, + #[cfg(feature = "withdrawals")] withdrawals: Option>, ) -> Result, BlockProductionError> where T: BeaconChainTypes, @@ -480,6 +494,9 @@ where proposer_index, forkchoice_update_params, builder_params, + fork, + #[cfg(feature = "withdrawals")] + withdrawals, &chain.spec, ) .await diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 2336c3ba9..611b20988 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -12,9 +12,9 @@ use beacon_chain::{ INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; use execution_layer::{ - json_structures::{JsonForkChoiceStateV1, JsonPayloadAttributesV1}, + json_structures::{JsonForkchoiceStateV1, JsonPayloadAttributesV1}, test_utils::ExecutionBlockGenerator, - ExecutionLayer, ForkChoiceState, PayloadAttributes, + ExecutionLayer, ForkchoiceState, PayloadAttributes, }; use fork_choice::{ CountUnrealized, Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus, @@ -117,7 +117,7 @@ impl InvalidPayloadRig { &self.harness.chain.canonical_head } - fn previous_forkchoice_update_params(&self) -> (ForkChoiceState, PayloadAttributes) { + fn previous_forkchoice_update_params(&self) -> (ForkchoiceState, PayloadAttributes) { let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap(); let json = mock_execution_layer .server @@ -126,7 +126,7 @@ impl InvalidPayloadRig { let params = json.get("params").expect("no params"); let fork_choice_state_json = params.get(0).expect("no payload param"); - let fork_choice_state: JsonForkChoiceStateV1 = + let fork_choice_state: JsonForkchoiceStateV1 = serde_json::from_value(fork_choice_state_json.clone()).unwrap(); let payload_param_json = params.get(1).expect("no payload param"); diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 68a4f6a41..b3bdc54d0 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -5,8 +5,8 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [features] -withdrawals = ["state_processing/withdrawals", "types/withdrawals"] -withdrawals-processing = ["state_processing/withdrawals-processing"] +withdrawals = ["state_processing/withdrawals", "types/withdrawals", "eth2/withdrawals"] +withdrawals-processing = ["state_processing/withdrawals-processing", "eth2/withdrawals-processing"] [dependencies] types = { path = "../../consensus/types"} diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index ed940d4a8..128f23386 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,4 +1,4 @@ -use crate::engines::ForkChoiceState; +use crate::engines::ForkchoiceState; pub use ethers_core::types::Transaction; use ethers_core::utils::rlp::{Decodable, Rlp}; use http::deposit_methods::RpcError; @@ -7,10 +7,11 @@ use reqwest::StatusCode; use serde::{Deserialize, Serialize}; use strum::IntoStaticStr; use superstruct::superstruct; 
+#[cfg(feature = "withdrawals")] use types::Withdrawal; pub use types::{ Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, FixedVector, - Hash256, Uint256, VariableList, + ForkName, Hash256, Uint256, VariableList, }; pub mod auth; @@ -44,6 +45,9 @@ pub enum Error { DeserializeWithdrawals(ssz_types::Error), BuilderApi(builder_client::Error), IncorrectStateVariant, + RequiredMethodUnsupported(&'static str), + UnsupportedForkVariant(String), + BadConversion(String), } impl From for Error { @@ -255,7 +259,29 @@ pub struct PayloadAttributes { pub suggested_fee_recipient: Address, #[cfg(feature = "withdrawals")] #[superstruct(only(V2))] - pub withdrawals: Vec, + pub withdrawals: Option>, +} + +impl PayloadAttributes { + pub fn downgrade_to_v1(self) -> Result { + match self { + PayloadAttributes::V1(_) => Ok(self), + PayloadAttributes::V2(v2) => { + #[cfg(features = "withdrawals")] + if v2.withdrawals.is_some() { + return Err(Error::BadConversion( + "Downgrading from PayloadAttributesV2 with non-null withdrawaals" + .to_string(), + )); + } + Ok(PayloadAttributes::V1(PayloadAttributesV1 { + timestamp: v2.timestamp, + prev_randao: v2.prev_randao, + suggested_fee_recipient: v2.suggested_fee_recipient, + })) + } + } + } } #[derive(Clone, Debug, PartialEq)] @@ -277,3 +303,17 @@ pub struct ProposeBlindedBlockResponse { pub latest_valid_hash: Option, pub validation_error: Option, } + +// This name is work in progress, it could +// change when this method is actually proposed +// but I'm writing this as it has been described +#[derive(Clone, Copy)] +pub struct SupportedApis { + pub new_payload_v1: bool, + pub new_payload_v2: bool, + pub forkchoice_updated_v1: bool, + pub forkchoice_updated_v2: bool, + pub get_payload_v1: bool, + pub get_payload_v2: bool, + pub exchange_transition_configuration_v1: bool, +} diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index bd9f387e5..446623744 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -7,6 +7,7 @@ use reqwest::header::CONTENT_TYPE; use sensitive_url::SensitiveUrl; use serde::de::DeserializeOwned; use serde_json::json; +use tokio::sync::RwLock; use std::time::Duration; use types::EthSpec; @@ -29,15 +30,18 @@ pub const ETH_SYNCING: &str = "eth_syncing"; pub const ETH_SYNCING_TIMEOUT: Duration = Duration::from_secs(1); pub const ENGINE_NEW_PAYLOAD_V1: &str = "engine_newPayloadV1"; +pub const ENGINE_NEW_PAYLOAD_V2: &str = "engine_newPayloadV2"; pub const ENGINE_NEW_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_GET_PAYLOAD_V1: &str = "engine_getPayloadV1"; +pub const ENGINE_GET_PAYLOAD_V2: &str = "engine_getPayloadV2"; pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); pub const ENGINE_GET_BLOBS_BUNDLE_V1: &str = "engine_getBlobsBundleV1"; pub const ENGINE_GET_BLOBS_BUNDLE_TIMEOUT: Duration = Duration::from_secs(2); pub const ENGINE_FORKCHOICE_UPDATED_V1: &str = "engine_forkchoiceUpdatedV1"; +pub const ENGINE_FORKCHOICE_UPDATED_V2: &str = "engine_forkchoiceUpdatedV2"; pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1: &str = @@ -526,6 +530,7 @@ pub struct HttpJsonRpc { pub client: Client, pub url: SensitiveUrl, pub execution_timeout_multiplier: u32, + pub cached_supported_apis: RwLock>, auth: Option, } @@ -538,6 +543,7 @@ impl HttpJsonRpc { client: 
Client::builder().build()?, url, execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1), + cached_supported_apis: Default::default(), auth: None, }) } @@ -551,6 +557,7 @@ impl HttpJsonRpc { client: Client::builder().build()?, url, execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1), + cached_supported_apis: Default::default(), auth: Some(auth), }) } @@ -671,7 +678,7 @@ impl HttpJsonRpc { &self, execution_payload: ExecutionPayload, ) -> Result { - let params = json!([JsonExecutionPayload::from(execution_payload)]); + let params = json!([JsonExecutionPayloadV1::try_from(execution_payload)?]); let response: JsonPayloadStatusV1 = self .rpc_request( @@ -684,13 +691,31 @@ impl HttpJsonRpc { Ok(response.into()) } + pub async fn new_payload_v2( + &self, + execution_payload: ExecutionPayload, + ) -> Result { + let params = json!([JsonExecutionPayloadV2::try_from(execution_payload)?]); + + let response: JsonPayloadStatusV1 = self + .rpc_request( + ENGINE_NEW_PAYLOAD_V2, + params, + ENGINE_NEW_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + + Ok(response.into()) + } + pub async fn get_payload_v1( &self, + fork_name: ForkName, payload_id: PayloadId, ) -> Result, Error> { let params = json!([JsonPayloadIdRequest::from(payload_id)]); - let response: JsonExecutionPayload = self + let payload_v1: JsonExecutionPayloadV1 = self .rpc_request( ENGINE_GET_PAYLOAD_V1, params, @@ -698,7 +723,25 @@ impl HttpJsonRpc { ) .await?; - Ok(response.into()) + JsonExecutionPayload::V1(payload_v1).try_into_execution_payload(fork_name) + } + + pub async fn get_payload_v2( + &self, + fork_name: ForkName, + payload_id: PayloadId, + ) -> Result, Error> { + let params = json!([JsonPayloadIdRequest::from(payload_id)]); + + let payload_v2: JsonExecutionPayloadV2 = self + .rpc_request( + ENGINE_GET_PAYLOAD_V2, + params, + ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + + JsonExecutionPayload::V2(payload_v2).try_into_execution_payload(fork_name) } pub async fn get_blobs_bundle_v1( @@ -720,11 +763,11 @@ impl HttpJsonRpc { pub async fn forkchoice_updated_v1( &self, - forkchoice_state: ForkChoiceState, + forkchoice_state: ForkchoiceState, payload_attributes: Option, ) -> Result { let params = json!([ - JsonForkChoiceStateV1::from(forkchoice_state), + JsonForkchoiceStateV1::from(forkchoice_state), payload_attributes.map(JsonPayloadAttributes::from) ]); @@ -739,6 +782,27 @@ impl HttpJsonRpc { Ok(response.into()) } + pub async fn forkchoice_updated_v2( + &self, + forkchoice_state: ForkchoiceState, + payload_attributes: Option, + ) -> Result { + let params = json!([ + JsonForkchoiceStateV1::from(forkchoice_state), + payload_attributes.map(JsonPayloadAttributes::from) + ]); + + let response: JsonForkchoiceUpdatedV1Response = self + .rpc_request( + ENGINE_FORKCHOICE_UPDATED_V2, + params, + ENGINE_FORKCHOICE_UPDATED_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + + Ok(response.into()) + } + pub async fn exchange_transition_configuration_v1( &self, transition_configuration: TransitionConfigurationV1, @@ -756,6 +820,94 @@ impl HttpJsonRpc { Ok(response) } + + // this is a stub as this method hasn't been defined yet + pub async fn supported_apis_v1(&self) -> Result { + Ok(SupportedApis { + new_payload_v1: true, + new_payload_v2: cfg!(all(feature = "withdrawals", not(test))), + forkchoice_updated_v1: true, + forkchoice_updated_v2: cfg!(all(feature = "withdrawals", not(test))), + get_payload_v1: true, + get_payload_v2: cfg!(all(feature = 
"withdrawals", not(test))), + exchange_transition_configuration_v1: true, + }) + } + + pub async fn set_cached_supported_apis(&self, supported_apis: SupportedApis) { + *self.cached_supported_apis.write().await = Some(supported_apis); + } + + pub async fn get_cached_supported_apis(&self) -> Result { + let cached_opt = *self.cached_supported_apis.read().await; + if let Some(supported_apis) = cached_opt { + Ok(supported_apis) + } else { + let supported_apis = self.supported_apis_v1().await?; + self.set_cached_supported_apis(supported_apis).await; + Ok(supported_apis) + } + } + + // automatically selects the latest version of + // new_payload that the execution engine supports + pub async fn new_payload( + &self, + execution_payload: ExecutionPayload, + ) -> Result { + let supported_apis = self.get_cached_supported_apis().await?; + if supported_apis.new_payload_v2 { + // FIXME: I haven't thought at all about how to handle 4844.. + self.new_payload_v2(execution_payload).await + } else if supported_apis.new_payload_v1 { + self.new_payload_v1(execution_payload).await + } else { + Err(Error::RequiredMethodUnsupported("engine_newPayload")) + } + } + + // automatically selects the latest version of + // get_payload that the execution engine supports + pub async fn get_payload( + &self, + fork_name: ForkName, + payload_id: PayloadId, + ) -> Result, Error> { + let supported_apis = self.get_cached_supported_apis().await?; + if supported_apis.get_payload_v2 { + // FIXME: I haven't thought at all about how to handle 4844.. + self.get_payload_v2(fork_name, payload_id).await + } else if supported_apis.new_payload_v1 { + self.get_payload_v1(fork_name, payload_id).await + } else { + Err(Error::RequiredMethodUnsupported("engine_getPayload")) + } + } + + // automatically selects the latest version of + // forkchoice_updated that the execution engine supports + pub async fn forkchoice_updated( + &self, + forkchoice_state: ForkchoiceState, + payload_attributes: Option, + ) -> Result { + let supported_apis = self.get_cached_supported_apis().await?; + if supported_apis.forkchoice_updated_v2 { + // FIXME: I haven't thought at all about how to handle 4844.. 
+ self.forkchoice_updated_v2(forkchoice_state, payload_attributes) + .await + } else if supported_apis.forkchoice_updated_v1 { + self.forkchoice_updated_v1( + forkchoice_state, + payload_attributes + .map(|pa| pa.downgrade_to_v1()) + .transpose()?, + ) + .await + } else { + Err(Error::RequiredMethodUnsupported("engine_forkchoiceUpdated")) + } + } } #[cfg(test)] @@ -767,8 +919,8 @@ mod test { use std::str::FromStr; use std::sync::Arc; use types::{ - AbstractExecPayload, ExecutionPayloadMerge, ForkName, FullPayload, MainnetEthSpec, - Transactions, Unsigned, VariableList, + ExecutionPayloadMerge, ForkName, FullPayload, MainnetEthSpec, Transactions, Unsigned, + VariableList, }; struct Tester { @@ -1052,7 +1204,7 @@ mod test { |client| async move { let _ = client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::repeat_byte(1), safe_block_hash: ExecutionBlockHash::repeat_byte(1), finalized_block_hash: ExecutionBlockHash::zero(), @@ -1087,7 +1239,7 @@ mod test { .assert_auth_failure(|client| async move { client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::repeat_byte(1), safe_block_hash: ExecutionBlockHash::repeat_byte(1), finalized_block_hash: ExecutionBlockHash::zero(), @@ -1108,7 +1260,9 @@ mod test { Tester::new(true) .assert_request_equals( |client| async move { - let _ = client.get_payload_v1::([42; 8]).await; + let _ = client + .get_payload_v1::(ForkName::Merge, [42; 8]) + .await; }, json!({ "id": STATIC_ID, @@ -1121,7 +1275,9 @@ mod test { Tester::new(false) .assert_auth_failure(|client| async move { - client.get_payload_v1::([42; 8]).await + client + .get_payload_v1::(ForkName::Merge, [42; 8]) + .await }) .await; } @@ -1209,7 +1365,7 @@ mod test { |client| async move { let _ = client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::repeat_byte(0), safe_block_hash: ExecutionBlockHash::repeat_byte(0), finalized_block_hash: ExecutionBlockHash::repeat_byte(1), @@ -1235,7 +1391,7 @@ mod test { .assert_auth_failure(|client| async move { client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::repeat_byte(0), safe_block_hash: ExecutionBlockHash::repeat_byte(0), finalized_block_hash: ExecutionBlockHash::repeat_byte(1), @@ -1274,7 +1430,7 @@ mod test { |client| async move { let _ = client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), safe_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), finalized_block_hash: ExecutionBlockHash::zero(), @@ -1321,7 +1477,7 @@ mod test { |client| async move { let response = client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), safe_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), finalized_block_hash: ExecutionBlockHash::zero(), @@ -1350,7 +1506,7 @@ mod test { // engine_getPayloadV1 REQUEST validation |client| async move { let _ = client - .get_payload_v1::(str_to_payload_id("0xa247243752eb10b4")) + .get_payload_v1::(ForkName::Merge,str_to_payload_id("0xa247243752eb10b4")) .await; }, json!({ @@ -1385,7 +1541,7 @@ mod test { })], 
|client| async move { let payload = client - .get_payload_v1::(str_to_payload_id("0xa247243752eb10b4")) + .get_payload_v1::(ForkName::Merge,str_to_payload_id("0xa247243752eb10b4")) .await .unwrap(); @@ -1468,7 +1624,7 @@ mod test { })], |client| async move { let response = client - .new_payload_v1::(FullPayload::default_at_fork(ForkName::Merge).into()) + .new_payload_v1::(ExecutionPayload::Merge(ExecutionPayloadMerge::default())) .await .unwrap(); @@ -1487,7 +1643,7 @@ mod test { |client| async move { let _ = client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), safe_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), finalized_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), @@ -1526,7 +1682,7 @@ mod test { |client| async move { let response = client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), safe_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), finalized_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 6d1d70e78..99459ec2b 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -3,11 +3,12 @@ use serde::{Deserialize, Serialize}; use strum::EnumString; use superstruct::superstruct; use types::{ - Blob, EthSpec, ExecutionBlockHash, ExecutionPayloadEip4844, ExecutionPayloadHeaderEip4844, - FixedVector, KzgCommitment, Transaction, Unsigned, VariableList, + Blob, EthSpec, ExecutionBlockHash, FixedVector, KzgCommitment, Transaction, Unsigned, + VariableList, Withdrawal, +}; +use types::{ + ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge, }; -use types::{ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge}; -use types::{ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderMerge}; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -62,169 +63,6 @@ pub struct JsonPayloadIdResponse { pub payload_id: PayloadId, } -// (V1,V2,V3) -> (Merge,Capella,EIP4844) -#[superstruct( - variants(V1, V2, V3), - variant_attributes( - derive(Debug, PartialEq, Default, Serialize, Deserialize,), - serde(bound = "T: EthSpec", rename_all = "camelCase"), - ), - cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") -)] -#[derive(Debug, PartialEq, Serialize, Deserialize)] -#[serde(bound = "T: EthSpec", rename_all = "camelCase", untagged)] -pub struct JsonExecutionPayloadHeader { - pub parent_hash: ExecutionBlockHash, - pub fee_recipient: Address, - pub state_root: Hash256, - pub receipts_root: Hash256, - #[serde(with = "serde_logs_bloom")] - pub logs_bloom: FixedVector, - pub prev_randao: Hash256, - #[serde(with = "eth2_serde_utils::u64_hex_be")] - pub block_number: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] - pub 
gas_limit: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] - pub gas_used: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] - pub timestamp: u64, - #[serde(with = "ssz_types::serde_utils::hex_var_list")] - pub extra_data: VariableList, - #[serde(with = "eth2_serde_utils::u256_hex_be")] - pub base_fee_per_gas: Uint256, - #[serde(with = "eth2_serde_utils::u64_hex_be")] - #[superstruct(only(V3))] - pub excess_blobs: u64, - pub block_hash: ExecutionBlockHash, - pub transactions_root: Hash256, - #[cfg(feature = "withdrawals")] - #[superstruct(only(V2, V3))] - pub withdrawals_root: Hash256, -} - -impl From> for ExecutionPayloadHeader { - fn from(json_header: JsonExecutionPayloadHeader) -> Self { - match json_header { - JsonExecutionPayloadHeader::V1(v1) => Self::Merge(ExecutionPayloadHeaderMerge { - parent_hash: v1.parent_hash, - fee_recipient: v1.fee_recipient, - state_root: v1.state_root, - receipts_root: v1.receipts_root, - logs_bloom: v1.logs_bloom, - prev_randao: v1.prev_randao, - block_number: v1.block_number, - gas_limit: v1.gas_limit, - gas_used: v1.gas_used, - timestamp: v1.timestamp, - extra_data: v1.extra_data, - base_fee_per_gas: v1.base_fee_per_gas, - block_hash: v1.block_hash, - transactions_root: v1.transactions_root, - }), - JsonExecutionPayloadHeader::V2(v2) => Self::Capella(ExecutionPayloadHeaderCapella { - parent_hash: v2.parent_hash, - fee_recipient: v2.fee_recipient, - state_root: v2.state_root, - receipts_root: v2.receipts_root, - logs_bloom: v2.logs_bloom, - prev_randao: v2.prev_randao, - block_number: v2.block_number, - gas_limit: v2.gas_limit, - gas_used: v2.gas_used, - timestamp: v2.timestamp, - extra_data: v2.extra_data, - base_fee_per_gas: v2.base_fee_per_gas, - block_hash: v2.block_hash, - transactions_root: v2.transactions_root, - #[cfg(feature = "withdrawals")] - withdrawals_root: v2.withdrawals_root, - }), - JsonExecutionPayloadHeader::V3(v3) => Self::Eip4844(ExecutionPayloadHeaderEip4844 { - parent_hash: v3.parent_hash, - fee_recipient: v3.fee_recipient, - state_root: v3.state_root, - receipts_root: v3.receipts_root, - logs_bloom: v3.logs_bloom, - prev_randao: v3.prev_randao, - block_number: v3.block_number, - gas_limit: v3.gas_limit, - gas_used: v3.gas_used, - timestamp: v3.timestamp, - extra_data: v3.extra_data, - base_fee_per_gas: v3.base_fee_per_gas, - excess_blobs: v3.excess_blobs, - block_hash: v3.block_hash, - transactions_root: v3.transactions_root, - #[cfg(feature = "withdrawals")] - withdrawals_root: v3.withdrawals_root, - }), - } - } -} - -impl From> for JsonExecutionPayloadHeader { - fn from(header: ExecutionPayloadHeader) -> Self { - match header { - ExecutionPayloadHeader::Merge(merge) => Self::V1(JsonExecutionPayloadHeaderV1 { - parent_hash: merge.parent_hash, - fee_recipient: merge.fee_recipient, - state_root: merge.state_root, - receipts_root: merge.receipts_root, - logs_bloom: merge.logs_bloom, - prev_randao: merge.prev_randao, - block_number: merge.block_number, - gas_limit: merge.gas_limit, - gas_used: merge.gas_used, - timestamp: merge.timestamp, - extra_data: merge.extra_data, - base_fee_per_gas: merge.base_fee_per_gas, - block_hash: merge.block_hash, - transactions_root: merge.transactions_root, - }), - ExecutionPayloadHeader::Capella(capella) => Self::V2(JsonExecutionPayloadHeaderV2 { - parent_hash: capella.parent_hash, - fee_recipient: capella.fee_recipient, - state_root: capella.state_root, - receipts_root: capella.receipts_root, - logs_bloom: capella.logs_bloom, - prev_randao: capella.prev_randao, - block_number: 
capella.block_number, - gas_limit: capella.gas_limit, - gas_used: capella.gas_used, - timestamp: capella.timestamp, - extra_data: capella.extra_data, - base_fee_per_gas: capella.base_fee_per_gas, - block_hash: capella.block_hash, - transactions_root: capella.transactions_root, - #[cfg(feature = "withdrawals")] - withdrawals_root: capella.withdrawals_root, - }), - ExecutionPayloadHeader::Eip4844(eip4844) => Self::V3(JsonExecutionPayloadHeaderV3 { - parent_hash: eip4844.parent_hash, - fee_recipient: eip4844.fee_recipient, - state_root: eip4844.state_root, - receipts_root: eip4844.receipts_root, - logs_bloom: eip4844.logs_bloom, - prev_randao: eip4844.prev_randao, - block_number: eip4844.block_number, - gas_limit: eip4844.gas_limit, - gas_used: eip4844.gas_used, - timestamp: eip4844.timestamp, - extra_data: eip4844.extra_data, - base_fee_per_gas: eip4844.base_fee_per_gas, - excess_blobs: eip4844.excess_blobs, - block_hash: eip4844.block_hash, - transactions_root: eip4844.transactions_root, - #[cfg(feature = "withdrawals")] - withdrawals_root: eip4844.withdrawals_root, - }), - } - } -} - -// (V1,V2, V2) -> (Merge,Capella,EIP4844) #[superstruct( variants(V1, V2, V3), variant_attributes( @@ -257,81 +95,173 @@ pub struct JsonExecutionPayload { #[serde(with = "eth2_serde_utils::u256_hex_be")] pub base_fee_per_gas: Uint256, #[superstruct(only(V3))] + // FIXME: can't easily make this an option because of custom deserialization.. #[serde(with = "eth2_serde_utils::u64_hex_be")] pub excess_blobs: u64, pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: VariableList, T::MaxTransactionsPerPayload>, - #[cfg(feature = "withdrawals")] #[superstruct(only(V2, V3))] - pub withdrawals: VariableList, + pub withdrawals: Option>, } -impl From> for ExecutionPayload { - fn from(json_payload: JsonExecutionPayload) -> Self { - match json_payload { - JsonExecutionPayload::V1(v1) => Self::Merge(ExecutionPayloadMerge { - parent_hash: v1.parent_hash, - fee_recipient: v1.fee_recipient, - state_root: v1.state_root, - receipts_root: v1.receipts_root, - logs_bloom: v1.logs_bloom, - prev_randao: v1.prev_randao, - block_number: v1.block_number, - gas_limit: v1.gas_limit, - gas_used: v1.gas_used, - timestamp: v1.timestamp, - extra_data: v1.extra_data, - base_fee_per_gas: v1.base_fee_per_gas, - block_hash: v1.block_hash, - transactions: v1.transactions, - }), - JsonExecutionPayload::V2(v2) => Self::Capella(ExecutionPayloadCapella { - parent_hash: v2.parent_hash, - fee_recipient: v2.fee_recipient, - state_root: v2.state_root, - receipts_root: v2.receipts_root, - logs_bloom: v2.logs_bloom, - prev_randao: v2.prev_randao, - block_number: v2.block_number, - gas_limit: v2.gas_limit, - gas_used: v2.gas_used, - timestamp: v2.timestamp, - extra_data: v2.extra_data, - base_fee_per_gas: v2.base_fee_per_gas, - block_hash: v2.block_hash, - transactions: v2.transactions, - #[cfg(feature = "withdrawals")] - withdrawals: v2.withdrawals, - }), - JsonExecutionPayload::V3(v3) => Self::Eip4844(ExecutionPayloadEip4844 { - parent_hash: v3.parent_hash, - fee_recipient: v3.fee_recipient, - state_root: v3.state_root, - receipts_root: v3.receipts_root, - logs_bloom: v3.logs_bloom, - prev_randao: v3.prev_randao, - block_number: v3.block_number, - gas_limit: v3.gas_limit, - gas_used: v3.gas_used, - timestamp: v3.timestamp, - extra_data: v3.extra_data, - base_fee_per_gas: v3.base_fee_per_gas, - excess_blobs: v3.excess_blobs, - block_hash: v3.block_hash, - transactions: v3.transactions, - 
#[cfg(feature = "withdrawals")] - withdrawals: v3.withdrawals, - }), +impl JsonExecutionPayload { + pub fn try_into_execution_payload( + self, + fork_name: ForkName, + ) -> Result, Error> { + match self { + JsonExecutionPayload::V1(v1) => match fork_name { + ForkName::Merge => Ok(ExecutionPayload::Merge(ExecutionPayloadMerge { + parent_hash: v1.parent_hash, + fee_recipient: v1.fee_recipient, + state_root: v1.state_root, + receipts_root: v1.receipts_root, + logs_bloom: v1.logs_bloom, + prev_randao: v1.prev_randao, + block_number: v1.block_number, + gas_limit: v1.gas_limit, + gas_used: v1.gas_used, + timestamp: v1.timestamp, + extra_data: v1.extra_data, + base_fee_per_gas: v1.base_fee_per_gas, + block_hash: v1.block_hash, + transactions: v1.transactions, + })), + _ => Err(Error::UnsupportedForkVariant(format!("Unsupported conversion from JsonExecutionPayloadV1 for {}", fork_name))), + } + JsonExecutionPayload::V2(v2) => match fork_name { + ForkName::Merge => Ok(ExecutionPayload::Merge(ExecutionPayloadMerge { + parent_hash: v2.parent_hash, + fee_recipient: v2.fee_recipient, + state_root: v2.state_root, + receipts_root: v2.receipts_root, + logs_bloom: v2.logs_bloom, + prev_randao: v2.prev_randao, + block_number: v2.block_number, + gas_limit: v2.gas_limit, + gas_used: v2.gas_used, + timestamp: v2.timestamp, + extra_data: v2.extra_data, + base_fee_per_gas: v2.base_fee_per_gas, + block_hash: v2.block_hash, + transactions: v2.transactions, + })), + ForkName::Capella => Ok(ExecutionPayload::Capella(ExecutionPayloadCapella { + parent_hash: v2.parent_hash, + fee_recipient: v2.fee_recipient, + state_root: v2.state_root, + receipts_root: v2.receipts_root, + logs_bloom: v2.logs_bloom, + prev_randao: v2.prev_randao, + block_number: v2.block_number, + gas_limit: v2.gas_limit, + gas_used: v2.gas_used, + timestamp: v2.timestamp, + extra_data: v2.extra_data, + base_fee_per_gas: v2.base_fee_per_gas, + block_hash: v2.block_hash, + transactions: v2.transactions, + #[cfg(feature = "withdrawals")] + withdrawals: v2 + .withdrawals + .map(|v| { + Into::>::into(v) + .into_iter() + .map(Into::into) + .collect::>() + .into() + }) + .ok_or(Error::BadConversion("Null withdrawal field converting JsonExecutionPayloadV2 -> ExecutionPayloadCapella".to_string()))? 
+ })), + ForkName::Eip4844 => Err(Error::UnsupportedForkVariant("JsonExecutionPayloadV2 -> ExecutionPayloadEip4844 not implemented yet as it might never be".to_string())), + _ => Err(Error::UnsupportedForkVariant(format!("Unsupported conversion from JsonExecutionPayloadV2 for {}", fork_name))), + } + JsonExecutionPayload::V3(v3) => match fork_name { + ForkName::Merge => Ok(ExecutionPayload::Merge(ExecutionPayloadMerge { + parent_hash: v3.parent_hash, + fee_recipient: v3.fee_recipient, + state_root: v3.state_root, + receipts_root: v3.receipts_root, + logs_bloom: v3.logs_bloom, + prev_randao: v3.prev_randao, + block_number: v3.block_number, + gas_limit: v3.gas_limit, + gas_used: v3.gas_used, + timestamp: v3.timestamp, + extra_data: v3.extra_data, + base_fee_per_gas: v3.base_fee_per_gas, + block_hash: v3.block_hash, + transactions: v3.transactions, + })), + ForkName::Capella => Ok(ExecutionPayload::Capella(ExecutionPayloadCapella { + parent_hash: v3.parent_hash, + fee_recipient: v3.fee_recipient, + state_root: v3.state_root, + receipts_root: v3.receipts_root, + logs_bloom: v3.logs_bloom, + prev_randao: v3.prev_randao, + block_number: v3.block_number, + gas_limit: v3.gas_limit, + gas_used: v3.gas_used, + timestamp: v3.timestamp, + extra_data: v3.extra_data, + base_fee_per_gas: v3.base_fee_per_gas, + block_hash: v3.block_hash, + transactions: v3.transactions, + #[cfg(feature = "withdrawals")] + withdrawals: v3 + .withdrawals + .map(|v| { + Into::>::into(v) + .into_iter() + .map(Into::into) + .collect::>() + .into() + }) + .ok_or(Error::BadConversion("Null withdrawal field converting JsonExecutionPayloadV3 -> ExecutionPayloadCapella".to_string()))? + })), + ForkName::Eip4844 => Ok(ExecutionPayload::Eip4844(ExecutionPayloadEip4844 { + parent_hash: v3.parent_hash, + fee_recipient: v3.fee_recipient, + state_root: v3.state_root, + receipts_root: v3.receipts_root, + logs_bloom: v3.logs_bloom, + prev_randao: v3.prev_randao, + block_number: v3.block_number, + gas_limit: v3.gas_limit, + gas_used: v3.gas_used, + timestamp: v3.timestamp, + extra_data: v3.extra_data, + base_fee_per_gas: v3.base_fee_per_gas, + // FIXME: excess_blobs probably will be an option whenever the engine API is finalized + excess_blobs: v3.excess_blobs, + block_hash: v3.block_hash, + transactions: v3.transactions, + #[cfg(feature = "withdrawals")] + withdrawals: v3 + .withdrawals + .map(|v| { + Vec::from(v) + .into_iter() + .map(Into::into) + .collect::>() + .into() + }) + .ok_or(Error::BadConversion("Null withdrawal field converting JsonExecutionPayloadV3 -> ExecutionPayloadEip4844".to_string()))?, + })), + _ => Err(Error::UnsupportedForkVariant(format!("Unsupported conversion from JsonExecutionPayloadV2 for {}", fork_name))), + } } } } -impl From> for JsonExecutionPayload { - fn from(payload: ExecutionPayload) -> Self { +impl TryFrom> for JsonExecutionPayloadV1 { + type Error = Error; + fn try_from(payload: ExecutionPayload) -> Result { match payload { - ExecutionPayload::Merge(merge) => Self::V1(JsonExecutionPayloadV1 { + ExecutionPayload::Merge(merge) => Ok(JsonExecutionPayloadV1 { parent_hash: merge.parent_hash, fee_recipient: merge.fee_recipient, state_root: merge.state_root, @@ -347,7 +277,40 @@ impl From> for JsonExecutionPayload { block_hash: merge.block_hash, transactions: merge.transactions, }), - ExecutionPayload::Capella(capella) => Self::V2(JsonExecutionPayloadV2 { + ExecutionPayload::Capella(_) => Err(Error::UnsupportedForkVariant(format!( + "Unsupported conversion to JsonExecutionPayloadV1 for {}", + 
ForkName::Capella + ))), + ExecutionPayload::Eip4844(_) => Err(Error::UnsupportedForkVariant(format!( + "Unsupported conversion to JsonExecutionPayloadV1 for {}", + ForkName::Eip4844 + ))), + } + } +} + +impl TryFrom> for JsonExecutionPayloadV2 { + type Error = Error; + fn try_from(payload: ExecutionPayload) -> Result { + match payload { + ExecutionPayload::Merge(merge) => Ok(JsonExecutionPayloadV2 { + parent_hash: merge.parent_hash, + fee_recipient: merge.fee_recipient, + state_root: merge.state_root, + receipts_root: merge.receipts_root, + logs_bloom: merge.logs_bloom, + prev_randao: merge.prev_randao, + block_number: merge.block_number, + gas_limit: merge.gas_limit, + gas_used: merge.gas_used, + timestamp: merge.timestamp, + extra_data: merge.extra_data, + base_fee_per_gas: merge.base_fee_per_gas, + block_hash: merge.block_hash, + transactions: merge.transactions, + withdrawals: None, + }), + ExecutionPayload::Capella(capella) => Ok(JsonExecutionPayloadV2 { parent_hash: capella.parent_hash, fee_recipient: capella.fee_recipient, state_root: capella.state_root, @@ -363,27 +326,20 @@ impl From> for JsonExecutionPayload { block_hash: capella.block_hash, transactions: capella.transactions, #[cfg(feature = "withdrawals")] - withdrawals: capella.withdrawals, - }), - ExecutionPayload::Eip4844(eip4844) => Self::V3(JsonExecutionPayloadV3 { - parent_hash: eip4844.parent_hash, - fee_recipient: eip4844.fee_recipient, - state_root: eip4844.state_root, - receipts_root: eip4844.receipts_root, - logs_bloom: eip4844.logs_bloom, - prev_randao: eip4844.prev_randao, - block_number: eip4844.block_number, - gas_limit: eip4844.gas_limit, - gas_used: eip4844.gas_used, - timestamp: eip4844.timestamp, - extra_data: eip4844.extra_data, - base_fee_per_gas: eip4844.base_fee_per_gas, - excess_blobs: eip4844.excess_blobs, - block_hash: eip4844.block_hash, - transactions: eip4844.transactions, - #[cfg(feature = "withdrawals")] - withdrawals: eip4844.withdrawals, + withdrawals: Some( + Vec::from(capella.withdrawals) + .into_iter() + .map(Into::into) + .collect::>() + .into(), + ), + #[cfg(not(feature = "withdrawals"))] + withdrawals: None, }), + ExecutionPayload::Eip4844(_) => Err(Error::UnsupportedForkVariant(format!( + "Unsupported conversion to JsonExecutionPayloadV1 for {}", + ForkName::Eip4844 + ))), } } } @@ -424,12 +380,15 @@ impl From for Withdrawal { #[superstruct( variants(V1, V2), - variant_attributes(derive(Clone, Debug, PartialEq, Serialize, Deserialize),), + variant_attributes( + derive(Clone, Debug, PartialEq, Serialize, Deserialize), + serde(rename_all = "camelCase") + ), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") )] #[derive(Debug, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase", untagged)] +#[serde(untagged)] pub struct JsonPayloadAttributes { #[serde(with = "eth2_serde_utils::u64_hex_be")] pub timestamp: u64, @@ -437,7 +396,7 @@ pub struct JsonPayloadAttributes { pub suggested_fee_recipient: Address, #[cfg(feature = "withdrawals")] #[superstruct(only(V2))] - pub withdrawals: Vec, + pub withdrawals: Option>, } impl From for JsonPayloadAttributes { @@ -453,7 +412,9 @@ impl From for JsonPayloadAttributes { prev_randao: pa.prev_randao, suggested_fee_recipient: pa.suggested_fee_recipient, #[cfg(feature = "withdrawals")] - withdrawals: pa.withdrawals.into_iter().map(Into::into).collect(), + withdrawals: pa + .withdrawals + .map(|w| w.into_iter().map(Into::into).collect()), }), } } @@ -472,7 
+433,9 @@ impl From for PayloadAttributes { prev_randao: jpa.prev_randao, suggested_fee_recipient: jpa.suggested_fee_recipient, #[cfg(feature = "withdrawals")] - withdrawals: jpa.withdrawals.into_iter().map(Into::into).collect(), + withdrawals: jpa + .withdrawals + .map(|jw| jw.into_iter().map(Into::into).collect()), }), } } @@ -488,16 +451,16 @@ pub struct JsonBlobBundles { #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] -pub struct JsonForkChoiceStateV1 { +pub struct JsonForkchoiceStateV1 { pub head_block_hash: ExecutionBlockHash, pub safe_block_hash: ExecutionBlockHash, pub finalized_block_hash: ExecutionBlockHash, } -impl From for JsonForkChoiceStateV1 { - fn from(f: ForkChoiceState) -> Self { +impl From for JsonForkchoiceStateV1 { + fn from(f: ForkchoiceState) -> Self { // Use this verbose deconstruction pattern to ensure no field is left unused. - let ForkChoiceState { + let ForkchoiceState { head_block_hash, safe_block_hash, finalized_block_hash, @@ -511,10 +474,10 @@ impl From for JsonForkChoiceStateV1 { } } -impl From for ForkChoiceState { - fn from(j: JsonForkChoiceStateV1) -> Self { +impl From for ForkchoiceState { + fn from(j: JsonForkchoiceStateV1) -> Self { // Use this verbose deconstruction pattern to ensure no field is left unused. - let JsonForkChoiceStateV1 { + let JsonForkchoiceStateV1 { head_block_hash, safe_block_hash, finalized_block_hash, diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index da77bd9cf..264303b5d 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -88,7 +88,7 @@ impl State { } #[derive(Copy, Clone, PartialEq, Debug)] -pub struct ForkChoiceState { +pub struct ForkchoiceState { pub head_block_hash: ExecutionBlockHash, pub safe_block_hash: ExecutionBlockHash, pub finalized_block_hash: ExecutionBlockHash, @@ -115,7 +115,7 @@ pub struct Engine { pub api: HttpJsonRpc, payload_id_cache: Mutex>, state: RwLock, - latest_forkchoice_state: RwLock>, + latest_forkchoice_state: RwLock>, executor: TaskExecutor, log: Logger, } @@ -161,13 +161,13 @@ impl Engine { pub async fn notify_forkchoice_updated( &self, - forkchoice_state: ForkChoiceState, + forkchoice_state: ForkchoiceState, payload_attributes: Option, log: &Logger, ) -> Result { let response = self .api - .forkchoice_updated_v1(forkchoice_state, payload_attributes.clone()) + .forkchoice_updated(forkchoice_state, payload_attributes.clone()) .await?; if let Some(payload_id) = response.payload_id { @@ -187,11 +187,11 @@ impl Engine { Ok(response) } - async fn get_latest_forkchoice_state(&self) -> Option { + async fn get_latest_forkchoice_state(&self) -> Option { *self.latest_forkchoice_state.read().await } - pub async fn set_latest_forkchoice_state(&self, state: ForkChoiceState) { + pub async fn set_latest_forkchoice_state(&self, state: ForkchoiceState) { *self.latest_forkchoice_state.write().await = Some(state); } @@ -216,7 +216,7 @@ impl Engine { // For simplicity, payload attributes are never included in this call. It may be // reasonable to include them in the future. - if let Err(e) = self.api.forkchoice_updated_v1(forkchoice_state, None).await { + if let Err(e) = self.api.forkchoice_updated(forkchoice_state, None).await { debug!( self.log, "Failed to issue latest head to engine"; @@ -349,7 +349,7 @@ impl Engine { // TODO: revisit this - do we need to key on withdrawals as well here? 
impl PayloadIdCacheKey { - fn new(state: &ForkChoiceState, attributes: &PayloadAttributes) -> Self { + fn new(state: &ForkchoiceState, attributes: &PayloadAttributes) -> Self { Self { head_block_hash: state.head_block_hash, timestamp: attributes.timestamp(), diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 05d45c07a..5f2baf9a4 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -11,7 +11,7 @@ use engine_api::Error as ApiError; pub use engine_api::*; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; use engines::{Engine, EngineError}; -pub use engines::{EngineState, ForkChoiceState}; +pub use engines::{EngineState, ForkchoiceState}; use fork_choice::ForkchoiceUpdateParameters; use lru::LruCache; use payload_status::process_payload_status; @@ -32,6 +32,8 @@ use tokio::{ time::sleep, }; use tokio_stream::wrappers::WatchStream; +#[cfg(feature = "withdrawals")] +use types::Withdrawal; use types::{AbstractExecPayload, Blob, ExecPayload, ExecutionPayloadEip4844, KzgCommitment}; use types::{ BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ForkName, @@ -623,6 +625,8 @@ impl ExecutionLayer { proposer_index: u64, forkchoice_update_params: ForkchoiceUpdateParameters, builder_params: BuilderParams, + current_fork: ForkName, + #[cfg(feature = "withdrawals")] withdrawals: Option>, spec: &ChainSpec, ) -> Result, Error> { let suggested_fee_recipient = self.get_suggested_fee_recipient(proposer_index).await; @@ -640,6 +644,9 @@ impl ExecutionLayer { suggested_fee_recipient, forkchoice_update_params, builder_params, + current_fork, + #[cfg(feature = "withdrawals")] + withdrawals, spec, ) .await @@ -655,6 +662,9 @@ impl ExecutionLayer { prev_randao, suggested_fee_recipient, forkchoice_update_params, + current_fork, + #[cfg(feature = "withdrawals")] + withdrawals, ) .await } @@ -670,6 +680,8 @@ impl ExecutionLayer { suggested_fee_recipient: Address, forkchoice_update_params: ForkchoiceUpdateParameters, builder_params: BuilderParams, + current_fork: ForkName, + #[cfg(feature = "withdrawals")] withdrawals: Option>, spec: &ChainSpec, ) -> Result, Error> { if let Some(builder) = self.builder() { @@ -693,6 +705,9 @@ impl ExecutionLayer { prev_randao, suggested_fee_recipient, forkchoice_update_params, + current_fork, + #[cfg(feature = "withdrawals")] + withdrawals, ) ); @@ -822,6 +837,9 @@ impl ExecutionLayer { prev_randao, suggested_fee_recipient, forkchoice_update_params, + current_fork, + #[cfg(feature = "withdrawals")] + withdrawals, ) .await } @@ -834,6 +852,8 @@ impl ExecutionLayer { prev_randao: Hash256, suggested_fee_recipient: Address, forkchoice_update_params: ForkchoiceUpdateParameters, + current_fork: ForkName, + #[cfg(feature = "withdrawals")] withdrawals: Option>, ) -> Result, Error> { self.get_full_payload_with( parent_hash, @@ -841,6 +861,9 @@ impl ExecutionLayer { prev_randao, suggested_fee_recipient, forkchoice_update_params, + current_fork, + #[cfg(feature = "withdrawals")] + withdrawals, noop, ) .await @@ -854,6 +877,8 @@ impl ExecutionLayer { prev_randao: Hash256, suggested_fee_recipient: Address, forkchoice_update_params: ForkchoiceUpdateParameters, + current_fork: ForkName, + #[cfg(feature = "withdrawals")] withdrawals: Option>, ) -> Result, Error> { self.get_full_payload_with( parent_hash, @@ -861,6 +886,9 @@ impl ExecutionLayer { prev_randao, suggested_fee_recipient, forkchoice_update_params, + current_fork, + #[cfg(feature = "withdrawals")] + withdrawals, 
Self::cache_payload, ) .await @@ -873,10 +901,14 @@ impl ExecutionLayer { prev_randao: Hash256, suggested_fee_recipient: Address, forkchoice_update_params: ForkchoiceUpdateParameters, + current_fork: ForkName, + #[cfg(feature = "withdrawals")] withdrawals: Option>, f: fn(&ExecutionLayer, &ExecutionPayload) -> Option>, ) -> Result, Error> { + #[cfg(feature = "withdrawals")] + let withdrawals_ref = &withdrawals; self.engine() - .request(|engine| async move { + .request(move |engine| async move { let payload_id = if let Some(id) = engine .get_payload_id(parent_hash, timestamp, prev_randao, suggested_fee_recipient) .await @@ -894,7 +926,7 @@ impl ExecutionLayer { &metrics::EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID, &[metrics::MISS], ); - let fork_choice_state = ForkChoiceState { + let fork_choice_state = ForkchoiceState { head_block_hash: parent_hash, safe_block_hash: forkchoice_update_params .justified_hash @@ -903,12 +935,14 @@ impl ExecutionLayer { .finalized_hash .unwrap_or_else(ExecutionBlockHash::zero), }; - // FIXME: This will have to properly handle forks. To do that, - // withdrawals will need to be passed into this function - let payload_attributes = PayloadAttributes::V1(PayloadAttributesV1 { + // This must always be the latest PayloadAttributes + // FIXME: How to non-capella EIP4844 testnets handle this? + let payload_attributes = PayloadAttributes::V2(PayloadAttributesV2 { timestamp, prev_randao, suggested_fee_recipient, + #[cfg(feature = "withdrawals")] + withdrawals: withdrawals_ref.clone(), }); let response = engine @@ -935,16 +969,22 @@ impl ExecutionLayer { }; let blob_fut = async { - //FIXME(sean) do a fork check here and return None otherwise - debug!( - self.log(), - "Issuing engine_getBlobsBundle"; - "suggested_fee_recipient" => ?suggested_fee_recipient, - "prev_randao" => ?prev_randao, - "timestamp" => timestamp, - "parent_hash" => ?parent_hash, - ); - Some(engine.api.get_blobs_bundle_v1::(payload_id).await) + match current_fork { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + None + } + ForkName::Eip4844 => { + debug!( + self.log(), + "Issuing engine_getBlobsBundle"; + "suggested_fee_recipient" => ?suggested_fee_recipient, + "prev_randao" => ?prev_randao, + "timestamp" => timestamp, + "parent_hash" => ?parent_hash, + ); + Some(engine.api.get_blobs_bundle_v1::(payload_id).await) + } + } }; let payload_fut = async { debug!( @@ -955,9 +995,8 @@ impl ExecutionLayer { "timestamp" => timestamp, "parent_hash" => ?parent_hash, ); - engine.api.get_payload_v1::(payload_id).await + engine.api.get_payload::(current_fork, payload_id).await }; - let (blob, payload) = tokio::join!(blob_fut, payload_fut); let payload = payload.map(|full_payload| { if full_payload.fee_recipient() != suggested_fee_recipient { @@ -1030,7 +1069,7 @@ impl ExecutionLayer { let result = self .engine() - .request(|engine| engine.api.new_payload_v1(execution_payload.clone())) + .request(|engine| engine.api.new_payload(execution_payload.clone())) .await; if let Ok(status) = &result { @@ -1160,7 +1199,7 @@ impl ExecutionLayer { } } - let forkchoice_state = ForkChoiceState { + let forkchoice_state = ForkchoiceState { head_block_hash, safe_block_hash: justified_block_hash, finalized_block_hash, diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 37eb8ba8f..f2282c603 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ 
b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -1,4 +1,4 @@ -use crate::engines::ForkChoiceState; +use crate::engines::ForkchoiceState; use crate::{ engine_api::{ json_structures::{ @@ -13,8 +13,7 @@ use std::collections::HashMap; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; use types::{ - EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge, - Hash256, Uint256, + EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadMerge, Hash256, Uint256, }; const GAS_LIMIT: u64 = 16384; @@ -315,7 +314,7 @@ impl ExecutionBlockGenerator { pub fn forkchoice_updated_v1( &mut self, - forkchoice_state: ForkChoiceState, + forkchoice_state: ForkchoiceState, payload_attributes: Option, ) -> Result { if let Some(payload) = self @@ -369,7 +368,6 @@ impl ExecutionBlockGenerator { let id = payload_id_from_u64(self.next_payload_id); self.next_payload_id += 1; - // FIXME: think about how to test different forks let mut execution_payload = match &attributes { PayloadAttributes::V1(pa) => ExecutionPayload::Merge(ExecutionPayloadMerge { parent_hash: forkchoice_state.head_block_hash, @@ -388,7 +386,8 @@ impl ExecutionBlockGenerator { transactions: vec![].into(), }), PayloadAttributes::V2(pa) => { - ExecutionPayload::Capella(ExecutionPayloadCapella { + // FIXME: think about how to test different forks + ExecutionPayload::Merge(ExecutionPayloadMerge { parent_hash: forkchoice_state.head_block_hash, fee_recipient: pa.suggested_fee_recipient, receipts_root: Hash256::repeat_byte(42), @@ -403,14 +402,6 @@ impl ExecutionBlockGenerator { base_fee_per_gas: Uint256::one(), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), - #[cfg(feature = "withdrawals")] - withdrawals: pa - .withdrawals - .iter() - .cloned() - .map(Into::into) - .collect::>() - .into(), }) } }; diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index ba26591ba..fe765cc94 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -4,7 +4,7 @@ use crate::json_structures::*; use serde::de::DeserializeOwned; use serde_json::Value as JsonValue; use std::sync::Arc; -use types::EthSpec; +use types::{EthSpec, ForkName}; pub async fn handle_rpc( body: JsonValue, @@ -97,7 +97,8 @@ pub async fn handle_rpc( Some( ctx.execution_block_generator .write() - .new_payload(request.into()), + // FIXME: should this worry about other forks? 
+ .new_payload(request.try_into_execution_payload(ForkName::Merge).unwrap()), ) } else { None @@ -117,10 +118,10 @@ pub async fn handle_rpc( .get_payload(&id) .ok_or_else(|| format!("no payload for id {:?}", id))?; - Ok(serde_json::to_value(JsonExecutionPayload::from(response)).unwrap()) + Ok(serde_json::to_value(JsonExecutionPayloadV1::try_from(response).unwrap()).unwrap()) } ENGINE_FORKCHOICE_UPDATED_V1 => { - let forkchoice_state: JsonForkChoiceStateV1 = get_param(params, 0)?; + let forkchoice_state: JsonForkchoiceStateV1 = get_param(params, 0)?; let payload_attributes: Option = get_param(params, 1)?; let head_block_hash = forkchoice_state.head_block_hash; diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 1323ea3e4..5c69fffbf 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -26,7 +26,8 @@ use task_executor::TaskExecutor; use tempfile::NamedTempFile; use tree_hash::TreeHash; use types::{ - Address, BeaconState, BlindedPayload, ChainSpec, EthSpec, ExecPayload, Hash256, Slot, Uint256, + Address, BeaconState, BlindedPayload, ChainSpec, EthSpec, ExecPayload, ForkName, Hash256, Slot, + Uint256, }; #[derive(Clone)] @@ -313,6 +314,10 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { *prev_randao, fee_recipient, forkchoice_update_params, + // TODO: do we need to write a test for this if this is Capella fork? + ForkName::Merge, + #[cfg(feature = "withdrawals")] + None, ) .await .map_err(convert_err)? diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 62336279b..cadeec1b3 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -114,7 +114,7 @@ impl MockExecutionLayer { suggested_fee_recipient: Address::repeat_byte(42), // FIXME: think about adding withdrawals here.. #[cfg(feature = "withdrawals")] - withdrawals: vec![], + withdrawals: Some(vec![]), }) } }, @@ -159,6 +159,10 @@ impl MockExecutionLayer { validator_index, forkchoice_update_params, builder_params, + // FIXME: do we need to consider other forks somehow? What about withdrawals? + ForkName::Merge, + #[cfg(feature = "withdrawals")] + Some(vec![]), &self.spec, ) .await @@ -191,6 +195,10 @@ impl MockExecutionLayer { validator_index, forkchoice_update_params, builder_params, + // FIXME: do we need to consider other forks somehow? What about withdrawals? 
+ ForkName::Merge, + #[cfg(feature = "withdrawals")] + Some(vec![]), &self.spec, ) .await diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index da86fa7d0..4538d8389 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -672,12 +672,12 @@ fn handle_v2_response( decoded_buffer, )?), )))), - ForkName::Capella => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + ForkName::Capella => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes( decoded_buffer, )?), )))), - ForkName::Eip4844 => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + ForkName::Eip4844 => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Eip4844(SignedBeaconBlockEip4844::from_ssz_bytes( decoded_buffer, )?), @@ -723,8 +723,8 @@ mod tests { }; use std::sync::Arc; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, Epoch, ForkContext, - FullPayload, Hash256, Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EmptyBlock, Epoch, + ForkContext, FullPayload, Hash256, Signature, SignedBeaconBlock, Slot, }; use snap::write::FrameEncoder; diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index ed8260e3b..c95a6a0ca 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -22,8 +22,8 @@ use tokio_util::{ }; use types::BlobsSidecar; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, Blob, EthSpec, ForkContext, - ForkName, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, Blob, EmptyBlock, EthSpec, + ForkContext, ForkName, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock, }; lazy_static! 
{ diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 1be94a93f..8cecc2e68 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -11,7 +11,7 @@ use crate::Subnet; pub const TOPIC_PREFIX: &str = "eth2"; pub const SSZ_SNAPPY_ENCODING_POSTFIX: &str = "ssz_snappy"; pub const BEACON_BLOCK_TOPIC: &str = "beacon_block"; -pub const BEACON_BLOCK_AND_BLOBS_SIDECAR_TOPIC: &str = "beacon_blocks_and_blobs_sidecar"; +pub const BEACON_BLOCK_AND_BLOBS_SIDECAR_TOPIC: &str = "beacon_block_and_blobs_sidecar"; pub const BEACON_AGGREGATE_AND_PROOF_TOPIC: &str = "beacon_aggregate_and_proof"; pub const BEACON_ATTESTATION_PREFIX: &str = "beacon_attestation_"; pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit"; diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 4fe5a7254..ba0567277 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -32,7 +32,8 @@ use std::ptr; use types::{ sync_aggregate::Error as SyncAggregateError, typenum::Unsigned, Attestation, AttestationData, AttesterSlashing, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ProposerSlashing, - SignedVoluntaryExit, Slot, SyncAggregate, SyncCommitteeContribution, Validator, + SignedBlsToExecutionChange, SignedVoluntaryExit, Slot, SyncAggregate, + SyncCommitteeContribution, Validator, }; type SyncContributions = RwLock>>>; @@ -49,6 +50,8 @@ pub struct OperationPool { proposer_slashings: RwLock>>, /// Map from exiting validator to their exit data. voluntary_exits: RwLock>>, + /// Map from credential changing validator to their execution change data. + bls_to_execution_changes: RwLock>>, /// Reward cache for accelerating attestation packing. reward_cache: RwLock, _phantom: PhantomData, @@ -509,6 +512,16 @@ impl OperationPool { ); } + /// Get a list of execution changes for inclusion in a block. + pub fn get_bls_to_execution_changes( + &self, + state: &BeaconState, + spec: &ChainSpec, + ) -> Vec { + // FIXME: actually implement this + return vec![]; + } + /// Prune all types of transactions given the latest head state and head fork. pub fn prune_all(&self, head_state: &BeaconState, current_epoch: Epoch) { self.prune_attestations(current_epoch); diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index ed15369df..92c5bd92f 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -142,6 +142,8 @@ impl PersistedOperationPool { attester_slashings, proposer_slashings, voluntary_exits, + // FIXME: IMPLEMENT THIS + bls_to_execution_changes: Default::default(), reward_cache: Default::default(), _phantom: Default::default(), }; diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index c0fbef973..e8c782b8c 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -492,17 +492,15 @@ impl, Cold: ItemStore> HotColdDB pub fn get_blobs(&self, block_root: &Hash256) -> Result>, Error> { if let Some(blobs) = self.blob_cache.lock().get(block_root) { Ok(Some(blobs.clone())) + } else if let Some(bytes) = self + .hot_db + .get_bytes(DBColumn::BeaconBlob.into(), block_root.as_bytes())? 
+ { + let ret = BlobsSidecar::from_ssz_bytes(&bytes)?; + self.blob_cache.lock().put(*block_root, ret.clone()); + Ok(Some(ret)) } else { - if let Some(bytes) = self - .hot_db - .get_bytes(DBColumn::BeaconBlob.into(), block_root.as_bytes())? - { - let ret = BlobsSidecar::from_ssz_bytes(&bytes)?; - self.blob_cache.lock().put(*block_root, ret.clone()); - Ok(Some(ret)) - } else { - Ok(None) - } + Ok(None) } } diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 5cff00529..12c562849 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -107,13 +107,10 @@ where // Withdrawals #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] - pub withdrawal_queue: VariableList, - #[cfg(feature = "withdrawals")] - #[superstruct(only(Capella, Eip4844))] pub next_withdrawal_index: u64, #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] - pub next_partial_withdrawal_validator_index: u64, + pub next_withdrawal_validator_index: u64, } /// Implement the conversion function from BeaconState -> PartialBeaconState. @@ -215,9 +212,8 @@ impl PartialBeaconState { next_sync_committee, inactivity_scores, latest_execution_payload_header, - withdrawal_queue, next_withdrawal_index, - next_partial_withdrawal_validator_index + next_withdrawal_validator_index ] ), #[cfg(not(feature = "withdrawals"))] @@ -248,9 +244,8 @@ impl PartialBeaconState { next_sync_committee, inactivity_scores, latest_execution_payload_header, - withdrawal_queue, next_withdrawal_index, - next_partial_withdrawal_validator_index + next_withdrawal_validator_index ] ), #[cfg(not(feature = "withdrawals"))] @@ -467,9 +462,8 @@ impl TryInto> for PartialBeaconState { next_sync_committee, inactivity_scores, latest_execution_payload_header, - withdrawal_queue, next_withdrawal_index, - next_partial_withdrawal_validator_index + next_withdrawal_validator_index ] ), #[cfg(not(feature = "withdrawals"))] @@ -498,9 +492,8 @@ impl TryInto> for PartialBeaconState { next_sync_committee, inactivity_scores, latest_execution_payload_header, - withdrawal_queue, next_withdrawal_index, - next_partial_withdrawal_validator_index + next_withdrawal_validator_index ] ), #[cfg(not(feature = "withdrawals"))] diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index eca086d83..6ee02b71b 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -35,3 +35,5 @@ procinfo = { version = "0.4.2", optional = true } [features] default = ["lighthouse"] lighthouse = ["proto_array", "psutil", "procinfo", "store", "slashing_protection"] +withdrawals = ["store/withdrawals"] +withdrawals-processing = ["store/withdrawals-processing"] \ No newline at end of file diff --git a/consensus/state_processing/src/common/mod.rs b/consensus/state_processing/src/common/mod.rs index 34091127c..8a2e2439b 100644 --- a/consensus/state_processing/src/common/mod.rs +++ b/consensus/state_processing/src/common/mod.rs @@ -4,7 +4,6 @@ mod get_attesting_indices; mod get_indexed_attestation; mod initiate_validator_exit; mod slash_validator; -mod withdraw_balance; pub mod altair; pub mod base; @@ -15,8 +14,6 @@ pub use get_attesting_indices::{get_attesting_indices, get_attesting_indices_fro pub use get_indexed_attestation::get_indexed_attestation; pub use initiate_validator_exit::initiate_validator_exit; pub use slash_validator::slash_validator; -#[cfg(feature = "withdrawals")] -pub use withdraw_balance::withdraw_balance; use safe_arith::SafeArith; use 
types::{BeaconState, BeaconStateError, EthSpec}; diff --git a/consensus/state_processing/src/common/withdraw_balance.rs b/consensus/state_processing/src/common/withdraw_balance.rs deleted file mode 100644 index 65343f311..000000000 --- a/consensus/state_processing/src/common/withdraw_balance.rs +++ /dev/null @@ -1,29 +0,0 @@ -use crate::common::decrease_balance; -use safe_arith::SafeArith; -use types::{BeaconStateError as Error, *}; - -#[cfg(feature = "withdrawals")] -pub fn withdraw_balance( - state: &mut BeaconState, - validator_index: usize, - amount: u64, -) -> Result<(), Error> { - decrease_balance(state, validator_index as usize, amount)?; - - let withdrawal_address = Address::from_slice( - &state - .get_validator(validator_index)? - .withdrawal_credentials - .as_bytes()[12..], - ); - let withdrawal = Withdrawal { - index: *state.next_withdrawal_index()?, - validator_index: validator_index as u64, - address: withdrawal_address, - amount, - }; - state.next_withdrawal_index_mut()?.safe_add_assign(1)?; - state.withdrawal_queue_mut()?.push(withdrawal)?; - - Ok(()) -} diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 7a093d558..40fdcdd19 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -19,6 +19,8 @@ pub use process_operations::process_operations; pub use verify_attestation::{ verify_attestation_for_block_inclusion, verify_attestation_for_state, }; +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +pub use verify_bls_to_execution_change::verify_bls_to_execution_change; pub use verify_deposit::{ get_existing_validator_index, verify_deposit_merkle_proof, verify_deposit_signature, }; @@ -34,10 +36,15 @@ pub mod signature_sets; pub mod tests; mod verify_attestation; mod verify_attester_slashing; +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +mod verify_bls_to_execution_change; mod verify_deposit; mod verify_exit; mod verify_proposer_slashing; +#[cfg(feature = "withdrawals-processing")] +use crate::common::decrease_balance; + #[cfg(feature = "arbitrary-fuzz")] use arbitrary::Arbitrary; @@ -161,6 +168,8 @@ pub fn per_block_processing>( // previous block. 
if is_execution_enabled(state, block.body()) { let payload = block.body().execution_payload()?; + #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] + process_withdrawals::(state, payload, spec)?; process_execution_payload::(state, payload, spec)?; } @@ -454,3 +463,100 @@ pub fn compute_timestamp_at_slot( .safe_mul(spec.seconds_per_slot) .and_then(|since_genesis| state.genesis_time().safe_add(since_genesis)) } + +/// FIXME: add link to this function once the spec is stable +#[cfg(feature = "withdrawals")] +pub fn get_expected_withdrawals( + state: &BeaconState, + spec: &ChainSpec, +) -> Result, BlockProcessingError> { + let epoch = state.current_epoch(); + let mut withdrawal_index = state.next_withdrawal_index()?; + let mut validator_index = state.next_withdrawal_validator_index()?; + let mut withdrawals = vec![]; + + if cfg!(not(feature = "withdrawals-processing")) { + return Ok(withdrawals.into()); + } + + for _ in 0..state.validators().len() { + let validator = state.get_validator(validator_index as usize)?; + let balance = *state.balances().get(validator_index as usize).ok_or( + BeaconStateError::BalancesOutOfBounds(validator_index as usize), + )?; + if validator.is_fully_withdrawable_at(balance, epoch, spec) { + withdrawals.push(Withdrawal { + index: withdrawal_index, + validator_index, + address: validator + .get_eth1_withdrawal_address(spec) + .ok_or(BlockProcessingError::WithdrawalCredentialsInvalid)?, + amount: balance, + }); + withdrawal_index.safe_add_assign(1)?; + } else if validator.is_partially_withdrawable_validator(balance, spec) { + withdrawals.push(Withdrawal { + index: withdrawal_index, + validator_index, + address: validator + .get_eth1_withdrawal_address(spec) + .ok_or(BlockProcessingError::WithdrawalCredentialsInvalid)?, + amount: balance.safe_sub(spec.max_effective_balance)?, + }); + withdrawal_index.safe_add_assign(1)?; + } + if withdrawals.len() == T::max_withdrawals_per_payload() { + break; + } + validator_index = validator_index + .safe_add(1)? + .safe_rem(state.validators().len() as u64)?; + } + + Ok(withdrawals.into()) +} + +/// FIXME: add link to this function once the spec is stable +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +pub fn process_withdrawals<'payload, T: EthSpec, Payload: AbstractExecPayload>( + state: &mut BeaconState, + payload: Payload::Ref<'payload>, + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + match state { + BeaconState::Merge(_) => Ok(()), + BeaconState::Capella(_) | BeaconState::Eip4844(_) => { + let expected_withdrawals = get_expected_withdrawals(state, spec)?; + let expected_root = expected_withdrawals.tree_hash_root(); + let withdrawals_root = payload.withdrawals_root()?; + + if expected_root != withdrawals_root { + return Err(BlockProcessingError::WithdrawalsRootMismatch { + expected: expected_root, + found: withdrawals_root, + }); + } + + for withdrawal in expected_withdrawals.iter() { + decrease_balance( + state, + withdrawal.validator_index as usize, + withdrawal.amount, + )?; + } + + if let Some(latest_withdrawal) = expected_withdrawals.last() { + *state.next_withdrawal_index_mut()? = latest_withdrawal.index.safe_add(1)?; + let next_validator_index = latest_withdrawal + .validator_index + .safe_add(1)? + .safe_rem(state.validators().len() as u64)?; + *state.next_withdrawal_validator_index_mut()? 
= next_validator_index; + } + + Ok(()) + } + // these shouldn't even be encountered but they're here for completeness + BeaconState::Base(_) | BeaconState::Altair(_) => Ok(()), + } +} diff --git a/consensus/state_processing/src/per_block_processing/eip4844.rs b/consensus/state_processing/src/per_block_processing/eip4844.rs index 120ba304d..23ab3c5c0 100644 --- a/consensus/state_processing/src/per_block_processing/eip4844.rs +++ b/consensus/state_processing/src/per_block_processing/eip4844.rs @@ -1 +1,2 @@ +#[allow(clippy::module_inception)] pub mod eip4844; diff --git a/consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs b/consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs index 284d0d0d6..0998756fd 100644 --- a/consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs +++ b/consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs @@ -6,8 +6,8 @@ use ssz::Decode; use ssz_types::VariableList; use types::consts::eip4844::{BLOB_TX_TYPE, VERSIONED_HASH_VERSION_KZG}; use types::{ - AbstractExecPayload, BeaconBlockBodyRef, EthSpec, ExecPayload, FullPayload, FullPayloadRef, - KzgCommitment, Transaction, Transactions, VersionedHash, + AbstractExecPayload, BeaconBlockBodyRef, EthSpec, ExecPayload, KzgCommitment, Transaction, + Transactions, VersionedHash, }; pub fn process_blob_kzg_commitments>( @@ -35,7 +35,7 @@ pub fn verify_kzg_commitments_against_transactions( let nested_iter = transactions .into_iter() .filter(|tx| { - tx.get(0) + tx.first() .map(|tx_type| *tx_type == BLOB_TX_TYPE) .unwrap_or(false) }) diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index fdb13f428..7b355b0dd 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -49,6 +49,10 @@ pub enum BlockProcessingError { index: usize, reason: ExitInvalid, }, + BlsExecutionChangeInvalid { + index: usize, + reason: BlsExecutionChangeInvalid, + }, SyncAggregateInvalid { reason: SyncAggregateInvalid, }, @@ -74,6 +78,10 @@ pub enum BlockProcessingError { }, ExecutionInvalid, ConsensusContext(ContextError), + WithdrawalsRootMismatch { + expected: Hash256, + found: Hash256, + }, BlobVersionHashMismatch, /// The number of commitments in blob transactions in the payload does not match the number /// of commitments in the block. @@ -86,6 +94,7 @@ pub enum BlockProcessingError { index: usize, length: usize, }, + WithdrawalCredentialsInvalid, } impl From for BlockProcessingError { @@ -180,7 +189,8 @@ impl_into_block_processing_error_with_index!( IndexedAttestationInvalid, AttestationInvalid, DepositInvalid, - ExitInvalid + ExitInvalid, + BlsExecutionChangeInvalid ); pub type HeaderValidationError = BlockOperationError; @@ -190,6 +200,7 @@ pub type AttestationValidationError = BlockOperationError; pub type SyncCommitteeMessageValidationError = BlockOperationError; pub type DepositValidationError = BlockOperationError; pub type ExitValidationError = BlockOperationError; +pub type BlsExecutionChangeValidationError = BlockOperationError; #[derive(Debug, PartialEq, Clone)] pub enum BlockOperationError { @@ -405,6 +416,18 @@ pub enum ExitInvalid { SignatureSetError(SignatureSetError), } +#[derive(Debug, PartialEq, Clone)] +pub enum BlsExecutionChangeInvalid { + /// The specified validator is not in the state's validator registry. 
+    ValidatorUnknown(u64),
+    /// Validator does not have BLS Withdrawal credentials before this change
+    NonBlsWithdrawalCredentials,
+    /// Provided BLS pubkey does not match withdrawal credentials
+    WithdrawalCredentialsMismatch,
+    /// The signature is invalid
+    BadSignature,
+}
+
 #[derive(Debug, PartialEq, Clone)]
 pub enum SyncAggregateInvalid {
     /// One or more of the aggregate public keys is invalid.
diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs
index 6aca565b5..32e36c6ce 100644
--- a/consensus/state_processing/src/per_block_processing/process_operations.rs
+++ b/consensus/state_processing/src/per_block_processing/process_operations.rs
@@ -33,6 +33,12 @@ pub fn process_operations<'a, T: EthSpec, Payload: AbstractExecPayload<T>>(
     process_attestations(state, block_body, verify_signatures, ctxt, spec)?;
     process_deposits(state, block_body.deposits(), spec)?;
     process_exits(state, block_body.voluntary_exits(), verify_signatures, spec)?;
+
+    #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))]
+    if let Ok(bls_to_execution_changes) = block_body.bls_to_execution_changes() {
+        process_bls_to_execution_changes(state, bls_to_execution_changes, verify_signatures, spec)?;
+    }
+
     Ok(())
 }
 
@@ -279,6 +285,32 @@ pub fn process_exits<T: EthSpec>(
     Ok(())
 }
 
+/// Validates each `bls_to_execution_change` and updates the state.
+///
+/// Returns `Ok(())` if the validation and state updates completed successfully. Otherwise returns
+/// an `Err` describing the invalid object or cause of failure.
+#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))]
+pub fn process_bls_to_execution_changes<T: EthSpec>(
+    state: &mut BeaconState<T>,
+    bls_to_execution_changes: &[SignedBlsToExecutionChange],
+    verify_signatures: VerifySignatures,
+    spec: &ChainSpec,
+) -> Result<(), BlockProcessingError> {
+    for (i, signed_address_change) in bls_to_execution_changes.iter().enumerate() {
+        verify_bls_to_execution_change(state, signed_address_change, verify_signatures, spec)
+            .map_err(|e| e.into_with_index(i))?;
+
+        state
+            .get_validator_mut(signed_address_change.message.validator_index as usize)?
+            .change_withdrawal_credentials(
+                &signed_address_change.message.to_execution_address,
+                spec,
+            );
+    }
+
+    Ok(())
+}
+
 /// Validates each `Deposit` and updates the state, short-circuiting on an invalid object.
/// /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index d07f8bb6e..fa37681c7 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -11,8 +11,8 @@ use types::{ BeaconStateError, ChainSpec, DepositData, Domain, Epoch, EthSpec, Fork, Hash256, InconsistentFork, IndexedAttestation, ProposerSlashing, PublicKey, PublicKeyBytes, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockHeader, - SignedContributionAndProof, SignedRoot, SignedVoluntaryExit, SigningData, Slot, SyncAggregate, - SyncAggregatorSelectionData, Unsigned, + SignedBlsToExecutionChange, SignedContributionAndProof, SignedRoot, SignedVoluntaryExit, + SigningData, Slot, SyncAggregate, SyncAggregatorSelectionData, Unsigned, }; pub type Result = std::result::Result; @@ -156,6 +156,33 @@ where )) } +pub fn bls_execution_change_signature_set<'a, T: EthSpec>( + state: &'a BeaconState, + signed_address_change: &'a SignedBlsToExecutionChange, + spec: &'a ChainSpec, +) -> Result> { + let domain = spec.get_domain( + state.current_epoch(), + Domain::BlsToExecutionChange, + &state.fork(), + state.genesis_validators_root(), + ); + let message = signed_address_change.message.signing_root(domain); + let signing_key = Cow::Owned( + signed_address_change + .message + .from_bls_pubkey + .decompress() + .map_err(|_| Error::PublicKeyDecompressionFailed)?, + ); + + Ok(SignatureSet::single_pubkey( + &signed_address_change.signature, + signing_key, + message, + )) +} + /// A signature set that is valid if the block proposers randao reveal signature is correct. pub fn randao_signature_set<'a, T, F, Payload: AbstractExecPayload>( state: &'a BeaconState, diff --git a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs new file mode 100644 index 000000000..34700a33e --- /dev/null +++ b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs @@ -0,0 +1,57 @@ +use super::errors::{BlockOperationError, BlsExecutionChangeInvalid as Invalid}; +use crate::per_block_processing::signature_sets::bls_execution_change_signature_set; +use crate::VerifySignatures; +use eth2_hashing::hash; +use types::*; + +type Result = std::result::Result>; + +fn error(reason: Invalid) -> BlockOperationError { + BlockOperationError::invalid(reason) +} + +/// Indicates if a `BlsToExecutionChange` is valid to be included in a block in the current epoch of the given +/// state. +/// +/// Returns `Ok(())` if the `SignedBlsToExecutionChange` is valid, otherwise indicates the reason for invalidity. 
+pub fn verify_bls_to_execution_change( + state: &BeaconState, + signed_address_change: &SignedBlsToExecutionChange, + verify_signatures: VerifySignatures, + spec: &ChainSpec, +) -> Result<()> { + let address_change = &signed_address_change.message; + + let validator = state + .validators() + .get(address_change.validator_index as usize) + .ok_or_else(|| error(Invalid::ValidatorUnknown(address_change.validator_index)))?; + + verify!( + validator + .withdrawal_credentials + .as_bytes() + .first() + .map(|byte| *byte == spec.bls_withdrawal_prefix_byte) + .unwrap_or(false), + Invalid::NonBlsWithdrawalCredentials + ); + + let pubkey_hash = hash(address_change.from_bls_pubkey.as_serialized()); + + // FIXME: Should this check be put inside the verify_signatures.is_true() condition? + // I believe that's used for fuzzing so this is a Mehdi question.. + verify!( + validator.withdrawal_credentials.as_bytes().get(1..) == pubkey_hash.get(1..), + Invalid::WithdrawalCredentialsMismatch + ); + + if verify_signatures.is_true() { + verify!( + bls_execution_change_signature_set(state, signed_address_change, spec)?.verify(), + Invalid::BadSignature + ); + } + + Ok(()) +} diff --git a/consensus/state_processing/src/per_epoch_processing.rs b/consensus/state_processing/src/per_epoch_processing.rs index 565fae9db..f227b8286 100644 --- a/consensus/state_processing/src/per_epoch_processing.rs +++ b/consensus/state_processing/src/per_epoch_processing.rs @@ -11,7 +11,6 @@ pub use weigh_justification_and_finalization::weigh_justification_and_finalizati pub mod altair; pub mod base; -pub mod capella; pub mod effective_balance_updates; pub mod epoch_processing_summary; pub mod errors; @@ -38,8 +37,10 @@ pub fn process_epoch( match state { BeaconState::Base(_) => base::process_epoch(state, spec), - BeaconState::Altair(_) | BeaconState::Merge(_) => altair::process_epoch(state, spec), - BeaconState::Capella(_) | BeaconState::Eip4844(_) => capella::process_epoch(state, spec), + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Eip4844(_) => altair::process_epoch(state, spec), } } diff --git a/consensus/state_processing/src/per_epoch_processing/capella.rs b/consensus/state_processing/src/per_epoch_processing/capella.rs deleted file mode 100644 index ed5665d77..000000000 --- a/consensus/state_processing/src/per_epoch_processing/capella.rs +++ /dev/null @@ -1,87 +0,0 @@ -use super::{process_registry_updates, process_slashings, EpochProcessingSummary, Error}; -use crate::per_epoch_processing::{ - altair, - effective_balance_updates::process_effective_balance_updates, - historical_roots_update::process_historical_roots_update, - resets::{process_eth1_data_reset, process_randao_mixes_reset, process_slashings_reset}, -}; -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] -pub use full_withdrawals::process_full_withdrawals; -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] -pub use partial_withdrawals::process_partial_withdrawals; -use types::{BeaconState, ChainSpec, EthSpec, RelativeEpoch}; - -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] -pub mod full_withdrawals; -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] -pub mod partial_withdrawals; - -pub fn process_epoch( - state: &mut BeaconState, - spec: &ChainSpec, -) -> Result, Error> { - // Ensure the committee caches are built. 
- state.build_committee_cache(RelativeEpoch::Previous, spec)?; - state.build_committee_cache(RelativeEpoch::Current, spec)?; - state.build_committee_cache(RelativeEpoch::Next, spec)?; - - // Pre-compute participating indices and total balances. - let participation_cache = altair::ParticipationCache::new(state, spec)?; - let sync_committee = state.current_sync_committee()?.clone(); - - // Justification and finalization. - let justification_and_finalization_state = - altair::process_justification_and_finalization(state, &participation_cache)?; - justification_and_finalization_state.apply_changes_to_state(state); - - altair::process_inactivity_updates(state, &participation_cache, spec)?; - - // Rewards and Penalties. - altair::process_rewards_and_penalties(state, &participation_cache, spec)?; - - // Registry Updates. - process_registry_updates(state, spec)?; - - // Slashings. - process_slashings( - state, - participation_cache.current_epoch_total_active_balance(), - spec, - )?; - - // Reset eth1 data votes. - process_eth1_data_reset(state)?; - - // Update effective balances with hysteresis (lag). - process_effective_balance_updates(state, spec)?; - - // Reset slashings - process_slashings_reset(state)?; - - // Set randao mix - process_randao_mixes_reset(state)?; - - // Set historical root accumulator - process_historical_roots_update(state)?; - - // Rotate current/previous epoch participation - altair::process_participation_flag_updates(state)?; - - altair::process_sync_committee_updates(state, spec)?; - - // Withdrawals - #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] - process_full_withdrawals(state, spec)?; - - #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] - process_partial_withdrawals(state, spec)?; - - // Rotate the epoch caches to suit the epoch transition. - state.advance_caches(spec)?; - - // FIXME: do we need a Capella variant for this? - Ok(EpochProcessingSummary::Altair { - participation_cache, - sync_committee, - }) -} diff --git a/consensus/state_processing/src/per_epoch_processing/capella/full_withdrawals.rs b/consensus/state_processing/src/per_epoch_processing/capella/full_withdrawals.rs deleted file mode 100644 index 619301f16..000000000 --- a/consensus/state_processing/src/per_epoch_processing/capella/full_withdrawals.rs +++ /dev/null @@ -1,25 +0,0 @@ -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] -use crate::common::withdraw_balance; -use crate::EpochProcessingError; -use types::{beacon_state::BeaconState, eth_spec::EthSpec, ChainSpec}; - -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] -pub fn process_full_withdrawals( - state: &mut BeaconState, - spec: &ChainSpec, -) -> Result<(), EpochProcessingError> { - let current_epoch = state.current_epoch(); - // FIXME: is this the most efficient way to do this? - for validator_index in 0..state.validators().len() { - // TODO: is this the correct way to handle validators not existing? 
- if let (Some(validator), Some(balance)) = ( - state.validators().get(validator_index), - state.balances().get(validator_index), - ) { - if validator.is_fully_withdrawable_at(*balance, current_epoch, spec) { - withdraw_balance(state, validator_index, *balance)?; - } - } - } - Ok(()) -} diff --git a/consensus/state_processing/src/per_epoch_processing/capella/partial_withdrawals.rs b/consensus/state_processing/src/per_epoch_processing/capella/partial_withdrawals.rs deleted file mode 100644 index d1ae4fee5..000000000 --- a/consensus/state_processing/src/per_epoch_processing/capella/partial_withdrawals.rs +++ /dev/null @@ -1,41 +0,0 @@ -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] -use crate::common::withdraw_balance; -use crate::EpochProcessingError; -use safe_arith::SafeArith; -use types::{beacon_state::BeaconState, eth_spec::EthSpec, ChainSpec}; - -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] -pub fn process_partial_withdrawals( - state: &mut BeaconState, - spec: &ChainSpec, -) -> Result<(), EpochProcessingError> { - let mut partial_withdrawals_count = 0; - let mut validator_index = *state.next_partial_withdrawal_validator_index()? as usize; - - let n_validators = state.validators().len(); - // FIXME: is this the most efficient way to do this? - for _ in 0..n_validators { - // TODO: is this the correct way to handle validators not existing? - if let (Some(validator), Some(balance)) = ( - state.validators().get(validator_index), - state.balances().get(validator_index), - ) { - if validator.is_partially_withdrawable_validator(*balance, spec) { - withdraw_balance( - state, - validator_index, - *balance - spec.max_effective_balance, - )?; - partial_withdrawals_count.safe_add_assign(1)?; - - validator_index = validator_index.safe_add(1)? % n_validators; - if partial_withdrawals_count == T::max_partial_withdrawals_per_epoch() { - break; - } - } - } - } - *state.next_partial_withdrawal_validator_index_mut()? = validator_index as u64; - - Ok(()) -} diff --git a/consensus/state_processing/src/per_slot_processing.rs b/consensus/state_processing/src/per_slot_processing.rs index 9018db65b..8d2600bb4 100644 --- a/consensus/state_processing/src/per_slot_processing.rs +++ b/consensus/state_processing/src/per_slot_processing.rs @@ -1,4 +1,6 @@ -use crate::upgrade::{upgrade_to_altair, upgrade_to_bellatrix}; +use crate::upgrade::{ + upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_eip4844, +}; use crate::{per_epoch_processing::EpochProcessingSummary, *}; use safe_arith::{ArithError, SafeArith}; use types::*; @@ -55,6 +57,14 @@ pub fn per_slot_processing( if spec.bellatrix_fork_epoch == Some(state.current_epoch()) { upgrade_to_bellatrix(state, spec)?; } + // Capella. 
+ if spec.capella_fork_epoch == Some(state.current_epoch()) { + upgrade_to_capella(state, spec)?; + } + // Eip4844 + if spec.eip4844_fork_epoch == Some(state.current_epoch()) { + upgrade_to_eip4844(state, spec)?; + } } Ok(summary) diff --git a/consensus/state_processing/src/upgrade/capella.rs b/consensus/state_processing/src/upgrade/capella.rs index e64c83980..9a8836988 100644 --- a/consensus/state_processing/src/upgrade/capella.rs +++ b/consensus/state_processing/src/upgrade/capella.rs @@ -1,4 +1,3 @@ -use ssz_types::VariableList; use std::mem; use types::{BeaconState, BeaconStateCapella, BeaconStateError as Error, ChainSpec, EthSpec, Fork}; @@ -58,11 +57,9 @@ pub fn upgrade_to_capella( latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_capella(), // Withdrawals #[cfg(feature = "withdrawals")] - withdrawal_queue: VariableList::empty(), - #[cfg(feature = "withdrawals")] next_withdrawal_index: 0, #[cfg(feature = "withdrawals")] - next_partial_withdrawal_validator_index: 0, + next_withdrawal_validator_index: 0, // Caches total_active_balance: pre.total_active_balance, committee_caches: mem::take(&mut pre.committee_caches), diff --git a/consensus/state_processing/src/upgrade/eip4844.rs b/consensus/state_processing/src/upgrade/eip4844.rs index 78fb16033..478024f17 100644 --- a/consensus/state_processing/src/upgrade/eip4844.rs +++ b/consensus/state_processing/src/upgrade/eip4844.rs @@ -65,11 +65,9 @@ pub fn upgrade_to_eip4844( latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_eip4844(), // Withdrawals #[cfg(feature = "withdrawals")] - withdrawal_queue: mem::take(&mut pre.withdrawal_queue), - #[cfg(feature = "withdrawals")] next_withdrawal_index: pre.next_withdrawal_index, #[cfg(feature = "withdrawals")] - next_partial_withdrawal_validator_index: pre.next_partial_withdrawal_validator_index, + next_withdrawal_validator_index: 0, // Caches total_active_balance: pre.total_active_balance, committee_caches: mem::take(&mut pre.committee_caches), diff --git a/consensus/types/presets/mainnet/capella.yaml b/consensus/types/presets/mainnet/capella.yaml new file mode 100644 index 000000000..0c087255b --- /dev/null +++ b/consensus/types/presets/mainnet/capella.yaml @@ -0,0 +1,12 @@ +# Mainnet preset - Capella + +# Misc +# Max operations per block +# --------------------------------------------------------------- +# 2**4 (= 16) +MAX_BLS_TO_EXECUTION_CHANGES: 16 + +# Execution +# --------------------------------------------------------------- +# 2**4 (= 16) withdrawals +MAX_WITHDRAWALS_PER_PAYLOAD: 16 \ No newline at end of file diff --git a/consensus/types/presets/minimal/capella.yaml b/consensus/types/presets/minimal/capella.yaml new file mode 100644 index 000000000..eacd6c7cb --- /dev/null +++ b/consensus/types/presets/minimal/capella.yaml @@ -0,0 +1,12 @@ +# Minimal preset - Capella + +# Max operations per block +# --------------------------------------------------------------- +# 2**4 (= 16) +MAX_BLS_TO_EXECUTION_CHANGES: 16 + + +# Execution +# --------------------------------------------------------------- +# [customized] 2**2 (= 4) +MAX_WITHDRAWALS_PER_PAYLOAD: 4 \ No newline at end of file diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index d58e890c6..124cb08bc 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -78,17 +78,20 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload> SignedRoot { } +/// Empty block trait for each block variant to implement. 
+pub trait EmptyBlock { + /// Returns an empty block to be used during genesis. + fn empty(spec: &ChainSpec) -> Self; +} + impl> BeaconBlock { - // FIXME: deal with capella / eip4844 forks here as well /// Returns an empty block to be used during genesis. pub fn empty(spec: &ChainSpec) -> Self { - if spec.bellatrix_fork_epoch == Some(T::genesis_epoch()) { - Self::Merge(BeaconBlockMerge::empty(spec)) - } else if spec.altair_fork_epoch == Some(T::genesis_epoch()) { - Self::Altair(BeaconBlockAltair::empty(spec)) - } else { - Self::Base(BeaconBlockBase::empty(spec)) - } + map_fork_name!( + spec.fork_name_at_epoch(T::genesis_epoch()), + Self, + EmptyBlock::empty(spec) + ) } /// Custom SSZ decoder that takes a `ChainSpec` as context. @@ -117,13 +120,12 @@ impl> BeaconBlock { /// Usually it's better to prefer `from_ssz_bytes` which will decode the correct variant based /// on the fork slot. pub fn any_from_ssz_bytes(bytes: &[u8]) -> Result { - BeaconBlockMerge::from_ssz_bytes(bytes) - .map(BeaconBlock::Merge) - .or_else(|_| { - BeaconBlockAltair::from_ssz_bytes(bytes) - .map(BeaconBlock::Altair) - .or_else(|_| BeaconBlockBase::from_ssz_bytes(bytes).map(BeaconBlock::Base)) - }) + BeaconBlockEip4844::from_ssz_bytes(bytes) + .map(BeaconBlock::Eip4844) + .or_else(|_| BeaconBlockCapella::from_ssz_bytes(bytes).map(BeaconBlock::Capella)) + .or_else(|_| BeaconBlockMerge::from_ssz_bytes(bytes).map(BeaconBlock::Merge)) + .or_else(|_| BeaconBlockAltair::from_ssz_bytes(bytes).map(BeaconBlock::Altair)) + .or_else(|_| BeaconBlockBase::from_ssz_bytes(bytes).map(BeaconBlock::Base)) } /// Convenience accessor for the `body` as a `BeaconBlockBodyRef`. @@ -266,9 +268,8 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockRefMut<'a, T, P } } -impl> BeaconBlockBase { - /// Returns an empty block to be used during genesis. - pub fn empty(spec: &ChainSpec) -> Self { +impl> EmptyBlock for BeaconBlockBase { + fn empty(spec: &ChainSpec) -> Self { BeaconBlockBase { slot: spec.genesis_slot, proposer_index: 0, @@ -291,7 +292,9 @@ impl> BeaconBlockBase { }, } } +} +impl> BeaconBlockBase { /// Return a block where the block has maximum size. pub fn full(spec: &ChainSpec) -> Self { let header = BeaconBlockHeader { @@ -387,9 +390,9 @@ impl> BeaconBlockBase { } } -impl> BeaconBlockAltair { +impl> EmptyBlock for BeaconBlockAltair { /// Returns an empty Altair block to be used during genesis. - pub fn empty(spec: &ChainSpec) -> Self { + fn empty(spec: &ChainSpec) -> Self { BeaconBlockAltair { slot: spec.genesis_slot, proposer_index: 0, @@ -413,7 +416,9 @@ impl> BeaconBlockAltair }, } } +} +impl> BeaconBlockAltair { /// Return an Altair block where the block has maximum size. pub fn full(spec: &ChainSpec) -> Self { let base_block: BeaconBlockBase<_, Payload> = BeaconBlockBase::full(spec); @@ -446,9 +451,9 @@ impl> BeaconBlockAltair } } -impl> BeaconBlockMerge { +impl> EmptyBlock for BeaconBlockMerge { /// Returns an empty Merge block to be used during genesis. - pub fn empty(spec: &ChainSpec) -> Self { + fn empty(spec: &ChainSpec) -> Self { BeaconBlockMerge { slot: spec.genesis_slot, proposer_index: 0, @@ -474,6 +479,67 @@ impl> BeaconBlockMerge { } } +impl> EmptyBlock for BeaconBlockCapella { + /// Returns an empty Capella block to be used during genesis. 
+ fn empty(spec: &ChainSpec) -> Self { + BeaconBlockCapella { + slot: spec.genesis_slot, + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body: BeaconBlockBodyCapella { + randao_reveal: Signature::empty(), + eth1_data: Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + deposit_count: 0, + }, + graffiti: Graffiti::default(), + proposer_slashings: VariableList::empty(), + attester_slashings: VariableList::empty(), + attestations: VariableList::empty(), + deposits: VariableList::empty(), + voluntary_exits: VariableList::empty(), + sync_aggregate: SyncAggregate::empty(), + execution_payload: Payload::Capella::default(), + #[cfg(feature = "withdrawals")] + bls_to_execution_changes: VariableList::empty(), + }, + } + } +} + +impl> EmptyBlock for BeaconBlockEip4844 { + /// Returns an empty Eip4844 block to be used during genesis. + fn empty(spec: &ChainSpec) -> Self { + BeaconBlockEip4844 { + slot: spec.genesis_slot, + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body: BeaconBlockBodyEip4844 { + randao_reveal: Signature::empty(), + eth1_data: Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + deposit_count: 0, + }, + graffiti: Graffiti::default(), + proposer_slashings: VariableList::empty(), + attester_slashings: VariableList::empty(), + attestations: VariableList::empty(), + deposits: VariableList::empty(), + voluntary_exits: VariableList::empty(), + sync_aggregate: SyncAggregate::empty(), + execution_payload: Payload::Eip4844::default(), + #[cfg(feature = "withdrawals")] + bls_to_execution_changes: VariableList::empty(), + blob_kzg_commitments: VariableList::empty(), + }, + } + } +} + // We can convert pre-Bellatrix blocks without payloads into blocks "with" payloads. 
impl From>> for BeaconBlockBase> diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 36e0ce770..1dd938ac4 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -62,6 +62,10 @@ pub struct BeaconBlockBody = FullPay #[superstruct(only(Eip4844), partial_getter(rename = "execution_payload_eip4844"))] #[serde(flatten)] pub execution_payload: Payload::Eip4844, + #[cfg(feature = "withdrawals")] + #[superstruct(only(Capella, Eip4844))] + pub bls_to_execution_changes: + VariableList, #[superstruct(only(Eip4844))] pub blob_kzg_commitments: VariableList, #[superstruct(only(Base, Altair))] @@ -72,7 +76,7 @@ pub struct BeaconBlockBody = FullPay } impl> BeaconBlockBody { - pub fn execution_payload<'a>(&'a self) -> Result, Error> { + pub fn execution_payload(&self) -> Result, Error> { self.to_ref().execution_payload() } } @@ -297,6 +301,8 @@ impl From>> voluntary_exits, sync_aggregate, execution_payload: FullPayloadCapella { execution_payload }, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes, } = body; ( @@ -313,6 +319,8 @@ impl From>> execution_payload: BlindedPayloadCapella { execution_payload_header: From::from(execution_payload.clone()), }, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes, }, Some(execution_payload), ) @@ -337,6 +345,8 @@ impl From>> voluntary_exits, sync_aggregate, execution_payload: FullPayloadEip4844 { execution_payload }, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes, blob_kzg_commitments, } = body; @@ -354,6 +364,8 @@ impl From>> execution_payload: BlindedPayloadEip4844 { execution_payload_header: From::from(execution_payload.clone()), }, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes, blob_kzg_commitments, }, Some(execution_payload), @@ -421,6 +433,8 @@ impl BeaconBlockBodyCapella> { voluntary_exits, sync_aggregate, execution_payload: FullPayloadCapella { execution_payload }, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes, } = self; BeaconBlockBodyCapella { @@ -436,6 +450,8 @@ impl BeaconBlockBodyCapella> { execution_payload: BlindedPayloadCapella { execution_payload_header: From::from(execution_payload.clone()), }, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes: bls_to_execution_changes.clone(), } } } @@ -453,6 +469,8 @@ impl BeaconBlockBodyEip4844> { voluntary_exits, sync_aggregate, execution_payload: FullPayloadEip4844 { execution_payload }, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes, blob_kzg_commitments, } = self; @@ -469,6 +487,8 @@ impl BeaconBlockBodyEip4844> { execution_payload: BlindedPayloadEip4844 { execution_payload_header: From::from(execution_payload.clone()), }, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes: bls_to_execution_changes.clone(), blob_kzg_commitments: blob_kzg_commitments.clone(), } } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index ec5aa9c4f..000e6f671 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -296,14 +296,11 @@ where // Withdrawals #[cfg(feature = "withdrawals")] - #[superstruct(only(Capella, Eip4844))] - pub withdrawal_queue: VariableList, - #[cfg(feature = "withdrawals")] - #[superstruct(only(Capella, Eip4844))] + #[superstruct(only(Capella, Eip4844), partial_getter(copy))] pub next_withdrawal_index: u64, #[cfg(feature = "withdrawals")] - #[superstruct(only(Capella, Eip4844))] - pub next_partial_withdrawal_validator_index: u64, + 
#[superstruct(only(Capella, Eip4844), partial_getter(copy))]
+    pub next_withdrawal_validator_index: u64,
 
     // Caching (not in the spec)
     #[serde(skip_serializing, skip_deserializing)]
@@ -1787,6 +1784,8 @@ impl<T: EthSpec> CompareFields for BeaconState<T> {
             (BeaconState::Base(x), BeaconState::Base(y)) => x.compare_fields(y),
             (BeaconState::Altair(x), BeaconState::Altair(y)) => x.compare_fields(y),
             (BeaconState::Merge(x), BeaconState::Merge(y)) => x.compare_fields(y),
+            (BeaconState::Capella(x), BeaconState::Capella(y)) => x.compare_fields(y),
+            (BeaconState::Eip4844(x), BeaconState::Eip4844(y)) => x.compare_fields(y),
             _ => panic!("compare_fields: mismatched state variants",),
         }
     }
diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs
index e67d4096d..e50265e66 100644
--- a/consensus/types/src/beacon_state/tree_hash_cache.rs
+++ b/consensus/types/src/beacon_state/tree_hash_cache.rs
@@ -363,6 +363,16 @@ impl<T: EthSpec> BeaconTreeHashCacheInner<T> {
             hasher.write(payload_header.tree_hash_root().as_bytes())?;
         }
 
+        // Withdrawal indices (Capella and later).
+        #[cfg(feature = "withdrawals")]
+        if let Ok(next_withdrawal_index) = state.next_withdrawal_index() {
+            hasher.write(next_withdrawal_index.tree_hash_root().as_bytes())?;
+        }
+        #[cfg(feature = "withdrawals")]
+        if let Ok(next_withdrawal_validator_index) = state.next_withdrawal_validator_index() {
+            hasher.write(next_withdrawal_validator_index.tree_hash_root().as_bytes())?;
+        }
+
         let root = hasher.finish()?;
 
         self.previous_state = Some((root, state.slot()));
diff --git a/consensus/types/src/blobs_sidecar.rs b/consensus/types/src/blobs_sidecar.rs
index 4e9174d94..d4e779606 100644
--- a/consensus/types/src/blobs_sidecar.rs
+++ b/consensus/types/src/blobs_sidecar.rs
@@ -4,7 +4,6 @@ use serde_derive::{Deserialize, Serialize};
 use ssz::Encode;
 use ssz_derive::{Decode, Encode};
 use ssz_types::VariableList;
-use tree_hash::TreeHash;
 use tree_hash_derive::TreeHash;
 
 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))]
@@ -23,6 +22,7 @@ impl<T: EthSpec> BlobsSidecar<T> {
     pub fn empty() -> Self {
         Self::default()
     }
+    #[allow(clippy::integer_arithmetic)]
     pub fn max_size() -> usize {
         // Fixed part
         Self::empty().as_ssz_bytes().len()
diff --git a/consensus/types/src/bls_to_execution_change.rs b/consensus/types/src/bls_to_execution_change.rs
new file mode 100644
index 000000000..ca8e0ecf7
--- /dev/null
+++ b/consensus/types/src/bls_to_execution_change.rs
@@ -0,0 +1,30 @@
+use crate::test_utils::TestRandom;
+use crate::*;
+use bls::PublicKeyBytes;
+use serde_derive::{Deserialize, Serialize};
+use ssz_derive::{Decode, Encode};
+use test_random_derive::TestRandom;
+use tree_hash_derive::TreeHash;
+
+/// A request to change a validator's withdrawal credentials from a BLS public key to an execution address.
+///
+/// Introduced in the Capella fork.
+#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))]
+#[derive(
+    Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom,
+)]
+pub struct BlsToExecutionChange {
+    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    pub validator_index: u64,
+    pub from_bls_pubkey: PublicKeyBytes,
+    pub to_execution_address: Address,
+}
+
+impl SignedRoot for BlsToExecutionChange {}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    ssz_and_tree_hash_tests!(BlsToExecutionChange);
+}
diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs
index 4cf102bd7..661484fde 100644
--- a/consensus/types/src/eth_spec.rs
+++ b/consensus/types/src/eth_spec.rs
@@ -98,8 +98,6 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq +
     /*
      * New in Capella
      */
-    type MaxPartialWithdrawalsPerEpoch: Unsigned + Clone + Sync + Send + Debug + PartialEq;
-    type WithdrawalQueueLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq;
     type MaxBlsToExecutionChanges: Unsigned + Clone + Sync + Send + Debug + PartialEq;
     type MaxWithdrawalsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq;
     /*
@@ -235,16 +233,6 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq +
         Self::BytesPerLogsBloom::to_usize()
     }
 
-    /// Returns the `MAX_PARTIAL_WITHDRAWALS_PER_EPOCH` constant for this specification.
-    fn max_partial_withdrawals_per_epoch() -> usize {
-        Self::MaxPartialWithdrawalsPerEpoch::to_usize()
-    }
-
-    /// Returns the `WITHDRAWAL_QUEUE_LIMIT` constant for this specification.
-    fn withdrawal_queue_limit() -> usize {
-        Self::WithdrawalQueueLimit::to_usize()
-    }
-
     /// Returns the `MAX_BLS_TO_EXECUTION_CHANGES` constant for this specification.
     fn max_bls_to_execution_changes() -> usize {
         Self::MaxBlsToExecutionChanges::to_usize()
@@ -309,8 +297,6 @@ impl EthSpec for MainnetEthSpec {
     type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count
     type MaxPendingAttestations = U4096; // 128 max attestations * 32 slots per epoch
     type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch
-    type MaxPartialWithdrawalsPerEpoch = U256;
-    type WithdrawalQueueLimit = U1099511627776;
     type MaxBlsToExecutionChanges = U16;
     type MaxWithdrawalsPerPayload = U16;
 
@@ -338,6 +324,7 @@ impl EthSpec for MinimalEthSpec {
     type SyncSubcommitteeSize = U8; // 32 committee size / 4 sync committee subnet count
     type MaxPendingAttestations = U1024; // 128 max attestations * 8 slots per epoch
     type SlotsPerEth1VotingPeriod = U32; // 4 epochs * 8 slots per epoch
+    type MaxWithdrawalsPerPayload = U4;
 
     params_from_eth_spec!(MainnetEthSpec {
         JustificationBitsLength,
@@ -358,10 +345,7 @@ impl EthSpec for MinimalEthSpec {
         GasLimitDenominator,
         MinGasLimit,
         MaxExtraDataBytes,
-        MaxPartialWithdrawalsPerEpoch,
-        WithdrawalQueueLimit,
         MaxBlsToExecutionChanges,
-        MaxWithdrawalsPerPayload,
         MaxBlobsPerBlock,
         FieldElementsPerBlob
     });
@@ -408,8 +392,6 @@ impl EthSpec for GnosisEthSpec {
     type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count
     type MaxPendingAttestations = U2048; // 128 max attestations * 16 slots per epoch
     type SlotsPerEth1VotingPeriod = U1024; // 64 epochs * 16 slots per epoch
-    type MaxPartialWithdrawalsPerEpoch = U256;
-    type WithdrawalQueueLimit = U1099511627776;
     type MaxBlsToExecutionChanges = U16;
     type MaxWithdrawalsPerPayload = U16;
     type MaxBlobsPerBlock = U16; // 2**4 = 16
diff --git a/consensus/types/src/execution_payload.rs
b/consensus/types/src/execution_payload.rs index 022f378e3..6036973d5 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -1,9 +1,8 @@ use crate::{test_utils::TestRandom, *}; use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; -use ssz::Encode; +use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use std::slice::Iter; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -13,6 +12,8 @@ pub type Transactions = VariableList< ::MaxTransactionsPerPayload, >; +pub type Withdrawals = VariableList::MaxWithdrawalsPerPayload>; + #[superstruct( variants(Merge, Capella, Eip4844), variant_attributes( @@ -82,10 +83,21 @@ pub struct ExecutionPayload { pub transactions: Transactions, #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] - pub withdrawals: VariableList, + pub withdrawals: Withdrawals, } impl ExecutionPayload { + pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { + match fork_name { + ForkName::Base | ForkName::Altair => Err(ssz::DecodeError::BytesInvalid(format!( + "unsupported fork for ExecutionPayload: {fork_name}", + ))), + ForkName::Merge => ExecutionPayloadMerge::from_ssz_bytes(bytes).map(Self::Merge), + ForkName::Capella => ExecutionPayloadCapella::from_ssz_bytes(bytes).map(Self::Capella), + ForkName::Eip4844 => ExecutionPayloadEip4844::from_ssz_bytes(bytes).map(Self::Eip4844), + } + } + #[allow(clippy::integer_arithmetic)] /// Returns the maximum size of an execution payload. pub fn max_execution_payload_merge_size() -> usize { diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 7546ca2e5..6f6b5aa95 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -1,6 +1,7 @@ use crate::{test_utils::TestRandom, *}; use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; +use ssz::Decode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; @@ -84,31 +85,34 @@ impl ExecutionPayloadHeader { pub fn transactions(&self) -> Option<&Transactions> { None } -} -impl<'a, T: EthSpec> ExecutionPayloadHeaderRef<'a, T> { - // FIXME: maybe this could be a derived trait.. 
- pub fn is_default(self) -> bool { - match self { - ExecutionPayloadHeaderRef::Merge(header) => { - *header == ExecutionPayloadHeaderMerge::default() + pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { + match fork_name { + ForkName::Base | ForkName::Altair => Err(ssz::DecodeError::BytesInvalid(format!( + "unsupported fork for ExecutionPayloadHeader: {fork_name}", + ))), + ForkName::Merge => ExecutionPayloadHeaderMerge::from_ssz_bytes(bytes).map(Self::Merge), + ForkName::Capella => { + ExecutionPayloadHeaderCapella::from_ssz_bytes(bytes).map(Self::Capella) } - ExecutionPayloadHeaderRef::Capella(header) => { - *header == ExecutionPayloadHeaderCapella::default() - } - ExecutionPayloadHeaderRef::Eip4844(header) => { - *header == ExecutionPayloadHeaderEip4844::default() + ForkName::Eip4844 => { + ExecutionPayloadHeaderEip4844::from_ssz_bytes(bytes).map(Self::Eip4844) } } } } +impl<'a, T: EthSpec> ExecutionPayloadHeaderRef<'a, T> { + pub fn is_default(self) -> bool { + map_execution_payload_header_ref!(&'a _, self, |inner, cons| { + let _ = cons(inner); + *inner == Default::default() + }) + } +} + impl ExecutionPayloadHeaderMerge { pub fn upgrade_to_capella(&self) -> ExecutionPayloadHeaderCapella { - #[cfg(feature = "withdrawals")] - // TODO: if this is correct we should calculate and hardcode this.. - let empty_withdrawals_root = - VariableList::::empty().tree_hash_root(); ExecutionPayloadHeaderCapella { parent_hash: self.parent_hash, fee_recipient: self.fee_recipient, @@ -125,8 +129,7 @@ impl ExecutionPayloadHeaderMerge { block_hash: self.block_hash, transactions_root: self.transactions_root, #[cfg(feature = "withdrawals")] - // FIXME: the spec doesn't seem to define what to do here.. - withdrawals_root: empty_withdrawals_root, + withdrawals_root: Hash256::zero(), } } } diff --git a/consensus/types/src/kzg_commitment.rs b/consensus/types/src/kzg_commitment.rs index eaa429a13..9844df028 100644 --- a/consensus/types/src/kzg_commitment.rs +++ b/consensus/types/src/kzg_commitment.rs @@ -14,7 +14,7 @@ pub struct KzgCommitment(#[serde(with = "BigArray")] pub [u8; 48]); impl Display for KzgCommitment { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "{}", eth2_serde_utils::hex::encode(&self.0)) + write!(f, "{}", eth2_serde_utils::hex::encode(self.0)) } } diff --git a/consensus/types/src/kzg_proof.rs b/consensus/types/src/kzg_proof.rs index 7cd6a8e58..1c8e49a44 100644 --- a/consensus/types/src/kzg_proof.rs +++ b/consensus/types/src/kzg_proof.rs @@ -1,7 +1,6 @@ use crate::test_utils::{RngCore, TestRandom}; use serde::{Deserialize, Serialize}; use serde_big_array::BigArray; -use ssz::{Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use std::fmt; use tree_hash::{PackedEncoding, TreeHash}; @@ -15,7 +14,7 @@ pub struct KzgProof(#[serde(with = "BigArray")] pub [u8; KZG_PROOF_BYTES_LEN]); impl fmt::Display for KzgProof { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", eth2_serde_utils::hex::encode(&self.0)) + write!(f, "{}", eth2_serde_utils::hex::encode(self.0)) } } diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index e970b17c9..077ad7ecc 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -27,6 +27,7 @@ pub mod beacon_block_body; pub mod beacon_block_header; pub mod beacon_committee; pub mod beacon_state; +pub mod bls_to_execution_change; pub mod builder_bid; pub mod chain_spec; pub mod checkpoint; @@ -61,6 +62,7 @@ pub mod shuffling_id; pub mod signed_aggregate_and_proof; pub 
mod signed_beacon_block; pub mod signed_beacon_block_header; +pub mod signed_bls_to_execution_change; pub mod signed_contribution_and_proof; pub mod signed_voluntary_exit; pub mod signing_data; @@ -108,7 +110,7 @@ pub use crate::attestation_duty::AttestationDuty; pub use crate::attester_slashing::AttesterSlashing; pub use crate::beacon_block::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockEip4844, - BeaconBlockMerge, BeaconBlockRef, BeaconBlockRefMut, BlindedBeaconBlock, + BeaconBlockMerge, BeaconBlockRef, BeaconBlockRefMut, BlindedBeaconBlock, EmptyBlock, }; pub use crate::beacon_block_body::{ BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyCapella, @@ -118,6 +120,7 @@ pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; pub use crate::beacon_state::{BeaconTreeHashCache, Error as BeaconStateError, *}; pub use crate::blobs_sidecar::BlobsSidecar; +pub use crate::bls_to_execution_change::BlsToExecutionChange; pub use crate::chain_spec::{ChainSpec, Config, Domain}; pub use crate::checkpoint::Checkpoint; pub use crate::config_and_preset::{ @@ -134,7 +137,7 @@ pub use crate::eth_spec::EthSpecId; pub use crate::execution_block_hash::ExecutionBlockHash; pub use crate::execution_payload::{ ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge, - ExecutionPayloadRef, Transaction, Transactions, + ExecutionPayloadRef, Transaction, Transactions, Withdrawals, }; pub use crate::execution_payload_header::{ ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderEip4844, @@ -171,6 +174,7 @@ pub use crate::signed_beacon_block::{ SignedBlindedBeaconBlock, }; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; +pub use crate::signed_bls_to_execution_change::SignedBlsToExecutionChange; pub use crate::signed_block_and_blobs::SignedBeaconBlockAndBlobsSidecar; pub use crate::signed_contribution_and_proof::SignedContributionAndProof; pub use crate::signed_voluntary_exit::SignedVoluntaryExit; diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 5b457daee..3081dd1cb 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -36,6 +36,9 @@ pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + fn fee_recipient(&self) -> Address; fn gas_limit(&self) -> u64; fn transactions(&self) -> Option<&Transactions>; + /// fork-specific fields + #[cfg(feature = "withdrawals")] + fn withdrawals_root(&self) -> Result; /// Is this a default payload? 
(pre-merge) fn is_default(&self) -> bool; @@ -218,13 +221,26 @@ impl ExecPayload for FullPayload { }) } - fn transactions<'a>(&'a self) -> Option<&Transactions> { + fn transactions<'a>(&'a self) -> Option<&'a Transactions> { map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { cons(payload); Some(&payload.execution_payload.transactions) }) } + #[cfg(feature = "withdrawals")] + fn withdrawals_root(&self) -> Result { + match self { + FullPayload::Merge(_) => Err(Error::IncorrectStateVariant), + FullPayload::Capella(ref inner) => { + Ok(inner.execution_payload.withdrawals.tree_hash_root()) + } + FullPayload::Eip4844(ref inner) => { + Ok(inner.execution_payload.withdrawals.tree_hash_root()) + } + } + } + fn is_default<'a>(&'a self) -> bool { map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { cons(payload); @@ -249,7 +265,7 @@ impl<'b, T: EthSpec> ExecPayload for FullPayloadRef<'b, T> { fn to_execution_payload_header<'a>(&'a self) -> ExecutionPayloadHeader { map_full_payload_ref!(&'a _, self, move |payload, cons| { cons(payload); - ExecutionPayloadHeader::from(payload.to_execution_payload_header()) + payload.to_execution_payload_header() }) } @@ -302,13 +318,26 @@ impl<'b, T: EthSpec> ExecPayload for FullPayloadRef<'b, T> { }) } - fn transactions<'a>(&'a self) -> Option<&Transactions> { + fn transactions<'a>(&'a self) -> Option<&'a Transactions> { map_full_payload_ref!(&'a _, self, move |payload, cons| { cons(payload); Some(&payload.execution_payload.transactions) }) } + #[cfg(feature = "withdrawals")] + fn withdrawals_root(&self) -> Result { + match self { + FullPayloadRef::Merge(_) => Err(Error::IncorrectStateVariant), + FullPayloadRef::Capella(inner) => { + Ok(inner.execution_payload.withdrawals.tree_hash_root()) + } + FullPayloadRef::Eip4844(inner) => { + Ok(inner.execution_payload.withdrawals.tree_hash_root()) + } + } + } + // TODO: can this function be optimized? fn is_default<'a>(&'a self) -> bool { map_full_payload_ref!(&'a _, self, move |payload, cons| { @@ -459,10 +488,23 @@ impl ExecPayload for BlindedPayload { }) } - fn transactions<'a>(&'a self) -> Option<&Transactions> { + fn transactions(&self) -> Option<&Transactions> { None } + #[cfg(feature = "withdrawals")] + fn withdrawals_root(&self) -> Result { + match self { + BlindedPayload::Merge(_) => Err(Error::IncorrectStateVariant), + BlindedPayload::Capella(ref inner) => { + Ok(inner.execution_payload_header.withdrawals_root) + } + BlindedPayload::Eip4844(ref inner) => { + Ok(inner.execution_payload_header.withdrawals_root) + } + } + } + fn is_default<'a>(&'a self) -> bool { map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { cons(payload); @@ -532,10 +574,23 @@ impl<'b, T: EthSpec> ExecPayload for BlindedPayloadRef<'b, T> { }) } - fn transactions<'a>(&'a self) -> Option<&Transactions> { + fn transactions(&self) -> Option<&Transactions> { None } + #[cfg(feature = "withdrawals")] + fn withdrawals_root(&self) -> Result { + match self { + BlindedPayloadRef::Merge(_) => Err(Error::IncorrectStateVariant), + BlindedPayloadRef::Capella(inner) => { + Ok(inner.execution_payload_header.withdrawals_root) + } + BlindedPayloadRef::Eip4844(inner) => { + Ok(inner.execution_payload_header.withdrawals_root) + } + } + } + // TODO: can this function be optimized? fn is_default<'a>(&'a self) -> bool { map_blinded_payload_ref!(&'a _, self, move |payload, cons| { @@ -546,7 +601,7 @@ impl<'b, T: EthSpec> ExecPayload for BlindedPayloadRef<'b, T> { } macro_rules! 
impl_exec_payload_common { - ($wrapper_type:ident, $wrapped_type_full:ident, $wrapped_header_type:ident, $wrapped_field:ident, $fork_variant:ident, $block_type_variant:ident, $f:block) => { + ($wrapper_type:ident, $wrapped_type_full:ident, $wrapped_header_type:ident, $wrapped_field:ident, $fork_variant:ident, $block_type_variant:ident, $f:block, $g:block) => { impl ExecPayload for $wrapper_type { fn block_type() -> BlockType { BlockType::$block_type_variant @@ -594,6 +649,12 @@ macro_rules! impl_exec_payload_common { let f = $f; f(self) } + + #[cfg(feature = "withdrawals")] + fn withdrawals_root(&self) -> Result { + let g = $g; + g(self) + } } impl From<$wrapped_type_full> for $wrapper_type { @@ -615,7 +676,15 @@ macro_rules! impl_exec_payload_for_fork { execution_payload_header, $fork_variant, Blinded, - { |_| { None } } + { |_| { None } }, + { + let c: for<'a> fn(&'a $wrapper_type_header) -> Result = + |payload: &$wrapper_type_header| { + let wrapper_ref_type = BlindedPayloadRef::$fork_variant(&payload); + wrapper_ref_type.withdrawals_root() + }; + c + } ); impl TryInto<$wrapper_type_header> for BlindedPayload { @@ -680,6 +749,14 @@ macro_rules! impl_exec_payload_for_fork { let c: for<'a> fn(&'a $wrapper_type_full) -> Option<&'a Transactions> = |payload: &$wrapper_type_full| Some(&payload.execution_payload.transactions); c + }, + { + let c: for<'a> fn(&'a $wrapper_type_full) -> Result = + |payload: &$wrapper_type_full| { + let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload); + wrapper_ref_type.withdrawals_root() + }; + c } ); diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 70cc4c112..2a8398f83 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -341,6 +341,8 @@ impl SignedBeaconBlockCapella> { voluntary_exits, sync_aggregate, execution_payload: BlindedPayloadCapella { .. }, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes, }, }, signature, @@ -362,6 +364,8 @@ impl SignedBeaconBlockCapella> { voluntary_exits, sync_aggregate, execution_payload: FullPayloadCapella { execution_payload }, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes, }, }, signature, @@ -393,6 +397,8 @@ impl SignedBeaconBlockEip4844> { voluntary_exits, sync_aggregate, execution_payload: BlindedPayloadEip4844 { .. }, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes, blob_kzg_commitments, }, }, @@ -415,6 +421,8 @@ impl SignedBeaconBlockEip4844> { voluntary_exits, sync_aggregate, execution_payload: FullPayloadEip4844 { execution_payload }, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes, blob_kzg_commitments, }, }, diff --git a/consensus/types/src/signed_bls_to_execution_change.rs b/consensus/types/src/signed_bls_to_execution_change.rs new file mode 100644 index 000000000..fc636bb82 --- /dev/null +++ b/consensus/types/src/signed_bls_to_execution_change.rs @@ -0,0 +1,26 @@ +use crate::test_utils::TestRandom; +use crate::*; +use bls::Signature; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +/// A deposit to potentially become a beacon chain validator. 
+/// +/// Spec v0.12.1 +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive( + Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, +)] +pub struct SignedBlsToExecutionChange { + pub message: BlsToExecutionChange, + pub signature: Signature, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(SignedBlsToExecutionChange); +} diff --git a/consensus/types/src/tree_hash_impls.rs b/consensus/types/src/tree_hash_impls.rs index ec23927d3..34043c0e8 100644 --- a/consensus/types/src/tree_hash_impls.rs +++ b/consensus/types/src/tree_hash_impls.rs @@ -17,7 +17,7 @@ impl CachedTreeHash for Validator { /// Efficiently tree hash a `Validator`, assuming it was updated by a valid state transition. /// - /// Specifically, we assume that the `pubkey` and `withdrawal_credentials` fields are constant. + /// Specifically, we assume that the `pubkey` field is constant. fn recalculate_tree_hash_root( &self, arena: &mut CacheArena, @@ -29,8 +29,8 @@ impl CachedTreeHash for Validator { .iter_mut(arena)? .enumerate() .flat_map(|(i, leaf)| { - // Fields pubkey and withdrawal_credentials are constant - if (i == 0 || i == 1) && cache.initialized { + // Pubkey field (index 0) is constant. + if i == 0 && cache.initialized { None } else if process_field_by_index(self, i, leaf, !cache.initialized) { Some(i) diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 6e63c943a..e4497c809 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -1,5 +1,6 @@ use crate::{ - test_utils::TestRandom, BeaconState, ChainSpec, Epoch, EthSpec, Hash256, PublicKeyBytes, + test_utils::TestRandom, Address, BeaconState, ChainSpec, Epoch, EthSpec, Hash256, + PublicKeyBytes, }; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -75,6 +76,28 @@ impl Validator { .unwrap_or(false) } + /// Get the eth1 withdrawal address if this validator has one initialized. + pub fn get_eth1_withdrawal_address(&self, spec: &ChainSpec) -> Option
{ + self.has_eth1_withdrawal_credential(spec) + .then(|| { + self.withdrawal_credentials + .as_bytes() + .get(12..) + .map(Address::from_slice) + }) + .flatten() + } + + /// Changes withdrawal credentials to the provided eth1 execution address + /// + /// WARNING: this function does NO VALIDATION - it just does it! + pub fn change_withdrawal_credentials(&mut self, execution_address: &Address, spec: &ChainSpec) { + let mut bytes = [0u8; 32]; + bytes[0] = spec.eth1_address_withdrawal_prefix_byte; + bytes[12..].copy_from_slice(execution_address.as_bytes()); + self.withdrawal_credentials = Hash256::from(bytes); + } + /// Returns `true` if the validator is fully withdrawable at some epoch pub fn is_fully_withdrawable_at(&self, balance: u64, epoch: Epoch, spec: &ChainSpec) -> bool { self.has_eth1_withdrawal_credential(spec) && self.withdrawable_epoch <= epoch && balance > 0 diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index e05ef0b06..5dd22de8d 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.2.0 +TESTS_TAG := f5c7cf78 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index 26a05715b..b0e16e12c 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -289,8 +289,9 @@ impl> Case for EpochProcessing { && T::name() != "participation_flag_updates" } // No phase0 tests for Altair and later. - ForkName::Altair | ForkName::Merge => T::name() != "participation_record_updates", - ForkName::Capella => false, // TODO: revisit when tests are out + ForkName::Altair | ForkName::Merge | ForkName::Capella => { + T::name() != "participation_record_updates" + } ForkName::Eip4844 => false, // TODO: revisit when tests are out } } diff --git a/testing/ef_tests/src/cases/fork.rs b/testing/ef_tests/src/cases/fork.rs index bcc76b855..f79e13005 100644 --- a/testing/ef_tests/src/cases/fork.rs +++ b/testing/ef_tests/src/cases/fork.rs @@ -3,7 +3,7 @@ use crate::case_result::compare_beacon_state_results_without_caches; use crate::cases::common::previous_fork; use crate::decode::{ssz_decode_state, yaml_decode_file}; use serde_derive::Deserialize; -use state_processing::upgrade::{upgrade_to_altair, upgrade_to_bellatrix}; +use state_processing::upgrade::{upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella}; use types::{BeaconState, ForkName}; #[derive(Debug, Clone, Default, Deserialize)] @@ -61,8 +61,8 @@ impl Case for ForkTest { ForkName::Base => panic!("phase0 not supported"), ForkName::Altair => upgrade_to_altair(&mut result_state, spec).map(|_| result_state), ForkName::Merge => upgrade_to_bellatrix(&mut result_state, spec).map(|_| result_state), + ForkName::Capella => upgrade_to_capella(&mut result_state, spec).map(|_| result_state), ForkName::Eip4844 => panic!("eip4844 not supported"), - ForkName::Capella => panic!("capella not supported"), }; compare_beacon_state_results_without_caches(&mut result, &mut expected) diff --git a/testing/ef_tests/src/cases/genesis_initialization.rs b/testing/ef_tests/src/cases/genesis_initialization.rs index d447fbd8f..dbf6c70b2 100644 --- a/testing/ef_tests/src/cases/genesis_initialization.rs +++ b/testing/ef_tests/src/cases/genesis_initialization.rs @@ -1,13 +1,10 @@ use super::*; use crate::case_result::compare_beacon_state_results_without_caches; -use crate::decode::{ssz_decode_file, ssz_decode_state, 
yaml_decode_file}; +use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::initialize_beacon_state_from_eth1; use std::path::PathBuf; -use types::{ - BeaconState, Deposit, EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderMerge, ForkName, - Hash256, -}; +use types::{BeaconState, Deposit, EthSpec, ExecutionPayloadHeader, ForkName, Hash256}; #[derive(Debug, Clone, Deserialize)] struct Metadata { @@ -41,14 +38,10 @@ impl LoadCase for GenesisInitialization { let meta: Metadata = yaml_decode_file(&path.join("meta.yaml"))?; let execution_payload_header: Option> = if meta.execution_payload_header.unwrap_or(false) { - //FIXME(sean) we could decode based on timestamp - we probably don't do decode a payload - // without a block this elsewhere at presetn. But when we support SSZ in the builder api we may need to. - // Although that API should include fork info. Hardcoding this for now - Some(ExecutionPayloadHeader::Merge(ssz_decode_file::< - ExecutionPayloadHeaderMerge, - >( + Some(ssz_decode_file_with( &path.join("execution_payload_header.ssz_snappy"), - )?)) + |bytes| ExecutionPayloadHeader::from_ssz_bytes(bytes, fork_name), + )?) } else { None }; diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index e3dfb7f67..9e3562bc7 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -3,8 +3,11 @@ use crate::bls_setting::BlsSetting; use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use crate::testing_spec; -use crate::type_name::TypeName; use serde_derive::Deserialize; +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +use state_processing::per_block_processing::process_operations::{ + process_bls_to_execution_changes, +}; use state_processing::{ per_block_processing::{ errors::BlockProcessingError, @@ -21,7 +24,7 @@ use std::fmt::Debug; use std::path::Path; use types::{ Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlindedPayload, ChainSpec, Deposit, - EthSpec, ExecutionPayload, ExecutionPayloadMerge, ForkName, FullPayload, ProposerSlashing, + EthSpec, ExecutionPayload, ForkName, FullPayload, ProposerSlashing, SignedBlsToExecutionChange, SignedVoluntaryExit, SyncAggregate, }; @@ -36,6 +39,12 @@ struct ExecutionMetadata { execution_valid: bool, } +/// Newtype for testing withdrawals.
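This newtype lets the withdrawals handler reuse the same `execution_payload.ssz_snappy` fixture as the execution-payload tests while routing it through a different processing function: wrapping the decoded payload gives the harness a second, distinct `Operation` implementation over the same inner type. A stripped-down sketch of the pattern, with a toy trait and toy types rather than the ef_tests ones:

// Toy sketch: two handlers over the same decoded fixture, distinguished by a
// newtype so each can implement the harness trait differently.
#[derive(Debug, Clone)]
struct Payload(Vec<u8>);

#[derive(Debug, Clone)]
struct WithdrawalsPayload {
    payload: Payload,
}

trait Operation: Sized {
    fn handler_name() -> String;
    fn decode(bytes: &[u8]) -> Self;
    fn apply(&self) -> String;
}

impl Operation for Payload {
    fn handler_name() -> String {
        "execution_payload".into()
    }
    fn decode(bytes: &[u8]) -> Self {
        Payload(bytes.to_vec())
    }
    fn apply(&self) -> String {
        format!("process_execution_payload({} bytes)", self.0.len())
    }
}

impl Operation for WithdrawalsPayload {
    fn handler_name() -> String {
        "withdrawals".into()
    }
    // Same fixture bytes, different processing routine.
    fn decode(bytes: &[u8]) -> Self {
        WithdrawalsPayload {
            payload: Payload(bytes.to_vec()),
        }
    }
    fn apply(&self) -> String {
        format!("process_withdrawals({} bytes)", self.payload.0.len())
    }
}

fn run<O: Operation>(bytes: &[u8]) -> (String, String) {
    let op = O::decode(bytes);
    (O::handler_name(), op.apply())
}

fn main() {
    let fixture = [0u8; 16];
    println!("{:?}", run::<Payload>(&fixture));
    println!("{:?}", run::<WithdrawalsPayload>(&fixture));
}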
+#[derive(Debug, Clone, Deserialize)] +pub struct WithdrawalsPayload { + payload: FullPayload, +} + #[derive(Debug, Clone)] pub struct Operations> { metadata: Metadata, @@ -45,10 +54,8 @@ pub struct Operations> { pub post: Option>, } -pub trait Operation: TypeName + Debug + Sync + Sized { - fn handler_name() -> String { - Self::name().to_lowercase() - } +pub trait Operation: Debug + Sync + Sized { + fn handler_name() -> String; fn filename() -> String { format!("{}.ssz_snappy", Self::handler_name()) @@ -58,7 +65,7 @@ pub trait Operation: TypeName + Debug + Sync + Sized { true } - fn decode(path: &Path, spec: &ChainSpec) -> Result; + fn decode(path: &Path, fork_name: ForkName, spec: &ChainSpec) -> Result; fn apply_to( &self, @@ -69,7 +76,11 @@ pub trait Operation: TypeName + Debug + Sync + Sized { } impl Operation for Attestation { - fn decode(path: &Path, _spec: &ChainSpec) -> Result { + fn handler_name() -> String { + "attestation".into() + } + + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file(path) } @@ -109,7 +120,7 @@ impl Operation for AttesterSlashing { "attester_slashing".into() } - fn decode(path: &Path, _spec: &ChainSpec) -> Result { + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file(path) } @@ -131,7 +142,11 @@ impl Operation for AttesterSlashing { } impl Operation for Deposit { - fn decode(path: &Path, _spec: &ChainSpec) -> Result { + fn handler_name() -> String { + "deposit".into() + } + + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file(path) } @@ -155,7 +170,7 @@ impl Operation for ProposerSlashing { "proposer_slashing".into() } - fn decode(path: &Path, _spec: &ChainSpec) -> Result { + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file(path) } @@ -181,7 +196,7 @@ impl Operation for SignedVoluntaryExit { "voluntary_exit".into() } - fn decode(path: &Path, _spec: &ChainSpec) -> Result { + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file(path) } @@ -204,7 +219,7 @@ impl Operation for BeaconBlock { "block.ssz_snappy".into() } - fn decode(path: &Path, spec: &ChainSpec) -> Result { + fn decode(path: &Path, _fork_name: ForkName, spec: &ChainSpec) -> Result { ssz_decode_file_with(path, |bytes| BeaconBlock::from_ssz_bytes(bytes, spec)) } @@ -239,7 +254,7 @@ impl Operation for SyncAggregate { fork_name != ForkName::Base } - fn decode(path: &Path, _spec: &ChainSpec) -> Result { + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file(path) } @@ -267,13 +282,11 @@ impl Operation for FullPayload { fork_name != ForkName::Base && fork_name != ForkName::Altair } - //FIXME(sean) we could decode based on timestamp - we probably don't do decode a payload - // without a block this elsewhere at presetn. But when we support SSZ in the builder api we may need to. - // Although that API should include fork info. 
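Each handler also gates itself per fork with explicit inequalities (`fork_name != ForkName::Base && fork_name != ForkName::Altair`, and below additionally `!= ForkName::Merge` for the withdrawals and address-change handlers). If the fork enum is ordered, the same predicates read as comparisons; a toy sketch of that equivalent form (the code in this diff keeps the explicit inequalities):

// Toy fork enum with derived ordering; the predicate reads as "Capella or later".
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum ForkName {
    Base,
    Altair,
    Merge,
    Capella,
    Eip4844,
}

fn withdrawals_enabled_for(fork_name: ForkName) -> bool {
    fork_name >= ForkName::Capella
}

fn main() {
    assert!(!withdrawals_enabled_for(ForkName::Base));
    assert!(!withdrawals_enabled_for(ForkName::Altair));
    assert!(!withdrawals_enabled_for(ForkName::Merge));
    assert!(withdrawals_enabled_for(ForkName::Capella));
    assert!(withdrawals_enabled_for(ForkName::Eip4844));
}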
Hardcoding this for now - fn decode(path: &Path, _spec: &ChainSpec) -> Result { - ssz_decode_file::>(path) - .map(ExecutionPayload::Merge) - .map(Into::into) + fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result { + ssz_decode_file_with(path, |bytes| { + ExecutionPayload::from_ssz_bytes(bytes, fork_name) + }) + .map(Into::into) } fn apply_to( @@ -306,13 +319,11 @@ impl Operation for BlindedPayload { fork_name != ForkName::Base && fork_name != ForkName::Altair } - fn decode(path: &Path, _spec: &ChainSpec) -> Result { - //FIXME(sean) we could decode based on timestamp - we probably don't do decode a payload - // without a block this elsewhere at presetn. But when we support SSZ in the builder api we may need to. - // Although that API should include fork info. Hardcoding this for now - let payload: Result, Error> = - ssz_decode_file::>(path).map(Into::into); - payload.map(Into::into) + fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result { + ssz_decode_file_with(path, |bytes| { + ExecutionPayload::from_ssz_bytes(bytes, fork_name) + }) + .map(Into::into) } fn apply_to( @@ -333,6 +344,67 @@ impl Operation for BlindedPayload { } } +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +impl Operation for WithdrawalsPayload { + fn handler_name() -> String { + "withdrawals".into() + } + + fn filename() -> String { + "execution_payload.ssz_snappy".into() + } + + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name != ForkName::Base && fork_name != ForkName::Altair && fork_name != ForkName::Merge + } + + fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result { + ssz_decode_file_with(path, |bytes| { + ExecutionPayload::from_ssz_bytes(bytes, fork_name) + }) + .map(|payload| WithdrawalsPayload { + payload: payload.into(), + }) + } + + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + _: &Operations, + ) -> Result<(), BlockProcessingError> { + process_withdrawals::<_, FullPayload<_>>(state, self.payload.to_ref(), spec) + } +} + +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +impl Operation for SignedBlsToExecutionChange { + fn handler_name() -> String { + "bls_to_execution_change".into() + } + + fn filename() -> String { + "address_change.ssz_snappy".into() + } + + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name != ForkName::Base && fork_name != ForkName::Altair && fork_name != ForkName::Merge + } + + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { + ssz_decode_file(path) + } + + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + _extra: &Operations, + ) -> Result<(), BlockProcessingError> { + process_bls_to_execution_changes(state, &[self.clone()], VerifySignatures::True, spec) + } +} + impl> LoadCase for Operations { fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { let spec = &testing_spec::(fork_name); @@ -356,7 +428,7 @@ impl> LoadCase for Operations { // Check BLS setting here before SSZ deserialization, as most types require signatures // to be valid. 
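Once verified, a `SignedBlsToExecutionChange` rewrites the validator's withdrawal credentials using the layout set up by the `Validator::change_withdrawal_credentials` helper earlier in this diff: a one-byte prefix (the spec's eth1-address withdrawal prefix, 0x01), eleven zero bytes, then the 20-byte execution address. A self-contained sketch of that round trip, with plain byte arrays standing in for `Hash256` and `Address`:

// Stand-in layout sketch: prefix byte + 11 zero bytes + 20-byte execution address.
const ETH1_ADDRESS_WITHDRAWAL_PREFIX: u8 = 0x01;

fn eth1_withdrawal_credentials(execution_address: &[u8; 20]) -> [u8; 32] {
    let mut credentials = [0u8; 32];
    credentials[0] = ETH1_ADDRESS_WITHDRAWAL_PREFIX;
    // Bytes 1..12 stay zero; the address occupies the trailing 20 bytes.
    credentials[12..].copy_from_slice(execution_address);
    credentials
}

fn eth1_withdrawal_address(credentials: &[u8; 32]) -> Option<[u8; 20]> {
    if credentials[0] != ETH1_ADDRESS_WITHDRAWAL_PREFIX {
        return None;
    }
    let mut address = [0u8; 20];
    address.copy_from_slice(&credentials[12..]);
    Some(address)
}

fn main() {
    let address = [0xab; 20];
    let credentials = eth1_withdrawal_credentials(&address);
    assert_eq!(eth1_withdrawal_address(&credentials), Some(address));
}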
let (operation, bls_error) = if metadata.bls_setting.unwrap_or_default().check().is_ok() { - match O::decode(&path.join(O::filename()), spec) { + match O::decode(&path.join(O::filename()), fork_name, spec) { Ok(op) => (Some(op), None), Err(Error::InvalidBLSInput(error)) => (None, Some(error)), Err(e) => return Err(e), @@ -399,9 +471,11 @@ impl> Case for Operations { let mut expected = self.post.clone(); // Processing requires the committee caches. - state - .build_all_committee_caches(spec) - .expect("committee caches OK"); + // NOTE: some of the withdrawals tests have 0 active validators, so do not try + // to build the committee cache in this case. + if O::handler_name() != "withdrawals" { + state.build_all_committee_caches(spec).unwrap(); + } let mut result = self .operation diff --git a/testing/ef_tests/src/cases/transition.rs b/testing/ef_tests/src/cases/transition.rs index 469285ab0..fb7ccfea6 100644 --- a/testing/ef_tests/src/cases/transition.rs +++ b/testing/ef_tests/src/cases/transition.rs @@ -42,14 +42,17 @@ impl LoadCase for TransitionTest { spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(metadata.fork_epoch); } - ForkName::Eip4844 => { - spec.bellatrix_fork_epoch = Some(Epoch::new(0)); - spec.eip4844_fork_epoch = Some(metadata.fork_epoch); - } ForkName::Capella => { - spec.capella_fork_epoch = Some(Epoch::new(0)); + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); spec.capella_fork_epoch = Some(metadata.fork_epoch); } + ForkName::Eip4844 => { + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(0)); + spec.eip4844_fork_epoch = Some(metadata.fork_epoch); + } } // Load blocks diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index dd5ed82da..ed376af44 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -24,6 +24,11 @@ pub trait Handler { fn run(&self) { for fork_name in ForkName::list_all() { + // FIXME(eip4844): enable eip4844 + if fork_name == ForkName::Eip4844 { + continue; + } + if self.is_enabled_for_fork(fork_name) { self.run_for_fork(fork_name) } @@ -218,6 +223,10 @@ impl SszStaticHandler { Self::for_forks(vec![ForkName::Merge]) } + pub fn capella_only() -> Self { + Self::for_forks(vec![ForkName::Capella]) + } + pub fn merge_and_later() -> Self { Self::for_forks(ForkName::list_all()[2..].to_vec()) } @@ -533,10 +542,8 @@ impl Handler for ForkChoiceHandler { } fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { - // Merge block tests are only enabled for Bellatrix or later. - if self.handler_name == "on_merge_block" - && (fork_name == ForkName::Base || fork_name == ForkName::Altair) - { + // Merge block tests are only enabled for Bellatrix.
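Each corrected `TransitionTest` arm above follows a single rule: every fork before the one under test activates at epoch 0, and the fork under test activates at `metadata.fork_epoch`. A standalone sketch of that rule, with a toy schedule struct rather than the real `ChainSpec`:

// Toy transition schedule: forks before `post_fork` activate at epoch 0,
// `post_fork` itself at `fork_epoch`, and later forks stay unscheduled.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum ForkName {
    Altair,
    Merge,
    Capella,
    Eip4844,
}

#[derive(Debug)]
struct ForkSchedule {
    altair_fork_epoch: Option<u64>,
    bellatrix_fork_epoch: Option<u64>,
    capella_fork_epoch: Option<u64>,
    eip4844_fork_epoch: Option<u64>,
}

fn schedule_transition(post_fork: ForkName, fork_epoch: u64) -> ForkSchedule {
    let epoch_for = |fork: ForkName| {
        if fork < post_fork {
            Some(0)
        } else if fork == post_fork {
            Some(fork_epoch)
        } else {
            None
        }
    };
    ForkSchedule {
        altair_fork_epoch: epoch_for(ForkName::Altair),
        bellatrix_fork_epoch: epoch_for(ForkName::Merge),
        capella_fork_epoch: epoch_for(ForkName::Capella),
        eip4844_fork_epoch: epoch_for(ForkName::Eip4844),
    }
}

fn main() {
    let spec = schedule_transition(ForkName::Capella, 2);
    assert_eq!(spec.altair_fork_epoch, Some(0));
    assert_eq!(spec.bellatrix_fork_epoch, Some(0));
    assert_eq!(spec.capella_fork_epoch, Some(2));
    assert_eq!(spec.eip4844_fork_epoch, None);
}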
+ if self.handler_name == "on_merge_block" && fork_name != ForkName::Merge { return false; } diff --git a/testing/ef_tests/src/lib.rs b/testing/ef_tests/src/lib.rs index 5c2ca3fb5..d45b1e15c 100644 --- a/testing/ef_tests/src/lib.rs +++ b/testing/ef_tests/src/lib.rs @@ -1,10 +1,9 @@ pub use case_result::CaseResult; -pub use cases::Case; pub use cases::{ - EffectiveBalanceUpdates, Eth1DataReset, HistoricalRootsUpdate, InactivityUpdates, + Case, EffectiveBalanceUpdates, Eth1DataReset, HistoricalRootsUpdate, InactivityUpdates, JustificationAndFinalization, ParticipationFlagUpdates, ParticipationRecordUpdates, RandaoMixesReset, RegistryUpdates, RewardsAndPenalties, Slashings, SlashingsReset, - SyncCommitteeUpdates, + SyncCommitteeUpdates, WithdrawalsPayload, }; pub use decode::log_file_access; pub use error::Error; diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index c075e89b3..bee2d9b03 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -45,6 +45,8 @@ type_name_generic!(BeaconBlockBody); type_name_generic!(BeaconBlockBodyBase, "BeaconBlockBody"); type_name_generic!(BeaconBlockBodyAltair, "BeaconBlockBody"); type_name_generic!(BeaconBlockBodyMerge, "BeaconBlockBody"); +type_name_generic!(BeaconBlockBodyCapella, "BeaconBlockBody"); +type_name_generic!(BeaconBlockBodyEip4844, "BeaconBlockBody"); type_name!(BeaconBlockHeader); type_name_generic!(BeaconState); type_name!(Checkpoint); @@ -54,8 +56,14 @@ type_name!(DepositData); type_name!(DepositMessage); type_name!(Eth1Data); type_name_generic!(ExecutionPayload); +type_name_generic!(ExecutionPayloadMerge, "ExecutionPayload"); +type_name_generic!(ExecutionPayloadCapella, "ExecutionPayload"); +type_name_generic!(ExecutionPayloadEip4844, "ExecutionPayload"); type_name_generic!(FullPayload, "ExecutionPayload"); type_name_generic!(ExecutionPayloadHeader); +type_name_generic!(ExecutionPayloadHeaderMerge, "ExecutionPayloadHeader"); +type_name_generic!(ExecutionPayloadHeaderCapella, "ExecutionPayloadHeader"); +type_name_generic!(ExecutionPayloadHeaderEip4844, "ExecutionPayloadHeader"); type_name_generic!(BlindedPayload, "ExecutionPayloadHeader"); type_name!(Fork); type_name!(ForkData); @@ -76,3 +84,6 @@ type_name_generic!(SyncAggregate); type_name_generic!(SyncCommittee); type_name!(Validator); type_name!(VoluntaryExit); +type_name!(Withdrawal); +type_name!(BlsToExecutionChange, "BLSToExecutionChange"); +type_name!(SignedBlsToExecutionChange, "SignedBLSToExecutionChange"); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 28c57028c..338a56b9f 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -82,6 +82,18 @@ fn operations_execution_payload_blinded() { OperationsHandler::>::default().run(); } +#[test] +fn operations_withdrawals() { + OperationsHandler::>::default().run(); + OperationsHandler::>::default().run(); +} + +#[test] +fn operations_bls_to_execution_change() { + OperationsHandler::::default().run(); + OperationsHandler::::default().run(); +} + #[test] fn sanity_blocks() { SanityBlocksHandler::::default().run(); @@ -250,6 +262,10 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::merge_only() .run(); + SszStaticHandler::, MinimalEthSpec>::capella_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::capella_only() + .run(); } // Altair and later @@ -302,18 +318,44 @@ mod ssz_static { // Merge and later #[test] fn execution_payload() { - SszStaticHandler::, MinimalEthSpec>::merge_and_later() + 
SszStaticHandler::, MinimalEthSpec>::merge_only() .run(); - SszStaticHandler::, MainnetEthSpec>::merge_and_later() + SszStaticHandler::, MainnetEthSpec>::merge_only() + .run(); + SszStaticHandler::, MinimalEthSpec>::capella_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::capella_only() .run(); } #[test] fn execution_payload_header() { - SszStaticHandler::, MinimalEthSpec>::merge_and_later() + SszStaticHandler::, MinimalEthSpec>::merge_only() .run(); - SszStaticHandler::, MainnetEthSpec>::merge_and_later() + SszStaticHandler::, MainnetEthSpec>::merge_only() .run(); + SszStaticHandler::, MinimalEthSpec> + ::capella_only().run(); + SszStaticHandler::, MainnetEthSpec> + ::capella_only().run(); + } + + #[test] + fn withdrawal() { + SszStaticHandler::::capella_only().run(); + SszStaticHandler::::capella_only().run(); + } + + #[test] + fn bls_to_execution_change() { + SszStaticHandler::::capella_only().run(); + SszStaticHandler::::capella_only().run(); + } + + #[test] + fn signed_bls_to_execution_change() { + SszStaticHandler::::capella_only().run(); + SszStaticHandler::::capella_only().run(); } } diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index a85138be9..b5923aafe 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -21,3 +21,7 @@ deposit_contract = { path = "../../common/deposit_contract" } reqwest = { version = "0.11.0", features = ["json"] } hex = "0.4.2" fork_choice = { path = "../../consensus/fork_choice" } + +[features] +default = [] +withdrawals = [] \ No newline at end of file diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index b3464ec98..9ef96687a 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -16,8 +16,8 @@ use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use task_executor::TaskExecutor; use tokio::time::sleep; use types::{ - Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, FullPayload, Hash256, - MainnetEthSpec, PublicKeyBytes, Slot, Uint256, + Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ForkName, FullPayload, + Hash256, MainnetEthSpec, PublicKeyBytes, Slot, Uint256, }; const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(20); @@ -326,6 +326,10 @@ impl TestRig { proposer_index, forkchoice_update_params, builder_params, + // FIXME: think about how to test other forks + ForkName::Merge, + #[cfg(feature = "withdrawals")] + None, &self.spec, ) .await @@ -450,6 +454,10 @@ impl TestRig { proposer_index, forkchoice_update_params, builder_params, + // FIXME: think about how to test other forks + ForkName::Merge, + #[cfg(feature = "withdrawals")] + None, &self.spec, ) .await
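The trailing `ForkName::Merge` plus `#[cfg(feature = "withdrawals")] None` arguments in the test rig use the same feature-gating idiom applied to the new struct fields and trait methods throughout this diff: the field, the parameter, and the call-site argument all disappear together when the `withdrawals` feature is off, so both configurations keep compiling. A condensed, purely illustrative sketch of the idiom (stand-in names, not the Lighthouse items):

// With the `withdrawals` feature disabled, the field, the parameter, and the
// gated argument below are all compiled out together.
#[derive(Debug)]
struct PayloadAttributes {
    timestamp: u64,
    #[cfg(feature = "withdrawals")]
    withdrawals: Vec<u64>,
}

fn build_attributes(
    timestamp: u64,
    #[cfg(feature = "withdrawals")] withdrawals: Vec<u64>,
) -> PayloadAttributes {
    PayloadAttributes {
        timestamp,
        #[cfg(feature = "withdrawals")]
        withdrawals,
    }
}

fn main() {
    let attrs = build_attributes(
        42,
        #[cfg(feature = "withdrawals")]
        Vec::new(),
    );
    println!("{attrs:?}");
}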