Clean capella (#4019)

## Issue Addressed

Cleans up all the remnants of 4844 in Capella. This makes sure that when 4844 is reviewed, there is nothing we are missing because it got included here.

## Proposed Changes

Remove every 4844-related change from the Capella code.

## Additional Info

The merge process I followed (locally) is as follows; a rough command sketch is included after the list:
- squash merge to produce one commit
- in a new branch off `unstable` containing the squashed commit, create a `git revert HEAD` commit
- merge that new branch onto 4844 with `--strategy ours`
- compare local 4844 to remote 4844 and make sure the diff is empty
- enjoy
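
A minimal sketch of those steps as git commands. The `unstable` and `4844` branch names come from the steps above; `clean-capella`, `revert-clean-capella`, and `clean-capella-pr` are hypothetical names used only for illustration, and the exact squash step depends on how the PR branch was merged:

```sh
# Sketch only: branch names other than `unstable` and `4844` are placeholders.

# 1. Squash-merge the cleanup into a single commit on a branch off unstable.
git checkout -b clean-capella unstable
git merge --squash clean-capella-pr        # hypothetical source branch for this PR
git commit -m "Clean capella"

# 2. In a new branch off unstable with the squashed commit, revert that commit.
git checkout -b revert-clean-capella clean-capella
git revert HEAD

# 3. Merge the revert branch onto 4844, keeping 4844's tree untouched.
git checkout 4844
git merge --strategy ours revert-clean-capella

# 4. Compare local 4844 to remote 4844; the diff should be empty.
git diff origin/4844
```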

Co-authored-by: Paul Hauner <paul@paulhauner.com>
Committed by Divma on 2023-03-01 03:19:02 +00:00
commit 047c7544e3 (parent 17d9a620cf)
96 changed files with 199 additions and 2307 deletions

Cargo.lock (generated): 11 lines changed
View File

@ -6784,16 +6784,6 @@ dependencies = [
"serde_derive",
]
[[package]]
name = "serde-big-array"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "18b20e7752957bbe9661cff4e0bb04d183d0948cdab2ea58cdb9df36a61dfe62"
dependencies = [
"serde",
"serde_derive",
]
[[package]]
name = "serde_array_query"
version = "0.1.0"
@ -8301,7 +8291,6 @@ dependencies = [
"rusqlite",
"safe_arith",
"serde",
"serde-big-array",
"serde_derive",
"serde_json",
"serde_with",

View File

@ -4106,9 +4106,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
None
};
//FIXME(sean) waiting for the BN<>EE api for this to stabilize
let kzg_commitments = vec![];
// Part 3/3 (blocking)
//
// Perform the final steps of combining all the parts and computing the state root.
@ -4119,7 +4116,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
chain.complete_partial_beacon_block(
partial_beacon_block,
block_contents,
kzg_commitments,
verification,
)
},
@ -4190,7 +4186,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// allows it to run concurrently with things like attestation packing.
let prepare_payload_handle = match &state {
BeaconState::Base(_) | BeaconState::Altair(_) => None,
BeaconState::Merge(_) | BeaconState::Capella(_) | BeaconState::Eip4844(_) => {
BeaconState::Merge(_) | BeaconState::Capella(_) => {
let prepare_payload_handle =
get_execution_payload(self.clone(), &state, proposer_index, builder_params)?;
Some(prepare_payload_handle)
@ -4373,7 +4369,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
&self,
partial_beacon_block: PartialBeaconBlock<T::EthSpec, Payload>,
block_contents: Option<BlockProposalContents<T::EthSpec, Payload>>,
kzg_commitments: Vec<KzgCommitment>,
verification: ProduceBlockVerification,
) -> Result<BeaconBlockAndState<T::EthSpec, Payload>, BlockProductionError> {
let PartialBeaconBlock {
@ -4481,31 +4476,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
bls_to_execution_changes: bls_to_execution_changes.into(),
},
}),
BeaconState::Eip4844(_) => BeaconBlock::Eip4844(BeaconBlockEip4844 {
slot,
proposer_index,
parent_root,
state_root: Hash256::zero(),
body: BeaconBlockBodyEip4844 {
randao_reveal,
eth1_data,
graffiti,
proposer_slashings: proposer_slashings.into(),
attester_slashings: attester_slashings.into(),
attestations: attestations.into(),
deposits: deposits.into(),
voluntary_exits: voluntary_exits.into(),
sync_aggregate: sync_aggregate
.ok_or(BlockProductionError::MissingSyncAggregate)?,
execution_payload: block_contents
.ok_or(BlockProductionError::MissingExecutionPayload)?
.to_payload()
.try_into()
.map_err(|_| BlockProductionError::InvalidPayloadFork)?,
bls_to_execution_changes: bls_to_execution_changes.into(),
blob_kzg_commitments: VariableList::from(kzg_commitments),
},
}),
};
let block = SignedBeaconBlock::from_block(
@ -4760,7 +4730,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let withdrawals = match self.spec.fork_name_at_slot::<T::EthSpec>(prepare_slot) {
ForkName::Base | ForkName::Altair | ForkName::Merge => None,
ForkName::Capella | ForkName::Eip4844 => {
ForkName::Capella => {
// We must use the advanced state because balances can change at epoch boundaries
// and balances affect withdrawals.
// FIXME(mark)

View File

@ -1,136 +0,0 @@
use derivative::Derivative;
use slot_clock::SlotClock;
use crate::beacon_chain::{BeaconChain, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY};
use crate::BeaconChainError;
use bls::PublicKey;
use types::{consts::eip4844::BLS_MODULUS, BeaconStateError, BlobsSidecar, Slot};
pub enum BlobError {
/// The blob sidecar is from a slot that is later than the current slot (with respect to the
/// gossip clock disparity).
///
/// ## Peer scoring
///
/// Assuming the local clock is correct, the peer has sent an invalid message.
FutureSlot {
message_slot: Slot,
latest_permissible_slot: Slot,
},
/// The blob sidecar is from a slot that is prior to the earliest permissible slot (with
/// respect to the gossip clock disparity).
///
/// ## Peer scoring
///
/// Assuming the local clock is correct, the peer has sent an invalid message.
PastSlot {
message_slot: Slot,
earliest_permissible_slot: Slot,
},
/// The blob sidecar contains an incorrectly formatted `BLSFieldElement` > `BLS_MODULUS`.
///
///
/// ## Peer scoring
///
/// The peer has sent an invalid message.
BlobOutOfRange { blob_index: usize },
/// The blob sidecar contains a KZGCommitment that is not a valid G1 point on
/// the bls curve.
///
/// ## Peer scoring
///
/// The peer has sent an invalid message.
InvalidKZGCommitment,
/// The proposal signature is invalid.
///
/// ## Peer scoring
///
/// The signature on the blob sidecar is invalid and the peer is faulty.
ProposalSignatureInvalid,
/// A blob sidecar for this proposer and slot has already been observed.
///
/// ## Peer scoring
///
/// The `proposer` has already proposed a sidecar at this slot. The existing sidecar may or may not
/// be equal to the given sidecar.
RepeatSidecar { proposer: u64, slot: Slot },
/// There was an error whilst processing the blob sidecar. It is not known if it is valid or invalid.
///
/// ## Peer scoring
///
/// We were unable to process this blob sidecar due to an internal error. It's unclear if the
/// blob sidecar is valid.
BeaconChainError(BeaconChainError),
}
impl From<BeaconChainError> for BlobError {
fn from(e: BeaconChainError) -> Self {
BlobError::BeaconChainError(e)
}
}
impl From<BeaconStateError> for BlobError {
fn from(e: BeaconStateError) -> Self {
BlobError::BeaconChainError(BeaconChainError::BeaconStateError(e))
}
}
/// A wrapper around a `BlobsSidecar` that indicates it has been verified w.r.t the corresponding
/// `SignedBeaconBlock`.
#[derive(Derivative)]
#[derivative(Debug(bound = "T: BeaconChainTypes"))]
pub struct VerifiedBlobsSidecar<'a, T: BeaconChainTypes> {
pub blob_sidecar: &'a BlobsSidecar<T::EthSpec>,
}
impl<'a, T: BeaconChainTypes> VerifiedBlobsSidecar<'a, T> {
pub fn verify(
blob_sidecar: &'a BlobsSidecar<T::EthSpec>,
chain: &BeaconChain<T>,
) -> Result<Self, BlobError> {
let blob_slot = blob_sidecar.beacon_block_slot;
// Do not gossip or process blobs from future or past slots.
let latest_permissible_slot = chain
.slot_clock
.now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
.ok_or(BeaconChainError::UnableToReadSlot)?;
if blob_slot > latest_permissible_slot {
return Err(BlobError::FutureSlot {
message_slot: latest_permissible_slot,
latest_permissible_slot: blob_slot,
});
}
let earliest_permissible_slot = chain
.slot_clock
.now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
.ok_or(BeaconChainError::UnableToReadSlot)?;
if blob_slot > earliest_permissible_slot {
return Err(BlobError::PastSlot {
message_slot: earliest_permissible_slot,
earliest_permissible_slot: blob_slot,
});
}
// Verify that blobs are properly formatted
//TODO: add the check while constructing a Blob type from bytes instead of after
for (i, blob) in blob_sidecar.blobs.iter().enumerate() {
if blob.iter().any(|b| *b >= *BLS_MODULUS) {
return Err(BlobError::BlobOutOfRange { blob_index: i });
}
}
// Verify that the KZG proof is a valid G1 point
if PublicKey::deserialize(&blob_sidecar.kzg_aggregate_proof.0).is_err() {
return Err(BlobError::InvalidKZGCommitment);
}
// TODO: Check that we have not already received a sidecar with a valid signature for this slot.
Ok(Self { blob_sidecar })
}
}

View File

@ -258,11 +258,6 @@ pub enum BlockProductionError {
BlockingFailed(execution_layer::Error),
TerminalPoWBlockLookupFailed(execution_layer::Error),
GetPayloadFailed(execution_layer::Error),
GetBlobsFailed(execution_layer::Error),
BlobPayloadMismatch {
blob_block_hash: ExecutionBlockHash,
payload_block_hash: ExecutionBlockHash,
},
FailedToReadFinalizedBlock(store::Error),
MissingFinalizedBlock(Hash256),
BlockTooLarge(usize),

View File

@ -406,9 +406,7 @@ pub fn get_execution_payload<
let latest_execution_payload_header_block_hash =
state.latest_execution_payload_header()?.block_hash();
let withdrawals = match state {
&BeaconState::Capella(_) | &BeaconState::Eip4844(_) => {
Some(get_expected_withdrawals(state, spec)?.into())
}
&BeaconState::Capella(_) => Some(get_expected_withdrawals(state, spec)?.into()),
&BeaconState::Merge(_) => None,
// These shouldn't happen but they're here to make the pattern irrefutable
&BeaconState::Base(_) | &BeaconState::Altair(_) => None,

View File

@ -7,7 +7,6 @@ mod beacon_chain;
mod beacon_fork_choice_store;
pub mod beacon_proposer_cache;
mod beacon_snapshot;
pub mod blob_verification;
pub mod block_reward;
mod block_times_cache;
mod block_verification;

View File

@ -972,22 +972,6 @@ lazy_static! {
"beacon_pre_finalization_block_lookup_count",
"Number of block roots subject to single block lookups"
);
/*
* Blob sidecar Verification
*/
pub static ref BLOBS_SIDECAR_PROCESSING_REQUESTS: Result<IntCounter> = try_create_int_counter(
"beacon_blobs_sidecar_processing_requests_total",
"Count of all blob sidecars submitted for processing"
);
pub static ref BLOBS_SIDECAR_PROCESSING_SUCCESSES: Result<IntCounter> = try_create_int_counter(
"beacon_blobs_sidecar_processing_successes_total",
"Number of blob sidecars verified for gossip"
);
pub static ref BLOBS_SIDECAR_GOSSIP_VERIFICATION_TIMES: Result<Histogram> = try_create_histogram(
"beacon_blobs_sidecar_gossip_verification_seconds",
"Full runtime of blob sidecars gossip verification"
);
}
// Fifth lazy-static block is used to account for macro recursion limit.

View File

@ -420,10 +420,6 @@ where
spec.capella_fork_epoch.map(|epoch| {
genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
});
mock.server.execution_block_generator().eip4844_time =
spec.eip4844_fork_epoch.map(|epoch| {
genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
});
self
}
@ -433,14 +429,10 @@ where
let shanghai_time = spec.capella_fork_epoch.map(|epoch| {
HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
});
let eip4844_time = spec.eip4844_fork_epoch.map(|epoch| {
HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
});
let mock = MockExecutionLayer::new(
self.runtime.task_executor.clone(),
DEFAULT_TERMINAL_BLOCK,
shanghai_time,
eip4844_time,
None,
Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()),
spec,
@ -464,14 +456,10 @@ where
let shanghai_time = spec.capella_fork_epoch.map(|epoch| {
HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
});
let eip4844_time = spec.eip4844_fork_epoch.map(|epoch| {
HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
});
let mock_el = MockExecutionLayer::new(
self.runtime.task_executor.clone(),
DEFAULT_TERMINAL_BLOCK,
shanghai_time,
eip4844_time,
builder_threshold,
Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()),
spec.clone(),

View File

@ -17,7 +17,7 @@ pub use types::{
Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader,
ExecutionPayloadRef, FixedVector, ForkName, Hash256, Uint256, VariableList, Withdrawal,
};
use types::{ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge};
use types::{ExecutionPayloadCapella, ExecutionPayloadMerge};
pub mod auth;
pub mod http;
@ -134,7 +134,7 @@ pub struct ExecutionBlock {
/// Representation of an execution block with enough detail to reconstruct a payload.
#[superstruct(
variants(Merge, Capella, Eip4844),
variants(Merge, Capella),
variant_attributes(
derive(Clone, Debug, PartialEq, Serialize, Deserialize,),
serde(bound = "T: EthSpec", rename_all = "camelCase"),
@ -165,13 +165,10 @@ pub struct ExecutionBlockWithTransactions<T: EthSpec> {
#[serde(with = "ssz_types::serde_utils::hex_var_list")]
pub extra_data: VariableList<u8, T::MaxExtraDataBytes>,
pub base_fee_per_gas: Uint256,
#[superstruct(only(Eip4844))]
#[serde(with = "eth2_serde_utils::u256_hex_be")]
pub excess_data_gas: Uint256,
#[serde(rename = "hash")]
pub block_hash: ExecutionBlockHash,
pub transactions: Vec<Transaction>,
#[superstruct(only(Capella, Eip4844))]
#[superstruct(only(Capella))]
pub withdrawals: Vec<JsonWithdrawal>,
}
@ -226,33 +223,6 @@ impl<T: EthSpec> TryFrom<ExecutionPayload<T>> for ExecutionBlockWithTransactions
.collect(),
})
}
ExecutionPayload::Eip4844(block) => {
Self::Eip4844(ExecutionBlockWithTransactionsEip4844 {
parent_hash: block.parent_hash,
fee_recipient: block.fee_recipient,
state_root: block.state_root,
receipts_root: block.receipts_root,
logs_bloom: block.logs_bloom,
prev_randao: block.prev_randao,
block_number: block.block_number,
gas_limit: block.gas_limit,
gas_used: block.gas_used,
timestamp: block.timestamp,
extra_data: block.extra_data,
base_fee_per_gas: block.base_fee_per_gas,
excess_data_gas: block.excess_data_gas,
block_hash: block.block_hash,
transactions: block
.transactions
.iter()
.map(|tx| Transaction::decode(&Rlp::new(tx)))
.collect::<Result<Vec<_>, _>>()?,
withdrawals: Vec::from(block.withdrawals)
.into_iter()
.map(|withdrawal| withdrawal.into())
.collect(),
})
}
};
Ok(json_payload)
}
@ -320,7 +290,7 @@ pub struct ProposeBlindedBlockResponse {
}
#[superstruct(
variants(Merge, Capella, Eip4844),
variants(Merge, Capella),
variant_attributes(derive(Clone, Debug, PartialEq),),
map_into(ExecutionPayload),
map_ref_into(ExecutionPayloadRef),
@ -333,8 +303,6 @@ pub struct GetPayloadResponse<T: EthSpec> {
pub execution_payload: ExecutionPayloadMerge<T>,
#[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))]
pub execution_payload: ExecutionPayloadCapella<T>,
#[superstruct(only(Eip4844), partial_getter(rename = "execution_payload_eip4844"))]
pub execution_payload: ExecutionPayloadEip4844<T>,
pub block_value: Uint256,
}
@ -365,10 +333,6 @@ impl<T: EthSpec> From<GetPayloadResponse<T>> for (ExecutionPayload<T>, Uint256)
ExecutionPayload::Capella(inner.execution_payload),
inner.block_value,
),
GetPayloadResponse::Eip4844(inner) => (
ExecutionPayload::Eip4844(inner.execution_payload),
inner.block_value,
),
}
}
}

View File

@ -38,9 +38,6 @@ pub const ENGINE_GET_PAYLOAD_V1: &str = "engine_getPayloadV1";
pub const ENGINE_GET_PAYLOAD_V2: &str = "engine_getPayloadV2";
pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2);
pub const ENGINE_GET_BLOBS_BUNDLE_V1: &str = "engine_getBlobsBundleV1";
pub const ENGINE_GET_BLOBS_BUNDLE_TIMEOUT: Duration = Duration::from_secs(2);
pub const ENGINE_FORKCHOICE_UPDATED_V1: &str = "engine_forkchoiceUpdatedV1";
pub const ENGINE_FORKCHOICE_UPDATED_V2: &str = "engine_forkchoiceUpdatedV2";
pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_secs(8);
@ -742,14 +739,6 @@ impl HttpJsonRpc {
)
.await?,
),
ForkName::Eip4844 => ExecutionBlockWithTransactions::Eip4844(
self.rpc_request(
ETH_GET_BLOCK_BY_HASH,
params,
ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier,
)
.await?,
),
ForkName::Base | ForkName::Altair => {
return Err(Error::UnsupportedForkVariant(format!(
"called get_block_by_hash_with_txns with fork {:?}",
@ -844,29 +833,13 @@ impl HttpJsonRpc {
.await?;
Ok(JsonGetPayloadResponse::V2(response).into())
}
ForkName::Base | ForkName::Altair | ForkName::Eip4844 => Err(
Error::UnsupportedForkVariant(format!("called get_payload_v2 with {}", fork_name)),
),
ForkName::Base | ForkName::Altair => Err(Error::UnsupportedForkVariant(format!(
"called get_payload_v2 with {}",
fork_name
))),
}
}
pub async fn get_blobs_bundle_v1<T: EthSpec>(
&self,
payload_id: PayloadId,
) -> Result<JsonBlobBundles<T>, Error> {
let params = json!([JsonPayloadIdRequest::from(payload_id)]);
let response: JsonBlobBundles<T> = self
.rpc_request(
ENGINE_GET_BLOBS_BUNDLE_V1,
params,
ENGINE_GET_BLOBS_BUNDLE_TIMEOUT,
)
.await?;
Ok(response)
}
pub async fn forkchoice_updated_v1(
&self,
forkchoice_state: ForkchoiceState,

View File

@ -3,12 +3,9 @@ use serde::{Deserialize, Serialize};
use strum::EnumString;
use superstruct::superstruct;
use types::{
Blob, EthSpec, ExecutionBlockHash, FixedVector, KzgCommitment, Transaction, Unsigned,
VariableList, Withdrawal,
};
use types::{
ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge,
EthSpec, ExecutionBlockHash, FixedVector, Transaction, Unsigned, VariableList, Withdrawal,
};
use types::{ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge};
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
@ -64,7 +61,7 @@ pub struct JsonPayloadIdResponse {
}
#[superstruct(
variants(V1, V2, V3),
variants(V1, V2),
variant_attributes(
derive(Debug, PartialEq, Default, Serialize, Deserialize,),
serde(bound = "T: EthSpec", rename_all = "camelCase"),
@ -94,14 +91,11 @@ pub struct JsonExecutionPayload<T: EthSpec> {
pub extra_data: VariableList<u8, T::MaxExtraDataBytes>,
#[serde(with = "eth2_serde_utils::u256_hex_be")]
pub base_fee_per_gas: Uint256,
#[superstruct(only(V3))]
#[serde(with = "eth2_serde_utils::u256_hex_be")]
pub excess_data_gas: Uint256,
pub block_hash: ExecutionBlockHash,
#[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")]
pub transactions:
VariableList<Transaction<T::MaxBytesPerTransaction>, T::MaxTransactionsPerPayload>,
#[superstruct(only(V2, V3))]
#[superstruct(only(V2))]
pub withdrawals: VariableList<JsonWithdrawal, T::MaxWithdrawalsPerPayload>,
}
@ -151,40 +145,12 @@ impl<T: EthSpec> From<ExecutionPayloadCapella<T>> for JsonExecutionPayloadV2<T>
}
}
}
impl<T: EthSpec> From<ExecutionPayloadEip4844<T>> for JsonExecutionPayloadV3<T> {
fn from(payload: ExecutionPayloadEip4844<T>) -> Self {
JsonExecutionPayloadV3 {
parent_hash: payload.parent_hash,
fee_recipient: payload.fee_recipient,
state_root: payload.state_root,
receipts_root: payload.receipts_root,
logs_bloom: payload.logs_bloom,
prev_randao: payload.prev_randao,
block_number: payload.block_number,
gas_limit: payload.gas_limit,
gas_used: payload.gas_used,
timestamp: payload.timestamp,
extra_data: payload.extra_data,
base_fee_per_gas: payload.base_fee_per_gas,
excess_data_gas: payload.excess_data_gas,
block_hash: payload.block_hash,
transactions: payload.transactions,
withdrawals: payload
.withdrawals
.into_iter()
.map(Into::into)
.collect::<Vec<_>>()
.into(),
}
}
}
impl<T: EthSpec> From<ExecutionPayload<T>> for JsonExecutionPayload<T> {
fn from(execution_payload: ExecutionPayload<T>) -> Self {
match execution_payload {
ExecutionPayload::Merge(payload) => JsonExecutionPayload::V1(payload.into()),
ExecutionPayload::Capella(payload) => JsonExecutionPayload::V2(payload.into()),
ExecutionPayload::Eip4844(payload) => JsonExecutionPayload::V3(payload.into()),
}
}
}
@ -235,46 +201,18 @@ impl<T: EthSpec> From<JsonExecutionPayloadV2<T>> for ExecutionPayloadCapella<T>
}
}
}
impl<T: EthSpec> From<JsonExecutionPayloadV3<T>> for ExecutionPayloadEip4844<T> {
fn from(payload: JsonExecutionPayloadV3<T>) -> Self {
ExecutionPayloadEip4844 {
parent_hash: payload.parent_hash,
fee_recipient: payload.fee_recipient,
state_root: payload.state_root,
receipts_root: payload.receipts_root,
logs_bloom: payload.logs_bloom,
prev_randao: payload.prev_randao,
block_number: payload.block_number,
gas_limit: payload.gas_limit,
gas_used: payload.gas_used,
timestamp: payload.timestamp,
extra_data: payload.extra_data,
base_fee_per_gas: payload.base_fee_per_gas,
excess_data_gas: payload.excess_data_gas,
block_hash: payload.block_hash,
transactions: payload.transactions,
withdrawals: payload
.withdrawals
.into_iter()
.map(Into::into)
.collect::<Vec<_>>()
.into(),
}
}
}
impl<T: EthSpec> From<JsonExecutionPayload<T>> for ExecutionPayload<T> {
fn from(json_execution_payload: JsonExecutionPayload<T>) -> Self {
match json_execution_payload {
JsonExecutionPayload::V1(payload) => ExecutionPayload::Merge(payload.into()),
JsonExecutionPayload::V2(payload) => ExecutionPayload::Capella(payload.into()),
JsonExecutionPayload::V3(payload) => ExecutionPayload::Eip4844(payload.into()),
}
}
}
#[superstruct(
variants(V1, V2, V3),
variants(V1, V2),
variant_attributes(
derive(Debug, PartialEq, Serialize, Deserialize),
serde(bound = "T: EthSpec", rename_all = "camelCase")
@ -289,8 +227,6 @@ pub struct JsonGetPayloadResponse<T: EthSpec> {
pub execution_payload: JsonExecutionPayloadV1<T>,
#[superstruct(only(V2), partial_getter(rename = "execution_payload_v2"))]
pub execution_payload: JsonExecutionPayloadV2<T>,
#[superstruct(only(V3), partial_getter(rename = "execution_payload_v3"))]
pub execution_payload: JsonExecutionPayloadV3<T>,
#[serde(with = "eth2_serde_utils::u256_hex_be")]
pub block_value: Uint256,
}
@ -310,12 +246,6 @@ impl<T: EthSpec> From<JsonGetPayloadResponse<T>> for GetPayloadResponse<T> {
block_value: response.block_value,
})
}
JsonGetPayloadResponse::V3(response) => {
GetPayloadResponse::Eip4844(GetPayloadResponseEip4844 {
execution_payload: response.execution_payload.into(),
block_value: response.block_value,
})
}
}
}
}
@ -410,14 +340,6 @@ impl From<JsonPayloadAttributes> for PayloadAttributes {
}
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(bound = "T: EthSpec", rename_all = "camelCase")]
pub struct JsonBlobBundles<T: EthSpec> {
pub block_hash: ExecutionBlockHash,
pub kzgs: Vec<KzgCommitment>,
pub blobs: Vec<Blob<T>>,
}
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct JsonForkchoiceStateV1 {

View File

@ -26,6 +26,7 @@ use std::collections::HashMap;
use std::fmt;
use std::future::Future;
use std::io::Write;
use std::marker::PhantomData;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
@ -37,12 +38,11 @@ use tokio::{
};
use tokio_stream::wrappers::WatchStream;
use tree_hash::TreeHash;
use types::{AbstractExecPayload, BeaconStateError, Blob, ExecPayload, KzgCommitment, Withdrawals};
use types::{AbstractExecPayload, BeaconStateError, ExecPayload, Withdrawals};
use types::{
BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ExecutionPayload,
ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge, ForkName,
ForkVersionedResponse, ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock,
Slot, Uint256,
ExecutionPayloadCapella, ExecutionPayloadMerge, ForkName, ForkVersionedResponse,
ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, Slot, Uint256,
};
mod block_hash;
@ -122,12 +122,8 @@ pub enum BlockProposalContents<T: EthSpec, Payload: AbstractExecPayload<T>> {
Payload {
payload: Payload,
block_value: Uint256,
},
PayloadAndBlobs {
payload: Payload,
block_value: Uint256,
kzg_commitments: Vec<KzgCommitment>,
blobs: Vec<Blob<T>>,
// TODO: remove for 4844, since it appears in PayloadAndBlobs
_phantom: PhantomData<T>,
},
}
@ -137,12 +133,7 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> BlockProposalContents<T, Paylo
Self::Payload {
payload,
block_value: _,
} => payload,
Self::PayloadAndBlobs {
payload,
block_value: _,
kzg_commitments: _,
blobs: _,
_phantom: _,
} => payload,
}
}
@ -151,41 +142,8 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> BlockProposalContents<T, Paylo
Self::Payload {
payload,
block_value: _,
_phantom: _,
} => payload,
Self::PayloadAndBlobs {
payload,
block_value: _,
kzg_commitments: _,
blobs: _,
} => payload,
}
}
pub fn kzg_commitments(&self) -> Option<&[KzgCommitment]> {
match self {
Self::Payload {
payload: _,
block_value: _,
} => None,
Self::PayloadAndBlobs {
payload: _,
block_value: _,
kzg_commitments,
blobs: _,
} => Some(kzg_commitments),
}
}
pub fn blobs(&self) -> Option<&[Blob<T>]> {
match self {
Self::Payload {
payload: _,
block_value: _,
} => None,
Self::PayloadAndBlobs {
payload: _,
block_value: _,
kzg_commitments: _,
blobs,
} => Some(blobs),
}
}
pub fn block_value(&self) -> &Uint256 {
@ -193,12 +151,7 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> BlockProposalContents<T, Paylo
Self::Payload {
payload: _,
block_value,
} => block_value,
Self::PayloadAndBlobs {
payload: _,
block_value,
kzg_commitments: _,
blobs: _,
_phantom: _,
} => block_value,
}
}
@ -208,14 +161,9 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> BlockProposalContents<T, Paylo
BlockProposalContents::Payload {
payload: Payload::default_at_fork(fork_name)?,
block_value: Uint256::zero(),
_phantom: PhantomData::default(),
}
}
ForkName::Eip4844 => BlockProposalContents::PayloadAndBlobs {
payload: Payload::default_at_fork(fork_name)?,
block_value: Uint256::zero(),
blobs: vec![],
kzg_commitments: vec![],
},
})
}
}
@ -871,6 +819,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
BlockProposalContents::Payload {
payload: relay.data.message.header,
block_value: relay.data.message.value,
_phantom: PhantomData::default(),
},
)),
Err(reason) if !reason.payload_invalid() => {
@ -925,6 +874,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
BlockProposalContents::Payload {
payload: relay.data.message.header,
block_value: relay.data.message.value,
_phantom: PhantomData::default(),
},
)),
// If the payload is valid then use it. The local EE failed
@ -933,6 +883,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
BlockProposalContents::Payload {
payload: relay.data.message.header,
block_value: relay.data.message.value,
_phantom: PhantomData::default(),
},
)),
Err(reason) => {
@ -1101,24 +1052,6 @@ impl<T: EthSpec> ExecutionLayer<T> {
}
};
let blob_fut = async {
match current_fork {
ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => {
None
}
ForkName::Eip4844 => {
debug!(
self.log(),
"Issuing engine_getBlobsBundle";
"suggested_fee_recipient" => ?payload_attributes.suggested_fee_recipient(),
"prev_randao" => ?payload_attributes.prev_randao(),
"timestamp" => payload_attributes.timestamp(),
"parent_hash" => ?parent_hash,
);
Some(engine.api.get_blobs_bundle_v1::<T>(payload_id).await)
}
}
};
let payload_fut = async {
debug!(
self.log(),
@ -1130,7 +1063,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
);
engine.api.get_payload::<T>(current_fork, payload_id).await
};
let (blob, payload_response) = tokio::join!(blob_fut, payload_fut);
let payload_response = payload_fut.await;
let (execution_payload, block_value) = payload_response.map(|payload_response| {
if payload_response.execution_payload_ref().fee_recipient() != payload_attributes.suggested_fee_recipient() {
error!(
@ -1154,20 +1087,11 @@ impl<T: EthSpec> ExecutionLayer<T> {
}
payload_response.into()
})?;
if let Some(blob) = blob.transpose()? {
// FIXME(sean) cache blobs
Ok(BlockProposalContents::PayloadAndBlobs {
payload: execution_payload.into(),
block_value,
blobs: blob.blobs,
kzg_commitments: blob.kzgs,
})
} else {
Ok(BlockProposalContents::Payload {
payload: execution_payload.into(),
block_value,
})
}
Ok(BlockProposalContents::Payload {
payload: execution_payload.into(),
block_value,
_phantom: PhantomData::default(),
})
})
.await
.map_err(Box::new)
@ -1667,7 +1591,6 @@ impl<T: EthSpec> ExecutionLayer<T> {
return match fork {
ForkName::Merge => Ok(Some(ExecutionPayloadMerge::default().into())),
ForkName::Capella => Ok(Some(ExecutionPayloadCapella::default().into())),
ForkName::Eip4844 => Ok(Some(ExecutionPayloadEip4844::default().into())),
ForkName::Base | ForkName::Altair => Err(ApiError::UnsupportedForkVariant(
format!("called get_payload_by_block_hash_from_engine with {}", fork),
)),
@ -1740,34 +1663,6 @@ impl<T: EthSpec> ExecutionLayer<T> {
withdrawals,
})
}
ExecutionBlockWithTransactions::Eip4844(eip4844_block) => {
let withdrawals = VariableList::new(
eip4844_block
.withdrawals
.into_iter()
.map(Into::into)
.collect(),
)
.map_err(ApiError::DeserializeWithdrawals)?;
ExecutionPayload::Eip4844(ExecutionPayloadEip4844 {
parent_hash: eip4844_block.parent_hash,
fee_recipient: eip4844_block.fee_recipient,
state_root: eip4844_block.state_root,
receipts_root: eip4844_block.receipts_root,
logs_bloom: eip4844_block.logs_bloom,
prev_randao: eip4844_block.prev_randao,
block_number: eip4844_block.block_number,
gas_limit: eip4844_block.gas_limit,
gas_used: eip4844_block.gas_used,
timestamp: eip4844_block.timestamp,
extra_data: eip4844_block.extra_data,
base_fee_per_gas: eip4844_block.base_fee_per_gas,
excess_data_gas: eip4844_block.excess_data_gas,
block_hash: eip4844_block.block_hash,
transactions,
withdrawals,
})
}
};
Ok(Some(payload))

View File

@ -13,8 +13,8 @@ use std::collections::HashMap;
use tree_hash::TreeHash;
use tree_hash_derive::TreeHash;
use types::{
EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella,
ExecutionPayloadEip4844, ExecutionPayloadMerge, ForkName, Hash256, Uint256,
EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge,
ForkName, Hash256, Uint256,
};
const GAS_LIMIT: u64 = 16384;
@ -118,7 +118,6 @@ pub struct ExecutionBlockGenerator<T: EthSpec> {
* Post-merge fork triggers
*/
pub shanghai_time: Option<u64>, // withdrawals
pub eip4844_time: Option<u64>, // 4844
}
impl<T: EthSpec> ExecutionBlockGenerator<T> {
@ -127,7 +126,6 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
terminal_block_number: u64,
terminal_block_hash: ExecutionBlockHash,
shanghai_time: Option<u64>,
eip4844_time: Option<u64>,
) -> Self {
let mut gen = Self {
head_block: <_>::default(),
@ -141,7 +139,6 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
next_payload_id: 0,
payload_ids: <_>::default(),
shanghai_time,
eip4844_time,
};
gen.insert_pow_block(0).unwrap();
@ -174,12 +171,9 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
}
pub fn get_fork_at_timestamp(&self, timestamp: u64) -> ForkName {
match self.eip4844_time {
Some(fork_time) if timestamp >= fork_time => ForkName::Eip4844,
_ => match self.shanghai_time {
Some(fork_time) if timestamp >= fork_time => ForkName::Capella,
_ => ForkName::Merge,
},
match self.shanghai_time {
Some(fork_time) if timestamp >= fork_time => ForkName::Capella,
_ => ForkName::Merge,
}
}
@ -490,67 +484,42 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
block_hash: ExecutionBlockHash::zero(),
transactions: vec![].into(),
}),
PayloadAttributes::V2(pa) => {
match self.get_fork_at_timestamp(pa.timestamp) {
ForkName::Merge => ExecutionPayload::Merge(ExecutionPayloadMerge {
parent_hash: forkchoice_state.head_block_hash,
fee_recipient: pa.suggested_fee_recipient,
receipts_root: Hash256::repeat_byte(42),
state_root: Hash256::repeat_byte(43),
logs_bloom: vec![0; 256].into(),
prev_randao: pa.prev_randao,
block_number: parent.block_number() + 1,
gas_limit: GAS_LIMIT,
gas_used: GAS_USED,
timestamp: pa.timestamp,
extra_data: "block gen was here".as_bytes().to_vec().into(),
base_fee_per_gas: Uint256::one(),
block_hash: ExecutionBlockHash::zero(),
transactions: vec![].into(),
}),
ForkName::Capella => {
ExecutionPayload::Capella(ExecutionPayloadCapella {
parent_hash: forkchoice_state.head_block_hash,
fee_recipient: pa.suggested_fee_recipient,
receipts_root: Hash256::repeat_byte(42),
state_root: Hash256::repeat_byte(43),
logs_bloom: vec![0; 256].into(),
prev_randao: pa.prev_randao,
block_number: parent.block_number() + 1,
gas_limit: GAS_LIMIT,
gas_used: GAS_USED,
timestamp: pa.timestamp,
extra_data: "block gen was here".as_bytes().to_vec().into(),
base_fee_per_gas: Uint256::one(),
block_hash: ExecutionBlockHash::zero(),
transactions: vec![].into(),
withdrawals: pa.withdrawals.clone().into(),
})
}
ForkName::Eip4844 => {
ExecutionPayload::Eip4844(ExecutionPayloadEip4844 {
parent_hash: forkchoice_state.head_block_hash,
fee_recipient: pa.suggested_fee_recipient,
receipts_root: Hash256::repeat_byte(42),
state_root: Hash256::repeat_byte(43),
logs_bloom: vec![0; 256].into(),
prev_randao: pa.prev_randao,
block_number: parent.block_number() + 1,
gas_limit: GAS_LIMIT,
gas_used: GAS_USED,
timestamp: pa.timestamp,
extra_data: "block gen was here".as_bytes().to_vec().into(),
base_fee_per_gas: Uint256::one(),
// FIXME(4844): maybe this should be set to something?
excess_data_gas: Uint256::one(),
block_hash: ExecutionBlockHash::zero(),
transactions: vec![].into(),
withdrawals: pa.withdrawals.clone().into(),
})
}
_ => unreachable!(),
}
}
PayloadAttributes::V2(pa) => match self.get_fork_at_timestamp(pa.timestamp) {
ForkName::Merge => ExecutionPayload::Merge(ExecutionPayloadMerge {
parent_hash: forkchoice_state.head_block_hash,
fee_recipient: pa.suggested_fee_recipient,
receipts_root: Hash256::repeat_byte(42),
state_root: Hash256::repeat_byte(43),
logs_bloom: vec![0; 256].into(),
prev_randao: pa.prev_randao,
block_number: parent.block_number() + 1,
gas_limit: GAS_LIMIT,
gas_used: GAS_USED,
timestamp: pa.timestamp,
extra_data: "block gen was here".as_bytes().to_vec().into(),
base_fee_per_gas: Uint256::one(),
block_hash: ExecutionBlockHash::zero(),
transactions: vec![].into(),
}),
ForkName::Capella => ExecutionPayload::Capella(ExecutionPayloadCapella {
parent_hash: forkchoice_state.head_block_hash,
fee_recipient: pa.suggested_fee_recipient,
receipts_root: Hash256::repeat_byte(42),
state_root: Hash256::repeat_byte(43),
logs_bloom: vec![0; 256].into(),
prev_randao: pa.prev_randao,
block_number: parent.block_number() + 1,
gas_limit: GAS_LIMIT,
gas_used: GAS_USED,
timestamp: pa.timestamp,
extra_data: "block gen was here".as_bytes().to_vec().into(),
base_fee_per_gas: Uint256::one(),
block_hash: ExecutionBlockHash::zero(),
transactions: vec![].into(),
withdrawals: pa.withdrawals.clone().into(),
}),
_ => unreachable!(),
},
};
*execution_payload.block_hash_mut() =
@ -641,7 +610,6 @@ mod test {
TERMINAL_BLOCK,
ExecutionBlockHash::zero(),
None,
None,
);
for i in 0..=TERMINAL_BLOCK {

View File

@ -223,7 +223,6 @@ pub async fn handle_rpc<T: EthSpec>(
})
.unwrap()
}
_ => unreachable!(),
}),
_ => unreachable!(),
}

View File

@ -405,7 +405,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> {
let payload_attributes = match fork {
ForkName::Merge => PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, None),
// the withdrawals root is filled in by operations
ForkName::Capella | ForkName::Eip4844 => {
ForkName::Capella => {
PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, Some(vec![]))
}
ForkName::Base | ForkName::Altair => {
@ -452,7 +452,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> {
value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?,
public_key: self.builder_sk.public_key(),
}),
ForkName::Base | ForkName::Altair | ForkName::Eip4844 => {
ForkName::Base | ForkName::Altair => {
return Err(BlindedBlockProviderError::Custom(format!(
"Unsupported fork: {}",
fork

View File

@ -29,7 +29,6 @@ impl<T: EthSpec> MockExecutionLayer<T> {
DEFAULT_TERMINAL_BLOCK,
None,
None,
None,
Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()),
spec,
None,
@ -41,7 +40,6 @@ impl<T: EthSpec> MockExecutionLayer<T> {
executor: TaskExecutor,
terminal_block: u64,
shanghai_time: Option<u64>,
eip4844_time: Option<u64>,
builder_threshold: Option<u128>,
jwt_key: Option<JwtKey>,
spec: ChainSpec,
@ -57,7 +55,6 @@ impl<T: EthSpec> MockExecutionLayer<T> {
terminal_block,
spec.terminal_block_hash,
shanghai_time,
eip4844_time,
);
let url = SensitiveUrl::parse(&server.url()).unwrap();

View File

@ -58,7 +58,6 @@ pub struct MockExecutionConfig {
pub terminal_block: u64,
pub terminal_block_hash: ExecutionBlockHash,
pub shanghai_time: Option<u64>,
pub eip4844_time: Option<u64>,
}
impl Default for MockExecutionConfig {
@ -70,7 +69,6 @@ impl Default for MockExecutionConfig {
terminal_block_hash: ExecutionBlockHash::zero(),
server_config: Config::default(),
shanghai_time: None,
eip4844_time: None,
}
}
}
@ -91,7 +89,6 @@ impl<T: EthSpec> MockServer<T> {
DEFAULT_TERMINAL_BLOCK,
ExecutionBlockHash::zero(),
None, // FIXME(capella): should this be the default?
None, // FIXME(eip4844): should this be the default?
)
}
@ -103,7 +100,6 @@ impl<T: EthSpec> MockServer<T> {
terminal_block_hash,
server_config,
shanghai_time,
eip4844_time,
} = config;
let last_echo_request = Arc::new(RwLock::new(None));
let preloaded_responses = Arc::new(Mutex::new(vec![]));
@ -112,7 +108,6 @@ impl<T: EthSpec> MockServer<T> {
terminal_block,
terminal_block_hash,
shanghai_time,
eip4844_time,
);
let ctx: Arc<Context<T>> = Arc::new(Context {
@ -171,7 +166,6 @@ impl<T: EthSpec> MockServer<T> {
terminal_block: u64,
terminal_block_hash: ExecutionBlockHash,
shanghai_time: Option<u64>,
eip4844_time: Option<u64>,
) -> Self {
Self::new_with_config(
handle,
@ -182,7 +176,6 @@ impl<T: EthSpec> MockServer<T> {
terminal_block,
terminal_block_hash,
shanghai_time,
eip4844_time,
},
)
}

View File

@ -1123,9 +1123,7 @@ pub fn serve<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| async move {
// need to have cached the blob sidecar somewhere in the beacon chain
// to publish
publish_blocks::publish_block(None, block, None, chain, &network_tx, log)
publish_blocks::publish_block(None, block, chain, &network_tx, log)
.await
.map(|()| warp::reply())
},

View File

@ -41,16 +41,4 @@ lazy_static::lazy_static! {
"http_api_block_published_very_late_total",
"The count of times a block was published beyond the attestation deadline"
);
pub static ref HTTP_API_BLOB_BROADCAST_DELAY_TIMES: Result<Histogram> = try_create_histogram(
"http_api_blob_broadcast_delay_times",
"Time between start of the slot and when the blob was broadcast"
);
pub static ref HTTP_API_BLOB_PUBLISHED_LATE_TOTAL: Result<IntCounter> = try_create_int_counter(
"http_api_blob_published_late_total",
"The count of times a blob was published beyond more than half way to the attestation deadline"
);
pub static ref HTTP_API_BLOB_PUBLISHED_VERY_LATE_TOTAL: Result<IntCounter> = try_create_int_counter(
"http_api_blob_published_very_late_total",
"The count of times a blob was published beyond the attestation deadline"
);
}

View File

@ -3,7 +3,7 @@ use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now};
use beacon_chain::{
BeaconChain, BeaconChainTypes, BlockError, CountUnrealized, NotifyExecutionLayer,
};
use lighthouse_network::{PubsubMessage, SignedBeaconBlockAndBlobsSidecar};
use lighthouse_network::PubsubMessage;
use network::NetworkMessage;
use slog::{debug, error, info, warn, Logger};
use slot_clock::SlotClock;
@ -11,8 +11,8 @@ use std::sync::Arc;
use tokio::sync::mpsc::UnboundedSender;
use tree_hash::TreeHash;
use types::{
AbstractExecPayload, BlindedPayload, BlobsSidecar, EthSpec, ExecPayload, ExecutionBlockHash,
FullPayload, Hash256, SignedBeaconBlock,
AbstractExecPayload, BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, FullPayload,
Hash256, SignedBeaconBlock,
};
use warp::Rejection;
@ -20,7 +20,6 @@ use warp::Rejection;
pub async fn publish_block<T: BeaconChainTypes>(
block_root: Option<Hash256>,
block: Arc<SignedBeaconBlock<T::EthSpec>>,
blobs_sidecar: Option<Arc<BlobsSidecar<T::EthSpec>>>,
chain: Arc<BeaconChain<T>>,
network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger,
@ -36,22 +35,7 @@ pub async fn publish_block<T: BeaconChainTypes>(
// Send the block, regardless of whether or not it is valid. The API
// specification is very clear that this is the desired behaviour.
let message = match &*block {
SignedBeaconBlock::Eip4844(block) => {
if let Some(sidecar) = blobs_sidecar {
PubsubMessage::BeaconBlockAndBlobsSidecars(Arc::new(
SignedBeaconBlockAndBlobsSidecar {
beacon_block: block.clone(),
blobs_sidecar: (*sidecar).clone(),
},
))
} else {
//TODO(pawan): return an empty sidecar instead
return Err(warp_utils::reject::broadcast_without_import(String::new()));
}
}
_ => PubsubMessage::BeaconBlock(block.clone()),
};
let message = PubsubMessage::BeaconBlock(block.clone());
crate::publish_pubsub_message(network_tx, message)?;
// Determine the delay after the start of the slot, register it with metrics.
@ -166,7 +150,6 @@ pub async fn publish_blinded_block<T: BeaconChainTypes>(
publish_block::<T>(
Some(block_root),
Arc::new(full_block),
None,
chain,
network_tx,
log,

View File

@ -305,9 +305,7 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc<ForkContext>) -> Gos
) -> Vec<u8> {
let topic_bytes = message.topic.as_str().as_bytes();
match fork_context.current_fork() {
// according to: https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#the-gossip-domain-gossipsub
// the derivation of the message-id remains the same in the merge and for eip 4844.
ForkName::Altair | ForkName::Merge | ForkName::Capella | ForkName::Eip4844 => {
ForkName::Altair | ForkName::Merge | ForkName::Capella => {
let topic_len_bytes = topic_bytes.len().to_le_bytes();
let mut vec = Vec::with_capacity(
prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(),

View File

@ -15,7 +15,6 @@ pub mod peer_manager;
pub mod rpc;
pub mod types;
pub use crate::types::SignedBeaconBlockAndBlobsSidecar;
pub use config::gossip_max_size;
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};

View File

@ -534,7 +534,6 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
Protocol::Ping => PeerAction::MidToleranceError,
Protocol::BlocksByRange => PeerAction::MidToleranceError,
Protocol::BlocksByRoot => PeerAction::MidToleranceError,
Protocol::BlobsByRange => PeerAction::MidToleranceError,
Protocol::LightClientBootstrap => PeerAction::LowToleranceError,
Protocol::Goodbye => PeerAction::LowToleranceError,
Protocol::MetaData => PeerAction::LowToleranceError,
@ -551,7 +550,6 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
Protocol::Ping => PeerAction::Fatal,
Protocol::BlocksByRange => return,
Protocol::BlocksByRoot => return,
Protocol::BlobsByRange => return,
Protocol::Goodbye => return,
Protocol::LightClientBootstrap => return,
Protocol::MetaData => PeerAction::LowToleranceError,
@ -568,7 +566,6 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
Protocol::Ping => PeerAction::LowToleranceError,
Protocol::BlocksByRange => PeerAction::MidToleranceError,
Protocol::BlocksByRoot => PeerAction::MidToleranceError,
Protocol::BlobsByRange => PeerAction::MidToleranceError,
Protocol::LightClientBootstrap => return,
Protocol::Goodbye => return,
Protocol::MetaData => return,

View File

@ -194,19 +194,16 @@ mod tests {
let altair_fork_epoch = Epoch::new(1);
let merge_fork_epoch = Epoch::new(2);
let capella_fork_epoch = Epoch::new(3);
let eip4844_fork_epoch = Epoch::new(4);
chain_spec.altair_fork_epoch = Some(altair_fork_epoch);
chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch);
chain_spec.capella_fork_epoch = Some(capella_fork_epoch);
chain_spec.eip4844_fork_epoch = Some(eip4844_fork_epoch);
let current_slot = match fork_name {
ForkName::Base => Slot::new(0),
ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()),
ForkName::Merge => merge_fork_epoch.start_slot(Spec::slots_per_epoch()),
ForkName::Capella => capella_fork_epoch.start_slot(Spec::slots_per_epoch()),
ForkName::Eip4844 => eip4844_fork_epoch.start_slot(Spec::slots_per_epoch()),
};
ForkContext::new::<Spec>(current_slot, Hash256::zero(), &chain_spec)
}

View File

@ -17,9 +17,8 @@ use std::sync::Arc;
use tokio_util::codec::{Decoder, Encoder};
use types::light_client_bootstrap::LightClientBootstrap;
use types::{
BlobsSidecar, EthSpec, ForkContext, ForkName, Hash256, SignedBeaconBlock,
SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella,
SignedBeaconBlockEip4844, SignedBeaconBlockMerge,
EthSpec, ForkContext, ForkName, Hash256, SignedBeaconBlock, SignedBeaconBlockAltair,
SignedBeaconBlockBase, SignedBeaconBlockCapella, SignedBeaconBlockMerge,
};
use unsigned_varint::codec::Uvi;
@ -72,7 +71,6 @@ impl<TSpec: EthSpec> Encoder<RPCCodedResponse<TSpec>> for SSZSnappyInboundCodec<
RPCResponse::Status(res) => res.as_ssz_bytes(),
RPCResponse::BlocksByRange(res) => res.as_ssz_bytes(),
RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(),
RPCResponse::BlobsByRange(res) => res.as_ssz_bytes(),
RPCResponse::LightClientBootstrap(res) => res.as_ssz_bytes(),
RPCResponse::Pong(res) => res.data.as_ssz_bytes(),
RPCResponse::MetaData(res) =>
@ -232,7 +230,6 @@ impl<TSpec: EthSpec> Encoder<OutboundRequest<TSpec>> for SSZSnappyOutboundCodec<
OutboundRequest::Goodbye(req) => req.as_ssz_bytes(),
OutboundRequest::BlocksByRange(req) => req.as_ssz_bytes(),
OutboundRequest::BlocksByRoot(req) => req.block_roots.as_ssz_bytes(),
OutboundRequest::BlobsByRange(req) => req.as_ssz_bytes(),
OutboundRequest::Ping(req) => req.as_ssz_bytes(),
OutboundRequest::MetaData(_) => return Ok(()), // no metadata to encode
OutboundRequest::LightClientBootstrap(req) => req.as_ssz_bytes(),
@ -413,10 +410,6 @@ fn context_bytes<T: EthSpec>(
return match **ref_box_block {
// NOTE: If you are adding another fork type here, be sure to modify the
// `fork_context.to_context_bytes()` function to support it as well!
SignedBeaconBlock::Eip4844 { .. } => {
// Eip4844 context being `None` implies that "merge never happened".
fork_context.to_context_bytes(ForkName::Eip4844)
}
SignedBeaconBlock::Capella { .. } => {
// Capella context being `None` implies that "merge never happened".
fork_context.to_context_bytes(ForkName::Capella)
@ -483,9 +476,6 @@ fn handle_v1_request<T: EthSpec>(
Protocol::BlocksByRoot => Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest {
block_roots: VariableList::from_ssz_bytes(decoded_buffer)?,
}))),
Protocol::BlobsByRange => Ok(Some(InboundRequest::BlobsByRange(
BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?,
))),
Protocol::Ping => Ok(Some(InboundRequest::Ping(Ping {
data: u64::from_ssz_bytes(decoded_buffer)?,
}))),
@ -522,9 +512,6 @@ fn handle_v2_request<T: EthSpec>(
Protocol::BlocksByRoot => Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest {
block_roots: VariableList::from_ssz_bytes(decoded_buffer)?,
}))),
Protocol::BlobsByRange => Ok(Some(InboundRequest::BlobsByRange(
BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?,
))),
// MetaData requests return early from InboundUpgrade and do not reach the decoder.
// Handle this case just for completeness.
Protocol::MetaData => {
@ -562,7 +549,6 @@ fn handle_v1_response<T: EthSpec>(
Protocol::BlocksByRoot => Ok(Some(RPCResponse::BlocksByRoot(Arc::new(
SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?),
)))),
Protocol::BlobsByRange => Err(RPCError::InvalidData("blobs by range via v1".to_string())),
Protocol::Ping => Ok(Some(RPCResponse::Pong(Ping {
data: u64::from_ssz_bytes(decoded_buffer)?,
}))),
@ -619,11 +605,6 @@ fn handle_v2_response<T: EthSpec>(
decoded_buffer,
)?),
)))),
ForkName::Eip4844 => Ok(Some(RPCResponse::BlocksByRange(Arc::new(
SignedBeaconBlock::Eip4844(SignedBeaconBlockEip4844::from_ssz_bytes(
decoded_buffer,
)?),
)))),
},
Protocol::BlocksByRoot => match fork_name {
ForkName::Altair => Ok(Some(RPCResponse::BlocksByRoot(Arc::new(
@ -644,20 +625,6 @@ fn handle_v2_response<T: EthSpec>(
decoded_buffer,
)?),
)))),
ForkName::Eip4844 => Ok(Some(RPCResponse::BlocksByRoot(Arc::new(
SignedBeaconBlock::Eip4844(SignedBeaconBlockEip4844::from_ssz_bytes(
decoded_buffer,
)?),
)))),
},
Protocol::BlobsByRange => match fork_name {
ForkName::Eip4844 => Ok(Some(RPCResponse::BlobsByRange(Arc::new(
BlobsSidecar::from_ssz_bytes(decoded_buffer)?,
)))),
_ => Err(RPCError::ErrorResponse(
RPCResponseErrorCode::InvalidRequest,
"Invalid forkname for blobsbyrange".to_string(),
)),
},
_ => Err(RPCError::ErrorResponse(
RPCResponseErrorCode::InvalidRequest,
@ -708,19 +675,16 @@ mod tests {
let altair_fork_epoch = Epoch::new(1);
let merge_fork_epoch = Epoch::new(2);
let capella_fork_epoch = Epoch::new(3);
let eip4844_fork_epoch = Epoch::new(4);
chain_spec.altair_fork_epoch = Some(altair_fork_epoch);
chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch);
chain_spec.capella_fork_epoch = Some(capella_fork_epoch);
chain_spec.eip4844_fork_epoch = Some(eip4844_fork_epoch);
let current_slot = match fork_name {
ForkName::Base => Slot::new(0),
ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()),
ForkName::Merge => merge_fork_epoch.start_slot(Spec::slots_per_epoch()),
ForkName::Capella => capella_fork_epoch.start_slot(Spec::slots_per_epoch()),
ForkName::Eip4844 => eip4844_fork_epoch.start_slot(Spec::slots_per_epoch()),
};
ForkContext::new::<Spec>(current_slot, Hash256::zero(), &chain_spec)
}
@ -924,9 +888,6 @@ mod tests {
OutboundRequest::BlocksByRoot(bbroot) => {
assert_eq!(decoded, InboundRequest::BlocksByRoot(bbroot))
}
OutboundRequest::BlobsByRange(blbrange) => {
assert_eq!(decoded, InboundRequest::BlobsByRange(blbrange))
}
OutboundRequest::Ping(ping) => {
assert_eq!(decoded, InboundRequest::Ping(ping))
}

View File

@ -67,7 +67,6 @@ pub struct OutboundRateLimiterConfig {
pub(super) goodbye_quota: Quota,
pub(super) blocks_by_range_quota: Quota,
pub(super) blocks_by_root_quota: Quota,
pub(super) blobs_by_range_quota: Quota,
}
impl OutboundRateLimiterConfig {
@ -78,8 +77,6 @@ impl OutboundRateLimiterConfig {
pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota =
Quota::n_every(methods::MAX_REQUEST_BLOCKS, 10);
pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10);
pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota =
Quota::n_every(methods::MAX_REQUEST_BLOBS_SIDECARS, 10);
}
impl Default for OutboundRateLimiterConfig {
@ -91,7 +88,6 @@ impl Default for OutboundRateLimiterConfig {
goodbye_quota: Self::DEFAULT_GOODBYE_QUOTA,
blocks_by_range_quota: Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA,
blocks_by_root_quota: Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA,
blobs_by_range_quota: Self::DEFAULT_BLOBS_BY_RANGE_QUOTA,
}
}
}
@ -115,7 +111,6 @@ impl Debug for OutboundRateLimiterConfig {
.field("goodbye", fmt_q!(&self.goodbye_quota))
.field("blocks_by_range", fmt_q!(&self.blocks_by_range_quota))
.field("blocks_by_root", fmt_q!(&self.blocks_by_root_quota))
.field("blobs_by_range", fmt_q!(&self.blobs_by_range_quota))
.finish()
}
}
@ -134,7 +129,6 @@ impl FromStr for OutboundRateLimiterConfig {
let mut goodbye_quota = None;
let mut blocks_by_range_quota = None;
let mut blocks_by_root_quota = None;
let mut blobs_by_range_quota = None;
for proto_def in s.split(';') {
let ProtocolQuota { protocol, quota } = proto_def.parse()?;
let quota = Some(quota);
@ -145,7 +139,6 @@ impl FromStr for OutboundRateLimiterConfig {
Protocol::BlocksByRoot => blocks_by_root_quota = blocks_by_root_quota.or(quota),
Protocol::Ping => ping_quota = ping_quota.or(quota),
Protocol::MetaData => meta_data_quota = meta_data_quota.or(quota),
Protocol::BlobsByRange => blobs_by_range_quota = blobs_by_range_quota.or(quota),
Protocol::LightClientBootstrap => return Err("Lighthouse does not send LightClientBootstrap requests. Quota should not be set."),
}
}
@ -158,8 +151,6 @@ impl FromStr for OutboundRateLimiterConfig {
.unwrap_or(Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA),
blocks_by_root_quota: blocks_by_root_quota
.unwrap_or(Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA),
blobs_by_range_quota: blobs_by_range_quota
.unwrap_or(Self::DEFAULT_BLOBS_BY_RANGE_QUOTA),
})
}
}

View File

@ -13,8 +13,7 @@ use std::sync::Arc;
use strum::IntoStaticStr;
use superstruct::superstruct;
use types::{
blobs_sidecar::BlobsSidecar, light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec,
Hash256, SignedBeaconBlock, Slot,
light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot,
};
/// Maximum number of blocks in a single request.
@ -25,9 +24,6 @@ pub const MAX_REQUEST_BLOCKS: u64 = 1024;
pub type MaxErrorLen = U256;
pub const MAX_ERROR_LEN: u64 = 256;
pub type MaxRequestBlobsSidecars = U1024;
pub const MAX_REQUEST_BLOBS_SIDECARS: u64 = 1024;
/// Wrapper over SSZ List to represent error message in rpc responses.
#[derive(Debug, Clone)]
pub struct ErrorType(pub VariableList<u8, MaxErrorLen>);
@ -210,16 +206,6 @@ pub struct BlocksByRangeRequest {
pub count: u64,
}
/// Request a number of beacon blobs from a peer.
#[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct BlobsByRangeRequest {
/// The starting slot to request blobs.
pub start_slot: u64,
/// The number of blobs from the start slot.
pub count: u64,
}
/// Request a number of beacon block roots from a peer.
#[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct OldBlocksByRangeRequest {
@ -259,9 +245,6 @@ pub enum RPCResponse<T: EthSpec> {
/// A response to a get BLOCKS_BY_ROOT request.
BlocksByRoot(Arc<SignedBeaconBlock<T>>),
/// A response to a get BLOBS_BY_RANGE request
BlobsByRange(Arc<BlobsSidecar<T>>),
/// A response to a get LIGHTCLIENT_BOOTSTRAP request.
LightClientBootstrap(LightClientBootstrap<T>),
@ -280,9 +263,6 @@ pub enum ResponseTermination {
/// Blocks by root stream termination.
BlocksByRoot,
/// Blobs by range stream termination.
BlobsByRange,
}
/// The structured response containing a result/code indicating success or failure
@ -350,7 +330,6 @@ impl<T: EthSpec> RPCCodedResponse<T> {
RPCResponse::Status(_) => false,
RPCResponse::BlocksByRange(_) => true,
RPCResponse::BlocksByRoot(_) => true,
RPCResponse::BlobsByRange(_) => true,
RPCResponse::Pong(_) => false,
RPCResponse::MetaData(_) => false,
RPCResponse::LightClientBootstrap(_) => false,
@ -386,7 +365,6 @@ impl<T: EthSpec> RPCResponse<T> {
RPCResponse::Status(_) => Protocol::Status,
RPCResponse::BlocksByRange(_) => Protocol::BlocksByRange,
RPCResponse::BlocksByRoot(_) => Protocol::BlocksByRoot,
RPCResponse::BlobsByRange(_) => Protocol::BlobsByRange,
RPCResponse::Pong(_) => Protocol::Ping,
RPCResponse::MetaData(_) => Protocol::MetaData,
RPCResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap,
@ -423,9 +401,6 @@ impl<T: EthSpec> std::fmt::Display for RPCResponse<T> {
RPCResponse::BlocksByRoot(block) => {
write!(f, "BlocksByRoot: Block slot: {}", block.slot())
}
RPCResponse::BlobsByRange(blob) => {
write!(f, "BlobsByRange: Blob slot: {}", blob.beacon_block_slot)
}
RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data),
RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()),
RPCResponse::LightClientBootstrap(bootstrap) => {
@ -477,12 +452,6 @@ impl std::fmt::Display for OldBlocksByRangeRequest {
}
}
impl std::fmt::Display for BlobsByRangeRequest {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "Start Slot: {}, Count: {}", self.start_slot, self.count)
}
}
impl slog::KV for StatusMessage {
fn serialize(
&self,

View File

@ -24,7 +24,6 @@ pub(crate) use handler::HandlerErr;
pub(crate) use methods::{MetaData, MetaDataV1, MetaDataV2, Ping, RPCCodedResponse, RPCResponse};
pub(crate) use protocol::{InboundRequest, RPCProtocol};
use crate::rpc::methods::MAX_REQUEST_BLOBS_SIDECARS;
pub use handler::SubstreamId;
pub use methods::{
BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, LightClientBootstrapRequest,
@ -145,11 +144,6 @@ impl<Id: ReqId, TSpec: EthSpec> RPC<Id, TSpec> {
Duration::from_secs(10),
)
.n_every(Protocol::BlocksByRoot, 128, Duration::from_secs(10))
.n_every(
Protocol::BlobsByRange,
MAX_REQUEST_BLOBS_SIDECARS,
Duration::from_secs(10),
)
.build()
.expect("Configuration parameters are valid");
@ -345,7 +339,6 @@ where
match end {
ResponseTermination::BlocksByRange => Protocol::BlocksByRange,
ResponseTermination::BlocksByRoot => Protocol::BlocksByRoot,
ResponseTermination::BlobsByRange => Protocol::BlobsByRange,
},
),
},

View File

@ -38,7 +38,6 @@ pub enum OutboundRequest<TSpec: EthSpec> {
Goodbye(GoodbyeReason),
BlocksByRange(OldBlocksByRangeRequest),
BlocksByRoot(BlocksByRootRequest),
BlobsByRange(BlobsByRangeRequest),
LightClientBootstrap(LightClientBootstrapRequest),
Ping(Ping),
MetaData(PhantomData<TSpec>),
@ -77,11 +76,6 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy),
ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy),
],
OutboundRequest::BlobsByRange(_) => vec![ProtocolId::new(
Protocol::BlobsByRange,
Version::V1,
Encoding::SSZSnappy,
)],
OutboundRequest::Ping(_) => vec![ProtocolId::new(
Protocol::Ping,
Version::V1,
@ -106,7 +100,6 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
OutboundRequest::Goodbye(_) => 0,
OutboundRequest::BlocksByRange(req) => req.count,
OutboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64,
OutboundRequest::BlobsByRange(req) => req.count,
OutboundRequest::Ping(_) => 1,
OutboundRequest::MetaData(_) => 1,
OutboundRequest::LightClientBootstrap(_) => 1,
@ -120,7 +113,6 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
OutboundRequest::Goodbye(_) => Protocol::Goodbye,
OutboundRequest::BlocksByRange(_) => Protocol::BlocksByRange,
OutboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot,
OutboundRequest::BlobsByRange(_) => Protocol::BlobsByRange,
OutboundRequest::Ping(_) => Protocol::Ping,
OutboundRequest::MetaData(_) => Protocol::MetaData,
OutboundRequest::LightClientBootstrap(_) => Protocol::LightClientBootstrap,
@ -135,7 +127,6 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
// variants that have `multiple_responses()` can have values.
OutboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange,
OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot,
OutboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange,
OutboundRequest::LightClientBootstrap(_) => unreachable!(),
OutboundRequest::Status(_) => unreachable!(),
OutboundRequest::Goodbye(_) => unreachable!(),
@ -192,7 +183,6 @@ impl<TSpec: EthSpec> std::fmt::Display for OutboundRequest<TSpec> {
OutboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason),
OutboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req),
OutboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req),
OutboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req),
OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data),
OutboundRequest::MetaData(_) => write!(f, "MetaData request"),
OutboundRequest::LightClientBootstrap(bootstrap) => {

View File

@ -20,9 +20,8 @@ use tokio_util::{
codec::Framed,
compat::{Compat, FuturesAsyncReadCompatExt},
};
use types::BlobsSidecar;
use types::{
BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockMerge, Blob,
BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockMerge,
EmptyBlock, EthSpec, ForkContext, ForkName, Hash256, MainnetEthSpec, Signature,
SignedBeaconBlock,
};
@ -84,12 +83,6 @@ lazy_static! {
+ types::ExecutionPayload::<MainnetEthSpec>::max_execution_payload_capella_size() // adding max size of execution payload (~16gb)
+ ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field
pub static ref SIGNED_BEACON_BLOCK_EIP4844_MAX: usize = *SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD
+ types::ExecutionPayload::<MainnetEthSpec>::max_execution_payload_eip4844_size() // adding max size of execution payload (~16gb)
+ ssz::BYTES_PER_LENGTH_OFFSET // Adding the additional offsets for the `ExecutionPayload`
+ (<types::KzgCommitment as Encode>::ssz_fixed_len() * <MainnetEthSpec>::max_blobs_per_block())
+ ssz::BYTES_PER_LENGTH_OFFSET; // Length offset for the blob commitments field.
pub static ref BLOCKS_BY_ROOT_REQUEST_MIN: usize =
VariableList::<Hash256, MaxRequestBlocks>::from(Vec::<Hash256>::new())
.as_ssz_bytes()
@ -114,13 +107,6 @@ lazy_static! {
])
.as_ssz_bytes()
.len();
pub static ref BLOBS_SIDECAR_MIN: usize = BlobsSidecar::<MainnetEthSpec>::empty()
.as_ssz_bytes()
.len();
pub static ref BLOBS_SIDECAR_MAX: usize = *BLOBS_SIDECAR_MIN // Max size of variable length `blobs` field
+ (MainnetEthSpec::max_blobs_per_block() * <Blob<MainnetEthSpec> as Encode>::ssz_fixed_len());
}
/// The maximum bytes that can be sent across the RPC pre-merge.
@ -128,8 +114,6 @@ pub(crate) const MAX_RPC_SIZE: usize = 1_048_576; // 1M
/// The maximum bytes that can be sent across the RPC post-merge.
pub(crate) const MAX_RPC_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M
pub(crate) const MAX_RPC_SIZE_POST_CAPELLA: usize = 10 * 1_048_576; // 10M
// FIXME(sean) should this be increased to account for blobs?
pub(crate) const MAX_RPC_SIZE_POST_EIP4844: usize = 10 * 1_048_576; // 10M
/// The protocol prefix the RPC protocol id.
const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req";
/// Time allowed for the first byte of a request to arrive before we time out (Time To First Byte).
@ -144,7 +128,6 @@ pub fn max_rpc_size(fork_context: &ForkContext) -> usize {
ForkName::Altair | ForkName::Base => MAX_RPC_SIZE,
ForkName::Merge => MAX_RPC_SIZE_POST_MERGE,
ForkName::Capella => MAX_RPC_SIZE_POST_CAPELLA,
ForkName::Eip4844 => MAX_RPC_SIZE_POST_EIP4844,
}
}
@ -169,10 +152,6 @@ pub fn rpc_block_limits_by_fork(current_fork: ForkName) -> RpcLimits {
*SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks
*SIGNED_BEACON_BLOCK_CAPELLA_MAX, // Capella block is larger than base, altair and merge blocks
),
ForkName::Eip4844 => RpcLimits::new(
*SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks
*SIGNED_BEACON_BLOCK_EIP4844_MAX, // EIP 4844 block is larger than all prior fork blocks
),
}
}
@ -190,8 +169,6 @@ pub enum Protocol {
/// The `BlocksByRoot` protocol name.
#[strum(serialize = "beacon_blocks_by_root")]
BlocksByRoot,
/// The `BlobsByRange` protocol name.
BlobsByRange,
/// The `Ping` protocol name.
Ping,
/// The `MetaData` protocol name.
@ -327,10 +304,6 @@ impl ProtocolId {
Protocol::BlocksByRoot => {
RpcLimits::new(*BLOCKS_BY_ROOT_REQUEST_MIN, *BLOCKS_BY_ROOT_REQUEST_MAX)
}
Protocol::BlobsByRange => RpcLimits::new(
<BlobsByRangeRequest as Encode>::ssz_fixed_len(),
<BlobsByRangeRequest as Encode>::ssz_fixed_len(),
),
Protocol::Ping => RpcLimits::new(
<Ping as Encode>::ssz_fixed_len(),
<Ping as Encode>::ssz_fixed_len(),
@ -353,7 +326,6 @@ impl ProtocolId {
Protocol::Goodbye => RpcLimits::new(0, 0), // Goodbye request has no response
Protocol::BlocksByRange => rpc_block_limits_by_fork(fork_context.current_fork()),
Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork()),
Protocol::BlobsByRange => RpcLimits::new(*BLOBS_SIDECAR_MIN, *BLOBS_SIDECAR_MAX),
Protocol::Ping => RpcLimits::new(
<Ping as Encode>::ssz_fixed_len(),
<Ping as Encode>::ssz_fixed_len(),
@ -471,7 +443,6 @@ pub enum InboundRequest<TSpec: EthSpec> {
Goodbye(GoodbyeReason),
BlocksByRange(OldBlocksByRangeRequest),
BlocksByRoot(BlocksByRootRequest),
BlobsByRange(BlobsByRangeRequest),
LightClientBootstrap(LightClientBootstrapRequest),
Ping(Ping),
MetaData(PhantomData<TSpec>),
@ -488,7 +459,6 @@ impl<TSpec: EthSpec> InboundRequest<TSpec> {
InboundRequest::Goodbye(_) => 0,
InboundRequest::BlocksByRange(req) => req.count,
InboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64,
InboundRequest::BlobsByRange(req) => req.count,
InboundRequest::Ping(_) => 1,
InboundRequest::MetaData(_) => 1,
InboundRequest::LightClientBootstrap(_) => 1,
@ -502,7 +472,6 @@ impl<TSpec: EthSpec> InboundRequest<TSpec> {
InboundRequest::Goodbye(_) => Protocol::Goodbye,
InboundRequest::BlocksByRange(_) => Protocol::BlocksByRange,
InboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot,
InboundRequest::BlobsByRange(_) => Protocol::BlobsByRange,
InboundRequest::Ping(_) => Protocol::Ping,
InboundRequest::MetaData(_) => Protocol::MetaData,
InboundRequest::LightClientBootstrap(_) => Protocol::LightClientBootstrap,
@ -517,7 +486,6 @@ impl<TSpec: EthSpec> InboundRequest<TSpec> {
// variants that have `multiple_responses()` can have values.
InboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange,
InboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot,
InboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange,
InboundRequest::Status(_) => unreachable!(),
InboundRequest::Goodbye(_) => unreachable!(),
InboundRequest::Ping(_) => unreachable!(),
@ -624,7 +592,6 @@ impl<TSpec: EthSpec> std::fmt::Display for InboundRequest<TSpec> {
InboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason),
InboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req),
InboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req),
InboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req),
InboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data),
InboundRequest::MetaData(_) => write!(f, "MetaData request"),
InboundRequest::LightClientBootstrap(bootstrap) => {

View File

@ -93,8 +93,6 @@ pub struct RPCRateLimiter {
bbrange_rl: Limiter<PeerId>,
/// BlocksByRoot rate limiter.
bbroots_rl: Limiter<PeerId>,
/// BlobsByRange rate limiter.
blbrange_rl: Limiter<PeerId>,
/// LightClientBootstrap rate limiter.
lcbootstrap_rl: Limiter<PeerId>,
}
@ -123,8 +121,6 @@ pub struct RPCRateLimiterBuilder {
bbrange_quota: Option<Quota>,
/// Quota for the BlocksByRoot protocol.
bbroots_quota: Option<Quota>,
/// Quota for the BlobsByRange protocol.
blbrange_quota: Option<Quota>,
/// Quota for the LightClientBootstrap protocol.
lcbootstrap_quota: Option<Quota>,
}
@ -140,7 +136,6 @@ impl RPCRateLimiterBuilder {
Protocol::Goodbye => self.goodbye_quota = q,
Protocol::BlocksByRange => self.bbrange_quota = q,
Protocol::BlocksByRoot => self.bbroots_quota = q,
Protocol::BlobsByRange => self.blbrange_quota = q,
Protocol::LightClientBootstrap => self.lcbootstrap_quota = q,
}
self
@ -185,10 +180,6 @@ impl RPCRateLimiterBuilder {
.lcbootstrap_quota
.ok_or("LightClientBootstrap quota not specified")?;
let blbrange_quota = self
.blbrange_quota
.ok_or("BlobsByRange quota not specified")?;
// create the rate limiters
let ping_rl = Limiter::from_quota(ping_quota)?;
let metadata_rl = Limiter::from_quota(metadata_quota)?;
@ -196,7 +187,6 @@ impl RPCRateLimiterBuilder {
let goodbye_rl = Limiter::from_quota(goodbye_quota)?;
let bbroots_rl = Limiter::from_quota(bbroots_quota)?;
let bbrange_rl = Limiter::from_quota(bbrange_quota)?;
let blbrange_rl = Limiter::from_quota(blbrange_quota)?;
let lcbootstrap_rl = Limiter::from_quota(lcbootstrap_quote)?;
// check for peers to prune every 30 seconds, starting in 30 seconds
@ -211,7 +201,6 @@ impl RPCRateLimiterBuilder {
goodbye_rl,
bbroots_rl,
bbrange_rl,
blbrange_rl,
lcbootstrap_rl,
init_time: Instant::now(),
})
@ -265,7 +254,6 @@ impl RPCRateLimiter {
Protocol::Goodbye => &mut self.goodbye_rl,
Protocol::BlocksByRange => &mut self.bbrange_rl,
Protocol::BlocksByRoot => &mut self.bbroots_rl,
Protocol::BlobsByRange => &mut self.blbrange_rl,
Protocol::LightClientBootstrap => &mut self.lcbootstrap_rl,
};
check(limiter)
@ -279,7 +267,6 @@ impl RPCRateLimiter {
self.goodbye_rl.prune(time_since_start);
self.bbrange_rl.prune(time_since_start);
self.bbroots_rl.prune(time_since_start);
self.blbrange_rl.prune(time_since_start);
}
}

View File

@ -60,7 +60,6 @@ impl<Id: ReqId, TSpec: EthSpec> SelfRateLimiter<Id, TSpec> {
goodbye_quota,
blocks_by_range_quota,
blocks_by_root_quota,
blobs_by_range_quota,
} = config;
let limiter = RateLimiter::builder()
@ -70,7 +69,6 @@ impl<Id: ReqId, TSpec: EthSpec> SelfRateLimiter<Id, TSpec> {
.set_quota(Protocol::Goodbye, goodbye_quota)
.set_quota(Protocol::BlocksByRange, blocks_by_range_quota)
.set_quota(Protocol::BlocksByRoot, blocks_by_root_quota)
.set_quota(Protocol::BlobsByRange, blobs_by_range_quota)
// Manually set the LightClientBootstrap quota, since we use the same rate limiter for
// inbound and outbound requests, and LightClientBootstrap is an inbound-only
// protocol.

View File

@ -2,9 +2,8 @@ use std::sync::Arc;
use libp2p::core::connection::ConnectionId;
use types::light_client_bootstrap::LightClientBootstrap;
use types::{BlobsSidecar, EthSpec, SignedBeaconBlock};
use types::{EthSpec, SignedBeaconBlock};
use crate::rpc::methods::BlobsByRangeRequest;
use crate::rpc::{
methods::{
BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest,
@ -34,8 +33,6 @@ pub enum Request {
Status(StatusMessage),
/// A blocks by range request.
BlocksByRange(BlocksByRangeRequest),
/// A blobs by range request.
BlobsByRange(BlobsByRangeRequest),
/// A blocks by root request.
BlocksByRoot(BlocksByRootRequest),
/// A light client bootstrap request.
@ -53,7 +50,6 @@ impl<TSpec: EthSpec> std::convert::From<Request> for OutboundRequest<TSpec> {
step: 1,
})
}
Request::BlobsByRange(r) => OutboundRequest::BlobsByRange(r),
Request::LightClientBootstrap(b) => OutboundRequest::LightClientBootstrap(b),
Request::Status(s) => OutboundRequest::Status(s),
}
@ -72,8 +68,6 @@ pub enum Response<TSpec: EthSpec> {
Status(StatusMessage),
/// A response to a get BLOCKS_BY_RANGE request. A None response signals the end of the batch.
BlocksByRange(Option<Arc<SignedBeaconBlock<TSpec>>>),
/// A response to a get BLOBS_BY_RANGE request. A None response signals the end of the batch.
BlobsByRange(Option<Arc<BlobsSidecar<TSpec>>>),
/// A response to a get BLOCKS_BY_ROOT request.
BlocksByRoot(Option<Arc<SignedBeaconBlock<TSpec>>>),
/// A response to a LightClientUpdate request.
@ -91,10 +85,6 @@ impl<TSpec: EthSpec> std::convert::From<Response<TSpec>> for RPCCodedResponse<TS
Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRange(b)),
None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange),
},
Response::BlobsByRange(r) => match r {
Some(b) => RPCCodedResponse::Success(RPCResponse::BlobsByRange(b)),
None => RPCCodedResponse::StreamTermination(ResponseTermination::BlobsByRange),
},
Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)),
Response::LightClientBootstrap(b) => {
RPCCodedResponse::Success(RPCResponse::LightClientBootstrap(b))

View File

@ -20,8 +20,6 @@ pub struct GossipCache {
topic_msgs: HashMap<GossipTopic, HashMap<Vec<u8>, Key>>,
/// Timeout for blocks.
beacon_block: Option<Duration>,
/// Timeout for blobs.
beacon_block_and_blobs_sidecar: Option<Duration>,
/// Timeout for aggregate attestations.
aggregates: Option<Duration>,
/// Timeout for attestations.
@ -49,8 +47,6 @@ pub struct GossipCacheBuilder {
default_timeout: Option<Duration>,
/// Timeout for blocks.
beacon_block: Option<Duration>,
/// Timeout for blob sidecars.
beacon_block_and_blobs_sidecar: Option<Duration>,
/// Timeout for aggregate attestations.
aggregates: Option<Duration>,
/// Timeout for attestations.
@ -151,7 +147,6 @@ impl GossipCacheBuilder {
let GossipCacheBuilder {
default_timeout,
beacon_block,
beacon_block_and_blobs_sidecar,
aggregates,
attestation,
voluntary_exit,
@ -167,7 +162,6 @@ impl GossipCacheBuilder {
expirations: DelayQueue::default(),
topic_msgs: HashMap::default(),
beacon_block: beacon_block.or(default_timeout),
beacon_block_and_blobs_sidecar: beacon_block_and_blobs_sidecar.or(default_timeout),
aggregates: aggregates.or(default_timeout),
attestation: attestation.or(default_timeout),
voluntary_exit: voluntary_exit.or(default_timeout),
@ -193,7 +187,6 @@ impl GossipCache {
pub fn insert(&mut self, topic: GossipTopic, data: Vec<u8>) {
let expire_timeout = match topic.kind() {
GossipKind::BeaconBlock => self.beacon_block,
GossipKind::BeaconBlocksAndBlobsSidecar => self.beacon_block_and_blobs_sidecar,
GossipKind::BeaconAggregateAndProof => self.aggregates,
GossipKind::Attestation(_) => self.attestation,
GossipKind::VoluntaryExit => self.voluntary_exit,

View File

@ -998,9 +998,6 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
Request::BlocksByRoot { .. } => {
metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"])
}
Request::BlobsByRange { .. } => {
metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_range"])
}
}
NetworkEvent::RequestReceived {
peer_id,
@ -1264,14 +1261,6 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
);
Some(event)
}
InboundRequest::BlobsByRange(req) => {
let event = self.build_request(
peer_request_id,
peer_id,
Request::BlobsByRange(req),
);
Some(event)
}
InboundRequest::LightClientBootstrap(req) => {
let event = self.build_request(
peer_request_id,
@ -1304,9 +1293,6 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
RPCResponse::BlocksByRange(resp) => {
self.build_response(id, peer_id, Response::BlocksByRange(Some(resp)))
}
RPCResponse::BlobsByRange(resp) => {
self.build_response(id, peer_id, Response::BlobsByRange(Some(resp)))
}
RPCResponse::BlocksByRoot(resp) => {
self.build_response(id, peer_id, Response::BlocksByRoot(Some(resp)))
}
@ -1320,7 +1306,6 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
let response = match termination {
ResponseTermination::BlocksByRange => Response::BlocksByRange(None),
ResponseTermination::BlocksByRoot => Response::BlocksByRoot(None),
ResponseTermination::BlobsByRange => Response::BlobsByRange(None),
};
self.build_response(id, peer_id, response)
}

View File

@ -13,7 +13,7 @@ pub type EnrSyncCommitteeBitfield<T> = BitVector<<T as EthSpec>::SyncCommitteeSu
pub type Enr = discv5::enr::Enr<discv5::enr::CombinedKey>;
pub use globals::NetworkGlobals;
pub use pubsub::{PubsubMessage, SignedBeaconBlockAndBlobsSidecar, SnappyTransform};
pub use pubsub::{PubsubMessage, SnappyTransform};
pub use subnet::{Subnet, SubnetDiscovery};
pub use sync_state::{BackFillState, SyncState};
pub use topics::{

View File

@ -3,39 +3,23 @@
use crate::types::{GossipEncoding, GossipKind, GossipTopic};
use crate::TopicHash;
use libp2p::gossipsub::{DataTransform, GossipsubMessage, RawGossipsubMessage};
use serde_derive::{Deserialize, Serialize};
use snap::raw::{decompress_len, Decoder, Encoder};
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use std::boxed::Box;
use std::io::{Error, ErrorKind};
use std::sync::Arc;
use tree_hash_derive::TreeHash;
use types::{
Attestation, AttesterSlashing, BlobsSidecar, EthSpec, ForkContext, ForkName,
LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing,
SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase,
SignedBeaconBlockCapella, SignedBeaconBlockEip4844, SignedBeaconBlockMerge,
SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SubnetId,
SyncCommitteeMessage, SyncSubnetId,
Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, LightClientFinalityUpdate,
LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock,
SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella,
SignedBeaconBlockMerge, SignedBlsToExecutionChange, SignedContributionAndProof,
SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId,
};
/// TODO(pawan): move this to consensus/types? strictly not a consensus type
#[derive(Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, PartialEq)]
#[serde(bound = "T: EthSpec")]
pub struct SignedBeaconBlockAndBlobsSidecar<T: EthSpec> {
// TODO(pawan): switch to a SignedBeaconBlock and use ssz offsets for decoding to make this
// future proof?
pub beacon_block: SignedBeaconBlockEip4844<T>,
pub blobs_sidecar: BlobsSidecar<T>,
}
#[derive(Debug, Clone, PartialEq)]
pub enum PubsubMessage<T: EthSpec> {
/// Gossipsub message providing notification of a new block.
BeaconBlock(Arc<SignedBeaconBlock<T>>),
/// Gossipsub message providing notification of a new SignedBeaconBlock coupled with a blobs sidecar.
BeaconBlockAndBlobsSidecars(Arc<SignedBeaconBlockAndBlobsSidecar<T>>),
/// Gossipsub message providing notification of an aggregate attestation and associated proof.
AggregateAndProofAttestation(Box<SignedAggregateAndProof<T>>),
/// Gossipsub message providing notification of a raw un-aggregated attestation with its shard id.
@ -129,9 +113,6 @@ impl<T: EthSpec> PubsubMessage<T> {
pub fn kind(&self) -> GossipKind {
match self {
PubsubMessage::BeaconBlock(_) => GossipKind::BeaconBlock,
PubsubMessage::BeaconBlockAndBlobsSidecars(_) => {
GossipKind::BeaconBlocksAndBlobsSidecar
}
PubsubMessage::AggregateAndProofAttestation(_) => GossipKind::BeaconAggregateAndProof,
PubsubMessage::Attestation(attestation_data) => {
GossipKind::Attestation(attestation_data.0)
@ -198,12 +179,6 @@ impl<T: EthSpec> PubsubMessage<T> {
SignedBeaconBlockMerge::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
),
Some(ForkName::Eip4844) => {
return Err(
"beacon_block topic is not used from eip4844 fork onwards"
.to_string(),
)
}
Some(ForkName::Capella) => SignedBeaconBlock::<T>::Capella(
SignedBeaconBlockCapella::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
@ -217,28 +192,6 @@ impl<T: EthSpec> PubsubMessage<T> {
};
Ok(PubsubMessage::BeaconBlock(Arc::new(beacon_block)))
}
GossipKind::BeaconBlocksAndBlobsSidecar => {
match fork_context.from_context_bytes(gossip_topic.fork_digest) {
Some(ForkName::Eip4844) => {
let block_and_blobs_sidecar =
SignedBeaconBlockAndBlobsSidecar::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?;
Ok(PubsubMessage::BeaconBlockAndBlobsSidecars(Arc::new(
block_and_blobs_sidecar,
)))
}
Some(
ForkName::Base
| ForkName::Altair
| ForkName::Merge
| ForkName::Capella,
)
| None => Err(format!(
"beacon_blobs_and_sidecar topic invalid for given fork digest {:?}",
gossip_topic.fork_digest
)),
}
}
GossipKind::VoluntaryExit => {
let voluntary_exit = SignedVoluntaryExit::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?;
@ -307,7 +260,6 @@ impl<T: EthSpec> PubsubMessage<T> {
// messages for us.
match &self {
PubsubMessage::BeaconBlock(data) => data.as_ssz_bytes(),
PubsubMessage::BeaconBlockAndBlobsSidecars(data) => data.as_ssz_bytes(),
PubsubMessage::AggregateAndProofAttestation(data) => data.as_ssz_bytes(),
PubsubMessage::VoluntaryExit(data) => data.as_ssz_bytes(),
PubsubMessage::ProposerSlashing(data) => data.as_ssz_bytes(),
@ -331,12 +283,6 @@ impl<T: EthSpec> std::fmt::Display for PubsubMessage<T> {
block.slot(),
block.message().proposer_index()
),
PubsubMessage::BeaconBlockAndBlobsSidecars(block_and_blob) => write!(
f,
"Beacon block and Blobs Sidecar: slot: {}, blobs: {}",
block_and_blob.beacon_block.message.slot,
block_and_blob.blobs_sidecar.blobs.len(),
),
PubsubMessage::AggregateAndProofAttestation(att) => write!(
f,
"Aggregate and Proof: slot: {}, index: {}, aggregator_index: {}",

View File

@ -11,7 +11,6 @@ use crate::Subnet;
pub const TOPIC_PREFIX: &str = "eth2";
pub const SSZ_SNAPPY_ENCODING_POSTFIX: &str = "ssz_snappy";
pub const BEACON_BLOCK_TOPIC: &str = "beacon_block";
pub const BEACON_BLOCK_AND_BLOBS_SIDECAR_TOPIC: &str = "beacon_block_and_blobs_sidecar";
pub const BEACON_AGGREGATE_AND_PROOF_TOPIC: &str = "beacon_aggregate_and_proof";
pub const BEACON_ATTESTATION_PREFIX: &str = "beacon_attestation_";
pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit";
@ -57,8 +56,6 @@ pub struct GossipTopic {
pub enum GossipKind {
/// Topic for publishing beacon blocks.
BeaconBlock,
/// Topic for publishing beacon block coupled with blob sidecars.
BeaconBlocksAndBlobsSidecar,
/// Topic for publishing aggregate attestations and proofs.
BeaconAggregateAndProof,
/// Topic for publishing raw attestations on a particular subnet.
@ -150,7 +147,6 @@ impl GossipTopic {
let kind = match topic_parts[3] {
BEACON_BLOCK_TOPIC => GossipKind::BeaconBlock,
BEACON_AGGREGATE_AND_PROOF_TOPIC => GossipKind::BeaconAggregateAndProof,
BEACON_BLOCK_AND_BLOBS_SIDECAR_TOPIC => GossipKind::BeaconBlocksAndBlobsSidecar,
SIGNED_CONTRIBUTION_AND_PROOF_TOPIC => GossipKind::SignedContributionAndProof,
VOLUNTARY_EXIT_TOPIC => GossipKind::VoluntaryExit,
PROPOSER_SLASHING_TOPIC => GossipKind::ProposerSlashing,
@ -207,7 +203,6 @@ impl std::fmt::Display for GossipTopic {
let kind = match self.kind {
GossipKind::BeaconBlock => BEACON_BLOCK_TOPIC.into(),
GossipKind::BeaconBlocksAndBlobsSidecar => BEACON_BLOCK_AND_BLOBS_SIDECAR_TOPIC.into(),
GossipKind::BeaconAggregateAndProof => BEACON_AGGREGATE_AND_PROOF_TOPIC.into(),
GossipKind::VoluntaryExit => VOLUNTARY_EXIT_TOPIC.into(),
GossipKind::ProposerSlashing => PROPOSER_SLASHING_TOPIC.into(),
@ -292,7 +287,6 @@ mod tests {
VoluntaryExit,
ProposerSlashing,
AttesterSlashing,
BeaconBlocksAndBlobsSidecar,
]
.iter()
{

View File

@ -26,19 +26,16 @@ pub fn fork_context(fork_name: ForkName) -> ForkContext {
let altair_fork_epoch = Epoch::new(1);
let merge_fork_epoch = Epoch::new(2);
let capella_fork_epoch = Epoch::new(3);
let eip4844_fork_epoch = Epoch::new(4);
chain_spec.altair_fork_epoch = Some(altair_fork_epoch);
chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch);
chain_spec.capella_fork_epoch = Some(capella_fork_epoch);
chain_spec.eip4844_fork_epoch = Some(eip4844_fork_epoch);
let current_slot = match fork_name {
ForkName::Base => Slot::new(0),
ForkName::Altair => altair_fork_epoch.start_slot(E::slots_per_epoch()),
ForkName::Merge => merge_fork_epoch.start_slot(E::slots_per_epoch()),
ForkName::Capella => capella_fork_epoch.start_slot(E::slots_per_epoch()),
ForkName::Eip4844 => eip4844_fork_epoch.start_slot(E::slots_per_epoch()),
};
ForkContext::new::<E>(current_slot, Hash256::zero(), &chain_spec)
}

View File

@ -45,9 +45,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, GossipVerifiedBlock, NotifyExe
use derivative::Derivative;
use futures::stream::{Stream, StreamExt};
use futures::task::Poll;
use lighthouse_network::rpc::methods::BlobsByRangeRequest;
use lighthouse_network::rpc::LightClientBootstrapRequest;
use lighthouse_network::SignedBeaconBlockAndBlobsSidecar;
use lighthouse_network::{
rpc::{BlocksByRangeRequest, BlocksByRootRequest, StatusMessage},
Client, MessageId, NetworkGlobals, PeerId, PeerRequestId,
@ -116,10 +114,6 @@ const MAX_AGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN: usize = 1_024;
/// before we start dropping them.
const MAX_GOSSIP_BLOCK_QUEUE_LEN: usize = 1_024;
/// The maximum number of queued `SignedBeaconBlockAndBlobsSidecar` objects received on gossip that
/// will be stored before we start dropping them.
const MAX_GOSSIP_BLOCK_AND_BLOB_QUEUE_LEN: usize = 1_024;
/// The maximum number of queued `SignedBeaconBlock` objects received prior to their slot (but
/// within acceptable clock disparity) that will be queued before we start dropping them.
const MAX_DELAYED_BLOCK_QUEUE_LEN: usize = 1_024;
@ -172,8 +166,6 @@ const MAX_STATUS_QUEUE_LEN: usize = 1_024;
/// will be stored before we start dropping them.
const MAX_BLOCKS_BY_RANGE_QUEUE_LEN: usize = 1_024;
const MAX_BLOBS_BY_RANGE_QUEUE_LEN: usize = 1_024;
/// The maximum number of queued `BlocksByRootRequest` objects received from the network RPC that
/// will be stored before we start dropping them.
const MAX_BLOCKS_BY_ROOTS_QUEUE_LEN: usize = 1_024;
@ -216,7 +208,6 @@ pub const GOSSIP_ATTESTATION_BATCH: &str = "gossip_attestation_batch";
pub const GOSSIP_AGGREGATE: &str = "gossip_aggregate";
pub const GOSSIP_AGGREGATE_BATCH: &str = "gossip_aggregate_batch";
pub const GOSSIP_BLOCK: &str = "gossip_block";
pub const GOSSIP_BLOCK_AND_BLOBS_SIDECAR: &str = "gossip_block_and_blobs_sidecar";
pub const DELAYED_IMPORT_BLOCK: &str = "delayed_import_block";
pub const GOSSIP_VOLUNTARY_EXIT: &str = "gossip_voluntary_exit";
pub const GOSSIP_PROPOSER_SLASHING: &str = "gossip_proposer_slashing";
@ -230,7 +221,6 @@ pub const CHAIN_SEGMENT: &str = "chain_segment";
pub const STATUS_PROCESSING: &str = "status_processing";
pub const BLOCKS_BY_RANGE_REQUEST: &str = "blocks_by_range_request";
pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request";
pub const BLOBS_BY_RANGE_REQUEST: &str = "blobs_by_range_request";
pub const LIGHT_CLIENT_BOOTSTRAP_REQUEST: &str = "light_client_bootstrap";
pub const UNKNOWN_BLOCK_ATTESTATION: &str = "unknown_block_attestation";
pub const UNKNOWN_BLOCK_AGGREGATE: &str = "unknown_block_aggregate";
@ -439,26 +429,6 @@ impl<T: BeaconChainTypes> WorkEvent<T> {
}
}
/// Create a new `Work` event for some blobs sidecar.
pub fn gossip_block_and_blobs_sidecar(
message_id: MessageId,
peer_id: PeerId,
peer_client: Client,
block_and_blobs: Arc<SignedBeaconBlockAndBlobsSidecar<T::EthSpec>>,
seen_timestamp: Duration,
) -> Self {
Self {
drop_during_sync: false,
work: Work::GossipBlockAndBlobsSidecar {
message_id,
peer_id,
peer_client,
block_and_blobs,
seen_timestamp,
},
}
}
/// Create a new `Work` event for some sync committee signature.
pub fn gossip_sync_signature(
message_id: MessageId,
@ -668,21 +638,6 @@ impl<T: BeaconChainTypes> WorkEvent<T> {
}
}
pub fn blobs_by_range_request(
peer_id: PeerId,
request_id: PeerRequestId,
request: BlobsByRangeRequest,
) -> Self {
Self {
drop_during_sync: false,
work: Work::BlobsByRangeRequest {
peer_id,
request_id,
request,
},
}
}
/// Create a new work event to process `LightClientBootstrap`s from the RPC network.
pub fn lightclient_bootstrap_request(
peer_id: PeerId,
@ -838,13 +793,6 @@ pub enum Work<T: BeaconChainTypes> {
block: Arc<SignedBeaconBlock<T::EthSpec>>,
seen_timestamp: Duration,
},
GossipBlockAndBlobsSidecar {
message_id: MessageId,
peer_id: PeerId,
peer_client: Client,
block_and_blobs: Arc<SignedBeaconBlockAndBlobsSidecar<T::EthSpec>>,
seen_timestamp: Duration,
},
DelayedImportBlock {
peer_id: PeerId,
block: Box<GossipVerifiedBlock<T>>,
@ -915,11 +863,6 @@ pub enum Work<T: BeaconChainTypes> {
request_id: PeerRequestId,
request: BlocksByRootRequest,
},
BlobsByRangeRequest {
peer_id: PeerId,
request_id: PeerRequestId,
request: BlobsByRangeRequest,
},
GossipBlsToExecutionChange {
message_id: MessageId,
peer_id: PeerId,
@ -941,7 +884,6 @@ impl<T: BeaconChainTypes> Work<T> {
Work::GossipAggregate { .. } => GOSSIP_AGGREGATE,
Work::GossipAggregateBatch { .. } => GOSSIP_AGGREGATE_BATCH,
Work::GossipBlock { .. } => GOSSIP_BLOCK,
Work::GossipBlockAndBlobsSidecar { .. } => GOSSIP_BLOCK_AND_BLOBS_SIDECAR,
Work::DelayedImportBlock { .. } => DELAYED_IMPORT_BLOCK,
Work::GossipVoluntaryExit { .. } => GOSSIP_VOLUNTARY_EXIT,
Work::GossipProposerSlashing { .. } => GOSSIP_PROPOSER_SLASHING,
@ -955,7 +897,6 @@ impl<T: BeaconChainTypes> Work<T> {
Work::Status { .. } => STATUS_PROCESSING,
Work::BlocksByRangeRequest { .. } => BLOCKS_BY_RANGE_REQUEST,
Work::BlocksByRootsRequest { .. } => BLOCKS_BY_ROOTS_REQUEST,
Work::BlobsByRangeRequest { .. } => BLOBS_BY_RANGE_REQUEST,
Work::LightClientBootstrapRequest { .. } => LIGHT_CLIENT_BOOTSTRAP_REQUEST,
Work::UnknownBlockAttestation { .. } => UNKNOWN_BLOCK_ATTESTATION,
Work::UnknownBlockAggregate { .. } => UNKNOWN_BLOCK_AGGREGATE,
@ -1103,14 +1044,11 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
let mut chain_segment_queue = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN);
let mut backfill_chain_segment = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN);
let mut gossip_block_queue = FifoQueue::new(MAX_GOSSIP_BLOCK_QUEUE_LEN);
let mut gossip_block_and_blobs_sidecar_queue =
FifoQueue::new(MAX_GOSSIP_BLOCK_AND_BLOB_QUEUE_LEN);
let mut delayed_block_queue = FifoQueue::new(MAX_DELAYED_BLOCK_QUEUE_LEN);
let mut status_queue = FifoQueue::new(MAX_STATUS_QUEUE_LEN);
let mut bbrange_queue = FifoQueue::new(MAX_BLOCKS_BY_RANGE_QUEUE_LEN);
let mut bbroots_queue = FifoQueue::new(MAX_BLOCKS_BY_ROOTS_QUEUE_LEN);
let mut blbrange_queue = FifoQueue::new(MAX_BLOBS_BY_RANGE_QUEUE_LEN);
let mut gossip_bls_to_execution_change_queue =
FifoQueue::new(MAX_BLS_TO_EXECUTION_CHANGE_QUEUE_LEN);
@ -1217,8 +1155,6 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
// required to verify some attestations.
} else if let Some(item) = gossip_block_queue.pop() {
self.spawn_worker(item, toolbox);
} else if let Some(item) = gossip_block_and_blobs_sidecar_queue.pop() {
self.spawn_worker(item, toolbox);
// Check the aggregates, *then* the unaggregates since we assume that
// aggregates are more valuable to local validators and effectively give us
// more information with less signature verification time.
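The comment above carries the scheduling rationale: when a worker becomes idle, queues are drained strictly in priority order, and aggregates are checked before unaggregated attestations. A compact sketch of that drain pattern, with invented work types and plain `VecDeque`s in place of the processor's real queue wrappers:

```rust
use std::collections::VecDeque;

// Illustrative work items only; the real processor has many more queues.
enum Work {
    GossipBlock(u64),
    Aggregate(u64),
    Attestation(u64),
}

/// Each time a worker goes idle, the highest-priority non-empty queue wins.
fn next_work(
    gossip_blocks: &mut VecDeque<u64>,
    aggregates: &mut VecDeque<u64>,
    attestations: &mut VecDeque<u64>,
) -> Option<Work> {
    if let Some(id) = gossip_blocks.pop_front() {
        Some(Work::GossipBlock(id))
    } else if let Some(id) = aggregates.pop_front() {
        // Aggregates before unaggregated attestations: more information for
        // less signature verification work.
        Some(Work::Aggregate(id))
    } else {
        attestations.pop_front().map(Work::Attestation)
    }
}
```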
@ -1428,9 +1364,6 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
Work::GossipBlock { .. } => {
gossip_block_queue.push(work, work_id, &self.log)
}
Work::GossipBlockAndBlobsSidecar { .. } => {
gossip_block_and_blobs_sidecar_queue.push(work, work_id, &self.log)
}
Work::DelayedImportBlock { .. } => {
delayed_block_queue.push(work, work_id, &self.log)
}
@ -1470,9 +1403,6 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
Work::BlocksByRootsRequest { .. } => {
bbroots_queue.push(work, work_id, &self.log)
}
Work::BlobsByRangeRequest { .. } => {
blbrange_queue.push(work, work_id, &self.log)
}
Work::LightClientBootstrapRequest { .. } => {
lcbootstrap_queue.push(work, work_id, &self.log)
}
@ -1705,12 +1635,6 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
)
.await
}),
/*
* Verification for blobs sidecars received on gossip.
*/
Work::GossipBlockAndBlobsSidecar { .. } => {
warn!(self.log, "Unexpected block and blobs on gossip")
}
/*
* Import for blocks that we received earlier than their intended slot.
*/
@ -1912,9 +1836,6 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
request,
)
}),
Work::BlobsByRangeRequest { .. } => {
warn!(self.log.clone(), "Unexpected BlobsByRange Request")
}
/*
* Processing of lightclient bootstrap requests from other peers.
*/

View File

@ -167,15 +167,6 @@ lazy_static! {
"beacon_processor_rpc_block_imported_total",
"Total number of gossip blocks imported to fork choice, etc."
);
// Rpc blobs.
pub static ref BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_rpc_blob_queue_total",
"Count of blobs from the rpc waiting to be verified."
);
pub static ref BEACON_PROCESSOR_RPC_BLOB_IMPORTED_TOTAL: Result<IntCounter> = try_create_int_counter(
"beacon_processor_rpc_blob_imported_total",
"Total number of gossip blobs imported."
);
// Chain segments.
pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_chain_segment_queue_total",

View File

@ -168,9 +168,6 @@ impl<T: BeaconChainTypes> Router<T> {
Request::BlocksByRoot(request) => self
.processor
.on_blocks_by_root_request(peer_id, id, request),
Request::BlobsByRange(request) => self
.processor
.on_blobs_by_range_request(peer_id, id, request),
Request::LightClientBootstrap(request) => self
.processor
.on_lightclient_bootstrap(peer_id, id, request),
@ -198,10 +195,6 @@ impl<T: BeaconChainTypes> Router<T> {
self.processor
.on_blocks_by_root_response(peer_id, request_id, beacon_block);
}
Response::BlobsByRange(beacon_blob) => {
self.processor
.on_blobs_by_range_response(peer_id, request_id, beacon_blob);
}
Response::LightClientBootstrap(_) => unreachable!(),
}
}
@ -240,14 +233,6 @@ impl<T: BeaconChainTypes> Router<T> {
block,
);
}
PubsubMessage::BeaconBlockAndBlobsSidecars(block_and_blobs) => {
self.processor.on_block_and_blobs_sidecar_gossip(
id,
peer_id,
self.network_globals.client(&peer_id),
block_and_blobs,
);
}
PubsubMessage::VoluntaryExit(exit) => {
debug!(self.log, "Received a voluntary exit"; "peer_id" => %peer_id);
self.processor.on_voluntary_exit_gossip(id, peer_id, exit);

View File

@ -6,8 +6,7 @@ use crate::status::status_message;
use crate::sync::manager::RequestId as SyncId;
use crate::sync::SyncMessage;
use beacon_chain::{BeaconChain, BeaconChainTypes};
use lighthouse_network::rpc::methods::BlobsByRangeRequest;
use lighthouse_network::{rpc::*, SignedBeaconBlockAndBlobsSidecar};
use lighthouse_network::rpc::*;
use lighthouse_network::{
Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, Request, Response,
};
@ -18,10 +17,9 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH};
use store::SyncCommitteeMessage;
use tokio::sync::mpsc;
use types::{
Attestation, AttesterSlashing, BlobsSidecar, EthSpec, LightClientFinalityUpdate,
LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock,
SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SubnetId,
SyncSubnetId,
Attestation, AttesterSlashing, EthSpec, LightClientFinalityUpdate, LightClientOptimisticUpdate,
ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange,
SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncSubnetId,
};
/// Processes validated messages from the network. It relays necessary data to the syncing thread
@ -163,17 +161,6 @@ impl<T: BeaconChainTypes> Processor<T> {
))
}
pub fn on_blobs_by_range_request(
&mut self,
peer_id: PeerId,
request_id: PeerRequestId,
request: BlobsByRangeRequest,
) {
self.send_beacon_processor_work(BeaconWorkEvent::blobs_by_range_request(
peer_id, request_id, request,
))
}
/// Handle a `LightClientBootstrap` request from the peer.
pub fn on_lightclient_bootstrap(
&mut self,
@ -230,33 +217,6 @@ impl<T: BeaconChainTypes> Processor<T> {
});
}
pub fn on_blobs_by_range_response(
&mut self,
peer_id: PeerId,
request_id: RequestId,
blob_wrapper: Option<Arc<BlobsSidecar<T::EthSpec>>>,
) {
trace!(
self.log,
"Received BlobsByRange Response";
"peer" => %peer_id,
);
if let RequestId::Sync(id) = request_id {
self.send_to_sync(SyncMessage::RpcBlob {
peer_id,
request_id: id,
blob_sidecar: blob_wrapper,
seen_timestamp: timestamp_now(),
});
} else {
debug!(
self.log,
"All blobs by range responses should belong to sync"
);
}
}
/// Handle a `BlocksByRoot` response from the peer.
pub fn on_blocks_by_root_response(
&mut self,
@ -308,22 +268,6 @@ impl<T: BeaconChainTypes> Processor<T> {
))
}
pub fn on_block_and_blobs_sidecar_gossip(
&mut self,
message_id: MessageId,
peer_id: PeerId,
peer_client: Client,
block_and_blobs: Arc<SignedBeaconBlockAndBlobsSidecar<T::EthSpec>>,
) {
self.send_beacon_processor_work(BeaconWorkEvent::gossip_block_and_blobs_sidecar(
message_id,
peer_id,
peer_client,
block_and_blobs,
timestamp_now(),
))
}
pub fn on_unaggregated_attestation_gossip(
&mut self,
message_id: MessageId,

View File

@ -47,13 +47,13 @@ use lighthouse_network::rpc::methods::MAX_REQUEST_BLOCKS;
use lighthouse_network::types::{NetworkGlobals, SyncState};
use lighthouse_network::SyncInfo;
use lighthouse_network::{PeerAction, PeerId};
use slog::{crit, debug, error, info, trace, warn, Logger};
use slog::{crit, debug, error, info, trace, Logger};
use std::boxed::Box;
use std::ops::Sub;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc;
use types::{BlobsSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot};
use types::{EthSpec, Hash256, SignedBeaconBlock, Slot};
/// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync
/// from a peer. If a peer is within this tolerance (forwards or backwards), it is treated as a
@ -93,14 +93,6 @@ pub enum SyncMessage<T: EthSpec> {
seen_timestamp: Duration,
},
/// A blob has been received from RPC.
RpcBlob {
peer_id: PeerId,
request_id: RequestId,
blob_sidecar: Option<Arc<BlobsSidecar<T>>>,
seen_timestamp: Duration,
},
/// A block with an unknown parent has been received.
UnknownBlock(PeerId, Arc<SignedBeaconBlock<T>>, Hash256),
@ -592,9 +584,6 @@ impl<T: BeaconChainTypes> SyncManager<T> {
.block_lookups
.parent_chain_processed(chain_hash, result, &mut self.network),
},
SyncMessage::RpcBlob { .. } => {
warn!(self.log, "Unexpected blob message received");
}
}
}

View File

@ -7,7 +7,6 @@ use types::{EthSpec, MinimalEthSpec};
pub const PREV_DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 2048;
pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 8192;
pub const DEFAULT_BLOCK_CACHE_SIZE: usize = 5;
pub const DEFAULT_BLOB_CACHE_SIZE: usize = 5;
/// Database configuration parameters.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
@ -18,8 +17,6 @@ pub struct StoreConfig {
pub slots_per_restore_point_set_explicitly: bool,
/// Maximum number of blocks to store in the in-memory block cache.
pub block_cache_size: usize,
/// Maximum number of blobs to store in the in-memory blob cache.
pub blob_cache_size: usize,
/// Whether to compact the database on initialization.
pub compact_on_init: bool,
/// Whether to compact the database during database pruning.
@ -46,7 +43,6 @@ impl Default for StoreConfig {
slots_per_restore_point: MinimalEthSpec::slots_per_historical_root() as u64,
slots_per_restore_point_set_explicitly: false,
block_cache_size: DEFAULT_BLOCK_CACHE_SIZE,
blob_cache_size: DEFAULT_BLOB_CACHE_SIZE,
compact_on_init: false,
compact_on_prune: true,
prune_payloads: true,

View File

@ -60,8 +60,6 @@ pub struct HotColdDB<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
///
/// The hot database also contains all blocks.
pub hot_db: Hot,
/// LRU cache of deserialized blobs. Updated whenever a blob is loaded.
blob_cache: Mutex<LruCache<Hash256, BlobsSidecar<E>>>,
/// LRU cache of deserialized blocks. Updated whenever a block is loaded.
block_cache: Mutex<LruCache<Hash256, SignedBeaconBlock<E>>>,
/// Chain spec.
@ -131,7 +129,6 @@ impl<E: EthSpec> HotColdDB<E, MemoryStore<E>, MemoryStore<E>> {
cold_db: MemoryStore::open(),
hot_db: MemoryStore::open(),
block_cache: Mutex::new(LruCache::new(config.block_cache_size)),
blob_cache: Mutex::new(LruCache::new(config.blob_cache_size)),
config,
spec,
log,
@ -165,7 +162,6 @@ impl<E: EthSpec> HotColdDB<E, LevelDB<E>, LevelDB<E>> {
cold_db: LevelDB::open(cold_path)?,
hot_db: LevelDB::open(hot_path)?,
block_cache: Mutex::new(LruCache::new(config.block_cache_size)),
blob_cache: Mutex::new(LruCache::new(config.blob_cache_size)),
config,
spec,
log,
@ -488,41 +484,6 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
.key_delete(DBColumn::ExecPayload.into(), block_root.as_bytes())
}
pub fn put_blobs(&self, block_root: &Hash256, blobs: BlobsSidecar<E>) -> Result<(), Error> {
self.hot_db.put_bytes(
DBColumn::BeaconBlob.into(),
block_root.as_bytes(),
&blobs.as_ssz_bytes(),
)?;
self.blob_cache.lock().push(*block_root, blobs);
Ok(())
}
pub fn get_blobs(&self, block_root: &Hash256) -> Result<Option<BlobsSidecar<E>>, Error> {
if let Some(blobs) = self.blob_cache.lock().get(block_root) {
Ok(Some(blobs.clone()))
} else if let Some(bytes) = self
.hot_db
.get_bytes(DBColumn::BeaconBlob.into(), block_root.as_bytes())?
{
let ret = BlobsSidecar::from_ssz_bytes(&bytes)?;
self.blob_cache.lock().put(*block_root, ret.clone());
Ok(Some(ret))
} else {
Ok(None)
}
}
pub fn blobs_as_kv_store_ops(
&self,
key: &Hash256,
blobs: &BlobsSidecar<E>,
ops: &mut Vec<KeyValueStoreOp>,
) {
let db_key = get_key_for_col(DBColumn::BeaconBlob.into(), key.as_bytes());
ops.push(KeyValueStoreOp::PutKeyValue(db_key, blobs.as_ssz_bytes()));
}
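The `put_blobs`/`get_blobs` pair removed above is a write-through/read-through cache in front of the hot database: writes persist to the `blb` column and then populate the cache, reads serve from the cache and fall back to the database, caching the result. A std-only sketch of that shape (the struct and field names are invented; the real code uses `Mutex<LruCache<..>>` and the key-value store):

```rust
use std::collections::HashMap;

/// Stand-ins for the real LRU cache and on-disk column.
struct BlobStoreSketch {
    cache: HashMap<[u8; 32], Vec<u8>>, // real code: Mutex<LruCache<Hash256, BlobsSidecar<E>>>
    db: HashMap<[u8; 32], Vec<u8>>,    // real code: the hot DB's `blb` column
}

impl BlobStoreSketch {
    /// Write-through: persist first, then populate the cache.
    fn put(&mut self, root: [u8; 32], blobs: Vec<u8>) {
        self.db.insert(root, blobs.clone());
        self.cache.insert(root, blobs);
    }

    /// Read-through: serve from the cache when possible, otherwise load from
    /// the DB and remember the result.
    fn get(&mut self, root: &[u8; 32]) -> Option<Vec<u8>> {
        if let Some(blobs) = self.cache.get(root) {
            return Some(blobs.clone());
        }
        let blobs = self.db.get(root)?.clone();
        self.cache.insert(*root, blobs.clone());
        Some(blobs)
    }
}
```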
pub fn put_state_summary(
&self,
state_root: &Hash256,
@ -750,10 +711,6 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
self.store_hot_state(&state_root, state, &mut key_value_batch)?;
}
StoreOp::PutBlobs(block_root, blobs) => {
self.blobs_as_kv_store_ops(&block_root, &blobs, &mut key_value_batch);
}
StoreOp::PutStateSummary(state_root, summary) => {
key_value_batch.push(summary.as_kv_store_op(state_root));
}
@ -802,7 +759,6 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
// Update the block cache whilst holding a lock, to ensure that the cache updates atomically
// with the database.
let mut guard = self.block_cache.lock();
let mut guard_blob = self.blob_cache.lock();
for op in &batch {
match op {
@ -810,10 +766,6 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
guard.put(*block_root, (**block).clone());
}
StoreOp::PutBlobs(block_root, blobs) => {
guard_blob.put(*block_root, (**blobs).clone());
}
StoreOp::PutState(_, _) => (),
StoreOp::PutStateSummary(_, _) => (),

View File

@ -1,9 +1,6 @@
use crate::{DBColumn, Error, StoreItem};
use ssz::{Decode, Encode};
use types::{
EthSpec, ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadEip4844,
ExecutionPayloadMerge,
};
use types::{EthSpec, ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge};
macro_rules! impl_store_item {
($ty_name:ident) => {
@ -24,7 +21,6 @@ macro_rules! impl_store_item {
}
impl_store_item!(ExecutionPayloadMerge);
impl_store_item!(ExecutionPayloadCapella);
impl_store_item!(ExecutionPayloadEip4844);
/// This fork-agnostic implementation should only be used for writing.
///
@ -40,13 +36,9 @@ impl<E: EthSpec> StoreItem for ExecutionPayload<E> {
}
fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> {
ExecutionPayloadEip4844::from_ssz_bytes(bytes)
.map(Self::Eip4844)
.or_else(|_| {
ExecutionPayloadCapella::from_ssz_bytes(bytes)
.map(Self::Capella)
.or_else(|_| ExecutionPayloadMerge::from_ssz_bytes(bytes).map(Self::Merge))
})
ExecutionPayloadCapella::from_ssz_bytes(bytes)
.map(Self::Capella)
.or_else(|_| ExecutionPayloadMerge::from_ssz_bytes(bytes).map(Self::Merge))
.map_err(Into::into)
}
}
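After the change, `from_store_bytes` keeps the same ordered-fallback shape: try the newest fork's SSZ encoding first and fall back to older ones via `or_else`. A toy sketch of that decode chain under assumed, invented payload "encodings" (not the real SSZ types):

```rust
/// Toy payload variants; only the `or_else` fallback shape matters here.
#[derive(Debug, PartialEq)]
enum Payload {
    Capella(Vec<u8>),
    Merge(Vec<u8>),
}

fn decode_capella(bytes: &[u8]) -> Result<Vec<u8>, String> {
    // Pretend Capella payloads carry a leading 0x02 tag.
    match bytes.first().copied() {
        Some(2) => Ok(bytes.to_vec()),
        _ => Err("not a capella payload".into()),
    }
}

fn decode_merge(bytes: &[u8]) -> Result<Vec<u8>, String> {
    match bytes.first().copied() {
        Some(1) => Ok(bytes.to_vec()),
        _ => Err("not a merge payload".into()),
    }
}

/// Newest fork first, then fall back to older encodings, mirroring the chain above.
fn from_store_bytes(bytes: &[u8]) -> Result<Payload, String> {
    decode_capella(bytes)
        .map(Payload::Capella)
        .or_else(|_| decode_merge(bytes).map(Payload::Merge))
}

fn main() {
    assert_eq!(from_store_bytes(&[1, 9, 9]), Ok(Payload::Merge(vec![1, 9, 9])));
}
```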

View File

@ -155,7 +155,6 @@ pub trait ItemStore<E: EthSpec>: KeyValueStore<E> + Sync + Send + Sized + 'stati
pub enum StoreOp<'a, E: EthSpec> {
PutBlock(Hash256, Arc<SignedBeaconBlock<E>>),
PutState(Hash256, &'a BeaconState<E>),
PutBlobs(Hash256, Arc<BlobsSidecar<E>>),
PutStateSummary(Hash256, HotStateSummary),
PutStateTemporaryFlag(Hash256),
DeleteStateTemporaryFlag(Hash256),
@ -173,8 +172,6 @@ pub enum DBColumn {
BeaconMeta,
#[strum(serialize = "blk")]
BeaconBlock,
#[strum(serialize = "blb")]
BeaconBlob,
/// For full `BeaconState`s in the hot database (finalized or fork-boundary states).
#[strum(serialize = "ste")]
BeaconState,

View File

@ -15,7 +15,7 @@ use types::*;
///
/// Utilises lazy-loading from separate storage for its vector fields.
#[superstruct(
variants(Base, Altair, Merge, Capella, Eip4844),
variants(Base, Altair, Merge, Capella),
variant_attributes(derive(Debug, PartialEq, Clone, Encode, Decode))
)]
#[derive(Debug, PartialEq, Clone, Encode)]
@ -67,9 +67,9 @@ where
pub current_epoch_attestations: VariableList<PendingAttestation<T>, T::MaxPendingAttestations>,
// Participation (Altair and later)
#[superstruct(only(Altair, Merge, Capella, Eip4844))]
#[superstruct(only(Altair, Merge, Capella))]
pub previous_epoch_participation: VariableList<ParticipationFlags, T::ValidatorRegistryLimit>,
#[superstruct(only(Altair, Merge, Capella, Eip4844))]
#[superstruct(only(Altair, Merge, Capella))]
pub current_epoch_participation: VariableList<ParticipationFlags, T::ValidatorRegistryLimit>,
// Finality
@ -79,13 +79,13 @@ where
pub finalized_checkpoint: Checkpoint,
// Inactivity
#[superstruct(only(Altair, Merge, Capella, Eip4844))]
#[superstruct(only(Altair, Merge, Capella))]
pub inactivity_scores: VariableList<u64, T::ValidatorRegistryLimit>,
// Light-client sync committees
#[superstruct(only(Altair, Merge, Capella, Eip4844))]
#[superstruct(only(Altair, Merge, Capella))]
pub current_sync_committee: Arc<SyncCommittee<T>>,
#[superstruct(only(Altair, Merge, Capella, Eip4844))]
#[superstruct(only(Altair, Merge, Capella))]
pub next_sync_committee: Arc<SyncCommittee<T>>,
// Execution
@ -99,20 +99,15 @@ where
partial_getter(rename = "latest_execution_payload_header_capella")
)]
pub latest_execution_payload_header: ExecutionPayloadHeaderCapella<T>,
#[superstruct(
only(Eip4844),
partial_getter(rename = "latest_execution_payload_header_eip4844")
)]
pub latest_execution_payload_header: ExecutionPayloadHeaderEip4844<T>,
// Capella
#[superstruct(only(Capella, Eip4844))]
#[superstruct(only(Capella))]
pub next_withdrawal_index: u64,
#[superstruct(only(Capella, Eip4844))]
#[superstruct(only(Capella))]
pub next_withdrawal_validator_index: u64,
#[ssz(skip_serializing, skip_deserializing)]
#[superstruct(only(Capella, Eip4844))]
#[superstruct(only(Capella))]
pub historical_summaries: Option<VariableList<HistoricalSummary, T::HistoricalRootsLimit>>,
}
@ -227,23 +222,6 @@ impl<T: EthSpec> PartialBeaconState<T> {
],
[historical_summaries]
),
BeaconState::Eip4844(s) => impl_from_state_forgetful!(
s,
outer,
Eip4844,
PartialBeaconStateEip4844,
[
previous_epoch_participation,
current_epoch_participation,
current_sync_committee,
next_sync_committee,
inactivity_scores,
latest_execution_payload_header,
next_withdrawal_index,
next_withdrawal_validator_index
],
[historical_summaries]
),
}
}
@ -472,22 +450,6 @@ impl<E: EthSpec> TryInto<BeaconState<E>> for PartialBeaconState<E> {
],
[historical_summaries]
),
PartialBeaconState::Eip4844(inner) => impl_try_into_beacon_state!(
inner,
Eip4844,
BeaconStateEip4844,
[
previous_epoch_participation,
current_epoch_participation,
current_sync_committee,
next_sync_committee,
inactivity_scores,
latest_execution_payload_header,
next_withdrawal_index,
next_withdrawal_validator_index
],
[historical_summaries]
),
};
Ok(state)
}

View File

@ -1395,32 +1395,6 @@ impl BeaconNodeHttpClient {
self.get(path).await
}
/// `GET v1/validator/blocks_and_blobs/{slot}`
pub async fn get_validator_blocks_and_blobs<T: EthSpec, Payload: AbstractExecPayload<T>>(
&self,
slot: Slot,
randao_reveal: &SignatureBytes,
graffiti: Option<&Graffiti>,
) -> Result<ForkVersionedResponse<BlocksAndBlobs<T, Payload>>, Error> {
let mut path = self.eth_path(V1)?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("validator")
.push("blocks_and_blobs")
.push(&slot.to_string());
path.query_pairs_mut()
.append_pair("randao_reveal", &randao_reveal.to_string());
if let Some(graffiti) = graffiti {
path.query_pairs_mut()
.append_pair("graffiti", &graffiti.to_string());
}
self.get(path).await
}
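The removed client helper built its endpoint URL by pushing path segments and appending query pairs. A small sketch of that construction, assuming the `url` crate (which provides `path_segments_mut` and `query_pairs_mut`); the host/port and the exact path are placeholders, shown only to illustrate the pattern:

```rust
use url::Url;

/// Sketch of the URL construction used by the removed helper.
fn blocks_and_blobs_url(
    server: &Url,
    slot: u64,
    randao_reveal: &str,
    graffiti: Option<&str>,
) -> Result<Url, String> {
    let mut path = server.clone();
    path.path_segments_mut()
        .map_err(|()| "server URL cannot be a base".to_string())?
        .push("eth")
        .push("v1")
        .push("validator")
        .push("blocks_and_blobs")
        .push(&slot.to_string());
    path.query_pairs_mut()
        .append_pair("randao_reveal", randao_reveal);
    if let Some(graffiti) = graffiti {
        path.query_pairs_mut().append_pair("graffiti", graffiti);
    }
    Ok(path)
}

fn main() {
    let server = Url::parse("http://localhost:5052/").unwrap();
    let url = blocks_and_blobs_url(&server, 100, "0xdead", None).unwrap();
    assert!(url.as_str().contains("validator/blocks_and_blobs/100"));
    assert!(url.as_str().contains("randao_reveal=0xdead"));
}
```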
/// `GET v2/validator/blinded_blocks/{slot}`
pub async fn get_validator_blinded_blocks<T: EthSpec, Payload: AbstractExecPayload<T>>(
&self,

View File

@ -1114,38 +1114,6 @@ pub struct LivenessResponseData {
pub is_live: bool,
}
#[derive(PartialEq, Debug, Serialize, Deserialize)]
#[serde(bound = "T: EthSpec, Payload: AbstractExecPayload<T>")]
pub struct BlocksAndBlobs<T: EthSpec, Payload: AbstractExecPayload<T>> {
pub block: BeaconBlock<T, Payload>,
pub blobs: Vec<Blob<T>>,
pub kzg_aggregate_proof: KzgProof,
}
impl<T: EthSpec, Payload: AbstractExecPayload<T>> ForkVersionDeserialize
for BlocksAndBlobs<T, Payload>
{
fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>(
value: serde_json::value::Value,
fork_name: ForkName,
) -> Result<Self, D::Error> {
#[derive(Deserialize)]
#[serde(bound = "T: EthSpec")]
struct Helper<T: EthSpec> {
block: serde_json::Value,
blobs: Vec<Blob<T>>,
kzg_aggregate_proof: KzgProof,
}
let helper: Helper<T> = serde_json::from_value(value).map_err(serde::de::Error::custom)?;
Ok(Self {
block: BeaconBlock::deserialize_by_fork::<'de, D>(helper.block, fork_name)?,
blobs: helper.blobs,
kzg_aggregate_proof: helper.kzg_aggregate_proof,
})
}
}
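The removed `ForkVersionDeserialize` impl used a two-stage decode: deserialize the outer envelope with the `block` field kept as raw JSON, then decode that value once the fork name is known. A sketch of the same pattern with `serde`/`serde_json` (the envelope and block types here are invented stand-ins, not the Beacon API schema):

```rust
use serde::Deserialize;
use serde_json::{json, Value};

/// Decode the outer object first, keep `block` as raw JSON.
#[derive(Deserialize)]
struct Envelope {
    block: Value,
    kzg_aggregate_proof: String,
}

#[derive(Deserialize, Debug)]
struct CapellaBlockSketch {
    slot: u64,
}

fn deserialize_by_fork(value: Value, fork_name: &str) -> Result<(u64, String), String> {
    let envelope: Envelope = serde_json::from_value(value).map_err(|e| e.to_string())?;
    // The fork name decides which concrete block type the raw `block` value is
    // parsed as; only one variant is sketched here.
    match fork_name {
        "capella" => {
            let block: CapellaBlockSketch =
                serde_json::from_value(envelope.block).map_err(|e| e.to_string())?;
            Ok((block.slot, envelope.kzg_aggregate_proof))
        }
        other => Err(format!("unsupported fork: {other}")),
    }
}

fn main() {
    let value = json!({ "block": { "slot": 7 }, "kzg_aggregate_proof": "0x00" });
    assert_eq!(
        deserialize_by_fork(value, "capella").unwrap(),
        (7, "0x00".to_string())
    );
}
```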
#[cfg(test)]
mod tests {
use super::*;

View File

@ -39,9 +39,6 @@ BELLATRIX_FORK_EPOCH: 385536
# Capella
CAPELLA_FORK_VERSION: 0x03000064
CAPELLA_FORK_EPOCH: 18446744073709551615
# Eip4844
EIP4844_FORK_VERSION: 0x04000064
EIP4844_FORK_EPOCH: 18446744073709551615
# Sharding
SHARDING_FORK_VERSION: 0x03000064
SHARDING_FORK_EPOCH: 18446744073709551615

View File

@ -39,9 +39,6 @@ BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC
# Capella
CAPELLA_FORK_VERSION: 0x03000000
CAPELLA_FORK_EPOCH: 18446744073709551615
# Eip4844
EIP4844_FORK_VERSION: 0x04000000
EIP4844_FORK_EPOCH: 18446744073709551615
# Sharding
SHARDING_FORK_VERSION: 0x03000000
SHARDING_FORK_EPOCH: 18446744073709551615

View File

@ -32,10 +32,6 @@ TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615
CAPELLA_FORK_VERSION: 0x90000072
CAPELLA_FORK_EPOCH: 56832
# Eip4844
EIP4844_FORK_VERSION: 0x03001020
EIP4844_FORK_EPOCH: 18446744073709551615
# Sharding
SHARDING_FORK_VERSION: 0x04001020
SHARDING_FORK_EPOCH: 18446744073709551615

View File

@ -857,10 +857,7 @@ where
(parent_justified, parent_finalized)
} else {
let justification_and_finalization_state = match block {
// TODO(eip4844): Ensure that the final specification
// does not substantially modify per epoch processing.
BeaconBlockRef::Eip4844(_)
| BeaconBlockRef::Capella(_)
BeaconBlockRef::Capella(_)
| BeaconBlockRef::Merge(_)
| BeaconBlockRef::Altair(_) => {
let participation_cache =

View File

@ -50,12 +50,11 @@ pub fn slash_validator<T: EthSpec>(
validator_effective_balance.safe_div(spec.whistleblower_reward_quotient)?;
let proposer_reward = match state {
BeaconState::Base(_) => whistleblower_reward.safe_div(spec.proposer_reward_quotient)?,
BeaconState::Altair(_)
| BeaconState::Merge(_)
| BeaconState::Capella(_)
| BeaconState::Eip4844(_) => whistleblower_reward
.safe_mul(PROPOSER_WEIGHT)?
.safe_div(WEIGHT_DENOMINATOR)?,
BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => {
whistleblower_reward
.safe_mul(PROPOSER_WEIGHT)?
.safe_div(WEIGHT_DENOMINATOR)?
}
};
// Ensure the whistleblower index is in the validator registry.
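The match above encodes the reward split: on Base the proposer receives `whistleblower_reward / PROPOSER_REWARD_QUOTIENT`, while from Altair onward it receives `whistleblower_reward * PROPOSER_WEIGHT / WEIGHT_DENOMINATOR`. A worked example with the mainnet constants (quoted here as assumptions; `safe_arith` is replaced by plain integer ops for brevity):

```rust
fn main() {
    let validator_effective_balance: u64 = 32_000_000_000; // 32 ETH in Gwei
    let whistleblower_reward_quotient: u64 = 512;
    let proposer_weight: u64 = 8;
    let weight_denominator: u64 = 64;

    let whistleblower_reward = validator_effective_balance / whistleblower_reward_quotient;
    // Altair/Merge/Capella path: the proposer gets PROPOSER_WEIGHT / WEIGHT_DENOMINATOR
    // of the whistleblower reward.
    let proposer_reward = whistleblower_reward * proposer_weight / weight_denominator;

    assert_eq!(whistleblower_reward, 62_500_000); // Gwei
    assert_eq!(proposer_reward, 7_812_500); // Gwei
}
```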

View File

@ -2,9 +2,7 @@ use super::per_block_processing::{
errors::BlockProcessingError, process_operations::process_deposit,
};
use crate::common::DepositDataTree;
use crate::upgrade::{
upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_eip4844,
};
use crate::upgrade::{upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella};
use safe_arith::{ArithError, SafeArith};
use tree_hash::TreeHash;
use types::DEPOSIT_TREE_DEPTH;
@ -93,23 +91,6 @@ pub fn initialize_beacon_state_from_eth1<T: EthSpec>(
}
}
// Upgrade to eip4844 if configured from genesis
if spec
.eip4844_fork_epoch
.map_or(false, |fork_epoch| fork_epoch == T::genesis_epoch())
{
upgrade_to_eip4844(&mut state, spec)?;
// Remove intermediate Capella fork from `state.fork`.
state.fork_mut().previous_version = spec.eip4844_fork_version;
// Override latest execution payload header.
// See https://github.com/ethereum/consensus-specs/blob/dev/specs/eip4844/beacon-chain.md#testing
if let Some(ExecutionPayloadHeader::Eip4844(header)) = execution_payload_header {
*state.latest_execution_payload_header_eip4844_mut()? = header;
}
}
// Now that we have our validators, initialize the caches (including the committees)
state.build_all_caches(spec)?;

View File

@ -13,7 +13,6 @@ pub use self::verify_attester_slashing::{
pub use self::verify_proposer_slashing::verify_proposer_slashing;
pub use altair::sync_committee::process_sync_aggregate;
pub use block_signature_verifier::{BlockSignatureVerifier, ParallelSignatureSets};
pub use eip4844::eip4844::process_blob_kzg_commitments;
pub use is_valid_indexed_attestation::is_valid_indexed_attestation;
pub use process_operations::process_operations;
pub use verify_attestation::{
@ -27,7 +26,6 @@ pub use verify_exit::verify_exit;
pub mod altair;
pub mod block_signature_verifier;
pub mod eip4844;
pub mod errors;
mod is_valid_indexed_attestation;
pub mod process_operations;
@ -180,12 +178,6 @@ pub fn per_block_processing<T: EthSpec, Payload: AbstractExecPayload<T>>(
)?;
}
// Eip4844 specifications are not yet released so additional care is taken
// to ensure the code does not run in production.
if matches!(block, BeaconBlockRef::Eip4844(_)) {
process_blob_kzg_commitments(block.body())?;
}
Ok(())
}
@ -409,12 +401,6 @@ pub fn process_execution_payload<T: EthSpec, Payload: AbstractExecPayload<T>>(
_ => return Err(BlockProcessingError::IncorrectStateType),
}
}
ExecutionPayloadHeaderRefMut::Eip4844(header_mut) => {
match payload.to_execution_payload_header() {
ExecutionPayloadHeader::Eip4844(header) => *header_mut = header,
_ => return Err(BlockProcessingError::IncorrectStateType),
}
}
}
Ok(())
@ -527,7 +513,7 @@ pub fn process_withdrawals<T: EthSpec, Payload: AbstractExecPayload<T>>(
) -> Result<(), BlockProcessingError> {
match state {
BeaconState::Merge(_) => Ok(()),
BeaconState::Capella(_) | BeaconState::Eip4844(_) => {
BeaconState::Capella(_) => {
let expected_withdrawals = get_expected_withdrawals(state, spec)?;
let expected_root = expected_withdrawals.tree_hash_root();
let withdrawals_root = payload.withdrawals_root()?;

View File

@ -1,2 +0,0 @@
#[allow(clippy::module_inception)]
pub mod eip4844;

View File

@ -1,122 +0,0 @@
use crate::BlockProcessingError;
use eth2_hashing::hash_fixed;
use itertools::{EitherOrBoth, Itertools};
use safe_arith::SafeArith;
use ssz::Decode;
use ssz_types::VariableList;
use types::consts::eip4844::{BLOB_TX_TYPE, VERSIONED_HASH_VERSION_KZG};
use types::{
AbstractExecPayload, BeaconBlockBodyRef, EthSpec, ExecPayload, KzgCommitment, Transaction,
Transactions, VersionedHash,
};
pub fn process_blob_kzg_commitments<T: EthSpec, Payload: AbstractExecPayload<T>>(
block_body: BeaconBlockBodyRef<T, Payload>,
) -> Result<(), BlockProcessingError> {
if let (Ok(payload), Ok(kzg_commitments)) = (
block_body.execution_payload(),
block_body.blob_kzg_commitments(),
) {
if let Some(transactions) = payload.transactions() {
if !verify_kzg_commitments_against_transactions::<T>(transactions, kzg_commitments)? {
return Err(BlockProcessingError::BlobVersionHashMismatch);
}
}
}
Ok(())
}
pub fn verify_kzg_commitments_against_transactions<T: EthSpec>(
transactions: &Transactions<T>,
kzg_commitments: &VariableList<KzgCommitment, T::MaxBlobsPerBlock>,
) -> Result<bool, BlockProcessingError> {
let nested_iter = transactions
.into_iter()
.filter(|tx| {
tx.first()
.map(|tx_type| *tx_type == BLOB_TX_TYPE)
.unwrap_or(false)
})
.map(|tx| tx_peek_blob_versioned_hashes::<T>(tx));
itertools::process_results(nested_iter, |iter| {
let zipped_iter = iter
.flatten()
// Need to use `itertools::zip_longest` here because just zipping hides if one iter is shorter
// and `itertools::zip_eq` panics.
.zip_longest(kzg_commitments.into_iter())
.enumerate()
.map(|(index, next)| match next {
EitherOrBoth::Both(hash, commitment) => Ok((hash?, commitment)),
// The number of versioned hashes from the blob transactions exceeds the number of
// commitments in the block.
EitherOrBoth::Left(_) => Err(BlockProcessingError::BlobNumCommitmentsMismatch {
commitments_processed_in_block: index,
commitments_processed_in_transactions: index.safe_add(1)?,
}),
// The number of commitments in the block exceeds the number of versioned hashes
// in the blob transactions.
EitherOrBoth::Right(_) => Err(BlockProcessingError::BlobNumCommitmentsMismatch {
commitments_processed_in_block: index.safe_add(1)?,
commitments_processed_in_transactions: index,
}),
});
itertools::process_results(zipped_iter, |mut iter| {
iter.all(|(tx_versioned_hash, commitment)| {
tx_versioned_hash == kzg_commitment_to_versioned_hash(commitment)
})
})
})?
}
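The comments above motivate `zip_longest` over a plain `zip`; the following is a minimal, self-contained sketch (illustrative strings only, not the real hash and commitment types) of how a length mismatch surfaces instead of being silently dropped:

use itertools::{EitherOrBoth, Itertools};

fn main() {
    let versioned_hashes = ["h1", "h2", "h3"];
    let kzg_commitments = ["c1", "c2"];
    // `zip_longest` keeps iterating past the shorter side, so a length mismatch
    // shows up as a `Left`/`Right` item instead of being truncated away, and
    // `zip_eq` would panic rather than let us return an error.
    for pair in versioned_hashes.iter().zip_longest(kzg_commitments.iter()) {
        match pair {
            EitherOrBoth::Both(hash, commitment) => println!("compare {hash} with {commitment}"),
            EitherOrBoth::Left(hash) => println!("unmatched hash {hash}: commitment count mismatch"),
            EitherOrBoth::Right(commitment) => println!("unmatched commitment {commitment}: hash count mismatch"),
        }
    }
}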
/// Only transactions of type `BLOB_TX_TYPE` should be passed into this function.
fn tx_peek_blob_versioned_hashes<T: EthSpec>(
opaque_tx: &Transaction<T::MaxBytesPerTransaction>,
) -> Result<
impl IntoIterator<Item = Result<VersionedHash, BlockProcessingError>> + '_,
BlockProcessingError,
> {
let tx_len = opaque_tx.len();
let message_offset = 1.safe_add(u32::from_ssz_bytes(opaque_tx.get(1..5).ok_or(
BlockProcessingError::BlobVersionHashIndexOutOfBounds {
length: tx_len,
index: 5,
},
)?)?)?;
let message_offset_usize = message_offset as usize;
// field offset: 32 + 8 + 32 + 32 + 8 + 4 + 32 + 4 + 4 + 32 = 188
let blob_versioned_hashes_offset = message_offset.safe_add(u32::from_ssz_bytes(
opaque_tx
.get(message_offset_usize.safe_add(188)?..message_offset_usize.safe_add(192)?)
.ok_or(BlockProcessingError::BlobVersionHashIndexOutOfBounds {
length: tx_len,
index: message_offset_usize.safe_add(192)?,
})?,
)?)?;
let num_hashes = tx_len
.safe_sub(blob_versioned_hashes_offset as usize)?
.safe_div(32)?;
Ok((0..num_hashes).into_iter().map(move |i| {
let next_version_hash_index =
(blob_versioned_hashes_offset as usize).safe_add(i.safe_mul(32)?)?;
let bytes = opaque_tx
.get(next_version_hash_index..next_version_hash_index.safe_add(32)?)
.ok_or(BlockProcessingError::BlobVersionHashIndexOutOfBounds {
length: tx_len,
index: (next_version_hash_index).safe_add(32)?,
})?;
Ok(VersionedHash::from_slice(bytes))
}))
}
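// Note on the offset arithmetic above (assuming the EIP-4844 blob-transaction
// layout current at the time): the `message` container begins with fixed-size
// fields of 32 + 8 + 32 + 32 + 8 + 4 + 32 + 4 + 4 + 32 = 188 bytes, so the
// 4-byte SSZ offset of `blob_versioned_hashes` occupies bytes [188, 192) of
// the message, and each versioned hash after that offset is a fixed 32 bytes.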
fn kzg_commitment_to_versioned_hash(kzg_commitment: &KzgCommitment) -> VersionedHash {
let mut hashed_commitment = hash_fixed(&kzg_commitment.0);
hashed_commitment[0] = VERSIONED_HASH_VERSION_KZG;
VersionedHash::from(hashed_commitment)
}
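For reference, a minimal standalone sketch of the versioned-hash derivation removed above, using the `sha2` crate in place of `eth2_hashing::hash_fixed` (the constant assumes the EIP-4844 value `VERSIONED_HASH_VERSION_KZG = 1`):

use sha2::{Digest, Sha256};

const VERSIONED_HASH_VERSION_KZG: u8 = 1;

// SHA-256 of the 48-byte commitment, with the first byte replaced by the KZG
// version prefix.
fn kzg_commitment_to_versioned_hash(commitment: &[u8; 48]) -> [u8; 32] {
    let digest = Sha256::digest(commitment);
    let mut hash = [0u8; 32];
    hash.copy_from_slice(&digest);
    hash[0] = VERSIONED_HASH_VERSION_KZG;
    hash
}

fn main() {
    let commitment = [0u8; 48];
    let versioned = kzg_commitment_to_versioned_hash(&commitment);
    assert_eq!(versioned[0], VERSIONED_HASH_VERSION_KZG);
}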

View File

@ -82,18 +82,6 @@ pub enum BlockProcessingError {
expected: Hash256,
found: Hash256,
},
BlobVersionHashMismatch,
/// The number of commitments in blob transactions in the payload does not match the number
/// of commitments in the block.
BlobNumCommitmentsMismatch {
commitments_processed_in_block: usize,
/// This number depicts the number of commitments processed from the blob transactions.
commitments_processed_in_transactions: usize,
},
BlobVersionHashIndexOutOfBounds {
index: usize,
length: usize,
},
WithdrawalCredentialsInvalid,
}

View File

@ -256,8 +256,7 @@ pub fn process_attestations<T: EthSpec, Payload: AbstractExecPayload<T>>(
}
BeaconBlockBodyRef::Altair(_)
| BeaconBlockBodyRef::Merge(_)
| BeaconBlockBodyRef::Capella(_)
| BeaconBlockBodyRef::Eip4844(_) => {
| BeaconBlockBodyRef::Capella(_) => {
altair::process_attestations(
state,
block_body.attestations(),

View File

@ -40,7 +40,7 @@ pub fn process_epoch<T: EthSpec>(
match state {
BeaconState::Base(_) => base::process_epoch(state, spec),
BeaconState::Altair(_) | BeaconState::Merge(_) => altair::process_epoch(state, spec),
BeaconState::Capella(_) | BeaconState::Eip4844(_) => capella::process_epoch(state, spec),
BeaconState::Capella(_) => capella::process_epoch(state, spec),
}
}

View File

@ -1,6 +1,4 @@
use crate::upgrade::{
upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_eip4844,
};
use crate::upgrade::{upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella};
use crate::{per_epoch_processing::EpochProcessingSummary, *};
use safe_arith::{ArithError, SafeArith};
use types::*;
@ -61,10 +59,6 @@ pub fn per_slot_processing<T: EthSpec>(
if spec.capella_fork_epoch == Some(state.current_epoch()) {
upgrade_to_capella(state, spec)?;
}
// Eip4844
if spec.eip4844_fork_epoch == Some(state.current_epoch()) {
upgrade_to_eip4844(state, spec)?;
}
}
Ok(summary)

View File

@ -1,9 +1,7 @@
pub mod altair;
pub mod capella;
pub mod eip4844;
pub mod merge;
pub use altair::upgrade_to_altair;
pub use capella::upgrade_to_capella;
pub use eip4844::upgrade_to_eip4844;
pub use merge::upgrade_to_bellatrix;

View File

@ -1,75 +0,0 @@
use std::mem;
use types::{BeaconState, BeaconStateEip4844, BeaconStateError as Error, ChainSpec, EthSpec, Fork};
/// Transform a `Capella` state into an `Eip4844` state.
pub fn upgrade_to_eip4844<E: EthSpec>(
pre_state: &mut BeaconState<E>,
spec: &ChainSpec,
) -> Result<(), Error> {
let epoch = pre_state.current_epoch();
let pre = pre_state.as_capella_mut()?;
let previous_fork_version = pre.fork.current_version;
// Where possible, use something like `mem::take` to move fields from behind the &mut
// reference. For other fields that don't have a good default value, use `clone`.
//
// Fixed size vectors get cloned because replacing them would require the same size
// allocation as cloning.
let post = BeaconState::Eip4844(BeaconStateEip4844 {
// Versioning
genesis_time: pre.genesis_time,
genesis_validators_root: pre.genesis_validators_root,
slot: pre.slot,
fork: Fork {
previous_version: previous_fork_version,
current_version: spec.eip4844_fork_version,
epoch,
},
// History
latest_block_header: pre.latest_block_header.clone(),
block_roots: pre.block_roots.clone(),
state_roots: pre.state_roots.clone(),
historical_roots: mem::take(&mut pre.historical_roots),
// Eth1
eth1_data: pre.eth1_data.clone(),
eth1_data_votes: mem::take(&mut pre.eth1_data_votes),
eth1_deposit_index: pre.eth1_deposit_index,
// Registry
validators: mem::take(&mut pre.validators),
balances: mem::take(&mut pre.balances),
// Randomness
randao_mixes: pre.randao_mixes.clone(),
// Slashings
slashings: pre.slashings.clone(),
// Participation
previous_epoch_participation: mem::take(&mut pre.previous_epoch_participation),
current_epoch_participation: mem::take(&mut pre.current_epoch_participation),
// Finality
justification_bits: pre.justification_bits.clone(),
previous_justified_checkpoint: pre.previous_justified_checkpoint,
current_justified_checkpoint: pre.current_justified_checkpoint,
finalized_checkpoint: pre.finalized_checkpoint,
// Inactivity
inactivity_scores: mem::take(&mut pre.inactivity_scores),
// Sync committees
current_sync_committee: pre.current_sync_committee.clone(),
next_sync_committee: pre.next_sync_committee.clone(),
// Execution
latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_eip4844(),
// Capella
next_withdrawal_index: pre.next_withdrawal_index,
next_withdrawal_validator_index: pre.next_withdrawal_validator_index,
historical_summaries: pre.historical_summaries.clone(),
// Caches
total_active_balance: pre.total_active_balance,
committee_caches: mem::take(&mut pre.committee_caches),
pubkey_cache: mem::take(&mut pre.pubkey_cache),
exit_cache: mem::take(&mut pre.exit_cache),
tree_hash_cache: mem::take(&mut pre.tree_hash_cache),
});
*pre_state = post;
Ok(())
}
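The `mem::take` pattern in the deleted upgrade above avoids cloning large lists when moving fields out from behind a `&mut` reference; a tiny illustration of the semantics it relies on:

use std::mem;

fn main() {
    let mut pre_balances: Vec<u64> = vec![32, 32, 31];
    // `mem::take` moves the value out and leaves `Default::default()` (an empty
    // vector) behind, so the large allocation is moved rather than copied.
    let post_balances = mem::take(&mut pre_balances);
    assert_eq!(post_balances, vec![32, 32, 31]);
    assert!(pre_balances.is_empty());
}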

View File

@ -9,7 +9,6 @@ name = "benches"
harness = false
[dependencies]
serde-big-array = {version = "0.3.2", features = ["const-generics"]}
merkle_proof = { path = "../../consensus/merkle_proof" }
bls = { path = "../../crypto/bls", features = ["arbitrary"] }
compare_fields = { path = "../../common/compare_fields" }

View File

@ -1,6 +1,6 @@
use crate::beacon_block_body::{
BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyEip4844, BeaconBlockBodyMerge,
BeaconBlockBodyRef, BeaconBlockBodyRefMut,
BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyMerge, BeaconBlockBodyRef,
BeaconBlockBodyRefMut,
};
use crate::test_utils::TestRandom;
use crate::*;
@ -17,7 +17,7 @@ use tree_hash_derive::TreeHash;
/// A block of the `BeaconChain`.
#[superstruct(
variants(Base, Altair, Merge, Capella, Eip4844),
variants(Base, Altair, Merge, Capella),
variant_attributes(
derive(
Debug,
@ -72,8 +72,6 @@ pub struct BeaconBlock<T: EthSpec, Payload: AbstractExecPayload<T> = FullPayload
pub body: BeaconBlockBodyMerge<T, Payload>,
#[superstruct(only(Capella), partial_getter(rename = "body_capella"))]
pub body: BeaconBlockBodyCapella<T, Payload>,
#[superstruct(only(Eip4844), partial_getter(rename = "body_eip4844"))]
pub body: BeaconBlockBodyEip4844<T, Payload>,
}
pub type BlindedBeaconBlock<E> = BeaconBlock<E, BlindedPayload<E>>;
@ -126,9 +124,8 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> BeaconBlock<T, Payload> {
/// Usually it's better to prefer `from_ssz_bytes` which will decode the correct variant based
/// on the fork slot.
pub fn any_from_ssz_bytes(bytes: &[u8]) -> Result<Self, ssz::DecodeError> {
BeaconBlockEip4844::from_ssz_bytes(bytes)
.map(BeaconBlock::Eip4844)
.or_else(|_| BeaconBlockCapella::from_ssz_bytes(bytes).map(BeaconBlock::Capella))
BeaconBlockCapella::from_ssz_bytes(bytes)
.map(BeaconBlock::Capella)
.or_else(|_| BeaconBlockMerge::from_ssz_bytes(bytes).map(BeaconBlock::Merge))
.or_else(|_| BeaconBlockAltair::from_ssz_bytes(bytes).map(BeaconBlock::Altair))
.or_else(|_| BeaconBlockBase::from_ssz_bytes(bytes).map(BeaconBlock::Base))
@ -206,7 +203,6 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload<T>> BeaconBlockRef<'a, T, Payl
BeaconBlockRef::Altair { .. } => ForkName::Altair,
BeaconBlockRef::Merge { .. } => ForkName::Merge,
BeaconBlockRef::Capella { .. } => ForkName::Capella,
BeaconBlockRef::Eip4844 { .. } => ForkName::Eip4844,
};
if fork_at_slot == object_fork {
@ -560,36 +556,6 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> EmptyBlock for BeaconBlockCape
}
}
impl<T: EthSpec, Payload: AbstractExecPayload<T>> EmptyBlock for BeaconBlockEip4844<T, Payload> {
/// Returns an empty Eip4844 block to be used during genesis.
fn empty(spec: &ChainSpec) -> Self {
BeaconBlockEip4844 {
slot: spec.genesis_slot,
proposer_index: 0,
parent_root: Hash256::zero(),
state_root: Hash256::zero(),
body: BeaconBlockBodyEip4844 {
randao_reveal: Signature::empty(),
eth1_data: Eth1Data {
deposit_root: Hash256::zero(),
block_hash: Hash256::zero(),
deposit_count: 0,
},
graffiti: Graffiti::default(),
proposer_slashings: VariableList::empty(),
attester_slashings: VariableList::empty(),
attestations: VariableList::empty(),
deposits: VariableList::empty(),
voluntary_exits: VariableList::empty(),
sync_aggregate: SyncAggregate::empty(),
execution_payload: Payload::Eip4844::default(),
bls_to_execution_changes: VariableList::empty(),
blob_kzg_commitments: VariableList::empty(),
},
}
}
}
// We can convert pre-Bellatrix blocks without payloads into blocks "with" payloads.
impl<E: EthSpec> From<BeaconBlockBase<E, BlindedPayload<E>>>
for BeaconBlockBase<E, FullPayload<E>>
@ -669,7 +635,6 @@ impl_from!(BeaconBlockBase, <E, FullPayload<E>>, <E, BlindedPayload<E>>, |body:
impl_from!(BeaconBlockAltair, <E, FullPayload<E>>, <E, BlindedPayload<E>>, |body: BeaconBlockBodyAltair<_, _>| body.into());
impl_from!(BeaconBlockMerge, <E, FullPayload<E>>, <E, BlindedPayload<E>>, |body: BeaconBlockBodyMerge<_, _>| body.into());
impl_from!(BeaconBlockCapella, <E, FullPayload<E>>, <E, BlindedPayload<E>>, |body: BeaconBlockBodyCapella<_, _>| body.into());
impl_from!(BeaconBlockEip4844, <E, FullPayload<E>>, <E, BlindedPayload<E>>, |body: BeaconBlockBodyEip4844<_, _>| body.into());
// We can clone blocks with payloads to blocks without payloads, without cloning the payload.
macro_rules! impl_clone_as_blinded {
@ -701,7 +666,6 @@ impl_clone_as_blinded!(BeaconBlockBase, <E, FullPayload<E>>, <E, BlindedPayload<
impl_clone_as_blinded!(BeaconBlockAltair, <E, FullPayload<E>>, <E, BlindedPayload<E>>);
impl_clone_as_blinded!(BeaconBlockMerge, <E, FullPayload<E>>, <E, BlindedPayload<E>>);
impl_clone_as_blinded!(BeaconBlockCapella, <E, FullPayload<E>>, <E, BlindedPayload<E>>);
impl_clone_as_blinded!(BeaconBlockEip4844, <E, FullPayload<E>>, <E, BlindedPayload<E>>);
// A reference to a full beacon block can be cloned into a blinded beacon block, without cloning the
// execution payload.
@ -817,25 +781,6 @@ mod tests {
});
}
#[test]
fn roundtrip_4844_block() {
let rng = &mut XorShiftRng::from_seed([42; 16]);
let spec = &ForkName::Eip4844.make_genesis_spec(MainnetEthSpec::default_spec());
let inner_block = BeaconBlockEip4844 {
slot: Slot::random_for_test(rng),
proposer_index: u64::random_for_test(rng),
parent_root: Hash256::random_for_test(rng),
state_root: Hash256::random_for_test(rng),
body: BeaconBlockBodyEip4844::random_for_test(rng),
};
let block = BeaconBlock::Eip4844(inner_block.clone());
test_ssz_tree_hash_pair_with(&block, &inner_block, |bytes| {
BeaconBlock::from_ssz_bytes(bytes, spec)
});
}
#[test]
fn decode_base_and_altair() {
type E = MainnetEthSpec;
@ -851,12 +796,9 @@ mod tests {
let altair_slot = altair_epoch.start_slot(E::slots_per_epoch());
let capella_epoch = altair_fork_epoch + 1;
let capella_slot = capella_epoch.start_slot(E::slots_per_epoch());
let eip4844_epoch = capella_epoch + 1;
let eip4844_slot = eip4844_epoch.start_slot(E::slots_per_epoch());
spec.altair_fork_epoch = Some(altair_epoch);
spec.capella_fork_epoch = Some(capella_epoch);
spec.eip4844_fork_epoch = Some(eip4844_epoch);
// BeaconBlockBase
{
@ -923,27 +865,5 @@ mod tests {
BeaconBlock::from_ssz_bytes(&bad_block.as_ssz_bytes(), &spec)
.expect_err("bad capella block cannot be decoded");
}
// BeaconBlockEip4844
{
let good_block = BeaconBlock::Eip4844(BeaconBlockEip4844 {
slot: eip4844_slot,
..<_>::random_for_test(rng)
});
// It's invalid to have an Eip4844 block with an epoch lower than the fork epoch.
let bad_block = {
let mut bad = good_block.clone();
*bad.slot_mut() = capella_slot;
bad
};
assert_eq!(
BeaconBlock::from_ssz_bytes(&good_block.as_ssz_bytes(), &spec)
.expect("good eip4844 block can be decoded"),
good_block
);
BeaconBlock::from_ssz_bytes(&bad_block.as_ssz_bytes(), &spec)
.expect_err("bad eip4844 block cannot be decoded");
}
}
}

View File

@ -1,4 +1,3 @@
use crate::kzg_commitment::KzgCommitment;
use crate::test_utils::TestRandom;
use crate::*;
use derivative::Derivative;
@ -14,7 +13,7 @@ use tree_hash_derive::TreeHash;
///
/// This *superstruct* abstracts over the hard-fork.
#[superstruct(
variants(Base, Altair, Merge, Capella, Eip4844),
variants(Base, Altair, Merge, Capella),
variant_attributes(
derive(
Debug,
@ -52,7 +51,7 @@ pub struct BeaconBlockBody<T: EthSpec, Payload: AbstractExecPayload<T> = FullPay
pub attestations: VariableList<Attestation<T>, T::MaxAttestations>,
pub deposits: VariableList<Deposit, T::MaxDeposits>,
pub voluntary_exits: VariableList<SignedVoluntaryExit, T::MaxVoluntaryExits>,
#[superstruct(only(Altair, Merge, Capella, Eip4844))]
#[superstruct(only(Altair, Merge, Capella))]
pub sync_aggregate: SyncAggregate<T>,
// We flatten the execution payload so that serde can use the name of the inner type,
// either `execution_payload` for full payloads, or `execution_payload_header` for blinded
@ -63,14 +62,9 @@ pub struct BeaconBlockBody<T: EthSpec, Payload: AbstractExecPayload<T> = FullPay
#[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))]
#[serde(flatten)]
pub execution_payload: Payload::Capella,
#[superstruct(only(Eip4844), partial_getter(rename = "execution_payload_eip4844"))]
#[serde(flatten)]
pub execution_payload: Payload::Eip4844,
#[superstruct(only(Capella, Eip4844))]
#[superstruct(only(Capella))]
pub bls_to_execution_changes:
VariableList<SignedBlsToExecutionChange, T::MaxBlsToExecutionChanges>,
#[superstruct(only(Eip4844))]
pub blob_kzg_commitments: VariableList<KzgCommitment, T::MaxBlobsPerBlock>,
#[superstruct(only(Base, Altair))]
#[ssz(skip_serializing, skip_deserializing)]
#[tree_hash(skip_hashing)]
@ -91,7 +85,6 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload<T>> BeaconBlockBodyRef<'a, T,
Self::Base(_) | Self::Altair(_) => Err(Error::IncorrectStateVariant),
Self::Merge(body) => Ok(Payload::Ref::from(&body.execution_payload)),
Self::Capella(body) => Ok(Payload::Ref::from(&body.execution_payload)),
Self::Eip4844(body) => Ok(Payload::Ref::from(&body.execution_payload)),
}
}
}
@ -104,7 +97,6 @@ impl<'a, T: EthSpec> BeaconBlockBodyRef<'a, T> {
BeaconBlockBodyRef::Altair { .. } => ForkName::Altair,
BeaconBlockBodyRef::Merge { .. } => ForkName::Merge,
BeaconBlockBodyRef::Capella { .. } => ForkName::Capella,
BeaconBlockBodyRef::Eip4844 { .. } => ForkName::Eip4844,
}
}
}
@ -329,50 +321,6 @@ impl<E: EthSpec> From<BeaconBlockBodyCapella<E, FullPayload<E>>>
}
}
impl<E: EthSpec> From<BeaconBlockBodyEip4844<E, FullPayload<E>>>
for (
BeaconBlockBodyEip4844<E, BlindedPayload<E>>,
Option<ExecutionPayloadEip4844<E>>,
)
{
fn from(body: BeaconBlockBodyEip4844<E, FullPayload<E>>) -> Self {
let BeaconBlockBodyEip4844 {
randao_reveal,
eth1_data,
graffiti,
proposer_slashings,
attester_slashings,
attestations,
deposits,
voluntary_exits,
sync_aggregate,
execution_payload: FullPayloadEip4844 { execution_payload },
bls_to_execution_changes,
blob_kzg_commitments,
} = body;
(
BeaconBlockBodyEip4844 {
randao_reveal,
eth1_data,
graffiti,
proposer_slashings,
attester_slashings,
attestations,
deposits,
voluntary_exits,
sync_aggregate,
execution_payload: BlindedPayloadEip4844 {
execution_payload_header: From::from(&execution_payload),
},
bls_to_execution_changes,
blob_kzg_commitments,
},
Some(execution_payload),
)
}
}
// We can clone a full block into a blinded block, without cloning the payload.
impl<E: EthSpec> BeaconBlockBodyBase<E, FullPayload<E>> {
pub fn clone_as_blinded(&self) -> BeaconBlockBodyBase<E, BlindedPayload<E>> {
@ -454,42 +402,6 @@ impl<E: EthSpec> BeaconBlockBodyCapella<E, FullPayload<E>> {
}
}
impl<E: EthSpec> BeaconBlockBodyEip4844<E, FullPayload<E>> {
pub fn clone_as_blinded(&self) -> BeaconBlockBodyEip4844<E, BlindedPayload<E>> {
let BeaconBlockBodyEip4844 {
randao_reveal,
eth1_data,
graffiti,
proposer_slashings,
attester_slashings,
attestations,
deposits,
voluntary_exits,
sync_aggregate,
execution_payload: FullPayloadEip4844 { execution_payload },
bls_to_execution_changes,
blob_kzg_commitments,
} = self;
BeaconBlockBodyEip4844 {
randao_reveal: randao_reveal.clone(),
eth1_data: eth1_data.clone(),
graffiti: *graffiti,
proposer_slashings: proposer_slashings.clone(),
attester_slashings: attester_slashings.clone(),
attestations: attestations.clone(),
deposits: deposits.clone(),
voluntary_exits: voluntary_exits.clone(),
sync_aggregate: sync_aggregate.clone(),
execution_payload: BlindedPayloadEip4844 {
execution_payload_header: execution_payload.into(),
},
bls_to_execution_changes: bls_to_execution_changes.clone(),
blob_kzg_commitments: blob_kzg_commitments.clone(),
}
}
}
impl<E: EthSpec> From<BeaconBlockBody<E, FullPayload<E>>>
for (
BeaconBlockBody<E, BlindedPayload<E>>,

View File

@ -176,7 +176,7 @@ impl From<BeaconStateHash> for Hash256 {
/// The state of the `BeaconChain` at some slot.
#[superstruct(
variants(Base, Altair, Merge, Capella, Eip4844),
variants(Base, Altair, Merge, Capella),
variant_attributes(
derive(
Derivative,
@ -256,9 +256,9 @@ where
pub current_epoch_attestations: VariableList<PendingAttestation<T>, T::MaxPendingAttestations>,
// Participation (Altair and later)
#[superstruct(only(Altair, Merge, Capella, Eip4844))]
#[superstruct(only(Altair, Merge, Capella))]
pub previous_epoch_participation: VariableList<ParticipationFlags, T::ValidatorRegistryLimit>,
#[superstruct(only(Altair, Merge, Capella, Eip4844))]
#[superstruct(only(Altair, Merge, Capella))]
pub current_epoch_participation: VariableList<ParticipationFlags, T::ValidatorRegistryLimit>,
// Finality
@ -273,13 +273,13 @@ where
// Inactivity
#[serde(with = "ssz_types::serde_utils::quoted_u64_var_list")]
#[superstruct(only(Altair, Merge, Capella, Eip4844))]
#[superstruct(only(Altair, Merge, Capella))]
pub inactivity_scores: VariableList<u64, T::ValidatorRegistryLimit>,
// Light-client sync committees
#[superstruct(only(Altair, Merge, Capella, Eip4844))]
#[superstruct(only(Altair, Merge, Capella))]
pub current_sync_committee: Arc<SyncCommittee<T>>,
#[superstruct(only(Altair, Merge, Capella, Eip4844))]
#[superstruct(only(Altair, Merge, Capella))]
pub next_sync_committee: Arc<SyncCommittee<T>>,
// Execution
@ -293,21 +293,16 @@ where
partial_getter(rename = "latest_execution_payload_header_capella")
)]
pub latest_execution_payload_header: ExecutionPayloadHeaderCapella<T>,
#[superstruct(
only(Eip4844),
partial_getter(rename = "latest_execution_payload_header_eip4844")
)]
pub latest_execution_payload_header: ExecutionPayloadHeaderEip4844<T>,
// Capella
#[superstruct(only(Capella, Eip4844), partial_getter(copy))]
#[superstruct(only(Capella), partial_getter(copy))]
#[serde(with = "eth2_serde_utils::quoted_u64")]
pub next_withdrawal_index: u64,
#[superstruct(only(Capella, Eip4844), partial_getter(copy))]
#[superstruct(only(Capella), partial_getter(copy))]
#[serde(with = "eth2_serde_utils::quoted_u64")]
pub next_withdrawal_validator_index: u64,
// Deep history valid from Capella onwards.
#[superstruct(only(Capella, Eip4844))]
#[superstruct(only(Capella))]
pub historical_summaries: VariableList<HistoricalSummary, T::HistoricalRootsLimit>,
// Caching (not in the spec)
@ -420,7 +415,6 @@ impl<T: EthSpec> BeaconState<T> {
BeaconState::Altair { .. } => ForkName::Altair,
BeaconState::Merge { .. } => ForkName::Merge,
BeaconState::Capella { .. } => ForkName::Capella,
BeaconState::Eip4844 { .. } => ForkName::Eip4844,
};
if fork_at_slot == object_fork {
@ -720,9 +714,6 @@ impl<T: EthSpec> BeaconState<T> {
BeaconState::Capella(state) => Ok(ExecutionPayloadHeaderRef::Capella(
&state.latest_execution_payload_header,
)),
BeaconState::Eip4844(state) => Ok(ExecutionPayloadHeaderRef::Eip4844(
&state.latest_execution_payload_header,
)),
}
}
@ -737,9 +728,6 @@ impl<T: EthSpec> BeaconState<T> {
BeaconState::Capella(state) => Ok(ExecutionPayloadHeaderRefMut::Capella(
&mut state.latest_execution_payload_header,
)),
BeaconState::Eip4844(state) => Ok(ExecutionPayloadHeaderRefMut::Eip4844(
&mut state.latest_execution_payload_header,
)),
}
}
@ -1168,7 +1156,6 @@ impl<T: EthSpec> BeaconState<T> {
BeaconState::Altair(state) => (&mut state.validators, &mut state.balances),
BeaconState::Merge(state) => (&mut state.validators, &mut state.balances),
BeaconState::Capella(state) => (&mut state.validators, &mut state.balances),
BeaconState::Eip4844(state) => (&mut state.validators, &mut state.balances),
}
}
@ -1366,7 +1353,6 @@ impl<T: EthSpec> BeaconState<T> {
BeaconState::Altair(state) => Ok(&mut state.current_epoch_participation),
BeaconState::Merge(state) => Ok(&mut state.current_epoch_participation),
BeaconState::Capella(state) => Ok(&mut state.current_epoch_participation),
BeaconState::Eip4844(state) => Ok(&mut state.current_epoch_participation),
}
} else if epoch == self.previous_epoch() {
match self {
@ -1374,7 +1360,6 @@ impl<T: EthSpec> BeaconState<T> {
BeaconState::Altair(state) => Ok(&mut state.previous_epoch_participation),
BeaconState::Merge(state) => Ok(&mut state.previous_epoch_participation),
BeaconState::Capella(state) => Ok(&mut state.previous_epoch_participation),
BeaconState::Eip4844(state) => Ok(&mut state.previous_epoch_participation),
}
} else {
Err(BeaconStateError::EpochOutOfBounds)
@ -1680,7 +1665,6 @@ impl<T: EthSpec> BeaconState<T> {
BeaconState::Altair(inner) => BeaconState::Altair(inner.clone()),
BeaconState::Merge(inner) => BeaconState::Merge(inner.clone()),
BeaconState::Capella(inner) => BeaconState::Capella(inner.clone()),
BeaconState::Eip4844(inner) => BeaconState::Eip4844(inner.clone()),
};
if config.committee_caches {
*res.committee_caches_mut() = self.committee_caches().clone();
@ -1849,7 +1833,6 @@ impl<T: EthSpec> CompareFields for BeaconState<T> {
(BeaconState::Altair(x), BeaconState::Altair(y)) => x.compare_fields(y),
(BeaconState::Merge(x), BeaconState::Merge(y)) => x.compare_fields(y),
(BeaconState::Capella(x), BeaconState::Capella(y)) => x.compare_fields(y),
(BeaconState::Eip4844(x), BeaconState::Eip4844(y)) => x.compare_fields(y),
_ => panic!("compare_fields: mismatched state variants",),
}
}

View File

@ -1,43 +0,0 @@
use crate::kzg_proof::KzgProof;
use crate::{Blob, EthSpec, Hash256, SignedRoot, Slot};
use serde_derive::{Deserialize, Serialize};
use ssz::Encode;
use ssz_derive::{Decode, Encode};
use ssz_types::VariableList;
use tree_hash_derive::TreeHash;
#[derive(
Debug,
Clone,
Serialize,
Deserialize,
Encode,
Decode,
TreeHash,
PartialEq,
Default,
arbitrary::Arbitrary,
)]
#[serde(bound = "T: EthSpec")]
#[arbitrary(bound = "T: EthSpec")]
pub struct BlobsSidecar<T: EthSpec> {
pub beacon_block_root: Hash256,
pub beacon_block_slot: Slot,
pub blobs: VariableList<Blob<T>, T::MaxBlobsPerBlock>,
pub kzg_aggregate_proof: KzgProof,
}
impl<T: EthSpec> SignedRoot for BlobsSidecar<T> {}
impl<T: EthSpec> BlobsSidecar<T> {
pub fn empty() -> Self {
Self::default()
}
#[allow(clippy::integer_arithmetic)]
pub fn max_size() -> usize {
// Fixed part
Self::empty().as_ssz_bytes().len()
// Max size of variable length `blobs` field
+ (T::max_blobs_per_block() * <Blob<T> as Encode>::ssz_fixed_len())
}
}
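The `max_size` arithmetic above bounds the variable-length `blobs` field; with the (now removed) mainnet parameters `MaxBlobsPerBlock = 16` and `FieldElementsPerBlob = 4096`, and 32-byte field elements, the worked numbers come out roughly as follows:

// Illustrative only: these constants mirror the removed MainnetEthSpec values.
const MAX_BLOBS_PER_BLOCK: usize = 16;
const FIELD_ELEMENTS_PER_BLOB: usize = 4096;
const BYTES_PER_FIELD_ELEMENT: usize = 32;

fn main() {
    let bytes_per_blob = FIELD_ELEMENTS_PER_BLOB * BYTES_PER_FIELD_ELEMENT; // 131_072
    let max_blobs_bytes = MAX_BLOBS_PER_BLOCK * bytes_per_blob; // 2_097_152
    println!("variable part of BlobsSidecar::max_size() <= {max_blobs_bytes} bytes");
}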

View File

@ -14,7 +14,6 @@ pub enum Domain {
BlsToExecutionChange,
BeaconProposer,
BeaconAttester,
BlobsSideCar,
Randao,
Deposit,
VoluntaryExit,
@ -100,7 +99,6 @@ pub struct ChainSpec {
*/
pub(crate) domain_beacon_proposer: u32,
pub(crate) domain_beacon_attester: u32,
pub(crate) domain_blobs_sidecar: u32,
pub(crate) domain_randao: u32,
pub(crate) domain_deposit: u32,
pub(crate) domain_voluntary_exit: u32,
@ -161,12 +159,6 @@ pub struct ChainSpec {
pub capella_fork_epoch: Option<Epoch>,
pub max_validators_per_withdrawals_sweep: u64,
/*
* Eip4844 hard fork params
*/
pub eip4844_fork_version: [u8; 4],
pub eip4844_fork_epoch: Option<Epoch>,
/*
* Networking
*/
@ -255,16 +247,13 @@ impl ChainSpec {
/// Returns the name of the fork which is active at `epoch`.
pub fn fork_name_at_epoch(&self, epoch: Epoch) -> ForkName {
match self.eip4844_fork_epoch {
Some(fork_epoch) if epoch >= fork_epoch => ForkName::Eip4844,
_ => match self.capella_fork_epoch {
Some(fork_epoch) if epoch >= fork_epoch => ForkName::Capella,
_ => match self.bellatrix_fork_epoch {
Some(fork_epoch) if epoch >= fork_epoch => ForkName::Merge,
_ => match self.altair_fork_epoch {
Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair,
_ => ForkName::Base,
},
match self.capella_fork_epoch {
Some(fork_epoch) if epoch >= fork_epoch => ForkName::Capella,
_ => match self.bellatrix_fork_epoch {
Some(fork_epoch) if epoch >= fork_epoch => ForkName::Merge,
_ => match self.altair_fork_epoch {
Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair,
_ => ForkName::Base,
},
},
}
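The nested match above resolves the active fork as the newest fork whose activation epoch is configured and not later than `epoch`; a minimal sketch of that selection rule with illustrative epochs (not real network values):

#[derive(Debug, PartialEq)]
enum Fork {
    Base,
    Altair,
    Merge,
    Capella,
}

fn fork_at_epoch(epoch: u64, altair: Option<u64>, bellatrix: Option<u64>, capella: Option<u64>) -> Fork {
    match capella {
        Some(fork_epoch) if epoch >= fork_epoch => Fork::Capella,
        _ => match bellatrix {
            Some(fork_epoch) if epoch >= fork_epoch => Fork::Merge,
            _ => match altair {
                Some(fork_epoch) if epoch >= fork_epoch => Fork::Altair,
                _ => Fork::Base,
            },
        },
    }
}

fn main() {
    // Capella not scheduled: anything at or after the bellatrix epoch is Merge.
    assert_eq!(fork_at_epoch(25, Some(10), Some(20), None), Fork::Merge);
    // Once capella is scheduled and reached, it wins.
    assert_eq!(fork_at_epoch(35, Some(10), Some(20), Some(30)), Fork::Capella);
    assert_eq!(fork_at_epoch(5, Some(10), Some(20), Some(30)), Fork::Base);
}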
@ -277,7 +266,6 @@ impl ChainSpec {
ForkName::Altair => self.altair_fork_version,
ForkName::Merge => self.bellatrix_fork_version,
ForkName::Capella => self.capella_fork_version,
ForkName::Eip4844 => self.eip4844_fork_version,
}
}
@ -288,7 +276,6 @@ impl ChainSpec {
ForkName::Altair => self.altair_fork_epoch,
ForkName::Merge => self.bellatrix_fork_epoch,
ForkName::Capella => self.capella_fork_epoch,
ForkName::Eip4844 => self.eip4844_fork_epoch,
}
}
@ -299,7 +286,6 @@ impl ChainSpec {
BeaconState::Altair(_) => self.inactivity_penalty_quotient_altair,
BeaconState::Merge(_) => self.inactivity_penalty_quotient_bellatrix,
BeaconState::Capella(_) => self.inactivity_penalty_quotient_bellatrix,
BeaconState::Eip4844(_) => self.inactivity_penalty_quotient_bellatrix,
}
}
@ -313,7 +299,6 @@ impl ChainSpec {
BeaconState::Altair(_) => self.proportional_slashing_multiplier_altair,
BeaconState::Merge(_) => self.proportional_slashing_multiplier_bellatrix,
BeaconState::Capella(_) => self.proportional_slashing_multiplier_bellatrix,
BeaconState::Eip4844(_) => self.proportional_slashing_multiplier_bellatrix,
}
}
@ -327,7 +312,6 @@ impl ChainSpec {
BeaconState::Altair(_) => self.min_slashing_penalty_quotient_altair,
BeaconState::Merge(_) => self.min_slashing_penalty_quotient_bellatrix,
BeaconState::Capella(_) => self.min_slashing_penalty_quotient_bellatrix,
BeaconState::Eip4844(_) => self.min_slashing_penalty_quotient_bellatrix,
}
}
@ -366,7 +350,6 @@ impl ChainSpec {
match domain {
Domain::BeaconProposer => self.domain_beacon_proposer,
Domain::BeaconAttester => self.domain_beacon_attester,
Domain::BlobsSideCar => self.domain_blobs_sidecar,
Domain::Randao => self.domain_randao,
Domain::Deposit => self.domain_deposit,
Domain::VoluntaryExit => self.domain_voluntary_exit,
@ -574,7 +557,6 @@ impl ChainSpec {
domain_voluntary_exit: 4,
domain_selection_proof: 5,
domain_aggregate_and_proof: 6,
domain_blobs_sidecar: 10, // 0x0a000000
/*
* Fork choice
@ -636,12 +618,6 @@ impl ChainSpec {
capella_fork_epoch: None,
max_validators_per_withdrawals_sweep: 16384,
/*
* Eip4844 hard fork params
*/
eip4844_fork_version: [0x04, 0x00, 0x00, 0x00],
eip4844_fork_epoch: None,
/*
* Network specific
*/
@ -709,9 +685,6 @@ impl ChainSpec {
capella_fork_version: [0x03, 0x00, 0x00, 0x01],
capella_fork_epoch: None,
max_validators_per_withdrawals_sweep: 16,
// Eip4844
eip4844_fork_version: [0x04, 0x00, 0x00, 0x01],
eip4844_fork_epoch: None,
// Other
network_id: 2, // lighthouse testnet network id
deposit_chain_id: 5,
@ -809,7 +782,6 @@ impl ChainSpec {
domain_voluntary_exit: 4,
domain_selection_proof: 5,
domain_aggregate_and_proof: 6,
domain_blobs_sidecar: 10,
/*
* Fork choice
@ -873,12 +845,6 @@ impl ChainSpec {
capella_fork_epoch: None,
max_validators_per_withdrawals_sweep: 16384,
/*
* Eip4844 hard fork params
*/
eip4844_fork_version: [0x04, 0x00, 0x00, 0x64],
eip4844_fork_epoch: None,
/*
* Network specific
*/
@ -970,14 +936,6 @@ pub struct Config {
#[serde(deserialize_with = "deserialize_fork_epoch")]
pub capella_fork_epoch: Option<MaybeQuoted<Epoch>>,
#[serde(default = "default_eip4844_fork_version")]
#[serde(with = "eth2_serde_utils::bytes_4_hex")]
eip4844_fork_version: [u8; 4],
#[serde(default)]
#[serde(serialize_with = "serialize_fork_epoch")]
#[serde(deserialize_with = "deserialize_fork_epoch")]
pub eip4844_fork_epoch: Option<MaybeQuoted<Epoch>>,
#[serde(with = "eth2_serde_utils::quoted_u64")]
seconds_per_slot: u64,
#[serde(with = "eth2_serde_utils::quoted_u64")]
@ -1020,11 +978,6 @@ fn default_capella_fork_version() -> [u8; 4] {
[0xff, 0xff, 0xff, 0xff]
}
fn default_eip4844_fork_version() -> [u8; 4] {
// This value shouldn't be used.
[0xff, 0xff, 0xff, 0xff]
}
/// Placeholder value: 2^256-2^10 (115792089237316195423570985008687907853269984665640564039457584007913129638912).
///
/// Taken from https://github.com/ethereum/consensus-specs/blob/d5e4828aecafaf1c57ef67a5f23c4ae7b08c5137/configs/mainnet.yaml#L15-L16
@ -1125,10 +1078,6 @@ impl Config {
capella_fork_epoch: spec
.capella_fork_epoch
.map(|epoch| MaybeQuoted { value: epoch }),
eip4844_fork_version: spec.eip4844_fork_version,
eip4844_fork_epoch: spec
.eip4844_fork_epoch
.map(|epoch| MaybeQuoted { value: epoch }),
seconds_per_slot: spec.seconds_per_slot,
seconds_per_eth1_block: spec.seconds_per_eth1_block,
@ -1176,8 +1125,6 @@ impl Config {
bellatrix_fork_version,
capella_fork_epoch,
capella_fork_version,
eip4844_fork_epoch,
eip4844_fork_version,
seconds_per_slot,
seconds_per_eth1_block,
min_validator_withdrawability_delay,
@ -1210,8 +1157,6 @@ impl Config {
bellatrix_fork_version,
capella_fork_epoch: capella_fork_epoch.map(|q| q.value),
capella_fork_version,
eip4844_fork_epoch: eip4844_fork_epoch.map(|q| q.value),
eip4844_fork_version,
seconds_per_slot,
seconds_per_eth1_block,
min_validator_withdrawability_delay,
@ -1285,7 +1230,6 @@ mod tests {
test_domain(Domain::BeaconProposer, spec.domain_beacon_proposer, &spec);
test_domain(Domain::BeaconAttester, spec.domain_beacon_attester, &spec);
test_domain(Domain::BlobsSideCar, spec.domain_blobs_sidecar, &spec);
test_domain(Domain::Randao, spec.domain_randao, &spec);
test_domain(Domain::Deposit, spec.domain_deposit, &spec);
test_domain(Domain::VoluntaryExit, spec.domain_voluntary_exit, &spec);
@ -1310,8 +1254,6 @@ mod tests {
spec.domain_bls_to_execution_change,
&spec,
);
test_domain(Domain::BlobsSideCar, spec.domain_blobs_sidecar, &spec);
}
fn apply_bit_mask(domain_bytes: [u8; 4], spec: &ChainSpec) -> u32 {

View File

@ -78,7 +78,6 @@ pub fn get_extra_fields(spec: &ChainSpec) -> HashMap<String, Value> {
"bls_withdrawal_prefix".to_uppercase() => u8_hex(spec.bls_withdrawal_prefix_byte),
"domain_beacon_proposer".to_uppercase() => u32_hex(spec.domain_beacon_proposer),
"domain_beacon_attester".to_uppercase() => u32_hex(spec.domain_beacon_attester),
"domain_blobs_sidecar".to_uppercase() => u32_hex(spec.domain_blobs_sidecar),
"domain_randao".to_uppercase()=> u32_hex(spec.domain_randao),
"domain_deposit".to_uppercase()=> u32_hex(spec.domain_deposit),
"domain_voluntary_exit".to_uppercase() => u32_hex(spec.domain_voluntary_exit),

View File

@ -22,17 +22,3 @@ pub mod altair {
pub mod merge {
pub const INTERVALS_PER_SLOT: u64 = 3;
}
pub mod eip4844 {
use crate::Uint256;
use lazy_static::lazy_static;
lazy_static! {
pub static ref BLS_MODULUS: Uint256 = Uint256::from_dec_str(
"52435875175126190479447740508185965837690552500527637822603658699938581184513"
)
.expect("should initialize BLS_MODULUS");
}
pub const BLOB_TX_TYPE: u8 = 5;
pub const VERSIONED_HASH_VERSION_KZG: u8 = 1;
}

View File

@ -102,11 +102,6 @@ pub trait EthSpec:
*/
type MaxBlsToExecutionChanges: Unsigned + Clone + Sync + Send + Debug + PartialEq;
type MaxWithdrawalsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq;
/*
* New in Eip4844
*/
type MaxBlobsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq;
type FieldElementsPerBlob: Unsigned + Clone + Sync + Send + Debug + PartialEq;
/*
* Derived values (set these CAREFULLY)
*/
@ -244,11 +239,6 @@ pub trait EthSpec:
fn max_withdrawals_per_payload() -> usize {
Self::MaxWithdrawalsPerPayload::to_usize()
}
/// Returns the `MAX_BLOBS_PER_BLOCK` constant for this specification.
fn max_blobs_per_block() -> usize {
Self::MaxBlobsPerBlock::to_usize()
}
}
/// Macro to inherit some type values from another EthSpec.
@ -288,8 +278,6 @@ impl EthSpec for MainnetEthSpec {
type GasLimitDenominator = U1024;
type MinGasLimit = U5000;
type MaxExtraDataBytes = U32;
type MaxBlobsPerBlock = U16; // 2**4 = 16
type FieldElementsPerBlob = U4096;
type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count
type MaxPendingAttestations = U4096; // 128 max attestations * 32 slots per epoch
type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch
@ -340,9 +328,7 @@ impl EthSpec for MinimalEthSpec {
GasLimitDenominator,
MinGasLimit,
MaxExtraDataBytes,
MaxBlsToExecutionChanges,
MaxBlobsPerBlock,
FieldElementsPerBlob
MaxBlsToExecutionChanges
});
fn default_spec() -> ChainSpec {
@ -388,8 +374,6 @@ impl EthSpec for GnosisEthSpec {
type SlotsPerEth1VotingPeriod = U1024; // 64 epochs * 16 slots per epoch
type MaxBlsToExecutionChanges = U16;
type MaxWithdrawalsPerPayload = U16;
type MaxBlobsPerBlock = U16; // 2**4 = 16
type FieldElementsPerBlob = U4096;
fn default_spec() -> ChainSpec {
ChainSpec::gnosis()

View File

@ -15,7 +15,7 @@ pub type Transactions<T> = VariableList<
pub type Withdrawals<T> = VariableList<Withdrawal, <T as EthSpec>::MaxWithdrawalsPerPayload>;
#[superstruct(
variants(Merge, Capella, Eip4844),
variants(Merge, Capella),
variant_attributes(
derive(
Default,
@ -77,15 +77,11 @@ pub struct ExecutionPayload<T: EthSpec> {
#[serde(with = "eth2_serde_utils::quoted_u256")]
#[superstruct(getter(copy))]
pub base_fee_per_gas: Uint256,
#[superstruct(only(Eip4844))]
#[serde(with = "eth2_serde_utils::quoted_u256")]
#[superstruct(getter(copy))]
pub excess_data_gas: Uint256,
#[superstruct(getter(copy))]
pub block_hash: ExecutionBlockHash,
#[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")]
pub transactions: Transactions<T>,
#[superstruct(only(Capella, Eip4844))]
#[superstruct(only(Capella))]
pub withdrawals: Withdrawals<T>,
}
@ -107,7 +103,6 @@ impl<T: EthSpec> ExecutionPayload<T> {
))),
ForkName::Merge => ExecutionPayloadMerge::from_ssz_bytes(bytes).map(Self::Merge),
ForkName::Capella => ExecutionPayloadCapella::from_ssz_bytes(bytes).map(Self::Capella),
ForkName::Eip4844 => ExecutionPayloadEip4844::from_ssz_bytes(bytes).map(Self::Eip4844),
}
}
@ -134,19 +129,6 @@ impl<T: EthSpec> ExecutionPayload<T> {
// Max size of variable length `withdrawals` field
+ (T::max_withdrawals_per_payload() * <Withdrawal as Encode>::ssz_fixed_len())
}
#[allow(clippy::integer_arithmetic)]
/// Returns the maximum size of an execution payload.
pub fn max_execution_payload_eip4844_size() -> usize {
// Fixed part
ExecutionPayloadEip4844::<T>::default().as_ssz_bytes().len()
// Max size of variable length `extra_data` field
+ (T::max_extra_data_bytes() * <u8 as Encode>::ssz_fixed_len())
// Max size of variable length `transactions` field
+ (T::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + T::max_bytes_per_transaction()))
// Max size of variable length `withdrawals` field
+ (T::max_withdrawals_per_payload() * <Withdrawal as Encode>::ssz_fixed_len())
}
}
impl<T: EthSpec> ForkVersionDeserialize for ExecutionPayload<T> {
@ -161,7 +143,6 @@ impl<T: EthSpec> ForkVersionDeserialize for ExecutionPayload<T> {
Ok(match fork_name {
ForkName::Merge => Self::Merge(serde_json::from_value(value).map_err(convert_err)?),
ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?),
ForkName::Eip4844 => Self::Eip4844(serde_json::from_value(value).map_err(convert_err)?),
ForkName::Base | ForkName::Altair => {
return Err(serde::de::Error::custom(format!(
"ExecutionPayload failed to deserialize: unsupported fork '{}'",

View File

@ -9,7 +9,7 @@ use tree_hash_derive::TreeHash;
use BeaconStateError;
#[superstruct(
variants(Merge, Capella, Eip4844),
variants(Merge, Capella),
variant_attributes(
derive(
Default,
@ -70,15 +70,11 @@ pub struct ExecutionPayloadHeader<T: EthSpec> {
#[serde(with = "eth2_serde_utils::quoted_u256")]
#[superstruct(getter(copy))]
pub base_fee_per_gas: Uint256,
#[superstruct(only(Eip4844))]
#[serde(with = "eth2_serde_utils::quoted_u256")]
#[superstruct(getter(copy))]
pub excess_data_gas: Uint256,
#[superstruct(getter(copy))]
pub block_hash: ExecutionBlockHash,
#[superstruct(getter(copy))]
pub transactions_root: Hash256,
#[superstruct(only(Capella, Eip4844))]
#[superstruct(only(Capella))]
#[superstruct(getter(copy))]
pub withdrawals_root: Hash256,
}
@ -97,9 +93,6 @@ impl<T: EthSpec> ExecutionPayloadHeader<T> {
ForkName::Capella => {
ExecutionPayloadHeaderCapella::from_ssz_bytes(bytes).map(Self::Capella)
}
ForkName::Eip4844 => {
ExecutionPayloadHeaderEip4844::from_ssz_bytes(bytes).map(Self::Eip4844)
}
}
}
}
@ -135,30 +128,6 @@ impl<T: EthSpec> ExecutionPayloadHeaderMerge<T> {
}
}
impl<T: EthSpec> ExecutionPayloadHeaderCapella<T> {
pub fn upgrade_to_eip4844(&self) -> ExecutionPayloadHeaderEip4844<T> {
ExecutionPayloadHeaderEip4844 {
parent_hash: self.parent_hash,
fee_recipient: self.fee_recipient,
state_root: self.state_root,
receipts_root: self.receipts_root,
logs_bloom: self.logs_bloom.clone(),
prev_randao: self.prev_randao,
block_number: self.block_number,
gas_limit: self.gas_limit,
gas_used: self.gas_used,
timestamp: self.timestamp,
extra_data: self.extra_data.clone(),
base_fee_per_gas: self.base_fee_per_gas,
// TODO: verify if this is correct
excess_data_gas: Uint256::zero(),
block_hash: self.block_hash,
transactions_root: self.transactions_root,
withdrawals_root: self.withdrawals_root,
}
}
}
impl<'a, T: EthSpec> From<&'a ExecutionPayloadMerge<T>> for ExecutionPayloadHeaderMerge<T> {
fn from(payload: &'a ExecutionPayloadMerge<T>) -> Self {
Self {
@ -201,29 +170,6 @@ impl<'a, T: EthSpec> From<&'a ExecutionPayloadCapella<T>> for ExecutionPayloadHe
}
}
impl<'a, T: EthSpec> From<&'a ExecutionPayloadEip4844<T>> for ExecutionPayloadHeaderEip4844<T> {
fn from(payload: &'a ExecutionPayloadEip4844<T>) -> Self {
Self {
parent_hash: payload.parent_hash,
fee_recipient: payload.fee_recipient,
state_root: payload.state_root,
receipts_root: payload.receipts_root,
logs_bloom: payload.logs_bloom.clone(),
prev_randao: payload.prev_randao,
block_number: payload.block_number,
gas_limit: payload.gas_limit,
gas_used: payload.gas_used,
timestamp: payload.timestamp,
extra_data: payload.extra_data.clone(),
base_fee_per_gas: payload.base_fee_per_gas,
excess_data_gas: payload.excess_data_gas,
block_hash: payload.block_hash,
transactions_root: payload.transactions.tree_hash_root(),
withdrawals_root: payload.withdrawals.tree_hash_root(),
}
}
}
// These impls are required to work around an inelegance in `to_execution_payload_header`.
// They only clone headers so they should be relatively cheap.
impl<'a, T: EthSpec> From<&'a Self> for ExecutionPayloadHeaderMerge<T> {
@ -238,12 +184,6 @@ impl<'a, T: EthSpec> From<&'a Self> for ExecutionPayloadHeaderCapella<T> {
}
}
impl<'a, T: EthSpec> From<&'a Self> for ExecutionPayloadHeaderEip4844<T> {
fn from(payload: &'a Self) -> Self {
payload.clone()
}
}
impl<'a, T: EthSpec> From<ExecutionPayloadRef<'a, T>> for ExecutionPayloadHeader<T> {
fn from(payload: ExecutionPayloadRef<'a, T>) -> Self {
map_execution_payload_ref_into_execution_payload_header!(
@ -274,17 +214,6 @@ impl<T: EthSpec> TryFrom<ExecutionPayloadHeader<T>> for ExecutionPayloadHeaderCa
}
}
}
impl<T: EthSpec> TryFrom<ExecutionPayloadHeader<T>> for ExecutionPayloadHeaderEip4844<T> {
type Error = BeaconStateError;
fn try_from(header: ExecutionPayloadHeader<T>) -> Result<Self, Self::Error> {
match header {
ExecutionPayloadHeader::Eip4844(execution_payload_header) => {
Ok(execution_payload_header)
}
_ => Err(BeaconStateError::IncorrectStateVariant),
}
}
}
impl<T: EthSpec> ForkVersionDeserialize for ExecutionPayloadHeader<T> {
fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>(
@ -301,7 +230,6 @@ impl<T: EthSpec> ForkVersionDeserialize for ExecutionPayloadHeader<T> {
Ok(match fork_name {
ForkName::Merge => Self::Merge(serde_json::from_value(value).map_err(convert_err)?),
ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?),
ForkName::Eip4844 => Self::Eip4844(serde_json::from_value(value).map_err(convert_err)?),
ForkName::Base | ForkName::Altair => {
return Err(serde::de::Error::custom(format!(
"ExecutionPayloadHeader failed to deserialize: unsupported fork '{}'",

View File

@ -54,13 +54,6 @@ impl ForkContext {
));
}
if spec.eip4844_fork_epoch.is_some() {
fork_to_digest.push((
ForkName::Eip4844,
ChainSpec::compute_fork_digest(spec.eip4844_fork_version, genesis_validators_root),
));
}
let fork_to_digest: HashMap<ForkName, [u8; 4]> = fork_to_digest.into_iter().collect();
let digest_to_fork = fork_to_digest

View File

@ -12,7 +12,6 @@ pub enum ForkName {
Altair,
Merge,
Capella,
Eip4844,
}
impl ForkName {
@ -22,7 +21,6 @@ impl ForkName {
ForkName::Altair,
ForkName::Merge,
ForkName::Capella,
ForkName::Eip4844,
]
}
@ -35,35 +33,24 @@ impl ForkName {
spec.altair_fork_epoch = None;
spec.bellatrix_fork_epoch = None;
spec.capella_fork_epoch = None;
spec.eip4844_fork_epoch = None;
spec
}
ForkName::Altair => {
spec.altair_fork_epoch = Some(Epoch::new(0));
spec.bellatrix_fork_epoch = None;
spec.capella_fork_epoch = None;
spec.eip4844_fork_epoch = None;
spec
}
ForkName::Merge => {
spec.altair_fork_epoch = Some(Epoch::new(0));
spec.bellatrix_fork_epoch = Some(Epoch::new(0));
spec.capella_fork_epoch = None;
spec.eip4844_fork_epoch = None;
spec
}
ForkName::Capella => {
spec.altair_fork_epoch = Some(Epoch::new(0));
spec.bellatrix_fork_epoch = Some(Epoch::new(0));
spec.capella_fork_epoch = Some(Epoch::new(0));
spec.eip4844_fork_epoch = None;
spec
}
ForkName::Eip4844 => {
spec.altair_fork_epoch = Some(Epoch::new(0));
spec.bellatrix_fork_epoch = Some(Epoch::new(0));
spec.capella_fork_epoch = Some(Epoch::new(0));
spec.eip4844_fork_epoch = Some(Epoch::new(0));
spec
}
}
@ -78,7 +65,6 @@ impl ForkName {
ForkName::Altair => Some(ForkName::Base),
ForkName::Merge => Some(ForkName::Altair),
ForkName::Capella => Some(ForkName::Merge),
ForkName::Eip4844 => Some(ForkName::Capella),
}
}
@ -90,8 +76,7 @@ impl ForkName {
ForkName::Base => Some(ForkName::Altair),
ForkName::Altair => Some(ForkName::Merge),
ForkName::Merge => Some(ForkName::Capella),
ForkName::Capella => Some(ForkName::Eip4844),
ForkName::Eip4844 => None,
ForkName::Capella => None,
}
}
}
@ -137,10 +122,6 @@ macro_rules! map_fork_name_with {
let (value, extra_data) = $body;
($t::Capella(value), extra_data)
}
ForkName::Eip4844 => {
let (value, extra_data) = $body;
($t::Eip4844(value), extra_data)
}
}
};
}
@ -154,7 +135,6 @@ impl FromStr for ForkName {
"altair" => ForkName::Altair,
"bellatrix" | "merge" => ForkName::Merge,
"capella" => ForkName::Capella,
"eip4844" => ForkName::Eip4844,
_ => return Err(format!("unknown fork name: {}", fork_name)),
})
}
@ -167,7 +147,6 @@ impl Display for ForkName {
ForkName::Altair => "altair".fmt(f),
ForkName::Merge => "bellatrix".fmt(f),
ForkName::Capella => "capella".fmt(f),
ForkName::Eip4844 => "eip4844".fmt(f),
}
}
}
@ -199,7 +178,7 @@ mod test {
#[test]
fn previous_and_next_fork_consistent() {
assert_eq!(ForkName::Eip4844.next_fork(), None);
assert_eq!(ForkName::Capella.next_fork(), None);
assert_eq!(ForkName::Base.previous_fork(), None);
for (prev_fork, fork) in ForkName::list_all().into_iter().tuple_windows() {

View File

@ -1,45 +0,0 @@
use crate::test_utils::TestRandom;
use crate::*;
use derivative::Derivative;
use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode};
use std::fmt;
use std::fmt::{Display, Formatter};
use tree_hash::{PackedEncoding, TreeHash};
#[derive(
Derivative, Debug, Clone, Encode, Decode, Serialize, Deserialize, arbitrary::Arbitrary,
)]
#[derivative(PartialEq, Eq, Hash)]
#[ssz(struct_behaviour = "transparent")]
pub struct KzgCommitment(#[serde(with = "BigArray")] pub [u8; 48]);
impl Display for KzgCommitment {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(f, "{}", eth2_serde_utils::hex::encode(self.0))
}
}
impl TreeHash for KzgCommitment {
fn tree_hash_type() -> tree_hash::TreeHashType {
<[u8; 48] as TreeHash>::tree_hash_type()
}
fn tree_hash_packed_encoding(&self) -> PackedEncoding {
self.0.tree_hash_packed_encoding()
}
fn tree_hash_packing_factor() -> usize {
<[u8; 48] as TreeHash>::tree_hash_packing_factor()
}
fn tree_hash_root(&self) -> tree_hash::Hash256 {
self.0.tree_hash_root()
}
}
impl TestRandom for KzgCommitment {
fn random_for_test(rng: &mut impl rand::RngCore) -> Self {
KzgCommitment(<[u8; 48] as TestRandom>::random_for_test(rng))
}
}

View File

@ -1,74 +0,0 @@
use crate::test_utils::{RngCore, TestRandom};
use serde::{Deserialize, Serialize};
use serde_big_array::BigArray;
use ssz_derive::{Decode, Encode};
use std::fmt;
use tree_hash::{PackedEncoding, TreeHash};
const KZG_PROOF_BYTES_LEN: usize = 48;
#[derive(
Debug,
PartialEq,
Hash,
Clone,
Copy,
Encode,
Decode,
Serialize,
Deserialize,
arbitrary::Arbitrary,
)]
#[serde(transparent)]
#[ssz(struct_behaviour = "transparent")]
pub struct KzgProof(#[serde(with = "BigArray")] pub [u8; KZG_PROOF_BYTES_LEN]);
impl fmt::Display for KzgProof {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", eth2_serde_utils::hex::encode(self.0))
}
}
impl Default for KzgProof {
fn default() -> Self {
KzgProof([0; 48])
}
}
impl From<[u8; KZG_PROOF_BYTES_LEN]> for KzgProof {
fn from(bytes: [u8; KZG_PROOF_BYTES_LEN]) -> Self {
Self(bytes)
}
}
impl Into<[u8; KZG_PROOF_BYTES_LEN]> for KzgProof {
fn into(self) -> [u8; KZG_PROOF_BYTES_LEN] {
self.0
}
}
impl TreeHash for KzgProof {
fn tree_hash_type() -> tree_hash::TreeHashType {
<[u8; KZG_PROOF_BYTES_LEN]>::tree_hash_type()
}
fn tree_hash_packed_encoding(&self) -> PackedEncoding {
self.0.tree_hash_packed_encoding()
}
fn tree_hash_packing_factor() -> usize {
<[u8; KZG_PROOF_BYTES_LEN]>::tree_hash_packing_factor()
}
fn tree_hash_root(&self) -> tree_hash::Hash256 {
self.0.tree_hash_root()
}
}
impl TestRandom for KzgProof {
fn random_for_test(rng: &mut impl RngCore) -> Self {
let mut bytes = [0; KZG_PROOF_BYTES_LEN];
rng.fill_bytes(&mut bytes);
Self(bytes)
}
}

View File

@ -99,10 +99,6 @@ pub mod slot_data;
#[cfg(feature = "sqlite")]
pub mod sqlite;
pub mod blobs_sidecar;
pub mod kzg_commitment;
pub mod kzg_proof;
use ethereum_types::{H160, H256};
pub use crate::aggregate_and_proof::AggregateAndProof;
@ -111,17 +107,16 @@ pub use crate::attestation_data::AttestationData;
pub use crate::attestation_duty::AttestationDuty;
pub use crate::attester_slashing::AttesterSlashing;
pub use crate::beacon_block::{
BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockEip4844,
BeaconBlockMerge, BeaconBlockRef, BeaconBlockRefMut, BlindedBeaconBlock, EmptyBlock,
BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockMerge,
BeaconBlockRef, BeaconBlockRefMut, BlindedBeaconBlock, EmptyBlock,
};
pub use crate::beacon_block_body::{
BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyCapella,
BeaconBlockBodyEip4844, BeaconBlockBodyMerge, BeaconBlockBodyRef, BeaconBlockBodyRefMut,
BeaconBlockBodyMerge, BeaconBlockBodyRef, BeaconBlockBodyRefMut,
};
pub use crate::beacon_block_header::BeaconBlockHeader;
pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee};
pub use crate::beacon_state::{BeaconTreeHashCache, Error as BeaconStateError, *};
pub use crate::blobs_sidecar::BlobsSidecar;
pub use crate::bls_to_execution_change::BlsToExecutionChange;
pub use crate::chain_spec::{ChainSpec, Config, Domain};
pub use crate::checkpoint::Checkpoint;
@ -139,12 +134,12 @@ pub use crate::eth_spec::EthSpecId;
pub use crate::execution_block_hash::ExecutionBlockHash;
pub use crate::execution_block_header::ExecutionBlockHeader;
pub use crate::execution_payload::{
ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge,
ExecutionPayloadRef, Transaction, Transactions, Withdrawals,
ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge, ExecutionPayloadRef,
Transaction, Transactions, Withdrawals,
};
pub use crate::execution_payload_header::{
ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderEip4844,
ExecutionPayloadHeaderMerge, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut,
ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderMerge,
ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut,
};
pub use crate::fork::Fork;
pub use crate::fork_context::ForkContext;
@ -156,16 +151,14 @@ pub use crate::fork_versioned_response::{
pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN};
pub use crate::historical_batch::HistoricalBatch;
pub use crate::indexed_attestation::IndexedAttestation;
pub use crate::kzg_commitment::KzgCommitment;
pub use crate::kzg_proof::KzgProof;
pub use crate::light_client_finality_update::LightClientFinalityUpdate;
pub use crate::light_client_optimistic_update::LightClientOptimisticUpdate;
pub use crate::participation_flags::ParticipationFlags;
pub use crate::participation_list::ParticipationList;
pub use crate::payload::{
AbstractExecPayload, BlindedPayload, BlindedPayloadCapella, BlindedPayloadEip4844,
BlindedPayloadMerge, BlindedPayloadRef, BlockType, ExecPayload, FullPayload,
FullPayloadCapella, FullPayloadEip4844, FullPayloadMerge, FullPayloadRef, OwnedExecPayload,
AbstractExecPayload, BlindedPayload, BlindedPayloadCapella, BlindedPayloadMerge,
BlindedPayloadRef, BlockType, ExecPayload, FullPayload, FullPayloadCapella, FullPayloadMerge,
FullPayloadRef, OwnedExecPayload,
};
pub use crate::pending_attestation::PendingAttestation;
pub use crate::preset::{AltairPreset, BasePreset, BellatrixPreset, CapellaPreset};
@ -177,8 +170,7 @@ pub use crate::shuffling_id::AttestationShufflingId;
pub use crate::signed_aggregate_and_proof::SignedAggregateAndProof;
pub use crate::signed_beacon_block::{
SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella,
SignedBeaconBlockEip4844, SignedBeaconBlockHash, SignedBeaconBlockMerge,
SignedBlindedBeaconBlock,
SignedBeaconBlockHash, SignedBeaconBlockMerge, SignedBlindedBeaconBlock,
};
pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader;
pub use crate::signed_bls_to_execution_change::SignedBlsToExecutionChange;
@ -201,7 +193,6 @@ pub use crate::validator_registration_data::*;
pub use crate::validator_subscription::ValidatorSubscription;
pub use crate::voluntary_exit::VoluntaryExit;
pub use crate::withdrawal::Withdrawal;
use serde_big_array::BigArray;
pub type CommitteeIndex = u64;
pub type Hash256 = H256;
@ -209,7 +200,6 @@ pub type Uint256 = ethereum_types::U256;
pub type Address = H160;
pub type ForkVersion = [u8; 4];
pub type BLSFieldElement = Uint256;
pub type Blob<T> = FixedVector<BLSFieldElement, <T as EthSpec>::FieldElementsPerBlob>;
pub type VersionedHash = Hash256;
pub type Hash64 = ethereum_types::H64;

View File

@ -81,13 +81,8 @@ pub trait AbstractExecPayload<T: EthSpec>:
+ TryFrom<ExecutionPayloadHeader<T>>
+ TryInto<Self::Merge>
+ TryInto<Self::Capella>
+ TryInto<Self::Eip4844>
{
type Ref<'a>: ExecPayload<T>
+ Copy
+ From<&'a Self::Merge>
+ From<&'a Self::Capella>
+ From<&'a Self::Eip4844>;
type Ref<'a>: ExecPayload<T> + Copy + From<&'a Self::Merge> + From<&'a Self::Capella>;
type Merge: OwnedExecPayload<T>
+ Into<Self>
@ -97,16 +92,12 @@ pub trait AbstractExecPayload<T: EthSpec>:
+ Into<Self>
+ for<'a> From<Cow<'a, ExecutionPayloadCapella<T>>>
+ TryFrom<ExecutionPayloadHeaderCapella<T>>;
type Eip4844: OwnedExecPayload<T>
+ Into<Self>
+ for<'a> From<Cow<'a, ExecutionPayloadEip4844<T>>>
+ TryFrom<ExecutionPayloadHeaderEip4844<T>>;
fn default_at_fork(fork_name: ForkName) -> Result<Self, Error>;
}
#[superstruct(
variants(Merge, Capella, Eip4844),
variants(Merge, Capella),
variant_attributes(
derive(
Debug,
@ -145,8 +136,6 @@ pub struct FullPayload<T: EthSpec> {
pub execution_payload: ExecutionPayloadMerge<T>,
#[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))]
pub execution_payload: ExecutionPayloadCapella<T>,
#[superstruct(only(Eip4844), partial_getter(rename = "execution_payload_eip4844"))]
pub execution_payload: ExecutionPayloadEip4844<T>,
}
impl<T: EthSpec> From<FullPayload<T>> for ExecutionPayload<T> {
@ -250,9 +239,6 @@ impl<T: EthSpec> ExecPayload<T> for FullPayload<T> {
FullPayload::Capella(ref inner) => {
Ok(inner.execution_payload.withdrawals.tree_hash_root())
}
FullPayload::Eip4844(ref inner) => {
Ok(inner.execution_payload.withdrawals.tree_hash_root())
}
}
}
@ -359,9 +345,6 @@ impl<'b, T: EthSpec> ExecPayload<T> for FullPayloadRef<'b, T> {
FullPayloadRef::Capella(inner) => {
Ok(inner.execution_payload.withdrawals.tree_hash_root())
}
FullPayloadRef::Eip4844(inner) => {
Ok(inner.execution_payload.withdrawals.tree_hash_root())
}
}
}
@ -382,14 +365,12 @@ impl<T: EthSpec> AbstractExecPayload<T> for FullPayload<T> {
type Ref<'a> = FullPayloadRef<'a, T>;
type Merge = FullPayloadMerge<T>;
type Capella = FullPayloadCapella<T>;
type Eip4844 = FullPayloadEip4844<T>;
fn default_at_fork(fork_name: ForkName) -> Result<Self, Error> {
match fork_name {
ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant),
ForkName::Merge => Ok(FullPayloadMerge::default().into()),
ForkName::Capella => Ok(FullPayloadCapella::default().into()),
ForkName::Eip4844 => Ok(FullPayloadEip4844::default().into()),
}
}
}
@ -410,7 +391,7 @@ impl<T: EthSpec> TryFrom<ExecutionPayloadHeader<T>> for FullPayload<T> {
}
#[superstruct(
variants(Merge, Capella, Eip4844),
variants(Merge, Capella),
variant_attributes(
derive(
Debug,
@ -448,8 +429,6 @@ pub struct BlindedPayload<T: EthSpec> {
pub execution_payload_header: ExecutionPayloadHeaderMerge<T>,
#[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))]
pub execution_payload_header: ExecutionPayloadHeaderCapella<T>,
#[superstruct(only(Eip4844), partial_getter(rename = "execution_payload_eip4844"))]
pub execution_payload_header: ExecutionPayloadHeaderEip4844<T>,
}
impl<'a, T: EthSpec> From<BlindedPayloadRef<'a, T>> for BlindedPayload<T> {
@ -531,9 +510,6 @@ impl<T: EthSpec> ExecPayload<T> for BlindedPayload<T> {
BlindedPayload::Capella(ref inner) => {
Ok(inner.execution_payload_header.withdrawals_root)
}
BlindedPayload::Eip4844(ref inner) => {
Ok(inner.execution_payload_header.withdrawals_root)
}
}
}
@ -621,9 +597,6 @@ impl<'b, T: EthSpec> ExecPayload<T> for BlindedPayloadRef<'b, T> {
BlindedPayloadRef::Capella(inner) => {
Ok(inner.execution_payload_header.withdrawals_root)
}
BlindedPayloadRef::Eip4844(inner) => {
Ok(inner.execution_payload_header.withdrawals_root)
}
}
}
@ -887,26 +860,17 @@ impl_exec_payload_for_fork!(
ExecutionPayloadCapella,
Capella
);
impl_exec_payload_for_fork!(
BlindedPayloadEip4844,
FullPayloadEip4844,
ExecutionPayloadHeaderEip4844,
ExecutionPayloadEip4844,
Eip4844
);
impl<T: EthSpec> AbstractExecPayload<T> for BlindedPayload<T> {
type Ref<'a> = BlindedPayloadRef<'a, T>;
type Merge = BlindedPayloadMerge<T>;
type Capella = BlindedPayloadCapella<T>;
type Eip4844 = BlindedPayloadEip4844<T>;
fn default_at_fork(fork_name: ForkName) -> Result<Self, Error> {
match fork_name {
ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant),
ForkName::Merge => Ok(BlindedPayloadMerge::default().into()),
ForkName::Capella => Ok(BlindedPayloadCapella::default().into()),
ForkName::Eip4844 => Ok(BlindedPayloadEip4844::default().into()),
}
}
}
@ -935,11 +899,6 @@ impl<T: EthSpec> From<ExecutionPayloadHeader<T>> for BlindedPayload<T> {
execution_payload_header,
})
}
ExecutionPayloadHeader::Eip4844(execution_payload_header) => {
Self::Eip4844(BlindedPayloadEip4844 {
execution_payload_header,
})
}
}
}
}
@ -953,9 +912,6 @@ impl<T: EthSpec> From<BlindedPayload<T>> for ExecutionPayloadHeader<T> {
BlindedPayload::Capella(blinded_payload) => {
ExecutionPayloadHeader::Capella(blinded_payload.execution_payload_header)
}
BlindedPayload::Eip4844(blinded_payload) => {
ExecutionPayloadHeader::Eip4844(blinded_payload.execution_payload_header)
}
}
}
}
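
For readers skimming this diff who are less familiar with the `superstruct` attribute that drives `payload.rs`, here is a minimal hedged sketch of the pattern being trimmed above: one macro invocation produces a per-fork struct for each entry in `variants(...)` plus a wrapping enum, and `only(...)` gates fork-specific fields. Toy names throughout; this is not the crate's real payload type.

```rust
use superstruct::superstruct;

// Expands (roughly) into `DemoPayloadMerge`, `DemoPayloadCapella`, and an enum
// `DemoPayload` with one variant per fork. Removing a fork from `variants(...)`
// deletes its struct and enum variant, which is what this commit does for Eip4844.
#[superstruct(variants(Merge, Capella))]
pub struct DemoPayload {
    // Common field, present in every variant.
    pub block_number: u64,
    // Capella-only field, mirroring how `withdrawals_root` exists only in
    // post-Capella payload headers.
    #[superstruct(only(Capella))]
    pub withdrawals_root: u64,
}

// Accessing a fork-specific field requires matching the generated enum.
fn withdrawals_root(payload: &DemoPayload) -> Option<u64> {
    match payload {
        DemoPayload::Merge(_) => None,
        DemoPayload::Capella(inner) => Some(inner.withdrawals_root),
    }
}
```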

View File

@ -37,7 +37,7 @@ impl From<SignedBeaconBlockHash> for Hash256 {
/// A `BeaconBlock` and a signature from its proposer.
#[superstruct(
-    variants(Base, Altair, Merge, Capella, Eip4844),
+    variants(Base, Altair, Merge, Capella),
variant_attributes(
derive(
Debug,
@ -76,8 +76,6 @@ pub struct SignedBeaconBlock<E: EthSpec, Payload: AbstractExecPayload<E> = FullP
pub message: BeaconBlockMerge<E, Payload>,
#[superstruct(only(Capella), partial_getter(rename = "message_capella"))]
pub message: BeaconBlockCapella<E, Payload>,
#[superstruct(only(Eip4844), partial_getter(rename = "message_eip4844"))]
pub message: BeaconBlockEip4844<E, Payload>,
pub signature: Signature,
}
@ -138,9 +136,6 @@ impl<E: EthSpec, Payload: AbstractExecPayload<E>> SignedBeaconBlock<E, Payload>
BeaconBlock::Capella(message) => {
SignedBeaconBlock::Capella(SignedBeaconBlockCapella { message, signature })
}
BeaconBlock::Eip4844(message) => {
SignedBeaconBlock::Eip4844(SignedBeaconBlockEip4844 { message, signature })
}
}
}
@ -373,62 +368,6 @@ impl<E: EthSpec> SignedBeaconBlockCapella<E, BlindedPayload<E>> {
}
}
impl<E: EthSpec> SignedBeaconBlockEip4844<E, BlindedPayload<E>> {
pub fn into_full_block(
self,
execution_payload: ExecutionPayloadEip4844<E>,
) -> SignedBeaconBlockEip4844<E, FullPayload<E>> {
let SignedBeaconBlockEip4844 {
message:
BeaconBlockEip4844 {
slot,
proposer_index,
parent_root,
state_root,
body:
BeaconBlockBodyEip4844 {
randao_reveal,
eth1_data,
graffiti,
proposer_slashings,
attester_slashings,
attestations,
deposits,
voluntary_exits,
sync_aggregate,
execution_payload: BlindedPayloadEip4844 { .. },
bls_to_execution_changes,
blob_kzg_commitments,
},
},
signature,
} = self;
SignedBeaconBlockEip4844 {
message: BeaconBlockEip4844 {
slot,
proposer_index,
parent_root,
state_root,
body: BeaconBlockBodyEip4844 {
randao_reveal,
eth1_data,
graffiti,
proposer_slashings,
attester_slashings,
attestations,
deposits,
voluntary_exits,
sync_aggregate,
execution_payload: FullPayloadEip4844 { execution_payload },
bls_to_execution_changes,
blob_kzg_commitments,
},
},
signature,
}
}
}
impl<E: EthSpec> SignedBeaconBlock<E, BlindedPayload<E>> {
pub fn try_into_full_block(
self,
@ -443,14 +382,10 @@ impl<E: EthSpec> SignedBeaconBlock<E, BlindedPayload<E>> {
(SignedBeaconBlock::Capella(block), Some(ExecutionPayload::Capella(payload))) => {
SignedBeaconBlock::Capella(block.into_full_block(payload))
}
(SignedBeaconBlock::Eip4844(block), Some(ExecutionPayload::Eip4844(payload))) => {
SignedBeaconBlock::Eip4844(block.into_full_block(payload))
}
// avoid wildcard matching forks so that compiler will
// direct us here when a new fork has been added
(SignedBeaconBlock::Merge(_), _) => return None,
(SignedBeaconBlock::Capella(_), _) => return None,
(SignedBeaconBlock::Eip4844(_), _) => return None,
};
Some(full_block)
}
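
The `// avoid wildcard matching forks` comment kept above is the convention the rest of this cleanup leans on: spell out every fork/payload combination so that adding (or re-adding) a fork variant is a compile error until each call site is updated, instead of silently falling into a `_` arm. A rough self-contained sketch of the same idea, with hypothetical enums rather than the real block and payload types:

```rust
// Hypothetical stand-ins, only to illustrate the exhaustiveness pattern.
enum Fork {
    Merge,
    Capella,
}

enum Payload {
    Merge(u64),
    Capella(u64),
}

fn into_full(fork: Fork, payload: Option<Payload>) -> Option<u64> {
    match (fork, payload) {
        (Fork::Merge, Some(Payload::Merge(p))) => Some(p),
        (Fork::Capella, Some(Payload::Capella(p))) => Some(p),
        // No `_ => ...` catch-all: the mismatch cases are listed per fork, so a
        // new `Fork` variant fails to compile until it is handled here too.
        (Fork::Merge, _) => None,
        (Fork::Capella, _) => None,
    }
}
```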

View File

@ -5,8 +5,8 @@ use std::fs::File;
use std::io::Write;
use std::time::{SystemTime, UNIX_EPOCH};
use types::{
-    EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderEip4844,
-    ExecutionPayloadHeaderMerge, ForkName,
+    EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderMerge,
+    ForkName,
};
pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> {
@ -40,14 +40,6 @@ pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> {
prev_randao: eth1_block_hash.into_root(),
..ExecutionPayloadHeaderCapella::default()
}),
ForkName::Eip4844 => ExecutionPayloadHeader::Eip4844(ExecutionPayloadHeaderEip4844 {
gas_limit,
base_fee_per_gas,
timestamp: genesis_time,
block_hash: eth1_block_hash,
prev_randao: eth1_block_hash.into_root(),
..ExecutionPayloadHeaderEip4844::default()
}),
};
let mut file = File::create(file_name).map_err(|_| "Unable to create file".to_string())?;

View File

@ -425,7 +425,7 @@ fn main() {
.takes_value(true)
.default_value("bellatrix")
.help("The fork for which the execution payload header should be created.")
-                        .possible_values(&["merge", "bellatrix", "capella", "eip4844"])
+                        .possible_values(&["merge", "bellatrix", "capella"])
)
)
.subcommand(

View File

@ -10,8 +10,7 @@ use std::path::PathBuf;
use std::time::{SystemTime, UNIX_EPOCH};
use types::{
test_utils::generate_deterministic_keypairs, Address, Config, Epoch, EthSpec,
-    ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderEip4844,
-    ExecutionPayloadHeaderMerge, ForkName,
+    ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderMerge, ForkName,
};
pub fn run<T: EthSpec>(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> {
@ -94,10 +93,6 @@ pub fn run<T: EthSpec>(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul
ExecutionPayloadHeaderCapella::<T>::from_ssz_bytes(bytes.as_slice())
.map(ExecutionPayloadHeader::Capella)
}
ForkName::Eip4844 => {
ExecutionPayloadHeaderEip4844::<T>::from_ssz_bytes(bytes.as_slice())
.map(ExecutionPayloadHeader::Eip4844)
}
}
.map_err(|e| format!("SSZ decode failed: {:?}", e))
})

View File

@ -66,7 +66,6 @@ pub fn previous_fork(fork_name: ForkName) -> ForkName {
ForkName::Altair => ForkName::Base,
ForkName::Merge => ForkName::Altair, // TODO: Check this when tests are released..
ForkName::Capella => ForkName::Merge, // TODO: Check this when tests are released..
ForkName::Eip4844 => ForkName::Capella, // TODO: Check this when tests are released..
}
}

View File

@ -101,10 +101,7 @@ impl<E: EthSpec> EpochTransition<E> for JustificationAndFinalization {
justification_and_finalization_state.apply_changes_to_state(state);
Ok(())
}
-            BeaconState::Altair(_)
-            | BeaconState::Merge(_)
-            | BeaconState::Capella(_)
-            | BeaconState::Eip4844(_) => {
+            BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => {
let justification_and_finalization_state =
altair::process_justification_and_finalization(
state,
@ -125,14 +122,13 @@ impl<E: EthSpec> EpochTransition<E> for RewardsAndPenalties {
validator_statuses.process_attestations(state)?;
base::process_rewards_and_penalties(state, &mut validator_statuses, spec)
}
-            BeaconState::Altair(_)
-            | BeaconState::Merge(_)
-            | BeaconState::Capella(_)
-            | BeaconState::Eip4844(_) => altair::process_rewards_and_penalties(
-                state,
-                &altair::ParticipationCache::new(state, spec).unwrap(),
-                spec,
-            ),
+            BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => {
+                altair::process_rewards_and_penalties(
+                    state,
+                    &altair::ParticipationCache::new(state, spec).unwrap(),
+                    spec,
+                )
+            }
}
}
}
@ -155,10 +151,7 @@ impl<E: EthSpec> EpochTransition<E> for Slashings {
spec,
)?;
}
-            BeaconState::Altair(_)
-            | BeaconState::Merge(_)
-            | BeaconState::Capella(_)
-            | BeaconState::Eip4844(_) => {
+            BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => {
process_slashings(
state,
altair::ParticipationCache::new(state, spec)
@ -210,9 +203,7 @@ impl<E: EthSpec> EpochTransition<E> for HistoricalRootsUpdate {
impl<E: EthSpec> EpochTransition<E> for HistoricalSummariesUpdate {
fn run(state: &mut BeaconState<E>, _spec: &ChainSpec) -> Result<(), EpochProcessingError> {
match state {
-            BeaconState::Capella(_) | BeaconState::Eip4844(_) => {
-                process_historical_summaries_update(state)
-            }
+            BeaconState::Capella(_) => process_historical_summaries_update(state),
_ => Ok(()),
}
}
@ -232,10 +223,9 @@ impl<E: EthSpec> EpochTransition<E> for SyncCommitteeUpdates {
fn run(state: &mut BeaconState<E>, spec: &ChainSpec) -> Result<(), EpochProcessingError> {
match state {
BeaconState::Base(_) => Ok(()),
-            BeaconState::Altair(_)
-            | BeaconState::Merge(_)
-            | BeaconState::Capella(_)
-            | BeaconState::Eip4844(_) => altair::process_sync_committee_updates(state, spec),
+            BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => {
+                altair::process_sync_committee_updates(state, spec)
+            }
}
}
}
@ -244,14 +234,13 @@ impl<E: EthSpec> EpochTransition<E> for InactivityUpdates {
fn run(state: &mut BeaconState<E>, spec: &ChainSpec) -> Result<(), EpochProcessingError> {
match state {
BeaconState::Base(_) => Ok(()),
-            BeaconState::Altair(_)
-            | BeaconState::Merge(_)
-            | BeaconState::Capella(_)
-            | BeaconState::Eip4844(_) => altair::process_inactivity_updates(
-                state,
-                &altair::ParticipationCache::new(state, spec).unwrap(),
-                spec,
-            ),
+            BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => {
+                altair::process_inactivity_updates(
+                    state,
+                    &altair::ParticipationCache::new(state, spec).unwrap(),
+                    spec,
+                )
+            }
}
}
}
@ -260,10 +249,9 @@ impl<E: EthSpec> EpochTransition<E> for ParticipationFlagUpdates {
fn run(state: &mut BeaconState<E>, _: &ChainSpec) -> Result<(), EpochProcessingError> {
match state {
BeaconState::Base(_) => Ok(()),
-            BeaconState::Altair(_)
-            | BeaconState::Merge(_)
-            | BeaconState::Capella(_)
-            | BeaconState::Eip4844(_) => altair::process_participation_flag_updates(state),
+            BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => {
+                altair::process_participation_flag_updates(state)
+            }
}
}
}
@ -318,7 +306,6 @@ impl<E: EthSpec, T: EpochTransition<E>> Case for EpochProcessing<E, T> {
T::name() != "participation_record_updates"
&& T::name() != "historical_roots_update"
}
ForkName::Eip4844 => false, // TODO: revisit when tests are out
}
}

View File

@ -62,7 +62,6 @@ impl<E: EthSpec> Case for ForkTest<E> {
ForkName::Altair => upgrade_to_altair(&mut result_state, spec).map(|_| result_state),
ForkName::Merge => upgrade_to_bellatrix(&mut result_state, spec).map(|_| result_state),
ForkName::Capella => upgrade_to_capella(&mut result_state, spec).map(|_| result_state),
ForkName::Eip4844 => panic!("eip4844 not supported"),
};
compare_beacon_state_results_without_caches(&mut result, &mut expected)

View File

@ -95,10 +95,7 @@ impl<E: EthSpec> Operation<E> for Attestation<E> {
&mut ctxt,
spec,
),
-            BeaconState::Altair(_)
-            | BeaconState::Merge(_)
-            | BeaconState::Capella(_)
-            | BeaconState::Eip4844(_) => {
+            BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => {
altair::process_attestation(state, self, 0, &mut ctxt, VerifySignatures::True, spec)
}
}

View File

@ -47,12 +47,6 @@ impl<E: EthSpec> LoadCase for TransitionTest<E> {
spec.bellatrix_fork_epoch = Some(Epoch::new(0));
spec.capella_fork_epoch = Some(metadata.fork_epoch);
}
ForkName::Eip4844 => {
spec.altair_fork_epoch = Some(Epoch::new(0));
spec.bellatrix_fork_epoch = Some(Epoch::new(0));
spec.capella_fork_epoch = Some(Epoch::new(0));
spec.eip4844_fork_epoch = Some(metadata.fork_epoch);
}
}
// Load blocks

View File

@ -24,11 +24,6 @@ pub trait Handler {
fn run(&self) {
for fork_name in ForkName::list_all() {
// FIXME(eip4844): enable eip4844
if fork_name == ForkName::Eip4844 {
continue;
}
if self.is_enabled_for_fork(fork_name) {
self.run_for_fork(fork_name)
}

View File

@ -47,7 +47,6 @@ type_name_generic!(BeaconBlockBodyBase, "BeaconBlockBody");
type_name_generic!(BeaconBlockBodyAltair, "BeaconBlockBody");
type_name_generic!(BeaconBlockBodyMerge, "BeaconBlockBody");
type_name_generic!(BeaconBlockBodyCapella, "BeaconBlockBody");
type_name_generic!(BeaconBlockBodyEip4844, "BeaconBlockBody");
type_name!(BeaconBlockHeader);
type_name_generic!(BeaconState);
type_name!(Checkpoint);
@ -59,12 +58,10 @@ type_name!(Eth1Data);
type_name_generic!(ExecutionPayload);
type_name_generic!(ExecutionPayloadMerge, "ExecutionPayload");
type_name_generic!(ExecutionPayloadCapella, "ExecutionPayload");
type_name_generic!(ExecutionPayloadEip4844, "ExecutionPayload");
type_name_generic!(FullPayload, "ExecutionPayload");
type_name_generic!(ExecutionPayloadHeader);
type_name_generic!(ExecutionPayloadHeaderMerge, "ExecutionPayloadHeader");
type_name_generic!(ExecutionPayloadHeaderCapella, "ExecutionPayloadHeader");
type_name_generic!(ExecutionPayloadHeaderEip4844, "ExecutionPayloadHeader");
type_name_generic!(BlindedPayload, "ExecutionPayloadHeader");
type_name!(Fork);
type_name!(ForkData);
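
The `type_name!` / `type_name_generic!` calls trimmed above map Rust types to the names used by the consensus spec test directories, with an optional string override when a fork-specific struct should still report its base name (e.g. every fork's payload appearing as `"ExecutionPayload"`). A simplified, hedged sketch of how such a macro can be written; the repo's actual definition likely differs:

```rust
// Illustrative only: associate a spec-test name with a type.
pub trait TypeName {
    fn name() -> &'static str;
}

macro_rules! type_name {
    // Default: the name is just the type's own identifier.
    ($t:ty) => {
        type_name!($t, stringify!($t));
    };
    // Override: use an explicit string, as for fork-specific variants.
    ($t:ty, $name:expr) => {
        impl TypeName for $t {
            fn name() -> &'static str {
                $name
            }
        }
    };
}

struct Checkpoint;
struct ExecutionPayloadCapella;

type_name!(Checkpoint);
type_name!(ExecutionPayloadCapella, "ExecutionPayload");

fn main() {
    // Prints "Checkpoint ExecutionPayload".
    println!("{} {}", Checkpoint::name(), ExecutionPayloadCapella::name());
}
```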

View File

@ -57,11 +57,6 @@ lazy_static::lazy_static! {
"Total count of attempted block signings",
&["status"]
);
pub static ref SIGNED_BLOBS_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
"vc_signed_beacon_blobs_total",
"Total count of attempted blob signings",
&["status"]
);
pub static ref SIGNED_ATTESTATIONS_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
"vc_signed_attestations_total",
"Total count of attempted Attestation signings",

View File

@ -27,7 +27,6 @@ pub enum ForkName {
Altair,
Bellatrix,
Capella,
Eip4844,
}
#[derive(Debug, PartialEq, Serialize)]
@ -97,11 +96,6 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload<T>> Web3SignerObject<'a, T, Pa
block: None,
block_header: Some(block.block_header()),
}),
BeaconBlock::Eip4844(_) => Ok(Web3SignerObject::BeaconBlock {
version: ForkName::Eip4844,
block: None,
block_header: Some(block.block_header()),
}),
}
}