diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index c8d3aed79..2b759e4ad 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1141,7 +1141,7 @@ impl IntoExecutionPendingBlock for Arc( // Send the block, regardless of whether or not it is valid. The API // specification is very clear that this is the desired behaviour. - let wrapped_block = if matches!(block.as_ref(), &SignedBeaconBlock::Eip4844(_)) { - if let Some(sidecar) = chain.blob_cache.pop(&block_root) { - let block_and_blobs = SignedBeaconBlockAndBlobsSidecar { - beacon_block: block, - blobs_sidecar: Arc::new(sidecar), - }; - crate::publish_pubsub_message( - network_tx, - PubsubMessage::BeaconBlockAndBlobsSidecars(block_and_blobs.clone()), - )?; - BlockWrapper::BlockAndBlob(block_and_blobs) + let wrapped_block: BlockWrapper = + if matches!(block.as_ref(), &SignedBeaconBlock::Eip4844(_)) { + if let Some(sidecar) = chain.blob_cache.pop(&block_root) { + let block_and_blobs = SignedBeaconBlockAndBlobsSidecar { + beacon_block: block, + blobs_sidecar: Arc::new(sidecar), + }; + crate::publish_pubsub_message( + network_tx, + PubsubMessage::BeaconBlockAndBlobsSidecars(block_and_blobs.clone()), + )?; + block_and_blobs.into() + } else { + //FIXME(sean): This should probably return a specific no-blob-cached error code, beacon API coordination required + return Err(warp_utils::reject::broadcast_without_import(format!( + "no blob cached for block" + ))); + } } else { - //FIXME(sean): This should probably return a specific no-blob-cached error code, beacon API coordination required - return Err(warp_utils::reject::broadcast_without_import(format!( - "no blob cached for block" - ))); - } - } else { - crate::publish_pubsub_message(network_tx, PubsubMessage::BeaconBlock(block.clone()))?; - BlockWrapper::Block(block) - }; + crate::publish_pubsub_message(network_tx, PubsubMessage::BeaconBlock(block.clone()))?; + block.into() + }; // Determine the delay after the start of the slot, register it with metrics. let block = wrapped_block.block(); diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index df5350d94..eb5cc7f27 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -73,7 +73,7 @@ impl Encoder> for SSZSnappyInboundCodec< RPCResponse::BlocksByRange(res) => res.as_ssz_bytes(), RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(), RPCResponse::BlobsByRange(res) => res.as_ssz_bytes(), - RPCResponse::BlobsByRoot(res) => res.as_ssz_bytes(), + RPCResponse::BlockAndBlobsByRoot(res) => res.as_ssz_bytes(), RPCResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), RPCResponse::Pong(res) => res.data.as_ssz_bytes(), RPCResponse::MetaData(res) => @@ -439,7 +439,8 @@ fn context_bytes( SignedBeaconBlock::Base { .. 
} => Some(fork_context.genesis_context_bytes()), }; } - if let RPCResponse::BlobsByRange(_) | RPCResponse::BlobsByRoot(_) = rpc_variant { + if let RPCResponse::BlobsByRange(_) | RPCResponse::BlockAndBlobsByRoot(_) = rpc_variant + { return fork_context.to_context_bytes(ForkName::Eip4844); } } @@ -585,7 +586,7 @@ fn handle_v1_response( )))), _ => Err(RPCError::ErrorResponse( RPCResponseErrorCode::InvalidRequest, - "Invalid forkname for blobsbyrange".to_string(), + "Invalid fork name for blobs by range".to_string(), )), } } @@ -597,12 +598,12 @@ fn handle_v1_response( ) })?; match fork_name { - ForkName::Eip4844 => Ok(Some(RPCResponse::BlobsByRoot(Arc::new( + ForkName::Eip4844 => Ok(Some(RPCResponse::BlockAndBlobsByRoot( SignedBeaconBlockAndBlobsSidecar::from_ssz_bytes(decoded_buffer)?, - )))), + ))), _ => Err(RPCError::ErrorResponse( RPCResponseErrorCode::InvalidRequest, - "Invalid forkname for blobsbyroot".to_string(), + "Invalid fork name for block and blobs by root".to_string(), )), } } diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 53e6b6759..02e24d8e1 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -281,7 +281,7 @@ pub enum RPCResponse { LightClientBootstrap(LightClientBootstrap), /// A response to a get BLOBS_BY_ROOT request. - BlobsByRoot(Arc>), + BlockAndBlobsByRoot(SignedBeaconBlockAndBlobsSidecar), /// A PONG response to a PING request. Pong(Ping), @@ -372,7 +372,7 @@ impl RPCCodedResponse { RPCResponse::BlocksByRange(_) => true, RPCResponse::BlocksByRoot(_) => true, RPCResponse::BlobsByRange(_) => true, - RPCResponse::BlobsByRoot(_) => true, + RPCResponse::BlockAndBlobsByRoot(_) => true, RPCResponse::Pong(_) => false, RPCResponse::MetaData(_) => false, RPCResponse::LightClientBootstrap(_) => false, @@ -409,7 +409,7 @@ impl RPCResponse { RPCResponse::BlocksByRange(_) => Protocol::BlocksByRange, RPCResponse::BlocksByRoot(_) => Protocol::BlocksByRoot, RPCResponse::BlobsByRange(_) => Protocol::BlobsByRange, - RPCResponse::BlobsByRoot(_) => Protocol::BlobsByRoot, + RPCResponse::BlockAndBlobsByRoot(_) => Protocol::BlobsByRoot, RPCResponse::Pong(_) => Protocol::Ping, RPCResponse::MetaData(_) => Protocol::MetaData, RPCResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap, @@ -449,7 +449,7 @@ impl std::fmt::Display for RPCResponse { RPCResponse::BlobsByRange(blob) => { write!(f, "BlobsByRange: Blob slot: {}", blob.beacon_block_slot) } - RPCResponse::BlobsByRoot(blob) => { + RPCResponse::BlockAndBlobsByRoot(blob) => { write!( f, "BlobsByRoot: Blob slot: {}", diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index b6a033020..c9c239d8c 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -83,7 +83,7 @@ pub enum Response { /// A response to a LightClientUpdate request. LightClientBootstrap(LightClientBootstrap), /// A response to a get BLOBS_BY_ROOT request. 
- BlobsByRoot(Option>>), + BlobsByRoot(Option>), } impl std::convert::From> for RPCCodedResponse { @@ -98,7 +98,7 @@ impl std::convert::From> for RPCCodedResponse RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange), }, Response::BlobsByRoot(r) => match r { - Some(b) => RPCCodedResponse::Success(RPCResponse::BlobsByRoot(b)), + Some(b) => RPCCodedResponse::Success(RPCResponse::BlockAndBlobsByRoot(b)), None => RPCCodedResponse::StreamTermination(ResponseTermination::BlobsByRoot), }, Response::BlobsByRange(r) => match r { diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 9adf7699b..d59bc4bfd 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1315,7 +1315,7 @@ impl Network { RPCResponse::BlocksByRoot(resp) => { self.build_response(id, peer_id, Response::BlocksByRoot(Some(resp))) } - RPCResponse::BlobsByRoot(resp) => { + RPCResponse::BlockAndBlobsByRoot(resp) => { self.build_response(id, peer_id, Response::BlobsByRoot(Some(resp))) } // Should never be reached diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 9de006c84..37d6edef8 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -1699,7 +1699,7 @@ impl BeaconProcessor { message_id, peer_id, peer_client, - BlockWrapper::Block(block), + block.into(), work_reprocessing_tx, duplicate_cache, seen_timestamp, @@ -1721,7 +1721,7 @@ impl BeaconProcessor { message_id, peer_id, peer_client, - BlockWrapper::BlockAndBlob(block_sidecar_pair), + block_sidecar_pair.into(), work_reprocessing_tx, duplicate_cache, seen_timestamp, diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index 3ade1bb87..69bd7da11 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -230,10 +230,10 @@ impl Worker { Ok((Some(block), Some(blobs))) => { self.send_response( peer_id, - Response::BlobsByRoot(Some(Arc::new(SignedBeaconBlockAndBlobsSidecar { + Response::BlobsByRoot(Some(SignedBeaconBlockAndBlobsSidecar { beacon_block: block, blobs_sidecar: blobs, - }))), + })), request_id, ); send_block_count += 1; diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 5af62c37d..284f96da7 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -190,11 +190,8 @@ impl Worker { let unwrapped = downloaded_blocks .into_iter() - .map(|block| match block { - BlockWrapper::Block(block) => block, - //FIXME(sean) handle blobs in backfill - BlockWrapper::BlockAndBlob(_) => todo!(), - }) + //FIXME(sean) handle blobs in backfill + .map(|block| block.block_cloned()) .collect(); match self.process_backfill_blocks(unwrapped) { diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index 5ee0e367b..d0879baba 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -223,10 +223,10 @@ impl Processor { SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. } => { unreachable!("Block lookups do not request BBRange requests") } - id @ (SyncId::BackFillSync { .. 
} - | SyncId::RangeSync { .. } - | SyncId::BackFillSidecarPair { .. } - | SyncId::RangeSidecarPair { .. }) => id, + id @ (SyncId::BackFillBlocks { .. } + | SyncId::RangeBlocks { .. } + | SyncId::BackFillBlobs { .. } + | SyncId::RangeBlobs { .. }) => id, }, RequestId::Router => unreachable!("All BBRange requests belong to sync"), }; @@ -258,7 +258,7 @@ impl Processor { ); if let RequestId::Sync(id) = request_id { - self.send_to_sync(SyncMessage::RpcGlob { + self.send_to_sync(SyncMessage::RpcBlobs { peer_id, request_id: id, blob_sidecar, @@ -282,10 +282,10 @@ impl Processor { let request_id = match request_id { RequestId::Sync(sync_id) => match sync_id { id @ (SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. }) => id, - SyncId::BackFillSync { .. } - | SyncId::RangeSync { .. } - | SyncId::RangeSidecarPair { .. } - | SyncId::BackFillSidecarPair { .. } => { + SyncId::BackFillBlocks { .. } + | SyncId::RangeBlocks { .. } + | SyncId::RangeBlobs { .. } + | SyncId::BackFillBlobs { .. } => { unreachable!("Batch syncing do not request BBRoot requests") } }, @@ -310,15 +310,15 @@ impl Processor { &mut self, peer_id: PeerId, request_id: RequestId, - block_and_blobs: Option>>, + block_and_blobs: Option>, ) { let request_id = match request_id { RequestId::Sync(sync_id) => match sync_id { id @ (SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. }) => id, - SyncId::BackFillSync { .. } - | SyncId::RangeSync { .. } - | SyncId::RangeSidecarPair { .. } - | SyncId::BackFillSidecarPair { .. } => { + SyncId::BackFillBlocks { .. } + | SyncId::RangeBlocks { .. } + | SyncId::RangeBlobs { .. } + | SyncId::BackFillBlobs { .. } => { unreachable!("Batch syncing does not request BBRoot requests") } }, @@ -330,7 +330,7 @@ impl Processor { "Received BlockAndBlobssByRoot Response"; "peer" => %peer_id, ); - self.send_to_sync(SyncMessage::RpcBlockAndGlob { + self.send_to_sync(SyncMessage::RpcBlockAndBlobs { peer_id, request_id, block_and_blobs, diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index 56ed55153..ad1bfb1d4 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -536,7 +536,7 @@ impl BackFillSync { let process_id = ChainSegmentProcessId::BackSyncBatchId(batch_id); self.current_processing_batch = Some(batch_id); - let work_event = BeaconWorkEvent::chain_segment(process_id, blocks.into_wrapped_blocks()); + let work_event = BeaconWorkEvent::chain_segment(process_id, blocks); if let Err(e) = network.processor_channel().try_send(work_event) { crit!(self.log, "Failed to send backfill segment to processor."; "msg" => "process_batch", "error" => %e, "batch" => self.processing_target); diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index f82417db3..46ac5bd0f 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -1,12 +1,9 @@ use std::{collections::VecDeque, sync::Arc}; -use types::{ - signed_block_and_blobs::BlockWrapper, BlobsSidecar, EthSpec, SignedBeaconBlock, - SignedBeaconBlockAndBlobsSidecar, -}; +use types::{signed_block_and_blobs::BlockWrapper, BlobsSidecar, EthSpec, SignedBeaconBlock}; #[derive(Debug, Default)] -pub struct BlockBlobRequestInfo { +pub struct BlocksAndBlobsRequestInfo { /// Blocks we have received awaiting for their corresponding sidecar. 
accumulated_blocks: VecDeque>>, /// Sidecars we have received awaiting for their corresponding block. @@ -17,7 +14,7 @@ pub struct BlockBlobRequestInfo { is_sidecars_stream_terminated: bool, } -impl BlockBlobRequestInfo { +impl BlocksAndBlobsRequestInfo { pub fn add_block_response(&mut self, maybe_block: Option>>) { match maybe_block { Some(block) => self.accumulated_blocks.push_back(block), @@ -33,7 +30,7 @@ impl BlockBlobRequestInfo { } pub fn into_responses(self) -> Result>, &'static str> { - let BlockBlobRequestInfo { + let BlocksAndBlobsRequestInfo { accumulated_blocks, mut accumulated_sidecars, .. @@ -51,14 +48,9 @@ impl BlockBlobRequestInfo { { let blobs_sidecar = accumulated_sidecars.pop_front().ok_or("missing sidecar")?; - Ok(BlockWrapper::BlockAndBlob( - SignedBeaconBlockAndBlobsSidecar { - beacon_block, - blobs_sidecar, - }, - )) + Ok(BlockWrapper::new_with_blobs(beacon_block, blobs_sidecar)) } else { - Ok(BlockWrapper::Block(beacon_block)) + Ok(beacon_block.into()) } }) .collect::, _>>(); diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 29838bde7..5da203e0e 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -35,13 +35,13 @@ use super::backfill_sync::{BackFillSync, ProcessResult, SyncStart}; use super::block_lookups::BlockLookups; -use super::network_context::{BlockOrBlob, SyncNetworkContext}; +use super::network_context::{BlockOrBlobs, SyncNetworkContext}; use super::peer_sync_info::{remote_sync_type, PeerSyncType}; use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; -use crate::sync::range_sync::ExpectedBatchTy; +use crate::sync::range_sync::ByRangeRequestType; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, EngineState}; use futures::StreamExt; use lighthouse_network::rpc::methods::MAX_REQUEST_BLOCKS; @@ -79,13 +79,13 @@ pub enum RequestId { /// Request searching for a block's parent. The id is the chain ParentLookup { id: Id }, /// Request was from the backfill sync algorithm. - BackFillSync { id: Id }, - /// Backfill request for blocks and sidecars. - BackFillSidecarPair { id: Id }, + BackFillBlocks { id: Id }, + /// Backfill request for blob sidecars. + BackFillBlobs { id: Id }, /// The request was from a chain in the range sync algorithm. - RangeSync { id: Id }, - /// The request was from a chain in range, asking for ranges of blocks and sidecars. - RangeSidecarPair { id: Id }, + RangeBlocks { id: Id }, + /// The request was from a chain in range, asking for ranges blob sidecars. + RangeBlobs { id: Id }, } #[derive(Debug)] @@ -103,7 +103,7 @@ pub enum SyncMessage { }, /// A blob has been received from the RPC. - RpcGlob { + RpcBlobs { request_id: RequestId, peer_id: PeerId, blob_sidecar: Option>>, @@ -111,10 +111,10 @@ pub enum SyncMessage { }, /// A block and blobs have been received from the RPC. 
- RpcBlockAndGlob { + RpcBlockAndBlobs { request_id: RequestId, peer_id: PeerId, - block_and_blobs: Option>>, + block_and_blobs: Option>, seen_timestamp: Duration, }, @@ -295,10 +295,10 @@ impl SyncManager { self.block_lookups .parent_lookup_failed(id, peer_id, &mut self.network, error); } - RequestId::BackFillSync { id } => { + RequestId::BackFillBlocks { id } => { if let Some(batch_id) = self .network - .backfill_request_failed(id, ExpectedBatchTy::OnlyBlock) + .backfill_request_failed(id, ByRangeRequestType::Blocks) { match self .backfill_sync @@ -310,10 +310,10 @@ impl SyncManager { } } - RequestId::BackFillSidecarPair { id } => { + RequestId::BackFillBlobs { id } => { if let Some(batch_id) = self .network - .backfill_request_failed(id, ExpectedBatchTy::OnlyBlockBlobs) + .backfill_request_failed(id, ByRangeRequestType::BlocksAndBlobs) { match self .backfill_sync @@ -324,10 +324,10 @@ impl SyncManager { } } } - RequestId::RangeSync { id } => { + RequestId::RangeBlocks { id } => { if let Some((chain_id, batch_id)) = self .network - .range_sync_request_failed(id, ExpectedBatchTy::OnlyBlock) + .range_sync_request_failed(id, ByRangeRequestType::Blocks) { self.range_sync.inject_error( &mut self.network, @@ -339,10 +339,10 @@ impl SyncManager { self.update_sync_state() } } - RequestId::RangeSidecarPair { id } => { + RequestId::RangeBlobs { id } => { if let Some((chain_id, batch_id)) = self .network - .range_sync_request_failed(id, ExpectedBatchTy::OnlyBlockBlobs) + .range_sync_request_failed(id, ByRangeRequestType::BlocksAndBlobs) { self.range_sync.inject_error( &mut self.network, @@ -648,18 +648,18 @@ impl SyncManager { .block_lookups .parent_chain_processed(chain_hash, result, &mut self.network), }, - SyncMessage::RpcGlob { + SyncMessage::RpcBlobs { request_id, peer_id, blob_sidecar, seen_timestamp, - } => self.rpc_sidecar_received(request_id, peer_id, blob_sidecar, seen_timestamp), - SyncMessage::RpcBlockAndGlob { + } => self.rpc_blobs_received(request_id, peer_id, blob_sidecar, seen_timestamp), + SyncMessage::RpcBlockAndBlobs { request_id, peer_id, block_and_blobs, seen_timestamp, - } => self.rpc_block_sidecar_pair_received( + } => self.rpc_block_block_and_blobs_received( request_id, peer_id, block_and_blobs, @@ -734,18 +734,18 @@ impl SyncManager { RequestId::SingleBlock { id } => self.block_lookups.single_block_lookup_response( id, peer_id, - beacon_block.map(|block| BlockWrapper::Block(block)), + beacon_block.map(|block| block.into()), seen_timestamp, &mut self.network, ), RequestId::ParentLookup { id } => self.block_lookups.parent_lookup_response( id, peer_id, - beacon_block.map(|block| BlockWrapper::Block(block)), + beacon_block.map(|block| block.into()), seen_timestamp, &mut self.network, ), - RequestId::BackFillSync { id } => { + RequestId::BackFillBlocks { id } => { let is_stream_terminator = beacon_block.is_none(); if let Some(batch_id) = self .network @@ -756,7 +756,7 @@ impl SyncManager { batch_id, &peer_id, id, - beacon_block.map(|block| BlockWrapper::Block(block)), + beacon_block.map(|block| block.into()), ) { Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), Ok(ProcessResult::Successful) => {} @@ -768,7 +768,7 @@ impl SyncManager { } } } - RequestId::RangeSync { id } => { + RequestId::RangeBlocks { id } => { let is_stream_terminator = beacon_block.is_none(); if let Some((chain_id, batch_id)) = self .network @@ -780,28 +780,28 @@ impl SyncManager { chain_id, batch_id, id, - beacon_block.map(|block| BlockWrapper::Block(block)), + beacon_block.map(|block| 
block.into()), ); self.update_sync_state(); } } - RequestId::BackFillSidecarPair { id } => { - self.block_blob_backfill_response(id, peer_id, beacon_block.into()) + RequestId::BackFillBlobs { id } => { + self.blobs_backfill_response(id, peer_id, beacon_block.into()) } - RequestId::RangeSidecarPair { id } => { - self.block_blob_range_response(id, peer_id, beacon_block.into()) + RequestId::RangeBlobs { id } => { + self.blobs_range_response(id, peer_id, beacon_block.into()) } } } /// Handles receiving a response for a range sync request that should have both blocks and /// blobs. - fn block_blob_range_response( + fn blobs_range_response( &mut self, id: Id, peer_id: PeerId, - block_or_blob: BlockOrBlob, + block_or_blob: BlockOrBlobs, ) { if let Some((chain_id, batch_id, block_responses)) = self .network @@ -834,7 +834,7 @@ impl SyncManager { "peer_id" => %peer_id, "batch_id" => batch_id, "error" => e ); // TODO: penalize the peer for being a bad boy - let id = RequestId::RangeSidecarPair { id }; + let id = RequestId::RangeBlobs { id }; self.inject_error(peer_id, id, RPCError::InvalidData(e.into())) } } @@ -843,11 +843,11 @@ impl SyncManager { /// Handles receiving a response for a Backfill sync request that should have both blocks and /// blobs. - fn block_blob_backfill_response( + fn blobs_backfill_response( &mut self, id: Id, peer_id: PeerId, - block_or_blob: BlockOrBlob, + block_or_blob: BlockOrBlobs, ) { if let Some((batch_id, block_responses)) = self .network @@ -886,14 +886,14 @@ impl SyncManager { "peer_id" => %peer_id, "batch_id" => batch_id, "error" => e ); // TODO: penalize the peer for being a bad boy - let id = RequestId::BackFillSidecarPair { id }; + let id = RequestId::BackFillBlobs { id }; self.inject_error(peer_id, id, RPCError::InvalidData(e.into())) } } } } - fn rpc_sidecar_received( + fn rpc_blobs_received( &mut self, request_id: RequestId, peer_id: PeerId, @@ -904,57 +904,47 @@ impl SyncManager { RequestId::SingleBlock { .. } | RequestId::ParentLookup { .. } => { unreachable!("There is no such thing as a singular 'by root' glob request that is not accompanied by the block") } - RequestId::BackFillSync { .. } => { + RequestId::BackFillBlocks { .. } => { unreachable!("An only blocks request does not receive sidecars") } - RequestId::BackFillSidecarPair { id } => { - self.block_blob_backfill_response(id, peer_id, maybe_sidecar.into()) + RequestId::BackFillBlobs { id } => { + self.blobs_backfill_response(id, peer_id, maybe_sidecar.into()) } - RequestId::RangeSync { .. } => { + RequestId::RangeBlocks { .. 
} => { unreachable!("Only-blocks range requests don't receive sidecars") } - RequestId::RangeSidecarPair { id } => { - self.block_blob_range_response(id, peer_id, maybe_sidecar.into()) + RequestId::RangeBlobs { id } => { + self.blobs_range_response(id, peer_id, maybe_sidecar.into()) } } } - fn rpc_block_sidecar_pair_received( + fn rpc_block_block_and_blobs_received( &mut self, request_id: RequestId, peer_id: PeerId, - block_sidecar_pair: Option>>, + block_sidecar_pair: Option>, seen_timestamp: Duration, ) { match request_id { RequestId::SingleBlock { id } => self.block_lookups.single_block_lookup_response( id, peer_id, - block_sidecar_pair.map(|block_sidecar_pair| { - BlockWrapper::BlockAndBlob( - // TODO: why is this in an arc - (*block_sidecar_pair).clone(), - ) - }), + block_sidecar_pair.map(|block_sidecar_pair| block_sidecar_pair.into()), seen_timestamp, &mut self.network, ), RequestId::ParentLookup { id } => self.block_lookups.parent_lookup_response( id, peer_id, - block_sidecar_pair.map(|block_sidecar_pair| { - BlockWrapper::BlockAndBlob( - // TODO: why is this in an arc - (*block_sidecar_pair).clone(), - ) - }), + block_sidecar_pair.map(|block_sidecar_pair| block_sidecar_pair.into()), seen_timestamp, &mut self.network, ), - RequestId::BackFillSync { .. } - | RequestId::BackFillSidecarPair { .. } - | RequestId::RangeSync { .. } - | RequestId::RangeSidecarPair { .. } => unreachable!( + RequestId::BackFillBlocks { .. } + | RequestId::BackFillBlobs { .. } + | RequestId::RangeBlocks { .. } + | RequestId::RangeBlobs { .. } => unreachable!( "since range requests are not block-glob coupled, this should never be reachable" ), } diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 36da3bf82..c54b3b1a9 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -1,9 +1,9 @@ //! Provides network functionality for the Syncing thread. This fundamentally wraps a network //! channel and stores a global RPC ID to perform requests. -use super::block_sidecar_coupling::BlockBlobRequestInfo; +use super::block_sidecar_coupling::BlocksAndBlobsRequestInfo; use super::manager::{Id, RequestId as SyncRequestId}; -use super::range_sync::{BatchId, ChainId, ExpectedBatchTy}; +use super::range_sync::{BatchId, ByRangeRequestType, ChainId}; use crate::beacon_processor::WorkEvent; use crate::service::{NetworkMessage, RequestId}; use crate::status::ToStatusMessage; @@ -38,11 +38,12 @@ pub struct SyncNetworkContext { backfill_requests: FnvHashMap, /// BlocksByRange requests paired with BlobsByRange requests made by the range. - range_sidecar_pair_requests: - FnvHashMap)>, + range_blocks_and_blobs_requests: + FnvHashMap)>, /// BlocksByRange requests paired with BlobsByRange requests made by the backfill sync. - backfill_sidecar_pair_requests: FnvHashMap)>, + backfill_blocks_and_blobs_requests: + FnvHashMap)>, /// Whether the ee is online. If it's not, we don't allow access to the /// `beacon_processor_send`. @@ -58,20 +59,20 @@ pub struct SyncNetworkContext { } /// Small enumeration to make dealing with block and blob requests easier. 
-pub enum BlockOrBlob { +pub enum BlockOrBlobs { Block(Option>>), - Blob(Option>>), + Blobs(Option>>), } -impl From>>> for BlockOrBlob { +impl From>>> for BlockOrBlobs { fn from(block: Option>>) -> Self { - BlockOrBlob::Block(block) + BlockOrBlobs::Block(block) } } -impl From>>> for BlockOrBlob { +impl From>>> for BlockOrBlobs { fn from(blob: Option>>) -> Self { - BlockOrBlob::Blob(blob) + BlockOrBlobs::Blobs(blob) } } @@ -89,8 +90,8 @@ impl SyncNetworkContext { request_id: 1, range_requests: Default::default(), backfill_requests: Default::default(), - range_sidecar_pair_requests: Default::default(), - backfill_sidecar_pair_requests: Default::default(), + range_blocks_and_blobs_requests: Default::default(), + backfill_blocks_and_blobs_requests: Default::default(), execution_engine_state: EngineState::Online, // always assume `Online` at the start beacon_processor_send, chain, @@ -140,13 +141,13 @@ impl SyncNetworkContext { pub fn blocks_by_range_request( &mut self, peer_id: PeerId, - batch_type: ExpectedBatchTy, + batch_type: ByRangeRequestType, request: BlocksByRangeRequest, chain_id: ChainId, batch_id: BatchId, ) -> Result { match batch_type { - ExpectedBatchTy::OnlyBlock => { + ByRangeRequestType::Blocks => { trace!( self.log, "Sending BlocksByRange request"; @@ -156,7 +157,7 @@ impl SyncNetworkContext { ); let request = Request::BlocksByRange(request); let id = self.next_id(); - let request_id = RequestId::Sync(SyncRequestId::RangeSync { id }); + let request_id = RequestId::Sync(SyncRequestId::RangeBlocks { id }); self.send_network_msg(NetworkMessage::SendRequest { peer_id, request, @@ -165,7 +166,7 @@ impl SyncNetworkContext { self.range_requests.insert(id, (chain_id, batch_id)); Ok(id) } - ExpectedBatchTy::OnlyBlockBlobs => { + ByRangeRequestType::BlocksAndBlobs => { debug!( self.log, "Sending BlocksByRange and BlobsByRange requests"; @@ -176,7 +177,7 @@ impl SyncNetworkContext { // create the shared request id. This is fine since the rpc handles substream ids. let id = self.next_id(); - let request_id = RequestId::Sync(SyncRequestId::RangeSidecarPair { id }); + let request_id = RequestId::Sync(SyncRequestId::RangeBlobs { id }); // Create the blob request based on the blob request. 
let blobs_request = Request::BlobsByRange(BlobsByRangeRequest { @@ -196,8 +197,8 @@ impl SyncNetworkContext { request: blobs_request, request_id, })?; - let block_blob_info = BlockBlobRequestInfo::default(); - self.range_sidecar_pair_requests + let block_blob_info = BlocksAndBlobsRequestInfo::default(); + self.range_blocks_and_blobs_requests .insert(id, (chain_id, batch_id, block_blob_info)); Ok(id) } @@ -208,12 +209,12 @@ impl SyncNetworkContext { pub fn backfill_blocks_by_range_request( &mut self, peer_id: PeerId, - batch_type: ExpectedBatchTy, + batch_type: ByRangeRequestType, request: BlocksByRangeRequest, batch_id: BatchId, ) -> Result { match batch_type { - ExpectedBatchTy::OnlyBlock => { + ByRangeRequestType::Blocks => { trace!( self.log, "Sending backfill BlocksByRange request"; @@ -223,7 +224,7 @@ impl SyncNetworkContext { ); let request = Request::BlocksByRange(request); let id = self.next_id(); - let request_id = RequestId::Sync(SyncRequestId::BackFillSync { id }); + let request_id = RequestId::Sync(SyncRequestId::BackFillBlocks { id }); self.send_network_msg(NetworkMessage::SendRequest { peer_id, request, @@ -232,7 +233,7 @@ impl SyncNetworkContext { self.backfill_requests.insert(id, batch_id); Ok(id) } - ExpectedBatchTy::OnlyBlockBlobs => { + ByRangeRequestType::BlocksAndBlobs => { debug!( self.log, "Sending backfill BlocksByRange and BlobsByRange requests"; @@ -243,7 +244,7 @@ impl SyncNetworkContext { // create the shared request id. This is fine since the rpc handles substream ids. let id = self.next_id(); - let request_id = RequestId::Sync(SyncRequestId::BackFillSidecarPair { id }); + let request_id = RequestId::Sync(SyncRequestId::BackFillBlobs { id }); // Create the blob request based on the blob request. let blobs_request = Request::BlobsByRange(BlobsByRangeRequest { @@ -263,8 +264,8 @@ impl SyncNetworkContext { request: blobs_request, request_id, })?; - let block_blob_info = BlockBlobRequestInfo::default(); - self.backfill_sidecar_pair_requests + let block_blob_info = BlocksAndBlobsRequestInfo::default(); + self.backfill_blocks_and_blobs_requests .insert(id, (batch_id, block_blob_info)); Ok(id) } @@ -288,18 +289,18 @@ impl SyncNetworkContext { pub fn range_sync_block_and_blob_response( &mut self, request_id: Id, - block_or_blob: BlockOrBlob, + block_or_blob: BlockOrBlobs, ) -> Option<( ChainId, BatchId, Result>, &'static str>, )> { - match self.range_sidecar_pair_requests.entry(request_id) { + match self.range_blocks_and_blobs_requests.entry(request_id) { Entry::Occupied(mut entry) => { let (_, _, info) = entry.get_mut(); match block_or_blob { - BlockOrBlob::Block(maybe_block) => info.add_block_response(maybe_block), - BlockOrBlob::Blob(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar), + BlockOrBlobs::Block(maybe_block) => info.add_block_response(maybe_block), + BlockOrBlobs::Blobs(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar), } if info.is_finished() { // If the request is finished, dequeue everything @@ -316,28 +317,28 @@ impl SyncNetworkContext { pub fn range_sync_request_failed( &mut self, request_id: Id, - batch_type: ExpectedBatchTy, + batch_type: ByRangeRequestType, ) -> Option<(ChainId, BatchId)> { match batch_type { - ExpectedBatchTy::OnlyBlockBlobs => self - .range_sidecar_pair_requests + ByRangeRequestType::BlocksAndBlobs => self + .range_blocks_and_blobs_requests .remove(&request_id) .map(|(chain_id, batch_id, _info)| (chain_id, batch_id)), - ExpectedBatchTy::OnlyBlock => self.range_requests.remove(&request_id), + 
ByRangeRequestType::Blocks => self.range_requests.remove(&request_id), } } pub fn backfill_request_failed( &mut self, request_id: Id, - batch_type: ExpectedBatchTy, + batch_type: ByRangeRequestType, ) -> Option { match batch_type { - ExpectedBatchTy::OnlyBlockBlobs => self - .backfill_sidecar_pair_requests + ByRangeRequestType::BlocksAndBlobs => self + .backfill_blocks_and_blobs_requests .remove(&request_id) .map(|(batch_id, _info)| batch_id), - ExpectedBatchTy::OnlyBlock => self.backfill_requests.remove(&request_id), + ByRangeRequestType::Blocks => self.backfill_requests.remove(&request_id), } } @@ -360,14 +361,14 @@ impl SyncNetworkContext { pub fn backfill_sync_block_and_blob_response( &mut self, request_id: Id, - block_or_blob: BlockOrBlob, + block_or_blob: BlockOrBlobs, ) -> Option<(BatchId, Result>, &'static str>)> { - match self.backfill_sidecar_pair_requests.entry(request_id) { + match self.backfill_blocks_and_blobs_requests.entry(request_id) { Entry::Occupied(mut entry) => { let (_, info) = entry.get_mut(); match block_or_blob { - BlockOrBlob::Block(maybe_block) => info.add_block_response(maybe_block), - BlockOrBlob::Blob(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar), + BlockOrBlobs::Block(maybe_block) => info.add_block_response(maybe_block), + BlockOrBlobs::Blobs(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar), } if info.is_finished() { // If the request is finished, dequeue everything @@ -533,7 +534,7 @@ impl SyncNetworkContext { /// Check whether a batch for this epoch (and only this epoch) should request just blocks or /// blocks and blobs. - pub fn batch_type(&self, epoch: types::Epoch) -> ExpectedBatchTy { + pub fn batch_type(&self, epoch: types::Epoch) -> ByRangeRequestType { const _: () = assert!( super::backfill_sync::BACKFILL_EPOCHS_PER_BATCH == 1 && super::range_sync::EPOCHS_PER_BATCH == 1, @@ -542,18 +543,18 @@ impl SyncNetworkContext { #[cfg(test)] { // Keep tests only for blocks. - return ExpectedBatchTy::OnlyBlock; + return ByRangeRequestType::Blocks; } #[cfg(not(test))] { if let Some(data_availability_boundary) = self.chain.data_availability_boundary() { if epoch >= data_availability_boundary { - ExpectedBatchTy::OnlyBlockBlobs + ByRangeRequestType::BlocksAndBlobs } else { - ExpectedBatchTy::OnlyBlock + ByRangeRequestType::Blocks } } else { - ExpectedBatchTy::OnlyBlock + ByRangeRequestType::Blocks } } } diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 80f34f8b4..184dcffc4 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -4,10 +4,9 @@ use lighthouse_network::PeerId; use std::collections::HashSet; use std::hash::{Hash, Hasher}; use std::ops::Sub; -use std::sync::Arc; use strum::Display; use types::signed_block_and_blobs::BlockWrapper; -use types::{Epoch, EthSpec, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar, Slot}; +use types::{Epoch, EthSpec, Slot}; /// The number of times to retry a batch before it is considered failed. const MAX_BATCH_DOWNLOAD_ATTEMPTS: u8 = 5; @@ -16,36 +15,12 @@ const MAX_BATCH_DOWNLOAD_ATTEMPTS: u8 = 5; /// after `MAX_BATCH_PROCESSING_ATTEMPTS` times, it is considered faulty. 
const MAX_BATCH_PROCESSING_ATTEMPTS: u8 = 3; -pub enum BatchTy { - Blocks(Vec>>), - BlocksAndBlobs(Vec>), -} - -impl BatchTy { - pub fn into_wrapped_blocks(self) -> Vec> { - match self { - BatchTy::Blocks(blocks) => blocks - .into_iter() - .map(|block| BlockWrapper::Block(block)) - .collect(), - BatchTy::BlocksAndBlobs(block_sidecar_pair) => block_sidecar_pair - .into_iter() - .map(|block_sidecar_pair| BlockWrapper::BlockAndBlob(block_sidecar_pair)) - .collect(), - } - } -} - -/// Error representing a batch with mixed block types. -#[derive(Debug)] -pub struct MixedBlockTyErr; - /// Type of expected batch. #[derive(Debug, Copy, Clone, Display)] #[strum(serialize_all = "snake_case")] -pub enum ExpectedBatchTy { - OnlyBlockBlobs, - OnlyBlock, +pub enum ByRangeRequestType { + BlocksAndBlobs, + Blocks, } /// Allows customisation of the above constants used in other sync methods such as BackFillSync. @@ -131,7 +106,7 @@ pub struct BatchInfo { /// State of the batch. state: BatchState, /// Whether this batch contains all blocks or all blocks and blobs. - batch_type: ExpectedBatchTy, + batch_type: ByRangeRequestType, /// Pin the generic marker: std::marker::PhantomData, } @@ -180,7 +155,7 @@ impl BatchInfo { /// fork boundary will be of mixed type (all blocks and one last blockblob), and I don't want to /// deal with this for now. /// This means finalization might be slower in eip4844 - pub fn new(start_epoch: &Epoch, num_of_epochs: u64, batch_type: ExpectedBatchTy) -> Self { + pub fn new(start_epoch: &Epoch, num_of_epochs: u64, batch_type: ByRangeRequestType) -> Self { let start_slot = start_epoch.start_slot(T::slots_per_epoch()); let end_slot = start_slot + num_of_epochs * T::slots_per_epoch(); BatchInfo { @@ -243,7 +218,7 @@ impl BatchInfo { } /// Returns a BlocksByRange request associated with the batch. - pub fn to_blocks_by_range_request(&self) -> (BlocksByRangeRequest, ExpectedBatchTy) { + pub fn to_blocks_by_range_request(&self) -> (BlocksByRangeRequest, ByRangeRequestType) { ( BlocksByRangeRequest { start_slot: self.start_slot.into(), @@ -408,30 +383,11 @@ impl BatchInfo { } } - pub fn start_processing(&mut self) -> Result, WrongState> { + pub fn start_processing(&mut self) -> Result>, WrongState> { match self.state.poison() { BatchState::AwaitingProcessing(peer, blocks) => { self.state = BatchState::Processing(Attempt::new::(peer, &blocks)); - match self.batch_type { - ExpectedBatchTy::OnlyBlockBlobs => { - let blocks = blocks.into_iter().map(|block| { - let BlockWrapper::BlockAndBlob(block_and_blob) = block else { - panic!("Batches should never have a mixed type. This is a bug. Contact D") - }; - block_and_blob - }).collect(); - Ok(BatchTy::BlocksAndBlobs(blocks)) - } - ExpectedBatchTy::OnlyBlock => { - let blocks = blocks.into_iter().map(|block| { - let BlockWrapper::Block(block) = block else { - panic!("Batches should never have a mixed type. This is a bug. 
Contact D") - }; - block - }).collect(); - Ok(BatchTy::Blocks(blocks)) - } - } + Ok(blocks) } BatchState::Poisoned => unreachable!("Poisoned batch"), other => { diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 89e120050..d60de3224 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -332,7 +332,7 @@ impl SyncingChain { let process_id = ChainSegmentProcessId::RangeBatchId(self.id, batch_id, count_unrealized); self.current_processing_batch = Some(batch_id); - let work_event = BeaconWorkEvent::chain_segment(process_id, blocks.into_wrapped_blocks()); + let work_event = BeaconWorkEvent::chain_segment(process_id, blocks); if let Err(e) = beacon_processor_send.try_send(work_event) { crit!(self.log, "Failed to send chain segment to processor."; "msg" => "process_batch", diff --git a/beacon_node/network/src/sync/range_sync/mod.rs b/beacon_node/network/src/sync/range_sync/mod.rs index 284260321..d0f2f9217 100644 --- a/beacon_node/network/src/sync/range_sync/mod.rs +++ b/beacon_node/network/src/sync/range_sync/mod.rs @@ -9,8 +9,8 @@ mod range; mod sync_type; pub use batch::{ - BatchConfig, BatchInfo, BatchOperationOutcome, BatchProcessingResult, BatchState, BatchTy, - ExpectedBatchTy, + BatchConfig, BatchInfo, BatchOperationOutcome, BatchProcessingResult, BatchState, + ByRangeRequestType, }; pub use chain::{BatchId, ChainId, EPOCHS_PER_BATCH}; pub use range::RangeSync; diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 1e3474fa5..09d93b0e8 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -373,7 +373,7 @@ where #[cfg(test)] mod tests { use crate::service::RequestId; - use crate::sync::range_sync::ExpectedBatchTy; + use crate::sync::range_sync::ByRangeRequestType; use crate::NetworkMessage; use super::*; @@ -686,7 +686,7 @@ mod tests { let (peer1, local_info, head_info) = rig.head_peer(); range.add_peer(&mut rig.cx, local_info, peer1, head_info); let ((chain1, batch1), id1) = match rig.grab_request(&peer1).0 { - RequestId::Sync(crate::sync::manager::RequestId::RangeSync { id }) => { + RequestId::Sync(crate::sync::manager::RequestId::RangeBlocks { id }) => { (rig.cx.range_sync_response(id, true).unwrap(), id) } other => panic!("unexpected request {:?}", other), @@ -705,7 +705,7 @@ mod tests { let (peer2, local_info, finalized_info) = rig.finalized_peer(); range.add_peer(&mut rig.cx, local_info, peer2, finalized_info); let ((chain2, batch2), id2) = match rig.grab_request(&peer2).0 { - RequestId::Sync(crate::sync::manager::RequestId::RangeSync { id }) => { + RequestId::Sync(crate::sync::manager::RequestId::RangeBlocks { id }) => { (rig.cx.range_sync_response(id, true).unwrap(), id) } other => panic!("unexpected request {:?}", other), diff --git a/consensus/types/src/signed_block_and_blobs.rs b/consensus/types/src/signed_block_and_blobs.rs index f21545f27..c589fbcfe 100644 --- a/consensus/types/src/signed_block_and_blobs.rs +++ b/consensus/types/src/signed_block_and_blobs.rs @@ -34,33 +34,56 @@ impl SignedBeaconBlockAndBlobsSidecar { } } +/// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBeaconBlockAndBlobsSidecar`]. This newtype +/// wraps the `BlockWrapperInner` to ensure blobs cannot be accessed via an enum match. This would +/// circumvent empty blob reconstruction when accessing blobs. 
+#[derive(Clone, Debug, Derivative)] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] +pub struct BlockWrapper(BlockWrapperInner); + /// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBeaconBlockAndBlobsSidecar`]. #[derive(Clone, Debug, Derivative)] #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] -pub enum BlockWrapper { +pub enum BlockWrapperInner { Block(Arc>), BlockAndBlob(SignedBeaconBlockAndBlobsSidecar), } impl BlockWrapper { + pub fn new(block: Arc>) -> Self { + Self(BlockWrapperInner::Block(block)) + } + + pub fn new_with_blobs( + beacon_block: Arc>, + blobs_sidecar: Arc>, + ) -> Self { + Self(BlockWrapperInner::BlockAndBlob( + SignedBeaconBlockAndBlobsSidecar { + beacon_block, + blobs_sidecar, + }, + )) + } + pub fn slot(&self) -> Slot { - match self { - BlockWrapper::Block(block) => block.slot(), - BlockWrapper::BlockAndBlob(block_sidecar_pair) => { + match &self.0 { + BlockWrapperInner::Block(block) => block.slot(), + BlockWrapperInner::BlockAndBlob(block_sidecar_pair) => { block_sidecar_pair.beacon_block.slot() } } } pub fn block(&self) -> &SignedBeaconBlock { - match self { - BlockWrapper::Block(block) => &block, - BlockWrapper::BlockAndBlob(block_sidecar_pair) => &block_sidecar_pair.beacon_block, + match &self.0 { + BlockWrapperInner::Block(block) => &block, + BlockWrapperInner::BlockAndBlob(block_sidecar_pair) => &block_sidecar_pair.beacon_block, } } pub fn block_cloned(&self) -> Arc> { - match self { - BlockWrapper::Block(block) => block.clone(), - BlockWrapper::BlockAndBlob(block_sidecar_pair) => { + match &self.0 { + BlockWrapperInner::Block(block) => block.clone(), + BlockWrapperInner::BlockAndBlob(block_sidecar_pair) => { block_sidecar_pair.beacon_block.clone() } } @@ -70,20 +93,20 @@ impl BlockWrapper { &self, block_root: Option, ) -> Result>>, BlobReconstructionError> { - match self { - BlockWrapper::Block(block) => block + match &self.0 { + BlockWrapperInner::Block(block) => block .reconstruct_empty_blobs(block_root) .map(|blob_opt| blob_opt.map(Arc::new)), - BlockWrapper::BlockAndBlob(block_sidecar_pair) => { + BlockWrapperInner::BlockAndBlob(block_sidecar_pair) => { Ok(Some(block_sidecar_pair.blobs_sidecar.clone())) } } } pub fn message(&self) -> crate::BeaconBlockRef { - match self { - BlockWrapper::Block(block) => block.message(), - BlockWrapper::BlockAndBlob(block_sidecar_pair) => { + match &self.0 { + BlockWrapperInner::Block(block) => block.message(), + BlockWrapperInner::BlockAndBlob(block_sidecar_pair) => { block_sidecar_pair.beacon_block.message() } } @@ -100,14 +123,14 @@ impl BlockWrapper { Arc>, Result>>, BlobReconstructionError>, ) { - match self { - BlockWrapper::Block(block) => { + match self.0 { + BlockWrapperInner::Block(block) => { let blobs = block .reconstruct_empty_blobs(block_root) .map(|blob_opt| blob_opt.map(Arc::new)); (block, blobs) } - BlockWrapper::BlockAndBlob(block_sidecar_pair) => { + BlockWrapperInner::BlockAndBlob(block_sidecar_pair) => { let SignedBeaconBlockAndBlobsSidecar { beacon_block, blobs_sidecar, @@ -120,12 +143,18 @@ impl BlockWrapper { impl From> for BlockWrapper { fn from(block: SignedBeaconBlock) -> Self { - BlockWrapper::Block(Arc::new(block)) + BlockWrapper(BlockWrapperInner::Block(Arc::new(block))) } } impl From>> for BlockWrapper { fn from(block: Arc>) -> Self { - BlockWrapper::Block(block) + BlockWrapper(BlockWrapperInner::Block(block)) + } +} + +impl From> for BlockWrapper { + fn from(block: SignedBeaconBlockAndBlobsSidecar) -> Self { + BlockWrapper(BlockWrapperInner::BlockAndBlob(block)) } }
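
For reference, here is a minimal usage sketch (not part of the diff) of the reworked `BlockWrapper` newtype from the last hunk. It assumes the Lighthouse `types` crate context shown above and that the generic parameter elided in the extracted diff is `T: EthSpec`; only constructors and accessors that appear in this diff are used. Call sites now convert into the wrapper via `From`/`new_with_blobs` instead of constructing enum variants directly, matching the `block.into()` / `block_sidecar_pair.into()` changes elsewhere in the diff.

// Illustrative sketch only: assumes the Lighthouse `types` crate and `T: EthSpec`.
use std::sync::Arc;
use types::signed_block_and_blobs::BlockWrapper;
use types::{BlobsSidecar, EthSpec, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar};

fn wrap_examples<T: EthSpec>(
    block: Arc<SignedBeaconBlock<T>>,
    blobs: Arc<BlobsSidecar<T>>,
    pair: SignedBeaconBlockAndBlobsSidecar<T>,
) {
    // Pre-4844 path: a bare block converts via `From`, replacing the old
    // `BlockWrapper::Block(block)` construction.
    let wrapped: BlockWrapper<T> = block.clone().into();

    // Post-4844 path: couple a block with its sidecar, replacing the old
    // `BlockWrapper::BlockAndBlob(..)` construction (see block_sidecar_coupling.rs).
    let wrapped_with_blobs = BlockWrapper::new_with_blobs(block, blobs);

    // A `SignedBeaconBlockAndBlobsSidecar` also converts directly, as in publish_blocks.rs.
    let wrapped_pair: BlockWrapper<T> = pair.into();

    // Callers can no longer match on the enum; they go through accessors instead,
    // which is the point of hiding `BlockWrapperInner` behind the newtype.
    let _slot = wrapped.slot();
    let _block_ref = wrapped_with_blobs.block();
    let _block_arc = wrapped_pair.block_cloned();
}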