Deduplicate block root computation (#3590)

## Issue Addressed

NA

## Proposed Changes

This PR removes duplicated block root computation.

Computing the `SignedBeaconBlock::canonical_root` has become more expensive since the merge as we need to compute the merkle root of each transaction inside an `ExecutionPayload`.

Computing the root for [a mainnet block](https://beaconcha.in/slot/4704236) is taking ~10ms on my i7-8700K CPU @ 3.70GHz (no sha extensions). Given that our median seen-to-imported time for blocks is presently 300-400ms, removing a few duplicated block roots (~30ms) could represent an easy 10% improvement. When we consider that the seen-to-imported times include operations *after* the block has been placed in the early attester cache, we could expect the 30ms to be more significant WRT our seen-to-attestable times.

## Additional Info

NA
This commit is contained in:
Paul Hauner 2022-09-23 03:52:42 +00:00
parent 76ba0a1aaf
commit fa6ad1a11a
23 changed files with 252 additions and 106 deletions

View File

@ -2220,7 +2220,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
} }
} }
match check_block_relevancy(&block, Some(block_root), self) { match check_block_relevancy(&block, block_root, self) {
// If the block is relevant, add it to the filtered chain segment. // If the block is relevant, add it to the filtered chain segment.
Ok(_) => filtered_chain_segment.push((block_root, block)), Ok(_) => filtered_chain_segment.push((block_root, block)),
// If the block is already known, simply ignore this block. // If the block is already known, simply ignore this block.
@ -2344,7 +2344,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// Import the blocks into the chain. // Import the blocks into the chain.
for signature_verified_block in signature_verified_blocks { for signature_verified_block in signature_verified_blocks {
match self match self
.process_block(signature_verified_block, count_unrealized) .process_block(
signature_verified_block.block_root(),
signature_verified_block,
count_unrealized,
)
.await .await
{ {
Ok(_) => imported_blocks += 1, Ok(_) => imported_blocks += 1,
@ -2429,6 +2433,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// verification. /// verification.
pub async fn process_block<B: IntoExecutionPendingBlock<T>>( pub async fn process_block<B: IntoExecutionPendingBlock<T>>(
self: &Arc<Self>, self: &Arc<Self>,
block_root: Hash256,
unverified_block: B, unverified_block: B,
count_unrealized: CountUnrealized, count_unrealized: CountUnrealized,
) -> Result<Hash256, BlockError<T::EthSpec>> { ) -> Result<Hash256, BlockError<T::EthSpec>> {
@ -2444,7 +2449,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// A small closure to group the verification and import errors. // A small closure to group the verification and import errors.
let chain = self.clone(); let chain = self.clone();
let import_block = async move { let import_block = async move {
let execution_pending = unverified_block.into_execution_pending_block(&chain)?; let execution_pending =
unverified_block.into_execution_pending_block(block_root, &chain)?;
chain chain
.import_execution_pending_block(execution_pending, count_unrealized) .import_execution_pending_block(execution_pending, count_unrealized)
.await .await

View File

@ -529,7 +529,7 @@ pub fn signature_verify_chain_segment<T: BeaconChainTypes>(
} }
let (first_root, first_block) = chain_segment.remove(0); let (first_root, first_block) = chain_segment.remove(0);
let (mut parent, first_block) = load_parent(first_block, chain)?; let (mut parent, first_block) = load_parent(first_root, first_block, chain)?;
let slot = first_block.slot(); let slot = first_block.slot();
chain_segment.insert(0, (first_root, first_block)); chain_segment.insert(0, (first_root, first_block));
@ -622,9 +622,10 @@ pub struct ExecutionPendingBlock<T: BeaconChainTypes> {
pub trait IntoExecutionPendingBlock<T: BeaconChainTypes>: Sized { pub trait IntoExecutionPendingBlock<T: BeaconChainTypes>: Sized {
fn into_execution_pending_block( fn into_execution_pending_block(
self, self,
block_root: Hash256,
chain: &Arc<BeaconChain<T>>, chain: &Arc<BeaconChain<T>>,
) -> Result<ExecutionPendingBlock<T>, BlockError<T::EthSpec>> { ) -> Result<ExecutionPendingBlock<T>, BlockError<T::EthSpec>> {
self.into_execution_pending_block_slashable(chain) self.into_execution_pending_block_slashable(block_root, chain)
.map(|execution_pending| { .map(|execution_pending| {
// Supply valid block to slasher. // Supply valid block to slasher.
if let Some(slasher) = chain.slasher.as_ref() { if let Some(slasher) = chain.slasher.as_ref() {
@ -638,6 +639,7 @@ pub trait IntoExecutionPendingBlock<T: BeaconChainTypes>: Sized {
/// Convert the block to fully-verified form while producing data to aid checking slashability. /// Convert the block to fully-verified form while producing data to aid checking slashability.
fn into_execution_pending_block_slashable( fn into_execution_pending_block_slashable(
self, self,
block_root: Hash256,
chain: &Arc<BeaconChain<T>>, chain: &Arc<BeaconChain<T>>,
) -> Result<ExecutionPendingBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>>; ) -> Result<ExecutionPendingBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>>;
@ -781,7 +783,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
} else { } else {
// The proposer index was *not* cached and we must load the parent in order to determine // The proposer index was *not* cached and we must load the parent in order to determine
// the proposer index. // the proposer index.
let (mut parent, block) = load_parent(block, chain)?; let (mut parent, block) = load_parent(block_root, block, chain)?;
debug!( debug!(
chain.log, chain.log,
@ -877,11 +879,12 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for GossipVerifiedBlock<T
/// Completes verification of the wrapped `block`. /// Completes verification of the wrapped `block`.
fn into_execution_pending_block_slashable( fn into_execution_pending_block_slashable(
self, self,
block_root: Hash256,
chain: &Arc<BeaconChain<T>>, chain: &Arc<BeaconChain<T>>,
) -> Result<ExecutionPendingBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>> { ) -> Result<ExecutionPendingBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>> {
let execution_pending = let execution_pending =
SignatureVerifiedBlock::from_gossip_verified_block_check_slashable(self, chain)?; SignatureVerifiedBlock::from_gossip_verified_block_check_slashable(self, chain)?;
execution_pending.into_execution_pending_block_slashable(chain) execution_pending.into_execution_pending_block_slashable(block_root, chain)
} }
fn block(&self) -> &SignedBeaconBlock<T::EthSpec> { fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
@ -907,7 +910,7 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
// Check the anchor slot before loading the parent, to avoid spurious lookups. // Check the anchor slot before loading the parent, to avoid spurious lookups.
check_block_against_anchor_slot(block.message(), chain)?; check_block_against_anchor_slot(block.message(), chain)?;
let (mut parent, block) = load_parent(block, chain)?; let (mut parent, block) = load_parent(block_root, block, chain)?;
// Reject any block that exceeds our limit on skipped slots. // Reject any block that exceeds our limit on skipped slots.
check_block_skip_slots(chain, parent.beacon_block.slot(), block.message())?; check_block_skip_slots(chain, parent.beacon_block.slot(), block.message())?;
@ -955,7 +958,7 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
let (mut parent, block) = if let Some(parent) = from.parent { let (mut parent, block) = if let Some(parent) = from.parent {
(parent, from.block) (parent, from.block)
} else { } else {
load_parent(from.block, chain)? load_parent(from.block_root, from.block, chain)?
}; };
let state = cheap_state_advance_to_obtain_committees( let state = cheap_state_advance_to_obtain_committees(
@ -991,29 +994,29 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
Self::from_gossip_verified_block(from, chain) Self::from_gossip_verified_block(from, chain)
.map_err(|e| BlockSlashInfo::from_early_error(header, e)) .map_err(|e| BlockSlashInfo::from_early_error(header, e))
} }
pub fn block_root(&self) -> Hash256 {
self.block_root
}
} }
impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for SignatureVerifiedBlock<T> { impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for SignatureVerifiedBlock<T> {
/// Completes verification of the wrapped `block`. /// Completes verification of the wrapped `block`.
fn into_execution_pending_block_slashable( fn into_execution_pending_block_slashable(
self, self,
block_root: Hash256,
chain: &Arc<BeaconChain<T>>, chain: &Arc<BeaconChain<T>>,
) -> Result<ExecutionPendingBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>> { ) -> Result<ExecutionPendingBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>> {
let header = self.block.signed_block_header(); let header = self.block.signed_block_header();
let (parent, block) = if let Some(parent) = self.parent { let (parent, block) = if let Some(parent) = self.parent {
(parent, self.block) (parent, self.block)
} else { } else {
load_parent(self.block, chain) load_parent(self.block_root, self.block, chain)
.map_err(|e| BlockSlashInfo::SignatureValid(header.clone(), e))? .map_err(|e| BlockSlashInfo::SignatureValid(header.clone(), e))?
}; };
ExecutionPendingBlock::from_signature_verified_components( ExecutionPendingBlock::from_signature_verified_components(block, block_root, parent, chain)
block, .map_err(|e| BlockSlashInfo::SignatureValid(header, e))
self.block_root,
parent,
chain,
)
.map_err(|e| BlockSlashInfo::SignatureValid(header, e))
} }
fn block(&self) -> &SignedBeaconBlock<T::EthSpec> { fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
@ -1026,14 +1029,15 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for Arc<SignedBeaconBlock
/// and then using that implementation of `IntoExecutionPendingBlock` to complete verification. /// and then using that implementation of `IntoExecutionPendingBlock` to complete verification.
fn into_execution_pending_block_slashable( fn into_execution_pending_block_slashable(
self, self,
block_root: Hash256,
chain: &Arc<BeaconChain<T>>, chain: &Arc<BeaconChain<T>>,
) -> Result<ExecutionPendingBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>> { ) -> Result<ExecutionPendingBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>> {
// Perform an early check to prevent wasting time on irrelevant blocks. // Perform an early check to prevent wasting time on irrelevant blocks.
let block_root = check_block_relevancy(&self, None, chain) let block_root = check_block_relevancy(&self, block_root, chain)
.map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?; .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?;
SignatureVerifiedBlock::check_slashable(self, block_root, chain)? SignatureVerifiedBlock::check_slashable(self, block_root, chain)?
.into_execution_pending_block_slashable(chain) .into_execution_pending_block_slashable(block_root, chain)
} }
fn block(&self) -> &SignedBeaconBlock<T::EthSpec> { fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
@ -1088,7 +1092,7 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
* Perform cursory checks to see if the block is even worth processing. * Perform cursory checks to see if the block is even worth processing.
*/ */
check_block_relevancy(&block, Some(block_root), chain)?; check_block_relevancy(&block, block_root, chain)?;
/* /*
* Advance the given `parent.beacon_state` to the slot of the given `block`. * Advance the given `parent.beacon_state` to the slot of the given `block`.
@ -1502,7 +1506,7 @@ pub fn check_block_is_finalized_descendant<T: BeaconChainTypes>(
/// experienced whilst attempting to verify. /// experienced whilst attempting to verify.
pub fn check_block_relevancy<T: BeaconChainTypes>( pub fn check_block_relevancy<T: BeaconChainTypes>(
signed_block: &SignedBeaconBlock<T::EthSpec>, signed_block: &SignedBeaconBlock<T::EthSpec>,
block_root: Option<Hash256>, block_root: Hash256,
chain: &BeaconChain<T>, chain: &BeaconChain<T>,
) -> Result<Hash256, BlockError<T::EthSpec>> { ) -> Result<Hash256, BlockError<T::EthSpec>> {
let block = signed_block.message(); let block = signed_block.message();
@ -1526,8 +1530,6 @@ pub fn check_block_relevancy<T: BeaconChainTypes>(
return Err(BlockError::BlockSlotLimitReached); return Err(BlockError::BlockSlotLimitReached);
} }
let block_root = block_root.unwrap_or_else(|| get_block_root(signed_block));
// Do not process a block from a finalized slot. // Do not process a block from a finalized slot.
check_block_against_finalized_slot(block, block_root, chain)?; check_block_against_finalized_slot(block, block_root, chain)?;
@ -1581,6 +1583,7 @@ fn verify_parent_block_is_known<T: BeaconChainTypes>(
/// whilst attempting the operation. /// whilst attempting the operation.
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
fn load_parent<T: BeaconChainTypes>( fn load_parent<T: BeaconChainTypes>(
block_root: Hash256,
block: Arc<SignedBeaconBlock<T::EthSpec>>, block: Arc<SignedBeaconBlock<T::EthSpec>>,
chain: &BeaconChain<T>, chain: &BeaconChain<T>,
) -> Result< ) -> Result<
@ -1614,7 +1617,7 @@ fn load_parent<T: BeaconChainTypes>(
.block_times_cache .block_times_cache
.read() .read()
.get_block_delays( .get_block_delays(
block.canonical_root(), block_root,
chain chain
.slot_clock .slot_clock
.start_of(block.slot()) .start_of(block.slot())

View File

@ -55,7 +55,9 @@ pub use self::errors::{BeaconChainError, BlockProductionError};
pub use self::historical_blocks::HistoricalBlockError; pub use self::historical_blocks::HistoricalBlockError;
pub use attestation_verification::Error as AttestationError; pub use attestation_verification::Error as AttestationError;
pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError}; pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError};
pub use block_verification::{BlockError, ExecutionPayloadError, GossipVerifiedBlock}; pub use block_verification::{
get_block_root, BlockError, ExecutionPayloadError, GossipVerifiedBlock,
};
pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock}; pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock};
pub use eth1_chain::{Eth1Chain, Eth1ChainBackend}; pub use eth1_chain::{Eth1Chain, Eth1ChainBackend};
pub use events::ServerSentEventHandler; pub use events::ServerSentEventHandler;

View File

@ -1453,12 +1453,13 @@ where
pub async fn process_block( pub async fn process_block(
&self, &self,
slot: Slot, slot: Slot,
block_root: Hash256,
block: SignedBeaconBlock<E>, block: SignedBeaconBlock<E>,
) -> Result<SignedBeaconBlockHash, BlockError<E>> { ) -> Result<SignedBeaconBlockHash, BlockError<E>> {
self.set_current_slot(slot); self.set_current_slot(slot);
let block_hash: SignedBeaconBlockHash = self let block_hash: SignedBeaconBlockHash = self
.chain .chain
.process_block(Arc::new(block), CountUnrealized::True) .process_block(block_root, Arc::new(block), CountUnrealized::True)
.await? .await?
.into(); .into();
self.chain.recompute_head_at_current_slot().await; self.chain.recompute_head_at_current_slot().await;
@ -1471,7 +1472,11 @@ where
) -> Result<SignedBeaconBlockHash, BlockError<E>> { ) -> Result<SignedBeaconBlockHash, BlockError<E>> {
let block_hash: SignedBeaconBlockHash = self let block_hash: SignedBeaconBlockHash = self
.chain .chain
.process_block(Arc::new(block), CountUnrealized::True) .process_block(
block.canonical_root(),
Arc::new(block),
CountUnrealized::True,
)
.await? .await?
.into(); .into();
self.chain.recompute_head_at_current_slot().await; self.chain.recompute_head_at_current_slot().await;
@ -1536,7 +1541,9 @@ where
) -> Result<(SignedBeaconBlockHash, SignedBeaconBlock<E>, BeaconState<E>), BlockError<E>> { ) -> Result<(SignedBeaconBlockHash, SignedBeaconBlock<E>, BeaconState<E>), BlockError<E>> {
self.set_current_slot(slot); self.set_current_slot(slot);
let (block, new_state) = self.make_block(state, slot).await; let (block, new_state) = self.make_block(state, slot).await;
let block_hash = self.process_block(slot, block.clone()).await?; let block_hash = self
.process_block(slot, block.canonical_root(), block.clone())
.await?;
Ok((block_hash, block, new_state)) Ok((block_hash, block, new_state))
} }

View File

@ -346,6 +346,7 @@ async fn assert_invalid_signature(
let process_res = harness let process_res = harness
.chain .chain
.process_block( .process_block(
snapshots[block_index].beacon_block.canonical_root(),
snapshots[block_index].beacon_block.clone(), snapshots[block_index].beacon_block.clone(),
CountUnrealized::True, CountUnrealized::True,
) )
@ -403,12 +404,14 @@ async fn invalid_signature_gossip_block() {
.await .await
.into_block_error() .into_block_error()
.expect("should import all blocks prior to the one being tested"); .expect("should import all blocks prior to the one being tested");
let signed_block = SignedBeaconBlock::from_block(block, junk_signature());
assert!( assert!(
matches!( matches!(
harness harness
.chain .chain
.process_block( .process_block(
Arc::new(SignedBeaconBlock::from_block(block, junk_signature())), signed_block.canonical_root(),
Arc::new(signed_block),
CountUnrealized::True CountUnrealized::True
) )
.await, .await,
@ -718,7 +721,11 @@ async fn block_gossip_verification() {
harness harness
.chain .chain
.process_block(gossip_verified, CountUnrealized::True) .process_block(
gossip_verified.block_root,
gossip_verified,
CountUnrealized::True,
)
.await .await
.expect("should import valid gossip verified block"); .expect("should import valid gossip verified block");
} }
@ -985,7 +992,11 @@ async fn verify_block_for_gossip_slashing_detection() {
.unwrap(); .unwrap();
harness harness
.chain .chain
.process_block(verified_block, CountUnrealized::True) .process_block(
verified_block.block_root,
verified_block,
CountUnrealized::True,
)
.await .await
.unwrap(); .unwrap();
unwrap_err( unwrap_err(
@ -1020,7 +1031,11 @@ async fn verify_block_for_gossip_doppelganger_detection() {
let attestations = verified_block.block.message().body().attestations().clone(); let attestations = verified_block.block.message().body().attestations().clone();
harness harness
.chain .chain
.process_block(verified_block, CountUnrealized::True) .process_block(
verified_block.block_root,
verified_block,
CountUnrealized::True,
)
.await .await
.unwrap(); .unwrap();
@ -1161,7 +1176,11 @@ async fn add_base_block_to_altair_chain() {
assert!(matches!( assert!(matches!(
harness harness
.chain .chain
.process_block(Arc::new(base_block.clone()), CountUnrealized::True) .process_block(
base_block.canonical_root(),
Arc::new(base_block.clone()),
CountUnrealized::True
)
.await .await
.err() .err()
.expect("should error when processing base block"), .expect("should error when processing base block"),
@ -1289,7 +1308,11 @@ async fn add_altair_block_to_base_chain() {
assert!(matches!( assert!(matches!(
harness harness
.chain .chain
.process_block(Arc::new(altair_block.clone()), CountUnrealized::True) .process_block(
altair_block.canonical_root(),
Arc::new(altair_block.clone()),
CountUnrealized::True
)
.await .await
.err() .err()
.expect("should error when processing altair block"), .expect("should error when processing altair block"),

View File

@ -281,7 +281,7 @@ impl InvalidPayloadRig {
} }
let root = self let root = self
.harness .harness
.process_block(slot, block.clone()) .process_block(slot, block.canonical_root(), block.clone())
.await .await
.unwrap(); .unwrap();
@ -320,7 +320,11 @@ impl InvalidPayloadRig {
set_new_payload(new_payload_response); set_new_payload(new_payload_response);
set_forkchoice_updated(forkchoice_response); set_forkchoice_updated(forkchoice_response);
match self.harness.process_block(slot, block).await { match self
.harness
.process_block(slot, block.canonical_root(), block)
.await
{
Err(error) if evaluate_error(&error) => (), Err(error) if evaluate_error(&error) => (),
Err(other) => { Err(other) => {
panic!("evaluate_error returned false with {:?}", other) panic!("evaluate_error returned false with {:?}", other)
@ -685,7 +689,11 @@ async fn invalidates_all_descendants() {
let fork_block_root = rig let fork_block_root = rig
.harness .harness
.chain .chain
.process_block(Arc::new(fork_block), CountUnrealized::True) .process_block(
fork_block.canonical_root(),
Arc::new(fork_block),
CountUnrealized::True,
)
.await .await
.unwrap(); .unwrap();
rig.recompute_head().await; rig.recompute_head().await;
@ -777,7 +785,11 @@ async fn switches_heads() {
let fork_block_root = rig let fork_block_root = rig
.harness .harness
.chain .chain
.process_block(Arc::new(fork_block), CountUnrealized::True) .process_block(
fork_block.canonical_root(),
Arc::new(fork_block),
CountUnrealized::True,
)
.await .await
.unwrap(); .unwrap();
rig.recompute_head().await; rig.recompute_head().await;
@ -1023,7 +1035,7 @@ async fn invalid_parent() {
// Ensure the block built atop an invalid payload is invalid for import. // Ensure the block built atop an invalid payload is invalid for import.
assert!(matches!( assert!(matches!(
rig.harness.chain.process_block(block.clone(), CountUnrealized::True).await, rig.harness.chain.process_block(block.canonical_root(), block.clone(), CountUnrealized::True).await,
Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root })
if invalid_root == parent_root if invalid_root == parent_root
)); ));
@ -1305,7 +1317,7 @@ async fn build_optimistic_chain(
for block in blocks { for block in blocks {
rig.harness rig.harness
.chain .chain
.process_block(block, CountUnrealized::True) .process_block(block.canonical_root(), block, CountUnrealized::True)
.await .await
.unwrap(); .unwrap();
} }
@ -1863,7 +1875,11 @@ async fn recover_from_invalid_head_by_importing_blocks() {
// Import the fork block, it should become the head. // Import the fork block, it should become the head.
rig.harness rig.harness
.chain .chain
.process_block(fork_block.clone(), CountUnrealized::True) .process_block(
fork_block.canonical_root(),
fork_block.clone(),
CountUnrealized::True,
)
.await .await
.unwrap(); .unwrap();
rig.recompute_head().await; rig.recompute_head().await;

View File

@ -2125,7 +2125,11 @@ async fn weak_subjectivity_sync() {
beacon_chain.slot_clock.set_slot(slot.as_u64()); beacon_chain.slot_clock.set_slot(slot.as_u64());
beacon_chain beacon_chain
.process_block(Arc::new(full_block), CountUnrealized::True) .process_block(
full_block.canonical_root(),
Arc::new(full_block),
CountUnrealized::True,
)
.await .await
.unwrap(); .unwrap();
beacon_chain.recompute_head_at_current_slot().await; beacon_chain.recompute_head_at_current_slot().await;
@ -2382,8 +2386,14 @@ async fn revert_minority_fork_on_resume() {
let (block, new_state) = harness1.make_block(state, slot).await; let (block, new_state) = harness1.make_block(state, slot).await;
harness1.process_block(slot, block.clone()).await.unwrap(); harness1
harness2.process_block(slot, block.clone()).await.unwrap(); .process_block(slot, block.canonical_root(), block.clone())
.await
.unwrap();
harness2
.process_block(slot, block.canonical_root(), block.clone())
.await
.unwrap();
state = new_state; state = new_state;
block_root = block.canonical_root(); block_root = block.canonical_root();
@ -2416,12 +2426,18 @@ async fn revert_minority_fork_on_resume() {
// Minority chain block (no attesters). // Minority chain block (no attesters).
let (block1, new_state1) = harness1.make_block(state1, slot).await; let (block1, new_state1) = harness1.make_block(state1, slot).await;
harness1.process_block(slot, block1).await.unwrap(); harness1
.process_block(slot, block1.canonical_root(), block1)
.await
.unwrap();
state1 = new_state1; state1 = new_state1;
// Majority chain block (all attesters). // Majority chain block (all attesters).
let (block2, new_state2) = harness2.make_block(state2, slot).await; let (block2, new_state2) = harness2.make_block(state2, slot).await;
harness2.process_block(slot, block2.clone()).await.unwrap(); harness2
.process_block(slot, block2.canonical_root(), block2.clone())
.await
.unwrap();
state2 = new_state2; state2 = new_state2;
block_root = block2.canonical_root(); block_root = block2.canonical_root();

View File

@ -685,6 +685,7 @@ async fn run_skip_slot_test(skip_slots: u64) {
harness_b harness_b
.chain .chain
.process_block( .process_block(
harness_a.chain.head_snapshot().beacon_block_root,
harness_a.chain.head_snapshot().beacon_block.clone(), harness_a.chain.head_snapshot().beacon_block.clone(),
CountUnrealized::True CountUnrealized::True
) )

View File

@ -1393,12 +1393,13 @@ impl<T: EthSpec> ExecutionLayer<T> {
pub async fn propose_blinded_beacon_block( pub async fn propose_blinded_beacon_block(
&self, &self,
block_root: Hash256,
block: &SignedBeaconBlock<T, BlindedPayload<T>>, block: &SignedBeaconBlock<T, BlindedPayload<T>>,
) -> Result<ExecutionPayload<T>, Error> { ) -> Result<ExecutionPayload<T>, Error> {
debug!( debug!(
self.log(), self.log(),
"Sending block to builder"; "Sending block to builder";
"root" => ?block.canonical_root(), "root" => ?block_root,
); );
if let Some(builder) = self.builder() { if let Some(builder) = self.builder() {
builder builder

View File

@ -1046,7 +1046,7 @@ pub fn serve<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>, chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>, network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| async move { log: Logger| async move {
publish_blocks::publish_block(block, chain, &network_tx, log) publish_blocks::publish_block(None, block, chain, &network_tx, log)
.await .await
.map(|()| warp::reply()) .map(|()| warp::reply())
}, },

View File

@ -9,13 +9,14 @@ use std::sync::Arc;
use tokio::sync::mpsc::UnboundedSender; use tokio::sync::mpsc::UnboundedSender;
use tree_hash::TreeHash; use tree_hash::TreeHash;
use types::{ use types::{
BlindedPayload, ExecPayload, ExecutionBlockHash, ExecutionPayload, FullPayload, BlindedPayload, ExecPayload, ExecutionBlockHash, ExecutionPayload, FullPayload, Hash256,
SignedBeaconBlock, SignedBeaconBlock,
}; };
use warp::Rejection; use warp::Rejection;
/// Handles a request from the HTTP API for full blocks. /// Handles a request from the HTTP API for full blocks.
pub async fn publish_block<T: BeaconChainTypes>( pub async fn publish_block<T: BeaconChainTypes>(
block_root: Option<Hash256>,
block: Arc<SignedBeaconBlock<T::EthSpec>>, block: Arc<SignedBeaconBlock<T::EthSpec>>,
chain: Arc<BeaconChain<T>>, chain: Arc<BeaconChain<T>>,
network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>, network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>,
@ -31,8 +32,10 @@ pub async fn publish_block<T: BeaconChainTypes>(
let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock);
metrics::observe_duration(&metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, delay); metrics::observe_duration(&metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, delay);
let block_root = block_root.unwrap_or_else(|| block.canonical_root());
match chain match chain
.process_block(block.clone(), CountUnrealized::True) .process_block(block_root, block.clone(), CountUnrealized::True)
.await .await
{ {
Ok(root) => { Ok(root) => {
@ -127,8 +130,16 @@ pub async fn publish_blinded_block<T: BeaconChainTypes>(
network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>, network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger, log: Logger,
) -> Result<(), Rejection> { ) -> Result<(), Rejection> {
let full_block = reconstruct_block(chain.clone(), block, log.clone()).await?; let block_root = block.canonical_root();
publish_block::<T>(Arc::new(full_block), chain, network_tx, log).await let full_block = reconstruct_block(chain.clone(), block_root, block, log.clone()).await?;
publish_block::<T>(
Some(block_root),
Arc::new(full_block),
chain,
network_tx,
log,
)
.await
} }
/// Deconstruct the given blinded block, and construct a full block. This attempts to use the /// Deconstruct the given blinded block, and construct a full block. This attempts to use the
@ -136,6 +147,7 @@ pub async fn publish_blinded_block<T: BeaconChainTypes>(
/// the full payload. /// the full payload.
async fn reconstruct_block<T: BeaconChainTypes>( async fn reconstruct_block<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>, chain: Arc<BeaconChain<T>>,
block_root: Hash256,
block: SignedBeaconBlock<T::EthSpec, BlindedPayload<T::EthSpec>>, block: SignedBeaconBlock<T::EthSpec, BlindedPayload<T::EthSpec>>,
log: Logger, log: Logger,
) -> Result<SignedBeaconBlock<T::EthSpec, FullPayload<T::EthSpec>>, Rejection> { ) -> Result<SignedBeaconBlock<T::EthSpec, FullPayload<T::EthSpec>>, Rejection> {
@ -155,12 +167,15 @@ async fn reconstruct_block<T: BeaconChainTypes>(
cached_payload cached_payload
// Otherwise, this means we are attempting a blind block proposal. // Otherwise, this means we are attempting a blind block proposal.
} else { } else {
let full_payload = el.propose_blinded_beacon_block(&block).await.map_err(|e| { let full_payload = el
warp_utils::reject::custom_server_error(format!( .propose_blinded_beacon_block(block_root, &block)
"Blind block proposal failed: {:?}", .await
e .map_err(|e| {
)) warp_utils::reject::custom_server_error(format!(
})?; "Blind block proposal failed: {:?}",
e
))
})?;
info!(log, "Successfully published a block to the builder network"; "block_hash" => ?full_payload.block_hash); info!(log, "Successfully published a block to the builder network"; "block_hash" => ?full_payload.block_hash);
full_payload full_payload
}; };

View File

@ -67,7 +67,10 @@ pub async fn fork_choice_before_proposal() {
let state_a = harness.get_current_state(); let state_a = harness.get_current_state();
let (block_b, state_b) = harness.make_block(state_a.clone(), slot_b).await; let (block_b, state_b) = harness.make_block(state_a.clone(), slot_b).await;
let block_root_b = harness.process_block(slot_b, block_b).await.unwrap(); let block_root_b = harness
.process_block(slot_b, block_b.canonical_root(), block_b)
.await
.unwrap();
// Create attestations to B but keep them in reserve until after C has been processed. // Create attestations to B but keep them in reserve until after C has been processed.
let attestations_b = harness.make_attestations( let attestations_b = harness.make_attestations(
@ -80,7 +83,7 @@ pub async fn fork_choice_before_proposal() {
let (block_c, state_c) = harness.make_block(state_a, slot_c).await; let (block_c, state_c) = harness.make_block(state_a, slot_c).await;
let block_root_c = harness let block_root_c = harness
.process_block(slot_c, block_c.clone()) .process_block(slot_c, block_c.canonical_root(), block_c.clone())
.await .await
.unwrap(); .unwrap();

View File

@ -489,6 +489,7 @@ impl<T: BeaconChainTypes> WorkEvent<T> {
/// Create a new `Work` event for some block, where the result from computation (if any) is /// Create a new `Work` event for some block, where the result from computation (if any) is
/// sent to the other side of `result_tx`. /// sent to the other side of `result_tx`.
pub fn rpc_beacon_block( pub fn rpc_beacon_block(
block_root: Hash256,
block: Arc<SignedBeaconBlock<T::EthSpec>>, block: Arc<SignedBeaconBlock<T::EthSpec>>,
seen_timestamp: Duration, seen_timestamp: Duration,
process_type: BlockProcessType, process_type: BlockProcessType,
@ -496,6 +497,7 @@ impl<T: BeaconChainTypes> WorkEvent<T> {
Self { Self {
drop_during_sync: false, drop_during_sync: false,
work: Work::RpcBlock { work: Work::RpcBlock {
block_root,
block, block,
seen_timestamp, seen_timestamp,
process_type, process_type,
@ -577,6 +579,7 @@ impl<T: BeaconChainTypes> std::convert::From<ReadyWork<T>> for WorkEvent<T> {
}, },
}, },
ReadyWork::RpcBlock(QueuedRpcBlock { ReadyWork::RpcBlock(QueuedRpcBlock {
block_root,
block, block,
seen_timestamp, seen_timestamp,
process_type, process_type,
@ -584,6 +587,7 @@ impl<T: BeaconChainTypes> std::convert::From<ReadyWork<T>> for WorkEvent<T> {
}) => Self { }) => Self {
drop_during_sync: false, drop_during_sync: false,
work: Work::RpcBlock { work: Work::RpcBlock {
block_root,
block, block,
seen_timestamp, seen_timestamp,
process_type, process_type,
@ -705,6 +709,7 @@ pub enum Work<T: BeaconChainTypes> {
seen_timestamp: Duration, seen_timestamp: Duration,
}, },
RpcBlock { RpcBlock {
block_root: Hash256,
block: Arc<SignedBeaconBlock<T::EthSpec>>, block: Arc<SignedBeaconBlock<T::EthSpec>>,
seen_timestamp: Duration, seen_timestamp: Duration,
process_type: BlockProcessType, process_type: BlockProcessType,
@ -1532,11 +1537,13 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
* Verification for beacon blocks received during syncing via RPC. * Verification for beacon blocks received during syncing via RPC.
*/ */
Work::RpcBlock { Work::RpcBlock {
block_root,
block, block,
seen_timestamp, seen_timestamp,
process_type, process_type,
should_process, should_process,
} => task_spawner.spawn_async(worker.process_rpc_block( } => task_spawner.spawn_async(worker.process_rpc_block(
block_root,
block, block,
seen_timestamp, seen_timestamp,
process_type, process_type,

View File

@ -242,6 +242,7 @@ impl TestRig {
pub fn enqueue_rpc_block(&self) { pub fn enqueue_rpc_block(&self) {
let event = WorkEvent::rpc_beacon_block( let event = WorkEvent::rpc_beacon_block(
self.next_block.canonical_root(),
self.next_block.clone(), self.next_block.clone(),
std::time::Duration::default(), std::time::Duration::default(),
BlockProcessType::ParentLookup { BlockProcessType::ParentLookup {
@ -253,6 +254,7 @@ impl TestRig {
pub fn enqueue_single_lookup_rpc_block(&self) { pub fn enqueue_single_lookup_rpc_block(&self) {
let event = WorkEvent::rpc_beacon_block( let event = WorkEvent::rpc_beacon_block(
self.next_block.canonical_root(),
self.next_block.clone(), self.next_block.clone(),
std::time::Duration::default(), std::time::Duration::default(),
BlockProcessType::SingleBlock { id: 1 }, BlockProcessType::SingleBlock { id: 1 },

View File

@ -109,6 +109,7 @@ pub struct QueuedGossipBlock<T: BeaconChainTypes> {
/// A block that arrived for processing when the same block was being imported over gossip. /// A block that arrived for processing when the same block was being imported over gossip.
/// It is queued for later import. /// It is queued for later import.
pub struct QueuedRpcBlock<T: EthSpec> { pub struct QueuedRpcBlock<T: EthSpec> {
pub block_root: Hash256,
pub block: Arc<SignedBeaconBlock<T>>, pub block: Arc<SignedBeaconBlock<T>>,
pub process_type: BlockProcessType, pub process_type: BlockProcessType,
pub seen_timestamp: Duration, pub seen_timestamp: Duration,

View File

@ -713,16 +713,28 @@ impl<T: BeaconChainTypes> Worker<T> {
block_delay, block_delay,
); );
let verification_result = self
.chain
.clone()
.verify_block_for_gossip(block.clone())
.await;
let block_root = if let Ok(verified_block) = &verification_result {
verified_block.block_root
} else {
block.canonical_root()
};
// Write the time the block was observed into delay cache. // Write the time the block was observed into delay cache.
self.chain.block_times_cache.write().set_time_observed( self.chain.block_times_cache.write().set_time_observed(
block.canonical_root(), block_root,
block.slot(), block.slot(),
seen_duration, seen_duration,
Some(peer_id.to_string()), Some(peer_id.to_string()),
Some(peer_client.to_string()), Some(peer_client.to_string()),
); );
let verified_block = match self.chain.clone().verify_block_for_gossip(block).await { let verified_block = match verification_result {
Ok(verified_block) => { Ok(verified_block) => {
if block_delay >= self.chain.slot_clock.unagg_attestation_production_delay() { if block_delay >= self.chain.slot_clock.unagg_attestation_production_delay() {
metrics::inc_counter(&metrics::BEACON_BLOCK_GOSSIP_ARRIVED_LATE_TOTAL); metrics::inc_counter(&metrics::BEACON_BLOCK_GOSSIP_ARRIVED_LATE_TOTAL);
@ -762,9 +774,9 @@ impl<T: BeaconChainTypes> Worker<T> {
debug!( debug!(
self.log, self.log,
"Unknown parent for gossip block"; "Unknown parent for gossip block";
"root" => ?block.canonical_root() "root" => ?block_root
); );
self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block)); self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block, block_root));
return None; return None;
} }
Err(e @ BlockError::BeaconChainError(_)) => { Err(e @ BlockError::BeaconChainError(_)) => {
@ -918,10 +930,11 @@ impl<T: BeaconChainTypes> Worker<T> {
_seen_duration: Duration, _seen_duration: Duration,
) { ) {
let block: Arc<_> = verified_block.block.clone(); let block: Arc<_> = verified_block.block.clone();
let block_root = verified_block.block_root;
match self match self
.chain .chain
.process_block(verified_block, CountUnrealized::True) .process_block(block_root, verified_block, CountUnrealized::True)
.await .await
{ {
Ok(block_root) => { Ok(block_root) => {
@ -956,7 +969,7 @@ impl<T: BeaconChainTypes> Worker<T> {
"Block with unknown parent attempted to be processed"; "Block with unknown parent attempted to be processed";
"peer_id" => %peer_id "peer_id" => %peer_id
); );
self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block)); self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block, block_root));
} }
Err(ref e @ BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { Err(ref e @ BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => {
debug!( debug!(
@ -970,7 +983,7 @@ impl<T: BeaconChainTypes> Worker<T> {
self.log, self.log,
"Invalid gossip beacon block"; "Invalid gossip beacon block";
"outcome" => ?other, "outcome" => ?other,
"block root" => ?block.canonical_root(), "block root" => ?block_root,
"block slot" => block.slot() "block slot" => block.slot()
); );
self.gossip_penalize_peer( self.gossip_penalize_peer(

View File

@ -38,8 +38,10 @@ struct ChainSegmentFailed {
impl<T: BeaconChainTypes> Worker<T> { impl<T: BeaconChainTypes> Worker<T> {
/// Attempt to process a block received from a direct RPC request. /// Attempt to process a block received from a direct RPC request.
#[allow(clippy::too_many_arguments)]
pub async fn process_rpc_block( pub async fn process_rpc_block(
self, self,
block_root: Hash256,
block: Arc<SignedBeaconBlock<T::EthSpec>>, block: Arc<SignedBeaconBlock<T::EthSpec>>,
seen_timestamp: Duration, seen_timestamp: Duration,
process_type: BlockProcessType, process_type: BlockProcessType,
@ -56,17 +58,18 @@ impl<T: BeaconChainTypes> Worker<T> {
return; return;
} }
// Check if the block is already being imported through another source // Check if the block is already being imported through another source
let handle = match duplicate_cache.check_and_insert(block.canonical_root()) { let handle = match duplicate_cache.check_and_insert(block_root) {
Some(handle) => handle, Some(handle) => handle,
None => { None => {
debug!( debug!(
self.log, self.log,
"Gossip block is being processed"; "Gossip block is being processed";
"action" => "sending rpc block to reprocessing queue", "action" => "sending rpc block to reprocessing queue",
"block_root" => %block.canonical_root(), "block_root" => %block_root,
); );
// Send message to work reprocess queue to retry the block // Send message to work reprocess queue to retry the block
let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock { let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock {
block_root,
block: block.clone(), block: block.clone(),
process_type, process_type,
seen_timestamp, seen_timestamp,
@ -74,13 +77,16 @@ impl<T: BeaconChainTypes> Worker<T> {
}); });
if reprocess_tx.try_send(reprocess_msg).is_err() { if reprocess_tx.try_send(reprocess_msg).is_err() {
error!(self.log, "Failed to inform block import"; "source" => "rpc", "block_root" => %block.canonical_root()) error!(self.log, "Failed to inform block import"; "source" => "rpc", "block_root" => %block_root)
}; };
return; return;
} }
}; };
let slot = block.slot(); let slot = block.slot();
let result = self.chain.process_block(block, CountUnrealized::True).await; let result = self
.chain
.process_block(block_root, block, CountUnrealized::True)
.await;
metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL);

View File

@ -30,6 +30,8 @@ mod single_block_lookup;
#[cfg(test)] #[cfg(test)]
mod tests; mod tests;
pub type RootBlockTuple<T> = (Hash256, Arc<SignedBeaconBlock<T>>);
const FAILED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60; const FAILED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60;
const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3; const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3;
@ -101,11 +103,11 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
/// called in order to find the block's parent. /// called in order to find the block's parent.
pub fn search_parent( pub fn search_parent(
&mut self, &mut self,
block_root: Hash256,
block: Arc<SignedBeaconBlock<T::EthSpec>>, block: Arc<SignedBeaconBlock<T::EthSpec>>,
peer_id: PeerId, peer_id: PeerId,
cx: &mut SyncNetworkContext<T>, cx: &mut SyncNetworkContext<T>,
) { ) {
let block_root = block.canonical_root();
let parent_root = block.parent_root(); let parent_root = block.parent_root();
// If this block or it's parent is part of a known failed chain, ignore it. // If this block or it's parent is part of a known failed chain, ignore it.
if self.failed_chains.contains(&parent_root) || self.failed_chains.contains(&block_root) { if self.failed_chains.contains(&parent_root) || self.failed_chains.contains(&block_root) {
@ -125,7 +127,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
return; return;
} }
let parent_lookup = ParentLookup::new(block, peer_id); let parent_lookup = ParentLookup::new(block_root, block, peer_id);
self.request_parent(parent_lookup, cx); self.request_parent(parent_lookup, cx);
} }
@ -153,10 +155,11 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
}; };
match request.get_mut().verify_block(block) { match request.get_mut().verify_block(block) {
Ok(Some(block)) => { Ok(Some((block_root, block))) => {
// This is the correct block, send it for processing // This is the correct block, send it for processing
if self if self
.send_block_for_processing( .send_block_for_processing(
block_root,
block, block,
seen_timestamp, seen_timestamp,
BlockProcessType::SingleBlock { id }, BlockProcessType::SingleBlock { id },
@ -217,11 +220,12 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
}; };
match parent_lookup.verify_block(block, &mut self.failed_chains) { match parent_lookup.verify_block(block, &mut self.failed_chains) {
Ok(Some(block)) => { Ok(Some((block_root, block))) => {
// Block is correct, send to the beacon processor. // Block is correct, send to the beacon processor.
let chain_hash = parent_lookup.chain_hash(); let chain_hash = parent_lookup.chain_hash();
if self if self
.send_block_for_processing( .send_block_for_processing(
block_root,
block, block,
seen_timestamp, seen_timestamp,
BlockProcessType::ParentLookup { chain_hash }, BlockProcessType::ParentLookup { chain_hash },
@ -420,7 +424,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
error!(self.log, "Beacon chain error processing single block"; "block_root" => %root, "error" => ?e); error!(self.log, "Beacon chain error processing single block"; "block_root" => %root, "error" => ?e);
} }
BlockError::ParentUnknown(block) => { BlockError::ParentUnknown(block) => {
self.search_parent(block, peer_id, cx); self.search_parent(root, block, peer_id, cx);
} }
ref e @ BlockError::ExecutionPayloadError(ref epe) if !epe.penalize_peer() => { ref e @ BlockError::ExecutionPayloadError(ref epe) if !epe.penalize_peer() => {
// These errors indicate that the execution layer is offline // These errors indicate that the execution layer is offline
@ -625,6 +629,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
fn send_block_for_processing( fn send_block_for_processing(
&mut self, &mut self,
block_root: Hash256,
block: Arc<SignedBeaconBlock<T::EthSpec>>, block: Arc<SignedBeaconBlock<T::EthSpec>>,
duration: Duration, duration: Duration,
process_type: BlockProcessType, process_type: BlockProcessType,
@ -632,8 +637,8 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
) -> Result<(), ()> { ) -> Result<(), ()> {
match cx.processor_channel_if_enabled() { match cx.processor_channel_if_enabled() {
Some(beacon_processor_send) => { Some(beacon_processor_send) => {
trace!(self.log, "Sending block for processing"; "block" => %block.canonical_root(), "process" => ?process_type); trace!(self.log, "Sending block for processing"; "block" => ?block_root, "process" => ?process_type);
let event = WorkEvent::rpc_beacon_block(block, duration, process_type); let event = WorkEvent::rpc_beacon_block(block_root, block, duration, process_type);
if let Err(e) = beacon_processor_send.try_send(event) { if let Err(e) = beacon_processor_send.try_send(event) {
error!( error!(
self.log, self.log,
@ -646,7 +651,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
} }
} }
None => { None => {
trace!(self.log, "Dropping block ready for processing. Beacon processor not available"; "block" => %block.canonical_root()); trace!(self.log, "Dropping block ready for processing. Beacon processor not available"; "block" => %block_root);
Err(()) Err(())
} }
} }

View File

@ -1,3 +1,4 @@
use super::RootBlockTuple;
use beacon_chain::BeaconChainTypes; use beacon_chain::BeaconChainTypes;
use lighthouse_network::PeerId; use lighthouse_network::PeerId;
use std::sync::Arc; use std::sync::Arc;
@ -58,11 +59,15 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
.any(|d_block| d_block.as_ref() == block) .any(|d_block| d_block.as_ref() == block)
} }
pub fn new(block: Arc<SignedBeaconBlock<T::EthSpec>>, peer_id: PeerId) -> Self { pub fn new(
block_root: Hash256,
block: Arc<SignedBeaconBlock<T::EthSpec>>,
peer_id: PeerId,
) -> Self {
let current_parent_request = SingleBlockRequest::new(block.parent_root(), peer_id); let current_parent_request = SingleBlockRequest::new(block.parent_root(), peer_id);
Self { Self {
chain_hash: block.canonical_root(), chain_hash: block_root,
downloaded_blocks: vec![block], downloaded_blocks: vec![block],
current_parent_request, current_parent_request,
current_parent_request_id: None, current_parent_request_id: None,
@ -130,12 +135,15 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
&mut self, &mut self,
block: Option<Arc<SignedBeaconBlock<T::EthSpec>>>, block: Option<Arc<SignedBeaconBlock<T::EthSpec>>>,
failed_chains: &mut lru_cache::LRUTimeCache<Hash256>, failed_chains: &mut lru_cache::LRUTimeCache<Hash256>,
) -> Result<Option<Arc<SignedBeaconBlock<T::EthSpec>>>, VerifyError> { ) -> Result<Option<RootBlockTuple<T::EthSpec>>, VerifyError> {
let block = self.current_parent_request.verify_block(block)?; let root_and_block = self.current_parent_request.verify_block(block)?;
// check if the parent of this block isn't in the failed cache. If it is, this chain should // check if the parent of this block isn't in the failed cache. If it is, this chain should
// be dropped and the peer downscored. // be dropped and the peer downscored.
if let Some(parent_root) = block.as_ref().map(|block| block.parent_root()) { if let Some(parent_root) = root_and_block
.as_ref()
.map(|(_, block)| block.parent_root())
{
if failed_chains.contains(&parent_root) { if failed_chains.contains(&parent_root) {
self.current_parent_request.register_failure_downloading(); self.current_parent_request.register_failure_downloading();
self.current_parent_request_id = None; self.current_parent_request_id = None;
@ -143,7 +151,7 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
} }
} }
Ok(block) Ok(root_and_block)
} }
pub fn get_processing_peer(&self, chain_hash: Hash256) -> Option<PeerId> { pub fn get_processing_peer(&self, chain_hash: Hash256) -> Option<PeerId> {

View File

@ -1,6 +1,8 @@
use std::collections::HashSet; use std::collections::HashSet;
use std::sync::Arc; use std::sync::Arc;
use super::RootBlockTuple;
use beacon_chain::get_block_root;
use lighthouse_network::{rpc::BlocksByRootRequest, PeerId}; use lighthouse_network::{rpc::BlocksByRootRequest, PeerId};
use rand::seq::IteratorRandom; use rand::seq::IteratorRandom;
use ssz_types::VariableList; use ssz_types::VariableList;
@ -104,7 +106,7 @@ impl<const MAX_ATTEMPTS: u8> SingleBlockRequest<MAX_ATTEMPTS> {
pub fn verify_block<T: EthSpec>( pub fn verify_block<T: EthSpec>(
&mut self, &mut self,
block: Option<Arc<SignedBeaconBlock<T>>>, block: Option<Arc<SignedBeaconBlock<T>>>,
) -> Result<Option<Arc<SignedBeaconBlock<T>>>, VerifyError> { ) -> Result<Option<RootBlockTuple<T>>, VerifyError> {
match self.state { match self.state {
State::AwaitingDownload => { State::AwaitingDownload => {
self.register_failure_downloading(); self.register_failure_downloading();
@ -112,7 +114,10 @@ impl<const MAX_ATTEMPTS: u8> SingleBlockRequest<MAX_ATTEMPTS> {
} }
State::Downloading { peer_id } => match block { State::Downloading { peer_id } => match block {
Some(block) => { Some(block) => {
if block.canonical_root() != self.hash { // Compute the block root using this specific function so that we can get timing
// metrics.
let block_root = get_block_root(&block);
if block_root != self.hash {
// return an error and drop the block // return an error and drop the block
// NOTE: we take this is as a download failure to prevent counting the // NOTE: we take this is as a download failure to prevent counting the
// attempt as a chain failure, but simply a peer failure. // attempt as a chain failure, but simply a peer failure.
@ -121,7 +126,7 @@ impl<const MAX_ATTEMPTS: u8> SingleBlockRequest<MAX_ATTEMPTS> {
} else { } else {
// Return the block for processing. // Return the block for processing.
self.state = State::Processing { peer_id }; self.state = State::Processing { peer_id };
Ok(Some(block)) Ok(Some((block_root, block)))
} }
} }
None => { None => {

View File

@ -272,7 +272,7 @@ fn test_parent_lookup_happy_path() {
let peer_id = PeerId::random(); let peer_id = PeerId::random();
// Trigger the request // Trigger the request
bl.search_parent(Arc::new(block), peer_id, &mut cx); bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx);
let id = rig.expect_parent_request(); let id = rig.expect_parent_request();
// Peer sends the right block, it should be sent for processing. Peer should not be penalized. // Peer sends the right block, it should be sent for processing. Peer should not be penalized.
@ -300,7 +300,7 @@ fn test_parent_lookup_wrong_response() {
let peer_id = PeerId::random(); let peer_id = PeerId::random();
// Trigger the request // Trigger the request
bl.search_parent(Arc::new(block), peer_id, &mut cx); bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx);
let id1 = rig.expect_parent_request(); let id1 = rig.expect_parent_request();
// Peer sends the wrong block, peer should be penalized and the block re-requested. // Peer sends the wrong block, peer should be penalized and the block re-requested.
@ -337,7 +337,7 @@ fn test_parent_lookup_empty_response() {
let peer_id = PeerId::random(); let peer_id = PeerId::random();
// Trigger the request // Trigger the request
bl.search_parent(Arc::new(block), peer_id, &mut cx); bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx);
let id1 = rig.expect_parent_request(); let id1 = rig.expect_parent_request();
// Peer sends an empty response, peer should be penalized and the block re-requested. // Peer sends an empty response, peer should be penalized and the block re-requested.
@ -369,7 +369,7 @@ fn test_parent_lookup_rpc_failure() {
let peer_id = PeerId::random(); let peer_id = PeerId::random();
// Trigger the request // Trigger the request
bl.search_parent(Arc::new(block), peer_id, &mut cx); bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx);
let id1 = rig.expect_parent_request(); let id1 = rig.expect_parent_request();
// The request fails. It should be tried again. // The request fails. It should be tried again.
@ -396,10 +396,11 @@ fn test_parent_lookup_too_many_attempts() {
let parent = rig.rand_block(); let parent = rig.rand_block();
let block = rig.block_with_parent(parent.canonical_root()); let block = rig.block_with_parent(parent.canonical_root());
let chain_hash = block.canonical_root();
let peer_id = PeerId::random(); let peer_id = PeerId::random();
// Trigger the request // Trigger the request
bl.search_parent(Arc::new(block), peer_id, &mut cx); bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx);
for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE { for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE {
let id = rig.expect_parent_request(); let id = rig.expect_parent_request();
match i % 2 { match i % 2 {
@ -435,7 +436,7 @@ fn test_parent_lookup_too_many_download_attempts_no_blacklist() {
let peer_id = PeerId::random(); let peer_id = PeerId::random();
// Trigger the request // Trigger the request
bl.search_parent(Arc::new(block), peer_id, &mut cx); bl.search_parent(block_hash, Arc::new(block), peer_id, &mut cx);
for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE { for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE {
assert!(!bl.failed_chains.contains(&block_hash)); assert!(!bl.failed_chains.contains(&block_hash));
let id = rig.expect_parent_request(); let id = rig.expect_parent_request();
@ -469,7 +470,7 @@ fn test_parent_lookup_too_many_processing_attempts_must_blacklist() {
let peer_id = PeerId::random(); let peer_id = PeerId::random();
// Trigger the request // Trigger the request
bl.search_parent(Arc::new(block), peer_id, &mut cx); bl.search_parent(block_hash, Arc::new(block), peer_id, &mut cx);
// Fail downloading the block // Fail downloading the block
for _ in 0..(parent_lookup::PARENT_FAIL_TOLERANCE - PROCESSING_FAILURES) { for _ in 0..(parent_lookup::PARENT_FAIL_TOLERANCE - PROCESSING_FAILURES) {
@ -510,7 +511,7 @@ fn test_parent_lookup_too_deep() {
let peer_id = PeerId::random(); let peer_id = PeerId::random();
let trigger_block = blocks.pop().unwrap(); let trigger_block = blocks.pop().unwrap();
let chain_hash = trigger_block.canonical_root(); let chain_hash = trigger_block.canonical_root();
bl.search_parent(Arc::new(trigger_block), peer_id, &mut cx); bl.search_parent(chain_hash, Arc::new(trigger_block), peer_id, &mut cx);
for block in blocks.into_iter().rev() { for block in blocks.into_iter().rev() {
let id = rig.expect_parent_request(); let id = rig.expect_parent_request();
@ -537,7 +538,12 @@ fn test_parent_lookup_disconnection() {
let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
let peer_id = PeerId::random(); let peer_id = PeerId::random();
let trigger_block = rig.rand_block(); let trigger_block = rig.rand_block();
bl.search_parent(Arc::new(trigger_block), peer_id, &mut cx); bl.search_parent(
trigger_block.canonical_root(),
Arc::new(trigger_block),
peer_id,
&mut cx,
);
bl.peer_disconnected(&peer_id, &mut cx); bl.peer_disconnected(&peer_id, &mut cx);
assert!(bl.parent_queue.is_empty()); assert!(bl.parent_queue.is_empty());
} }
@ -581,7 +587,7 @@ fn test_parent_lookup_ignored_response() {
let peer_id = PeerId::random(); let peer_id = PeerId::random();
// Trigger the request // Trigger the request
bl.search_parent(Arc::new(block), peer_id, &mut cx); bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx);
let id = rig.expect_parent_request(); let id = rig.expect_parent_request();
// Peer sends the right block, it should be sent for processing. Peer should not be penalized. // Peer sends the right block, it should be sent for processing. Peer should not be penalized.

View File

@ -94,7 +94,7 @@ pub enum SyncMessage<T: EthSpec> {
}, },
/// A block with an unknown parent has been received. /// A block with an unknown parent has been received.
UnknownBlock(PeerId, Arc<SignedBeaconBlock<T>>), UnknownBlock(PeerId, Arc<SignedBeaconBlock<T>>, Hash256),
/// A peer has sent an object that references a block that is unknown. This triggers the /// A peer has sent an object that references a block that is unknown. This triggers the
/// manager to attempt to find the block matching the unknown hash. /// manager to attempt to find the block matching the unknown hash.
@ -503,7 +503,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
} => { } => {
self.rpc_block_received(request_id, peer_id, beacon_block, seen_timestamp); self.rpc_block_received(request_id, peer_id, beacon_block, seen_timestamp);
} }
SyncMessage::UnknownBlock(peer_id, block) => { SyncMessage::UnknownBlock(peer_id, block, block_root) => {
// If we are not synced or within SLOT_IMPORT_TOLERANCE of the block, ignore // If we are not synced or within SLOT_IMPORT_TOLERANCE of the block, ignore
if !self.network_globals.sync_state.read().is_synced() { if !self.network_globals.sync_state.read().is_synced() {
let head_slot = self.chain.canonical_head.cached_head().head_slot(); let head_slot = self.chain.canonical_head.cached_head().head_slot();
@ -523,7 +523,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
&& self.network.is_execution_engine_online() && self.network.is_execution_engine_online()
{ {
self.block_lookups self.block_lookups
.search_parent(block, peer_id, &mut self.network); .search_parent(block_root, block, peer_id, &mut self.network);
} }
} }
SyncMessage::UnknownBlockHash(peer_id, block_hash) => { SyncMessage::UnknownBlockHash(peer_id, block_hash) => {

View File

@ -331,11 +331,11 @@ impl<E: EthSpec> Tester<E> {
pub fn process_block(&self, block: SignedBeaconBlock<E>, valid: bool) -> Result<(), Error> { pub fn process_block(&self, block: SignedBeaconBlock<E>, valid: bool) -> Result<(), Error> {
let block_root = block.canonical_root(); let block_root = block.canonical_root();
let block = Arc::new(block); let block = Arc::new(block);
let result = self.block_on_dangerous( let result = self.block_on_dangerous(self.harness.chain.process_block(
self.harness block_root,
.chain block.clone(),
.process_block(block.clone(), CountUnrealized::False), CountUnrealized::False,
)?; ))?;
if result.is_ok() != valid { if result.is_ok() != valid {
return Err(Error::DidntFail(format!( return Err(Error::DidntFail(format!(
"block with root {} was valid={} whilst test expects valid={}. result: {:?}", "block with root {} was valid={} whilst test expects valid={}. result: {:?}",