Deduplicate block root computation (#3590)
## Issue Addressed NA ## Proposed Changes This PR removes duplicated block root computation. Computing the `SignedBeaconBlock::canonical_root` has become more expensive since the merge as we need to compute the merkle root of each transaction inside an `ExecutionPayload`. Computing the root for [a mainnet block](https://beaconcha.in/slot/4704236) is taking ~10ms on my i7-8700K CPU @ 3.70GHz (no sha extensions). Given that our median seen-to-imported time for blocks is presently 300-400ms, removing a few duplicated block roots (~30ms) could represent an easy 10% improvement. When we consider that the seen-to-imported times include operations *after* the block has been placed in the early attester cache, we could expect the 30ms to be more significant WRT our seen-to-attestable times. ## Additional Info NA
This commit is contained in:
parent
76ba0a1aaf
commit
fa6ad1a11a
@ -2220,7 +2220,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
}
|
||||
}
|
||||
|
||||
match check_block_relevancy(&block, Some(block_root), self) {
|
||||
match check_block_relevancy(&block, block_root, self) {
|
||||
// If the block is relevant, add it to the filtered chain segment.
|
||||
Ok(_) => filtered_chain_segment.push((block_root, block)),
|
||||
// If the block is already known, simply ignore this block.
|
||||
@ -2344,7 +2344,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
// Import the blocks into the chain.
|
||||
for signature_verified_block in signature_verified_blocks {
|
||||
match self
|
||||
.process_block(signature_verified_block, count_unrealized)
|
||||
.process_block(
|
||||
signature_verified_block.block_root(),
|
||||
signature_verified_block,
|
||||
count_unrealized,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(_) => imported_blocks += 1,
|
||||
@ -2429,6 +2433,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
/// verification.
|
||||
pub async fn process_block<B: IntoExecutionPendingBlock<T>>(
|
||||
self: &Arc<Self>,
|
||||
block_root: Hash256,
|
||||
unverified_block: B,
|
||||
count_unrealized: CountUnrealized,
|
||||
) -> Result<Hash256, BlockError<T::EthSpec>> {
|
||||
@ -2444,7 +2449,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
// A small closure to group the verification and import errors.
|
||||
let chain = self.clone();
|
||||
let import_block = async move {
|
||||
let execution_pending = unverified_block.into_execution_pending_block(&chain)?;
|
||||
let execution_pending =
|
||||
unverified_block.into_execution_pending_block(block_root, &chain)?;
|
||||
chain
|
||||
.import_execution_pending_block(execution_pending, count_unrealized)
|
||||
.await
|
||||
|
@ -529,7 +529,7 @@ pub fn signature_verify_chain_segment<T: BeaconChainTypes>(
|
||||
}
|
||||
|
||||
let (first_root, first_block) = chain_segment.remove(0);
|
||||
let (mut parent, first_block) = load_parent(first_block, chain)?;
|
||||
let (mut parent, first_block) = load_parent(first_root, first_block, chain)?;
|
||||
let slot = first_block.slot();
|
||||
chain_segment.insert(0, (first_root, first_block));
|
||||
|
||||
@ -622,9 +622,10 @@ pub struct ExecutionPendingBlock<T: BeaconChainTypes> {
|
||||
pub trait IntoExecutionPendingBlock<T: BeaconChainTypes>: Sized {
|
||||
fn into_execution_pending_block(
|
||||
self,
|
||||
block_root: Hash256,
|
||||
chain: &Arc<BeaconChain<T>>,
|
||||
) -> Result<ExecutionPendingBlock<T>, BlockError<T::EthSpec>> {
|
||||
self.into_execution_pending_block_slashable(chain)
|
||||
self.into_execution_pending_block_slashable(block_root, chain)
|
||||
.map(|execution_pending| {
|
||||
// Supply valid block to slasher.
|
||||
if let Some(slasher) = chain.slasher.as_ref() {
|
||||
@ -638,6 +639,7 @@ pub trait IntoExecutionPendingBlock<T: BeaconChainTypes>: Sized {
|
||||
/// Convert the block to fully-verified form while producing data to aid checking slashability.
|
||||
fn into_execution_pending_block_slashable(
|
||||
self,
|
||||
block_root: Hash256,
|
||||
chain: &Arc<BeaconChain<T>>,
|
||||
) -> Result<ExecutionPendingBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>>;
|
||||
|
||||
@ -781,7 +783,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
|
||||
} else {
|
||||
// The proposer index was *not* cached and we must load the parent in order to determine
|
||||
// the proposer index.
|
||||
let (mut parent, block) = load_parent(block, chain)?;
|
||||
let (mut parent, block) = load_parent(block_root, block, chain)?;
|
||||
|
||||
debug!(
|
||||
chain.log,
|
||||
@ -877,11 +879,12 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for GossipVerifiedBlock<T
|
||||
/// Completes verification of the wrapped `block`.
|
||||
fn into_execution_pending_block_slashable(
|
||||
self,
|
||||
block_root: Hash256,
|
||||
chain: &Arc<BeaconChain<T>>,
|
||||
) -> Result<ExecutionPendingBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>> {
|
||||
let execution_pending =
|
||||
SignatureVerifiedBlock::from_gossip_verified_block_check_slashable(self, chain)?;
|
||||
execution_pending.into_execution_pending_block_slashable(chain)
|
||||
execution_pending.into_execution_pending_block_slashable(block_root, chain)
|
||||
}
|
||||
|
||||
fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
|
||||
@ -907,7 +910,7 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
|
||||
// Check the anchor slot before loading the parent, to avoid spurious lookups.
|
||||
check_block_against_anchor_slot(block.message(), chain)?;
|
||||
|
||||
let (mut parent, block) = load_parent(block, chain)?;
|
||||
let (mut parent, block) = load_parent(block_root, block, chain)?;
|
||||
|
||||
// Reject any block that exceeds our limit on skipped slots.
|
||||
check_block_skip_slots(chain, parent.beacon_block.slot(), block.message())?;
|
||||
@ -955,7 +958,7 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
|
||||
let (mut parent, block) = if let Some(parent) = from.parent {
|
||||
(parent, from.block)
|
||||
} else {
|
||||
load_parent(from.block, chain)?
|
||||
load_parent(from.block_root, from.block, chain)?
|
||||
};
|
||||
|
||||
let state = cheap_state_advance_to_obtain_committees(
|
||||
@ -991,29 +994,29 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
|
||||
Self::from_gossip_verified_block(from, chain)
|
||||
.map_err(|e| BlockSlashInfo::from_early_error(header, e))
|
||||
}
|
||||
|
||||
pub fn block_root(&self) -> Hash256 {
|
||||
self.block_root
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for SignatureVerifiedBlock<T> {
|
||||
/// Completes verification of the wrapped `block`.
|
||||
fn into_execution_pending_block_slashable(
|
||||
self,
|
||||
block_root: Hash256,
|
||||
chain: &Arc<BeaconChain<T>>,
|
||||
) -> Result<ExecutionPendingBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>> {
|
||||
let header = self.block.signed_block_header();
|
||||
let (parent, block) = if let Some(parent) = self.parent {
|
||||
(parent, self.block)
|
||||
} else {
|
||||
load_parent(self.block, chain)
|
||||
load_parent(self.block_root, self.block, chain)
|
||||
.map_err(|e| BlockSlashInfo::SignatureValid(header.clone(), e))?
|
||||
};
|
||||
|
||||
ExecutionPendingBlock::from_signature_verified_components(
|
||||
block,
|
||||
self.block_root,
|
||||
parent,
|
||||
chain,
|
||||
)
|
||||
.map_err(|e| BlockSlashInfo::SignatureValid(header, e))
|
||||
ExecutionPendingBlock::from_signature_verified_components(block, block_root, parent, chain)
|
||||
.map_err(|e| BlockSlashInfo::SignatureValid(header, e))
|
||||
}
|
||||
|
||||
fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
|
||||
@ -1026,14 +1029,15 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for Arc<SignedBeaconBlock
|
||||
/// and then using that implementation of `IntoExecutionPendingBlock` to complete verification.
|
||||
fn into_execution_pending_block_slashable(
|
||||
self,
|
||||
block_root: Hash256,
|
||||
chain: &Arc<BeaconChain<T>>,
|
||||
) -> Result<ExecutionPendingBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>> {
|
||||
// Perform an early check to prevent wasting time on irrelevant blocks.
|
||||
let block_root = check_block_relevancy(&self, None, chain)
|
||||
let block_root = check_block_relevancy(&self, block_root, chain)
|
||||
.map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?;
|
||||
|
||||
SignatureVerifiedBlock::check_slashable(self, block_root, chain)?
|
||||
.into_execution_pending_block_slashable(chain)
|
||||
.into_execution_pending_block_slashable(block_root, chain)
|
||||
}
|
||||
|
||||
fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
|
||||
@ -1088,7 +1092,7 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
|
||||
* Perform cursory checks to see if the block is even worth processing.
|
||||
*/
|
||||
|
||||
check_block_relevancy(&block, Some(block_root), chain)?;
|
||||
check_block_relevancy(&block, block_root, chain)?;
|
||||
|
||||
/*
|
||||
* Advance the given `parent.beacon_state` to the slot of the given `block`.
|
||||
@ -1502,7 +1506,7 @@ pub fn check_block_is_finalized_descendant<T: BeaconChainTypes>(
|
||||
/// experienced whilst attempting to verify.
|
||||
pub fn check_block_relevancy<T: BeaconChainTypes>(
|
||||
signed_block: &SignedBeaconBlock<T::EthSpec>,
|
||||
block_root: Option<Hash256>,
|
||||
block_root: Hash256,
|
||||
chain: &BeaconChain<T>,
|
||||
) -> Result<Hash256, BlockError<T::EthSpec>> {
|
||||
let block = signed_block.message();
|
||||
@ -1526,8 +1530,6 @@ pub fn check_block_relevancy<T: BeaconChainTypes>(
|
||||
return Err(BlockError::BlockSlotLimitReached);
|
||||
}
|
||||
|
||||
let block_root = block_root.unwrap_or_else(|| get_block_root(signed_block));
|
||||
|
||||
// Do not process a block from a finalized slot.
|
||||
check_block_against_finalized_slot(block, block_root, chain)?;
|
||||
|
||||
@ -1581,6 +1583,7 @@ fn verify_parent_block_is_known<T: BeaconChainTypes>(
|
||||
/// whilst attempting the operation.
|
||||
#[allow(clippy::type_complexity)]
|
||||
fn load_parent<T: BeaconChainTypes>(
|
||||
block_root: Hash256,
|
||||
block: Arc<SignedBeaconBlock<T::EthSpec>>,
|
||||
chain: &BeaconChain<T>,
|
||||
) -> Result<
|
||||
@ -1614,7 +1617,7 @@ fn load_parent<T: BeaconChainTypes>(
|
||||
.block_times_cache
|
||||
.read()
|
||||
.get_block_delays(
|
||||
block.canonical_root(),
|
||||
block_root,
|
||||
chain
|
||||
.slot_clock
|
||||
.start_of(block.slot())
|
||||
|
@ -55,7 +55,9 @@ pub use self::errors::{BeaconChainError, BlockProductionError};
|
||||
pub use self::historical_blocks::HistoricalBlockError;
|
||||
pub use attestation_verification::Error as AttestationError;
|
||||
pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError};
|
||||
pub use block_verification::{BlockError, ExecutionPayloadError, GossipVerifiedBlock};
|
||||
pub use block_verification::{
|
||||
get_block_root, BlockError, ExecutionPayloadError, GossipVerifiedBlock,
|
||||
};
|
||||
pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock};
|
||||
pub use eth1_chain::{Eth1Chain, Eth1ChainBackend};
|
||||
pub use events::ServerSentEventHandler;
|
||||
|
@ -1453,12 +1453,13 @@ where
|
||||
pub async fn process_block(
|
||||
&self,
|
||||
slot: Slot,
|
||||
block_root: Hash256,
|
||||
block: SignedBeaconBlock<E>,
|
||||
) -> Result<SignedBeaconBlockHash, BlockError<E>> {
|
||||
self.set_current_slot(slot);
|
||||
let block_hash: SignedBeaconBlockHash = self
|
||||
.chain
|
||||
.process_block(Arc::new(block), CountUnrealized::True)
|
||||
.process_block(block_root, Arc::new(block), CountUnrealized::True)
|
||||
.await?
|
||||
.into();
|
||||
self.chain.recompute_head_at_current_slot().await;
|
||||
@ -1471,7 +1472,11 @@ where
|
||||
) -> Result<SignedBeaconBlockHash, BlockError<E>> {
|
||||
let block_hash: SignedBeaconBlockHash = self
|
||||
.chain
|
||||
.process_block(Arc::new(block), CountUnrealized::True)
|
||||
.process_block(
|
||||
block.canonical_root(),
|
||||
Arc::new(block),
|
||||
CountUnrealized::True,
|
||||
)
|
||||
.await?
|
||||
.into();
|
||||
self.chain.recompute_head_at_current_slot().await;
|
||||
@ -1536,7 +1541,9 @@ where
|
||||
) -> Result<(SignedBeaconBlockHash, SignedBeaconBlock<E>, BeaconState<E>), BlockError<E>> {
|
||||
self.set_current_slot(slot);
|
||||
let (block, new_state) = self.make_block(state, slot).await;
|
||||
let block_hash = self.process_block(slot, block.clone()).await?;
|
||||
let block_hash = self
|
||||
.process_block(slot, block.canonical_root(), block.clone())
|
||||
.await?;
|
||||
Ok((block_hash, block, new_state))
|
||||
}
|
||||
|
||||
|
@ -346,6 +346,7 @@ async fn assert_invalid_signature(
|
||||
let process_res = harness
|
||||
.chain
|
||||
.process_block(
|
||||
snapshots[block_index].beacon_block.canonical_root(),
|
||||
snapshots[block_index].beacon_block.clone(),
|
||||
CountUnrealized::True,
|
||||
)
|
||||
@ -403,12 +404,14 @@ async fn invalid_signature_gossip_block() {
|
||||
.await
|
||||
.into_block_error()
|
||||
.expect("should import all blocks prior to the one being tested");
|
||||
let signed_block = SignedBeaconBlock::from_block(block, junk_signature());
|
||||
assert!(
|
||||
matches!(
|
||||
harness
|
||||
.chain
|
||||
.process_block(
|
||||
Arc::new(SignedBeaconBlock::from_block(block, junk_signature())),
|
||||
signed_block.canonical_root(),
|
||||
Arc::new(signed_block),
|
||||
CountUnrealized::True
|
||||
)
|
||||
.await,
|
||||
@ -718,7 +721,11 @@ async fn block_gossip_verification() {
|
||||
|
||||
harness
|
||||
.chain
|
||||
.process_block(gossip_verified, CountUnrealized::True)
|
||||
.process_block(
|
||||
gossip_verified.block_root,
|
||||
gossip_verified,
|
||||
CountUnrealized::True,
|
||||
)
|
||||
.await
|
||||
.expect("should import valid gossip verified block");
|
||||
}
|
||||
@ -985,7 +992,11 @@ async fn verify_block_for_gossip_slashing_detection() {
|
||||
.unwrap();
|
||||
harness
|
||||
.chain
|
||||
.process_block(verified_block, CountUnrealized::True)
|
||||
.process_block(
|
||||
verified_block.block_root,
|
||||
verified_block,
|
||||
CountUnrealized::True,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
unwrap_err(
|
||||
@ -1020,7 +1031,11 @@ async fn verify_block_for_gossip_doppelganger_detection() {
|
||||
let attestations = verified_block.block.message().body().attestations().clone();
|
||||
harness
|
||||
.chain
|
||||
.process_block(verified_block, CountUnrealized::True)
|
||||
.process_block(
|
||||
verified_block.block_root,
|
||||
verified_block,
|
||||
CountUnrealized::True,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
@ -1161,7 +1176,11 @@ async fn add_base_block_to_altair_chain() {
|
||||
assert!(matches!(
|
||||
harness
|
||||
.chain
|
||||
.process_block(Arc::new(base_block.clone()), CountUnrealized::True)
|
||||
.process_block(
|
||||
base_block.canonical_root(),
|
||||
Arc::new(base_block.clone()),
|
||||
CountUnrealized::True
|
||||
)
|
||||
.await
|
||||
.err()
|
||||
.expect("should error when processing base block"),
|
||||
@ -1289,7 +1308,11 @@ async fn add_altair_block_to_base_chain() {
|
||||
assert!(matches!(
|
||||
harness
|
||||
.chain
|
||||
.process_block(Arc::new(altair_block.clone()), CountUnrealized::True)
|
||||
.process_block(
|
||||
altair_block.canonical_root(),
|
||||
Arc::new(altair_block.clone()),
|
||||
CountUnrealized::True
|
||||
)
|
||||
.await
|
||||
.err()
|
||||
.expect("should error when processing altair block"),
|
||||
|
@ -281,7 +281,7 @@ impl InvalidPayloadRig {
|
||||
}
|
||||
let root = self
|
||||
.harness
|
||||
.process_block(slot, block.clone())
|
||||
.process_block(slot, block.canonical_root(), block.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
@ -320,7 +320,11 @@ impl InvalidPayloadRig {
|
||||
set_new_payload(new_payload_response);
|
||||
set_forkchoice_updated(forkchoice_response);
|
||||
|
||||
match self.harness.process_block(slot, block).await {
|
||||
match self
|
||||
.harness
|
||||
.process_block(slot, block.canonical_root(), block)
|
||||
.await
|
||||
{
|
||||
Err(error) if evaluate_error(&error) => (),
|
||||
Err(other) => {
|
||||
panic!("evaluate_error returned false with {:?}", other)
|
||||
@ -685,7 +689,11 @@ async fn invalidates_all_descendants() {
|
||||
let fork_block_root = rig
|
||||
.harness
|
||||
.chain
|
||||
.process_block(Arc::new(fork_block), CountUnrealized::True)
|
||||
.process_block(
|
||||
fork_block.canonical_root(),
|
||||
Arc::new(fork_block),
|
||||
CountUnrealized::True,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
rig.recompute_head().await;
|
||||
@ -777,7 +785,11 @@ async fn switches_heads() {
|
||||
let fork_block_root = rig
|
||||
.harness
|
||||
.chain
|
||||
.process_block(Arc::new(fork_block), CountUnrealized::True)
|
||||
.process_block(
|
||||
fork_block.canonical_root(),
|
||||
Arc::new(fork_block),
|
||||
CountUnrealized::True,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
rig.recompute_head().await;
|
||||
@ -1023,7 +1035,7 @@ async fn invalid_parent() {
|
||||
|
||||
// Ensure the block built atop an invalid payload is invalid for import.
|
||||
assert!(matches!(
|
||||
rig.harness.chain.process_block(block.clone(), CountUnrealized::True).await,
|
||||
rig.harness.chain.process_block(block.canonical_root(), block.clone(), CountUnrealized::True).await,
|
||||
Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root })
|
||||
if invalid_root == parent_root
|
||||
));
|
||||
@ -1305,7 +1317,7 @@ async fn build_optimistic_chain(
|
||||
for block in blocks {
|
||||
rig.harness
|
||||
.chain
|
||||
.process_block(block, CountUnrealized::True)
|
||||
.process_block(block.canonical_root(), block, CountUnrealized::True)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
@ -1863,7 +1875,11 @@ async fn recover_from_invalid_head_by_importing_blocks() {
|
||||
// Import the fork block, it should become the head.
|
||||
rig.harness
|
||||
.chain
|
||||
.process_block(fork_block.clone(), CountUnrealized::True)
|
||||
.process_block(
|
||||
fork_block.canonical_root(),
|
||||
fork_block.clone(),
|
||||
CountUnrealized::True,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
rig.recompute_head().await;
|
||||
|
@ -2125,7 +2125,11 @@ async fn weak_subjectivity_sync() {
|
||||
|
||||
beacon_chain.slot_clock.set_slot(slot.as_u64());
|
||||
beacon_chain
|
||||
.process_block(Arc::new(full_block), CountUnrealized::True)
|
||||
.process_block(
|
||||
full_block.canonical_root(),
|
||||
Arc::new(full_block),
|
||||
CountUnrealized::True,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
beacon_chain.recompute_head_at_current_slot().await;
|
||||
@ -2382,8 +2386,14 @@ async fn revert_minority_fork_on_resume() {
|
||||
|
||||
let (block, new_state) = harness1.make_block(state, slot).await;
|
||||
|
||||
harness1.process_block(slot, block.clone()).await.unwrap();
|
||||
harness2.process_block(slot, block.clone()).await.unwrap();
|
||||
harness1
|
||||
.process_block(slot, block.canonical_root(), block.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
harness2
|
||||
.process_block(slot, block.canonical_root(), block.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
state = new_state;
|
||||
block_root = block.canonical_root();
|
||||
@ -2416,12 +2426,18 @@ async fn revert_minority_fork_on_resume() {
|
||||
|
||||
// Minority chain block (no attesters).
|
||||
let (block1, new_state1) = harness1.make_block(state1, slot).await;
|
||||
harness1.process_block(slot, block1).await.unwrap();
|
||||
harness1
|
||||
.process_block(slot, block1.canonical_root(), block1)
|
||||
.await
|
||||
.unwrap();
|
||||
state1 = new_state1;
|
||||
|
||||
// Majority chain block (all attesters).
|
||||
let (block2, new_state2) = harness2.make_block(state2, slot).await;
|
||||
harness2.process_block(slot, block2.clone()).await.unwrap();
|
||||
harness2
|
||||
.process_block(slot, block2.canonical_root(), block2.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
state2 = new_state2;
|
||||
block_root = block2.canonical_root();
|
||||
|
@ -685,6 +685,7 @@ async fn run_skip_slot_test(skip_slots: u64) {
|
||||
harness_b
|
||||
.chain
|
||||
.process_block(
|
||||
harness_a.chain.head_snapshot().beacon_block_root,
|
||||
harness_a.chain.head_snapshot().beacon_block.clone(),
|
||||
CountUnrealized::True
|
||||
)
|
||||
|
@ -1393,12 +1393,13 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
|
||||
pub async fn propose_blinded_beacon_block(
|
||||
&self,
|
||||
block_root: Hash256,
|
||||
block: &SignedBeaconBlock<T, BlindedPayload<T>>,
|
||||
) -> Result<ExecutionPayload<T>, Error> {
|
||||
debug!(
|
||||
self.log(),
|
||||
"Sending block to builder";
|
||||
"root" => ?block.canonical_root(),
|
||||
"root" => ?block_root,
|
||||
);
|
||||
if let Some(builder) = self.builder() {
|
||||
builder
|
||||
|
@ -1046,7 +1046,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
|
||||
log: Logger| async move {
|
||||
publish_blocks::publish_block(block, chain, &network_tx, log)
|
||||
publish_blocks::publish_block(None, block, chain, &network_tx, log)
|
||||
.await
|
||||
.map(|()| warp::reply())
|
||||
},
|
||||
|
@ -9,13 +9,14 @@ use std::sync::Arc;
|
||||
use tokio::sync::mpsc::UnboundedSender;
|
||||
use tree_hash::TreeHash;
|
||||
use types::{
|
||||
BlindedPayload, ExecPayload, ExecutionBlockHash, ExecutionPayload, FullPayload,
|
||||
BlindedPayload, ExecPayload, ExecutionBlockHash, ExecutionPayload, FullPayload, Hash256,
|
||||
SignedBeaconBlock,
|
||||
};
|
||||
use warp::Rejection;
|
||||
|
||||
/// Handles a request from the HTTP API for full blocks.
|
||||
pub async fn publish_block<T: BeaconChainTypes>(
|
||||
block_root: Option<Hash256>,
|
||||
block: Arc<SignedBeaconBlock<T::EthSpec>>,
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>,
|
||||
@ -31,8 +32,10 @@ pub async fn publish_block<T: BeaconChainTypes>(
|
||||
let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock);
|
||||
metrics::observe_duration(&metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, delay);
|
||||
|
||||
let block_root = block_root.unwrap_or_else(|| block.canonical_root());
|
||||
|
||||
match chain
|
||||
.process_block(block.clone(), CountUnrealized::True)
|
||||
.process_block(block_root, block.clone(), CountUnrealized::True)
|
||||
.await
|
||||
{
|
||||
Ok(root) => {
|
||||
@ -127,8 +130,16 @@ pub async fn publish_blinded_block<T: BeaconChainTypes>(
|
||||
network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>,
|
||||
log: Logger,
|
||||
) -> Result<(), Rejection> {
|
||||
let full_block = reconstruct_block(chain.clone(), block, log.clone()).await?;
|
||||
publish_block::<T>(Arc::new(full_block), chain, network_tx, log).await
|
||||
let block_root = block.canonical_root();
|
||||
let full_block = reconstruct_block(chain.clone(), block_root, block, log.clone()).await?;
|
||||
publish_block::<T>(
|
||||
Some(block_root),
|
||||
Arc::new(full_block),
|
||||
chain,
|
||||
network_tx,
|
||||
log,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Deconstruct the given blinded block, and construct a full block. This attempts to use the
|
||||
@ -136,6 +147,7 @@ pub async fn publish_blinded_block<T: BeaconChainTypes>(
|
||||
/// the full payload.
|
||||
async fn reconstruct_block<T: BeaconChainTypes>(
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
block_root: Hash256,
|
||||
block: SignedBeaconBlock<T::EthSpec, BlindedPayload<T::EthSpec>>,
|
||||
log: Logger,
|
||||
) -> Result<SignedBeaconBlock<T::EthSpec, FullPayload<T::EthSpec>>, Rejection> {
|
||||
@ -155,12 +167,15 @@ async fn reconstruct_block<T: BeaconChainTypes>(
|
||||
cached_payload
|
||||
// Otherwise, this means we are attempting a blind block proposal.
|
||||
} else {
|
||||
let full_payload = el.propose_blinded_beacon_block(&block).await.map_err(|e| {
|
||||
warp_utils::reject::custom_server_error(format!(
|
||||
"Blind block proposal failed: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
let full_payload = el
|
||||
.propose_blinded_beacon_block(block_root, &block)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_server_error(format!(
|
||||
"Blind block proposal failed: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
info!(log, "Successfully published a block to the builder network"; "block_hash" => ?full_payload.block_hash);
|
||||
full_payload
|
||||
};
|
||||
|
@ -67,7 +67,10 @@ pub async fn fork_choice_before_proposal() {
|
||||
|
||||
let state_a = harness.get_current_state();
|
||||
let (block_b, state_b) = harness.make_block(state_a.clone(), slot_b).await;
|
||||
let block_root_b = harness.process_block(slot_b, block_b).await.unwrap();
|
||||
let block_root_b = harness
|
||||
.process_block(slot_b, block_b.canonical_root(), block_b)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Create attestations to B but keep them in reserve until after C has been processed.
|
||||
let attestations_b = harness.make_attestations(
|
||||
@ -80,7 +83,7 @@ pub async fn fork_choice_before_proposal() {
|
||||
|
||||
let (block_c, state_c) = harness.make_block(state_a, slot_c).await;
|
||||
let block_root_c = harness
|
||||
.process_block(slot_c, block_c.clone())
|
||||
.process_block(slot_c, block_c.canonical_root(), block_c.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
|
@ -489,6 +489,7 @@ impl<T: BeaconChainTypes> WorkEvent<T> {
|
||||
/// Create a new `Work` event for some block, where the result from computation (if any) is
|
||||
/// sent to the other side of `result_tx`.
|
||||
pub fn rpc_beacon_block(
|
||||
block_root: Hash256,
|
||||
block: Arc<SignedBeaconBlock<T::EthSpec>>,
|
||||
seen_timestamp: Duration,
|
||||
process_type: BlockProcessType,
|
||||
@ -496,6 +497,7 @@ impl<T: BeaconChainTypes> WorkEvent<T> {
|
||||
Self {
|
||||
drop_during_sync: false,
|
||||
work: Work::RpcBlock {
|
||||
block_root,
|
||||
block,
|
||||
seen_timestamp,
|
||||
process_type,
|
||||
@ -577,6 +579,7 @@ impl<T: BeaconChainTypes> std::convert::From<ReadyWork<T>> for WorkEvent<T> {
|
||||
},
|
||||
},
|
||||
ReadyWork::RpcBlock(QueuedRpcBlock {
|
||||
block_root,
|
||||
block,
|
||||
seen_timestamp,
|
||||
process_type,
|
||||
@ -584,6 +587,7 @@ impl<T: BeaconChainTypes> std::convert::From<ReadyWork<T>> for WorkEvent<T> {
|
||||
}) => Self {
|
||||
drop_during_sync: false,
|
||||
work: Work::RpcBlock {
|
||||
block_root,
|
||||
block,
|
||||
seen_timestamp,
|
||||
process_type,
|
||||
@ -705,6 +709,7 @@ pub enum Work<T: BeaconChainTypes> {
|
||||
seen_timestamp: Duration,
|
||||
},
|
||||
RpcBlock {
|
||||
block_root: Hash256,
|
||||
block: Arc<SignedBeaconBlock<T::EthSpec>>,
|
||||
seen_timestamp: Duration,
|
||||
process_type: BlockProcessType,
|
||||
@ -1532,11 +1537,13 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
|
||||
* Verification for beacon blocks received during syncing via RPC.
|
||||
*/
|
||||
Work::RpcBlock {
|
||||
block_root,
|
||||
block,
|
||||
seen_timestamp,
|
||||
process_type,
|
||||
should_process,
|
||||
} => task_spawner.spawn_async(worker.process_rpc_block(
|
||||
block_root,
|
||||
block,
|
||||
seen_timestamp,
|
||||
process_type,
|
||||
|
@ -242,6 +242,7 @@ impl TestRig {
|
||||
|
||||
pub fn enqueue_rpc_block(&self) {
|
||||
let event = WorkEvent::rpc_beacon_block(
|
||||
self.next_block.canonical_root(),
|
||||
self.next_block.clone(),
|
||||
std::time::Duration::default(),
|
||||
BlockProcessType::ParentLookup {
|
||||
@ -253,6 +254,7 @@ impl TestRig {
|
||||
|
||||
pub fn enqueue_single_lookup_rpc_block(&self) {
|
||||
let event = WorkEvent::rpc_beacon_block(
|
||||
self.next_block.canonical_root(),
|
||||
self.next_block.clone(),
|
||||
std::time::Duration::default(),
|
||||
BlockProcessType::SingleBlock { id: 1 },
|
||||
|
@ -109,6 +109,7 @@ pub struct QueuedGossipBlock<T: BeaconChainTypes> {
|
||||
/// A block that arrived for processing when the same block was being imported over gossip.
|
||||
/// It is queued for later import.
|
||||
pub struct QueuedRpcBlock<T: EthSpec> {
|
||||
pub block_root: Hash256,
|
||||
pub block: Arc<SignedBeaconBlock<T>>,
|
||||
pub process_type: BlockProcessType,
|
||||
pub seen_timestamp: Duration,
|
||||
|
@ -713,16 +713,28 @@ impl<T: BeaconChainTypes> Worker<T> {
|
||||
block_delay,
|
||||
);
|
||||
|
||||
let verification_result = self
|
||||
.chain
|
||||
.clone()
|
||||
.verify_block_for_gossip(block.clone())
|
||||
.await;
|
||||
|
||||
let block_root = if let Ok(verified_block) = &verification_result {
|
||||
verified_block.block_root
|
||||
} else {
|
||||
block.canonical_root()
|
||||
};
|
||||
|
||||
// Write the time the block was observed into delay cache.
|
||||
self.chain.block_times_cache.write().set_time_observed(
|
||||
block.canonical_root(),
|
||||
block_root,
|
||||
block.slot(),
|
||||
seen_duration,
|
||||
Some(peer_id.to_string()),
|
||||
Some(peer_client.to_string()),
|
||||
);
|
||||
|
||||
let verified_block = match self.chain.clone().verify_block_for_gossip(block).await {
|
||||
let verified_block = match verification_result {
|
||||
Ok(verified_block) => {
|
||||
if block_delay >= self.chain.slot_clock.unagg_attestation_production_delay() {
|
||||
metrics::inc_counter(&metrics::BEACON_BLOCK_GOSSIP_ARRIVED_LATE_TOTAL);
|
||||
@ -762,9 +774,9 @@ impl<T: BeaconChainTypes> Worker<T> {
|
||||
debug!(
|
||||
self.log,
|
||||
"Unknown parent for gossip block";
|
||||
"root" => ?block.canonical_root()
|
||||
"root" => ?block_root
|
||||
);
|
||||
self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block));
|
||||
self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block, block_root));
|
||||
return None;
|
||||
}
|
||||
Err(e @ BlockError::BeaconChainError(_)) => {
|
||||
@ -918,10 +930,11 @@ impl<T: BeaconChainTypes> Worker<T> {
|
||||
_seen_duration: Duration,
|
||||
) {
|
||||
let block: Arc<_> = verified_block.block.clone();
|
||||
let block_root = verified_block.block_root;
|
||||
|
||||
match self
|
||||
.chain
|
||||
.process_block(verified_block, CountUnrealized::True)
|
||||
.process_block(block_root, verified_block, CountUnrealized::True)
|
||||
.await
|
||||
{
|
||||
Ok(block_root) => {
|
||||
@ -956,7 +969,7 @@ impl<T: BeaconChainTypes> Worker<T> {
|
||||
"Block with unknown parent attempted to be processed";
|
||||
"peer_id" => %peer_id
|
||||
);
|
||||
self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block));
|
||||
self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block, block_root));
|
||||
}
|
||||
Err(ref e @ BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => {
|
||||
debug!(
|
||||
@ -970,7 +983,7 @@ impl<T: BeaconChainTypes> Worker<T> {
|
||||
self.log,
|
||||
"Invalid gossip beacon block";
|
||||
"outcome" => ?other,
|
||||
"block root" => ?block.canonical_root(),
|
||||
"block root" => ?block_root,
|
||||
"block slot" => block.slot()
|
||||
);
|
||||
self.gossip_penalize_peer(
|
||||
|
@ -38,8 +38,10 @@ struct ChainSegmentFailed {
|
||||
|
||||
impl<T: BeaconChainTypes> Worker<T> {
|
||||
/// Attempt to process a block received from a direct RPC request.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub async fn process_rpc_block(
|
||||
self,
|
||||
block_root: Hash256,
|
||||
block: Arc<SignedBeaconBlock<T::EthSpec>>,
|
||||
seen_timestamp: Duration,
|
||||
process_type: BlockProcessType,
|
||||
@ -56,17 +58,18 @@ impl<T: BeaconChainTypes> Worker<T> {
|
||||
return;
|
||||
}
|
||||
// Check if the block is already being imported through another source
|
||||
let handle = match duplicate_cache.check_and_insert(block.canonical_root()) {
|
||||
let handle = match duplicate_cache.check_and_insert(block_root) {
|
||||
Some(handle) => handle,
|
||||
None => {
|
||||
debug!(
|
||||
self.log,
|
||||
"Gossip block is being processed";
|
||||
"action" => "sending rpc block to reprocessing queue",
|
||||
"block_root" => %block.canonical_root(),
|
||||
"block_root" => %block_root,
|
||||
);
|
||||
// Send message to work reprocess queue to retry the block
|
||||
let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock {
|
||||
block_root,
|
||||
block: block.clone(),
|
||||
process_type,
|
||||
seen_timestamp,
|
||||
@ -74,13 +77,16 @@ impl<T: BeaconChainTypes> Worker<T> {
|
||||
});
|
||||
|
||||
if reprocess_tx.try_send(reprocess_msg).is_err() {
|
||||
error!(self.log, "Failed to inform block import"; "source" => "rpc", "block_root" => %block.canonical_root())
|
||||
error!(self.log, "Failed to inform block import"; "source" => "rpc", "block_root" => %block_root)
|
||||
};
|
||||
return;
|
||||
}
|
||||
};
|
||||
let slot = block.slot();
|
||||
let result = self.chain.process_block(block, CountUnrealized::True).await;
|
||||
let result = self
|
||||
.chain
|
||||
.process_block(block_root, block, CountUnrealized::True)
|
||||
.await;
|
||||
|
||||
metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL);
|
||||
|
||||
|
@ -30,6 +30,8 @@ mod single_block_lookup;
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
pub type RootBlockTuple<T> = (Hash256, Arc<SignedBeaconBlock<T>>);
|
||||
|
||||
const FAILED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60;
|
||||
const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3;
|
||||
|
||||
@ -101,11 +103,11 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
|
||||
/// called in order to find the block's parent.
|
||||
pub fn search_parent(
|
||||
&mut self,
|
||||
block_root: Hash256,
|
||||
block: Arc<SignedBeaconBlock<T::EthSpec>>,
|
||||
peer_id: PeerId,
|
||||
cx: &mut SyncNetworkContext<T>,
|
||||
) {
|
||||
let block_root = block.canonical_root();
|
||||
let parent_root = block.parent_root();
|
||||
// If this block or it's parent is part of a known failed chain, ignore it.
|
||||
if self.failed_chains.contains(&parent_root) || self.failed_chains.contains(&block_root) {
|
||||
@ -125,7 +127,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
|
||||
return;
|
||||
}
|
||||
|
||||
let parent_lookup = ParentLookup::new(block, peer_id);
|
||||
let parent_lookup = ParentLookup::new(block_root, block, peer_id);
|
||||
self.request_parent(parent_lookup, cx);
|
||||
}
|
||||
|
||||
@ -153,10 +155,11 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
|
||||
};
|
||||
|
||||
match request.get_mut().verify_block(block) {
|
||||
Ok(Some(block)) => {
|
||||
Ok(Some((block_root, block))) => {
|
||||
// This is the correct block, send it for processing
|
||||
if self
|
||||
.send_block_for_processing(
|
||||
block_root,
|
||||
block,
|
||||
seen_timestamp,
|
||||
BlockProcessType::SingleBlock { id },
|
||||
@ -217,11 +220,12 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
|
||||
};
|
||||
|
||||
match parent_lookup.verify_block(block, &mut self.failed_chains) {
|
||||
Ok(Some(block)) => {
|
||||
Ok(Some((block_root, block))) => {
|
||||
// Block is correct, send to the beacon processor.
|
||||
let chain_hash = parent_lookup.chain_hash();
|
||||
if self
|
||||
.send_block_for_processing(
|
||||
block_root,
|
||||
block,
|
||||
seen_timestamp,
|
||||
BlockProcessType::ParentLookup { chain_hash },
|
||||
@ -420,7 +424,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
|
||||
error!(self.log, "Beacon chain error processing single block"; "block_root" => %root, "error" => ?e);
|
||||
}
|
||||
BlockError::ParentUnknown(block) => {
|
||||
self.search_parent(block, peer_id, cx);
|
||||
self.search_parent(root, block, peer_id, cx);
|
||||
}
|
||||
ref e @ BlockError::ExecutionPayloadError(ref epe) if !epe.penalize_peer() => {
|
||||
// These errors indicate that the execution layer is offline
|
||||
@ -625,6 +629,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
|
||||
|
||||
fn send_block_for_processing(
|
||||
&mut self,
|
||||
block_root: Hash256,
|
||||
block: Arc<SignedBeaconBlock<T::EthSpec>>,
|
||||
duration: Duration,
|
||||
process_type: BlockProcessType,
|
||||
@ -632,8 +637,8 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
|
||||
) -> Result<(), ()> {
|
||||
match cx.processor_channel_if_enabled() {
|
||||
Some(beacon_processor_send) => {
|
||||
trace!(self.log, "Sending block for processing"; "block" => %block.canonical_root(), "process" => ?process_type);
|
||||
let event = WorkEvent::rpc_beacon_block(block, duration, process_type);
|
||||
trace!(self.log, "Sending block for processing"; "block" => ?block_root, "process" => ?process_type);
|
||||
let event = WorkEvent::rpc_beacon_block(block_root, block, duration, process_type);
|
||||
if let Err(e) = beacon_processor_send.try_send(event) {
|
||||
error!(
|
||||
self.log,
|
||||
@ -646,7 +651,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
|
||||
}
|
||||
}
|
||||
None => {
|
||||
trace!(self.log, "Dropping block ready for processing. Beacon processor not available"; "block" => %block.canonical_root());
|
||||
trace!(self.log, "Dropping block ready for processing. Beacon processor not available"; "block" => %block_root);
|
||||
Err(())
|
||||
}
|
||||
}
|
||||
|
@ -1,3 +1,4 @@
|
||||
use super::RootBlockTuple;
|
||||
use beacon_chain::BeaconChainTypes;
|
||||
use lighthouse_network::PeerId;
|
||||
use std::sync::Arc;
|
||||
@ -58,11 +59,15 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
|
||||
.any(|d_block| d_block.as_ref() == block)
|
||||
}
|
||||
|
||||
pub fn new(block: Arc<SignedBeaconBlock<T::EthSpec>>, peer_id: PeerId) -> Self {
|
||||
pub fn new(
|
||||
block_root: Hash256,
|
||||
block: Arc<SignedBeaconBlock<T::EthSpec>>,
|
||||
peer_id: PeerId,
|
||||
) -> Self {
|
||||
let current_parent_request = SingleBlockRequest::new(block.parent_root(), peer_id);
|
||||
|
||||
Self {
|
||||
chain_hash: block.canonical_root(),
|
||||
chain_hash: block_root,
|
||||
downloaded_blocks: vec![block],
|
||||
current_parent_request,
|
||||
current_parent_request_id: None,
|
||||
@ -130,12 +135,15 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
|
||||
&mut self,
|
||||
block: Option<Arc<SignedBeaconBlock<T::EthSpec>>>,
|
||||
failed_chains: &mut lru_cache::LRUTimeCache<Hash256>,
|
||||
) -> Result<Option<Arc<SignedBeaconBlock<T::EthSpec>>>, VerifyError> {
|
||||
let block = self.current_parent_request.verify_block(block)?;
|
||||
) -> Result<Option<RootBlockTuple<T::EthSpec>>, VerifyError> {
|
||||
let root_and_block = self.current_parent_request.verify_block(block)?;
|
||||
|
||||
// check if the parent of this block isn't in the failed cache. If it is, this chain should
|
||||
// be dropped and the peer downscored.
|
||||
if let Some(parent_root) = block.as_ref().map(|block| block.parent_root()) {
|
||||
if let Some(parent_root) = root_and_block
|
||||
.as_ref()
|
||||
.map(|(_, block)| block.parent_root())
|
||||
{
|
||||
if failed_chains.contains(&parent_root) {
|
||||
self.current_parent_request.register_failure_downloading();
|
||||
self.current_parent_request_id = None;
|
||||
@ -143,7 +151,7 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
|
||||
}
|
||||
}
|
||||
|
||||
Ok(block)
|
||||
Ok(root_and_block)
|
||||
}
|
||||
|
||||
pub fn get_processing_peer(&self, chain_hash: Hash256) -> Option<PeerId> {
|
||||
|
@ -1,6 +1,8 @@
|
||||
use std::collections::HashSet;
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::RootBlockTuple;
|
||||
use beacon_chain::get_block_root;
|
||||
use lighthouse_network::{rpc::BlocksByRootRequest, PeerId};
|
||||
use rand::seq::IteratorRandom;
|
||||
use ssz_types::VariableList;
|
||||
@ -104,7 +106,7 @@ impl<const MAX_ATTEMPTS: u8> SingleBlockRequest<MAX_ATTEMPTS> {
|
||||
pub fn verify_block<T: EthSpec>(
|
||||
&mut self,
|
||||
block: Option<Arc<SignedBeaconBlock<T>>>,
|
||||
) -> Result<Option<Arc<SignedBeaconBlock<T>>>, VerifyError> {
|
||||
) -> Result<Option<RootBlockTuple<T>>, VerifyError> {
|
||||
match self.state {
|
||||
State::AwaitingDownload => {
|
||||
self.register_failure_downloading();
|
||||
@ -112,7 +114,10 @@ impl<const MAX_ATTEMPTS: u8> SingleBlockRequest<MAX_ATTEMPTS> {
|
||||
}
|
||||
State::Downloading { peer_id } => match block {
|
||||
Some(block) => {
|
||||
if block.canonical_root() != self.hash {
|
||||
// Compute the block root using this specific function so that we can get timing
|
||||
// metrics.
|
||||
let block_root = get_block_root(&block);
|
||||
if block_root != self.hash {
|
||||
// return an error and drop the block
|
||||
// NOTE: we take this is as a download failure to prevent counting the
|
||||
// attempt as a chain failure, but simply a peer failure.
|
||||
@ -121,7 +126,7 @@ impl<const MAX_ATTEMPTS: u8> SingleBlockRequest<MAX_ATTEMPTS> {
|
||||
} else {
|
||||
// Return the block for processing.
|
||||
self.state = State::Processing { peer_id };
|
||||
Ok(Some(block))
|
||||
Ok(Some((block_root, block)))
|
||||
}
|
||||
}
|
||||
None => {
|
||||
|
@ -272,7 +272,7 @@ fn test_parent_lookup_happy_path() {
|
||||
let peer_id = PeerId::random();
|
||||
|
||||
// Trigger the request
|
||||
bl.search_parent(Arc::new(block), peer_id, &mut cx);
|
||||
bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx);
|
||||
let id = rig.expect_parent_request();
|
||||
|
||||
// Peer sends the right block, it should be sent for processing. Peer should not be penalized.
|
||||
@ -300,7 +300,7 @@ fn test_parent_lookup_wrong_response() {
|
||||
let peer_id = PeerId::random();
|
||||
|
||||
// Trigger the request
|
||||
bl.search_parent(Arc::new(block), peer_id, &mut cx);
|
||||
bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx);
|
||||
let id1 = rig.expect_parent_request();
|
||||
|
||||
// Peer sends the wrong block, peer should be penalized and the block re-requested.
|
||||
@ -337,7 +337,7 @@ fn test_parent_lookup_empty_response() {
|
||||
let peer_id = PeerId::random();
|
||||
|
||||
// Trigger the request
|
||||
bl.search_parent(Arc::new(block), peer_id, &mut cx);
|
||||
bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx);
|
||||
let id1 = rig.expect_parent_request();
|
||||
|
||||
// Peer sends an empty response, peer should be penalized and the block re-requested.
|
||||
@ -369,7 +369,7 @@ fn test_parent_lookup_rpc_failure() {
|
||||
let peer_id = PeerId::random();
|
||||
|
||||
// Trigger the request
|
||||
bl.search_parent(Arc::new(block), peer_id, &mut cx);
|
||||
bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx);
|
||||
let id1 = rig.expect_parent_request();
|
||||
|
||||
// The request fails. It should be tried again.
|
||||
@ -396,10 +396,11 @@ fn test_parent_lookup_too_many_attempts() {
|
||||
|
||||
let parent = rig.rand_block();
|
||||
let block = rig.block_with_parent(parent.canonical_root());
|
||||
let chain_hash = block.canonical_root();
|
||||
let peer_id = PeerId::random();
|
||||
|
||||
// Trigger the request
|
||||
bl.search_parent(Arc::new(block), peer_id, &mut cx);
|
||||
bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx);
|
||||
for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE {
|
||||
let id = rig.expect_parent_request();
|
||||
match i % 2 {
|
||||
@ -435,7 +436,7 @@ fn test_parent_lookup_too_many_download_attempts_no_blacklist() {
|
||||
let peer_id = PeerId::random();
|
||||
|
||||
// Trigger the request
|
||||
bl.search_parent(Arc::new(block), peer_id, &mut cx);
|
||||
bl.search_parent(block_hash, Arc::new(block), peer_id, &mut cx);
|
||||
for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE {
|
||||
assert!(!bl.failed_chains.contains(&block_hash));
|
||||
let id = rig.expect_parent_request();
|
||||
@ -469,7 +470,7 @@ fn test_parent_lookup_too_many_processing_attempts_must_blacklist() {
|
||||
let peer_id = PeerId::random();
|
||||
|
||||
// Trigger the request
|
||||
bl.search_parent(Arc::new(block), peer_id, &mut cx);
|
||||
bl.search_parent(block_hash, Arc::new(block), peer_id, &mut cx);
|
||||
|
||||
// Fail downloading the block
|
||||
for _ in 0..(parent_lookup::PARENT_FAIL_TOLERANCE - PROCESSING_FAILURES) {
|
||||
@ -510,7 +511,7 @@ fn test_parent_lookup_too_deep() {
|
||||
let peer_id = PeerId::random();
|
||||
let trigger_block = blocks.pop().unwrap();
|
||||
let chain_hash = trigger_block.canonical_root();
|
||||
bl.search_parent(Arc::new(trigger_block), peer_id, &mut cx);
|
||||
bl.search_parent(chain_hash, Arc::new(trigger_block), peer_id, &mut cx);
|
||||
|
||||
for block in blocks.into_iter().rev() {
|
||||
let id = rig.expect_parent_request();
|
||||
@ -537,7 +538,12 @@ fn test_parent_lookup_disconnection() {
|
||||
let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
|
||||
let peer_id = PeerId::random();
|
||||
let trigger_block = rig.rand_block();
|
||||
bl.search_parent(Arc::new(trigger_block), peer_id, &mut cx);
|
||||
bl.search_parent(
|
||||
trigger_block.canonical_root(),
|
||||
Arc::new(trigger_block),
|
||||
peer_id,
|
||||
&mut cx,
|
||||
);
|
||||
bl.peer_disconnected(&peer_id, &mut cx);
|
||||
assert!(bl.parent_queue.is_empty());
|
||||
}
|
||||
@ -581,7 +587,7 @@ fn test_parent_lookup_ignored_response() {
|
||||
let peer_id = PeerId::random();
|
||||
|
||||
// Trigger the request
|
||||
bl.search_parent(Arc::new(block), peer_id, &mut cx);
|
||||
bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx);
|
||||
let id = rig.expect_parent_request();
|
||||
|
||||
// Peer sends the right block, it should be sent for processing. Peer should not be penalized.
|
||||
|
@ -94,7 +94,7 @@ pub enum SyncMessage<T: EthSpec> {
|
||||
},
|
||||
|
||||
/// A block with an unknown parent has been received.
|
||||
UnknownBlock(PeerId, Arc<SignedBeaconBlock<T>>),
|
||||
UnknownBlock(PeerId, Arc<SignedBeaconBlock<T>>, Hash256),
|
||||
|
||||
/// A peer has sent an object that references a block that is unknown. This triggers the
|
||||
/// manager to attempt to find the block matching the unknown hash.
|
||||
@ -503,7 +503,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
|
||||
} => {
|
||||
self.rpc_block_received(request_id, peer_id, beacon_block, seen_timestamp);
|
||||
}
|
||||
SyncMessage::UnknownBlock(peer_id, block) => {
|
||||
SyncMessage::UnknownBlock(peer_id, block, block_root) => {
|
||||
// If we are not synced or within SLOT_IMPORT_TOLERANCE of the block, ignore
|
||||
if !self.network_globals.sync_state.read().is_synced() {
|
||||
let head_slot = self.chain.canonical_head.cached_head().head_slot();
|
||||
@ -523,7 +523,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
|
||||
&& self.network.is_execution_engine_online()
|
||||
{
|
||||
self.block_lookups
|
||||
.search_parent(block, peer_id, &mut self.network);
|
||||
.search_parent(block_root, block, peer_id, &mut self.network);
|
||||
}
|
||||
}
|
||||
SyncMessage::UnknownBlockHash(peer_id, block_hash) => {
|
||||
|
@ -331,11 +331,11 @@ impl<E: EthSpec> Tester<E> {
|
||||
pub fn process_block(&self, block: SignedBeaconBlock<E>, valid: bool) -> Result<(), Error> {
|
||||
let block_root = block.canonical_root();
|
||||
let block = Arc::new(block);
|
||||
let result = self.block_on_dangerous(
|
||||
self.harness
|
||||
.chain
|
||||
.process_block(block.clone(), CountUnrealized::False),
|
||||
)?;
|
||||
let result = self.block_on_dangerous(self.harness.chain.process_block(
|
||||
block_root,
|
||||
block.clone(),
|
||||
CountUnrealized::False,
|
||||
))?;
|
||||
if result.is_ok() != valid {
|
||||
return Err(Error::DidntFail(format!(
|
||||
"block with root {} was valid={} whilst test expects valid={}. result: {:?}",
|
||||
|
Loading…
Reference in New Issue
Block a user