# Remove CountUnrealized (#4357)

## Issue Addressed

Closes #4332

## Proposed Changes

Remove the `CountUnrealized` type, defaulting unrealized justification to _on_. This fixes #4332 by ensuring that importing the same block to fork choice always results in the same outcome. Finalized sync speed may be slightly impacted by this change, but that is deemed an acceptable trade-off until the optimisation from #4118 is implemented.

TODO:

- [x] Also check that the block isn't a duplicate before importing
Parent: 77fc511170
Commit: affea585f4
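The behavioural core of the change is easiest to see in isolation: fork choice always tracks unrealized justification (there is no longer a `CountUnrealized` switch), and importing the same block a second time is a no-op, which makes the outcome independent of whether the block arrives via gossip, RPC or a finalized chain segment. The sketch below is a minimal, hypothetical model of that behaviour — `ForkChoiceSketch`, `Hash256` and `Epoch` are simplified stand-ins, not the real Lighthouse types.

```rust
use std::collections::HashMap;

// Simplified stand-ins for illustration only (not the real Lighthouse types).
type Hash256 = u64;
type Epoch = u64;

#[derive(Default)]
struct ForkChoiceSketch {
    // Stand-in for the proto-array: blocks already applied to fork choice.
    blocks: HashMap<Hash256, Epoch>,
    unrealized_justified_epoch: Epoch,
}

impl ForkChoiceSketch {
    /// Importing the same block twice must yield the same state: bail out early
    /// if the block is already known, and otherwise always track unrealized
    /// justification (no `CountUnrealized` flag to pass).
    fn on_block(&mut self, block_root: Hash256, unrealized_justified_epoch: Epoch) {
        if self.blocks.contains_key(&block_root) {
            return; // duplicate import: no state mutation
        }
        self.blocks.insert(block_root, unrealized_justified_epoch);
        if unrealized_justified_epoch > self.unrealized_justified_epoch {
            self.unrealized_justified_epoch = unrealized_justified_epoch;
        }
    }
}

fn main() {
    let mut fc = ForkChoiceSketch::default();
    fc.on_block(0xaa, 2);
    fc.on_block(0xaa, 2); // re-import is a no-op
    assert_eq!(fc.unrealized_justified_epoch, 2);
    println!("unrealized justified epoch: {}", fc.unrealized_justified_epoch);
}
```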
@@ -63,7 +63,6 @@ use execution_layer::{
     BlockProposalContents, BuilderParams, ChainHealth, ExecutionLayer, FailedCondition,
     PayloadAttributes, PayloadStatus,
 };
-pub use fork_choice::CountUnrealized;
 use fork_choice::{
     AttestationFromBlock, ExecutionStatus, ForkChoice, ForkchoiceUpdateParameters,
     InvalidationOperation, PayloadVerificationStatus, ResetPayloadStatuses,
@@ -2510,7 +2509,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     pub async fn process_chain_segment(
         self: &Arc<Self>,
         chain_segment: Vec<Arc<SignedBeaconBlock<T::EthSpec>>>,
-        count_unrealized: CountUnrealized,
         notify_execution_layer: NotifyExecutionLayer,
     ) -> ChainSegmentResult<T::EthSpec> {
         let mut imported_blocks = 0;
@@ -2579,7 +2577,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 .process_block(
                     signature_verified_block.block_root(),
                     signature_verified_block,
-                    count_unrealized,
                     notify_execution_layer,
                 )
                 .await
@@ -2668,7 +2665,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         self: &Arc<Self>,
         block_root: Hash256,
         unverified_block: B,
-        count_unrealized: CountUnrealized,
         notify_execution_layer: NotifyExecutionLayer,
     ) -> Result<Hash256, BlockError<T::EthSpec>> {
         // Start the Prometheus timer.
@@ -2689,7 +2685,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 notify_execution_layer,
             )?;
             chain
-                .import_execution_pending_block(execution_pending, count_unrealized)
+                .import_execution_pending_block(execution_pending)
                 .await
         };
 
@@ -2744,10 +2740,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     ///
     /// An error is returned if the block was unable to be imported. It may be partially imported
     /// (i.e., this function is not atomic).
-    async fn import_execution_pending_block(
+    pub async fn import_execution_pending_block(
         self: Arc<Self>,
         execution_pending_block: ExecutionPendingBlock<T>,
-        count_unrealized: CountUnrealized,
     ) -> Result<Hash256, BlockError<T::EthSpec>> {
         let ExecutionPendingBlock {
             block,
@@ -2808,7 +2803,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 state,
                 confirmed_state_roots,
                 payload_verification_status,
-                count_unrealized,
                 parent_block,
                 parent_eth1_finalization_data,
                 consensus_context,
@@ -2834,7 +2828,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         mut state: BeaconState<T::EthSpec>,
         confirmed_state_roots: Vec<Hash256>,
         payload_verification_status: PayloadVerificationStatus,
-        count_unrealized: CountUnrealized,
         parent_block: SignedBlindedBeaconBlock<T::EthSpec>,
         parent_eth1_finalization_data: Eth1FinalizationData,
         mut consensus_context: ConsensusContext<T::EthSpec>,
@@ -2903,7 +2896,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                     &state,
                     payload_verification_status,
                     &self.spec,
-                    count_unrealized,
                 )
                 .map_err(|e| BlockError::BeaconChainError(e.into()))?;
         }
@@ -18,7 +18,7 @@ use crate::{
 };
 use eth1::Config as Eth1Config;
 use execution_layer::ExecutionLayer;
-use fork_choice::{CountUnrealized, ForkChoice, ResetPayloadStatuses};
+use fork_choice::{ForkChoice, ResetPayloadStatuses};
 use futures::channel::mpsc::Sender;
 use operation_pool::{OperationPool, PersistedOperationPool};
 use parking_lot::RwLock;
@@ -687,7 +687,6 @@ where
                 store.clone(),
                 Some(current_slot),
                 &self.spec,
-                CountUnrealized::True,
             )?;
         }
 
@@ -1,5 +1,5 @@
 use crate::{BeaconForkChoiceStore, BeaconSnapshot};
-use fork_choice::{CountUnrealized, ForkChoice, PayloadVerificationStatus};
+use fork_choice::{ForkChoice, PayloadVerificationStatus};
 use itertools::process_results;
 use slog::{info, warn, Logger};
 use state_processing::state_advance::complete_state_advance;
@@ -100,7 +100,6 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
     store: Arc<HotColdDB<E, Hot, Cold>>,
     current_slot: Option<Slot>,
     spec: &ChainSpec,
-    count_unrealized_config: CountUnrealized,
 ) -> Result<ForkChoice<BeaconForkChoiceStore<E, Hot, Cold>, E>, String> {
     // Fetch finalized block.
     let finalized_checkpoint = head_state.finalized_checkpoint();
@@ -166,8 +165,7 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
         .map_err(|e| format!("Error loading blocks to replay for fork choice: {:?}", e))?;
 
     let mut state = finalized_snapshot.beacon_state;
-    let blocks_len = blocks.len();
-    for (i, block) in blocks.into_iter().enumerate() {
+    for block in blocks {
         complete_state_advance(&mut state, None, block.slot(), spec)
             .map_err(|e| format!("State advance failed: {:?}", e))?;
 
@@ -190,15 +188,6 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
         // This scenario is so rare that it seems OK to double-verify some blocks.
         let payload_verification_status = PayloadVerificationStatus::Optimistic;
 
-        // Because we are replaying a single chain of blocks, we only need to calculate unrealized
-        // justification for the last block in the chain.
-        let is_last_block = i + 1 == blocks_len;
-        let count_unrealized = if is_last_block {
-            count_unrealized_config
-        } else {
-            CountUnrealized::False
-        };
-
         fork_choice
             .on_block(
                 block.slot(),
@@ -209,7 +198,6 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
                 &state,
                 payload_verification_status,
                 spec,
-                count_unrealized,
             )
             .map_err(|e| format!("Error applying replayed block to fork choice: {:?}", e))?;
     }
@@ -52,8 +52,8 @@ pub mod validator_pubkey_cache;
 
 pub use self::beacon_chain::{
     AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult,
-    CountUnrealized, ForkChoiceError, OverrideForkchoiceUpdate, ProduceBlockVerification,
-    StateSkipConfig, WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON,
+    ForkChoiceError, OverrideForkchoiceUpdate, ProduceBlockVerification, StateSkipConfig,
+    WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON,
     INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY,
 };
 pub use self::beacon_snapshot::BeaconSnapshot;
@@ -64,6 +64,7 @@ pub use attestation_verification::Error as AttestationError;
 pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError};
 pub use block_verification::{
     get_block_root, BlockError, ExecutionPayloadError, GossipVerifiedBlock,
+    IntoExecutionPendingBlock,
 };
 pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock};
 pub use eth1_chain::{Eth1Chain, Eth1ChainBackend};
@@ -22,7 +22,6 @@ use execution_layer::{
     },
     ExecutionLayer,
 };
-use fork_choice::CountUnrealized;
 use futures::channel::mpsc::Receiver;
 pub use genesis::{interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH};
 use int_to_bytes::int_to_bytes32;
@@ -1693,12 +1692,7 @@ where
         self.set_current_slot(slot);
         let block_hash: SignedBeaconBlockHash = self
             .chain
-            .process_block(
-                block_root,
-                Arc::new(block),
-                CountUnrealized::True,
-                NotifyExecutionLayer::Yes,
-            )
+            .process_block(block_root, Arc::new(block), NotifyExecutionLayer::Yes)
             .await?
             .into();
         self.chain.recompute_head_at_current_slot().await;
@@ -1714,7 +1708,6 @@ where
             .process_block(
                 block.canonical_root(),
                 Arc::new(block),
-                CountUnrealized::True,
                 NotifyExecutionLayer::Yes,
             )
             .await?
@@ -3,8 +3,9 @@
 use beacon_chain::test_utils::{
     AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
 };
-use beacon_chain::{BeaconSnapshot, BlockError, ChainSegmentResult, NotifyExecutionLayer};
-use fork_choice::CountUnrealized;
+use beacon_chain::{
+    BeaconSnapshot, BlockError, ChainSegmentResult, IntoExecutionPendingBlock, NotifyExecutionLayer,
+};
 use lazy_static::lazy_static;
 use logging::test_logger;
 use slasher::{Config as SlasherConfig, Slasher};
@@ -148,18 +149,14 @@ async fn chain_segment_full_segment() {
     // Sneak in a little check to ensure we can process empty chain segments.
     harness
         .chain
-        .process_chain_segment(vec![], CountUnrealized::True, NotifyExecutionLayer::Yes)
+        .process_chain_segment(vec![], NotifyExecutionLayer::Yes)
         .await
         .into_block_error()
         .expect("should import empty chain segment");
 
     harness
         .chain
-        .process_chain_segment(
-            blocks.clone(),
-            CountUnrealized::True,
-            NotifyExecutionLayer::Yes,
-        )
+        .process_chain_segment(blocks.clone(), NotifyExecutionLayer::Yes)
         .await
         .into_block_error()
         .expect("should import chain segment");
@@ -188,11 +185,7 @@ async fn chain_segment_varying_chunk_size() {
         for chunk in blocks.chunks(*chunk_size) {
             harness
                 .chain
-                .process_chain_segment(
-                    chunk.to_vec(),
-                    CountUnrealized::True,
-                    NotifyExecutionLayer::Yes,
-                )
+                .process_chain_segment(chunk.to_vec(), NotifyExecutionLayer::Yes)
                .await
                .into_block_error()
                .unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size));
@@ -228,7 +221,7 @@ async fn chain_segment_non_linear_parent_roots() {
        matches!(
            harness
                .chain
-                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
+                .process_chain_segment(blocks, NotifyExecutionLayer::Yes)
                .await
                .into_block_error(),
            Err(BlockError::NonLinearParentRoots)
@@ -248,7 +241,7 @@ async fn chain_segment_non_linear_parent_roots() {
        matches!(
            harness
                .chain
-                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
+                .process_chain_segment(blocks, NotifyExecutionLayer::Yes)
                .await
                .into_block_error(),
            Err(BlockError::NonLinearParentRoots)
@@ -279,7 +272,7 @@ async fn chain_segment_non_linear_slots() {
        matches!(
            harness
                .chain
-                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
+                .process_chain_segment(blocks, NotifyExecutionLayer::Yes)
                .await
                .into_block_error(),
            Err(BlockError::NonLinearSlots)
@@ -300,7 +293,7 @@ async fn chain_segment_non_linear_slots() {
        matches!(
            harness
                .chain
-                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
+                .process_chain_segment(blocks, NotifyExecutionLayer::Yes)
                .await
                .into_block_error(),
            Err(BlockError::NonLinearSlots)
@@ -326,7 +319,7 @@ async fn assert_invalid_signature(
        matches!(
            harness
                .chain
-                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
+                .process_chain_segment(blocks, NotifyExecutionLayer::Yes)
                .await
                .into_block_error(),
            Err(BlockError::InvalidSignature)
@@ -348,11 +341,7 @@ async fn assert_invalid_signature(
    // imported prior to this test.
    let _ = harness
        .chain
-        .process_chain_segment(
-            ancestor_blocks,
-            CountUnrealized::True,
-            NotifyExecutionLayer::Yes,
-        )
+        .process_chain_segment(ancestor_blocks, NotifyExecutionLayer::Yes)
        .await;
    harness.chain.recompute_head_at_current_slot().await;
 
@@ -361,7 +350,6 @@ async fn assert_invalid_signature(
        .process_block(
            snapshots[block_index].beacon_block.canonical_root(),
            snapshots[block_index].beacon_block.clone(),
-            CountUnrealized::True,
            NotifyExecutionLayer::Yes,
        )
        .await;
@@ -414,11 +402,7 @@ async fn invalid_signature_gossip_block() {
            .collect();
        harness
            .chain
-            .process_chain_segment(
-                ancestor_blocks,
-                CountUnrealized::True,
-                NotifyExecutionLayer::Yes,
-            )
+            .process_chain_segment(ancestor_blocks, NotifyExecutionLayer::Yes)
            .await
            .into_block_error()
            .expect("should import all blocks prior to the one being tested");
@@ -430,7 +414,6 @@ async fn invalid_signature_gossip_block() {
                .process_block(
                    signed_block.canonical_root(),
                    Arc::new(signed_block),
-                    CountUnrealized::True,
                    NotifyExecutionLayer::Yes,
                )
                .await,
@@ -465,7 +448,7 @@ async fn invalid_signature_block_proposal() {
        matches!(
            harness
                .chain
-                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
+                .process_chain_segment(blocks, NotifyExecutionLayer::Yes)
                .await
                .into_block_error(),
            Err(BlockError::InvalidSignature)
@@ -663,7 +646,7 @@ async fn invalid_signature_deposit() {
        !matches!(
            harness
                .chain
-                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
+                .process_chain_segment(blocks, NotifyExecutionLayer::Yes)
                .await
                .into_block_error(),
            Err(BlockError::InvalidSignature)
@@ -743,7 +726,6 @@ async fn block_gossip_verification() {
            .process_block(
                gossip_verified.block_root,
                gossip_verified,
-                CountUnrealized::True,
                NotifyExecutionLayer::Yes,
            )
            .await
@@ -1015,7 +997,6 @@ async fn verify_block_for_gossip_slashing_detection() {
        .process_block(
            verified_block.block_root,
            verified_block,
-            CountUnrealized::True,
            NotifyExecutionLayer::Yes,
        )
        .await
@@ -1055,7 +1036,6 @@ async fn verify_block_for_gossip_doppelganger_detection() {
        .process_block(
            verified_block.block_root,
            verified_block,
-            CountUnrealized::True,
            NotifyExecutionLayer::Yes,
        )
        .await
@@ -1203,7 +1183,6 @@ async fn add_base_block_to_altair_chain() {
            .process_block(
                base_block.canonical_root(),
                Arc::new(base_block.clone()),
-                CountUnrealized::True,
                NotifyExecutionLayer::Yes,
            )
            .await
@@ -1219,11 +1198,7 @@ async fn add_base_block_to_altair_chain() {
    assert!(matches!(
        harness
            .chain
-            .process_chain_segment(
-                vec![Arc::new(base_block)],
-                CountUnrealized::True,
-                NotifyExecutionLayer::Yes,
-            )
+            .process_chain_segment(vec![Arc::new(base_block)], NotifyExecutionLayer::Yes,)
            .await,
        ChainSegmentResult::Failed {
            imported_blocks: 0,
@@ -1342,7 +1317,6 @@ async fn add_altair_block_to_base_chain() {
            .process_block(
                altair_block.canonical_root(),
                Arc::new(altair_block.clone()),
-                CountUnrealized::True,
                NotifyExecutionLayer::Yes,
            )
            .await
@@ -1358,11 +1332,7 @@ async fn add_altair_block_to_base_chain() {
    assert!(matches!(
        harness
            .chain
-            .process_chain_segment(
-                vec![Arc::new(altair_block)],
-                CountUnrealized::True,
-                NotifyExecutionLayer::Yes
-            )
+            .process_chain_segment(vec![Arc::new(altair_block)], NotifyExecutionLayer::Yes)
            .await,
        ChainSegmentResult::Failed {
            imported_blocks: 0,
@@ -1373,3 +1343,100 @@ async fn add_altair_block_to_base_chain() {
         }
     ));
 }
+
+#[tokio::test]
+async fn import_duplicate_block_unrealized_justification() {
+    let spec = MainnetEthSpec::default_spec();
+
+    let harness = BeaconChainHarness::builder(MainnetEthSpec)
+        .spec(spec)
+        .keypairs(KEYPAIRS[..].to_vec())
+        .fresh_ephemeral_store()
+        .mock_execution_layer()
+        .build();
+    let chain = &harness.chain;
+
+    // Move out of the genesis slot.
+    harness.advance_slot();
+
+    // Build the chain out to the first justification opportunity 2/3rds of the way through epoch 2.
+    let num_slots = E::slots_per_epoch() as usize * 8 / 3;
+    harness
+        .extend_chain(
+            num_slots,
+            BlockStrategy::OnCanonicalHead,
+            AttestationStrategy::AllValidators,
+        )
+        .await;
+
+    // Move into the next empty slot.
+    harness.advance_slot();
+
+    // The store's justified checkpoint must still be at epoch 0, while unrealized justification
+    // must be at epoch 1.
+    let fc = chain.canonical_head.fork_choice_read_lock();
+    assert_eq!(fc.justified_checkpoint().epoch, 0);
+    assert_eq!(fc.unrealized_justified_checkpoint().epoch, 1);
+    drop(fc);
+
+    // Produce a block to justify epoch 2.
+    let state = harness.get_current_state();
+    let slot = harness.get_current_slot();
+    let (block, _) = harness.make_block(state.clone(), slot).await;
+    let block = Arc::new(block);
+    let block_root = block.canonical_root();
+
+    // Create two verified variants of the block, representing the same block being processed in
+    // parallel.
+    let notify_execution_layer = NotifyExecutionLayer::Yes;
+    let verified_block1 = block
+        .clone()
+        .into_execution_pending_block(block_root, &chain, notify_execution_layer)
+        .unwrap();
+    let verified_block2 = block
+        .into_execution_pending_block(block_root, &chain, notify_execution_layer)
+        .unwrap();
+
+    // Import the first block, simulating a block processed via a finalized chain segment.
+    chain
+        .clone()
+        .import_execution_pending_block(verified_block1)
+        .await
+        .unwrap();
+
+    // Unrealized justification should NOT have updated.
+    let fc = chain.canonical_head.fork_choice_read_lock();
+    assert_eq!(fc.justified_checkpoint().epoch, 0);
+    let unrealized_justification = fc.unrealized_justified_checkpoint();
+    assert_eq!(unrealized_justification.epoch, 2);
+
+    // The fork choice node for the block should have unrealized justification.
+    let fc_block = fc.get_block(&block_root).unwrap();
+    assert_eq!(
+        fc_block.unrealized_justified_checkpoint,
+        Some(unrealized_justification)
+    );
+    drop(fc);
+
+    // Import the second verified block, simulating a block processed via RPC.
+    chain
+        .clone()
+        .import_execution_pending_block(verified_block2)
+        .await
+        .unwrap();
+
+    // Unrealized justification should still be updated.
+    let fc = chain.canonical_head.fork_choice_read_lock();
+    assert_eq!(fc.justified_checkpoint().epoch, 0);
+    assert_eq!(
+        fc.unrealized_justified_checkpoint(),
+        unrealized_justification
+    );
+
+    // The fork choice node for the block should still have the unrealized justified checkpoint.
+    let fc_block = fc.get_block(&block_root).unwrap();
+    assert_eq!(
+        fc_block.unrealized_justified_checkpoint,
+        Some(unrealized_justification)
+    );
+}
@@ -17,9 +17,7 @@ use execution_layer::{
     test_utils::ExecutionBlockGenerator,
     ExecutionLayer, ForkchoiceState, PayloadAttributes,
 };
-use fork_choice::{
-    CountUnrealized, Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus,
-};
+use fork_choice::{Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus};
 use logging::test_logger;
 use proto_array::{Error as ProtoArrayError, ExecutionStatus};
 use slot_clock::SlotClock;
@@ -698,7 +696,6 @@ async fn invalidates_all_descendants() {
         .process_block(
             fork_block.canonical_root(),
             Arc::new(fork_block),
-            CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         )
         .await
@@ -795,7 +792,6 @@ async fn switches_heads() {
         .process_block(
             fork_block.canonical_root(),
             Arc::new(fork_block),
-            CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         )
         .await
@@ -1050,7 +1046,7 @@ async fn invalid_parent() {
 
     // Ensure the block built atop an invalid payload is invalid for import.
     assert!(matches!(
-        rig.harness.chain.process_block(block.canonical_root(), block.clone(), CountUnrealized::True, NotifyExecutionLayer::Yes).await,
+        rig.harness.chain.process_block(block.canonical_root(), block.clone(), NotifyExecutionLayer::Yes).await,
         Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root })
         if invalid_root == parent_root
     ));
@@ -1065,7 +1061,7 @@ async fn invalid_parent() {
             &state,
             PayloadVerificationStatus::Optimistic,
             &rig.harness.chain.spec,
-            CountUnrealized::True,
         ),
         Err(ForkChoiceError::ProtoArrayStringError(message))
         if message.contains(&format!(
@@ -1336,12 +1332,7 @@ async fn build_optimistic_chain(
     for block in blocks {
         rig.harness
             .chain
-            .process_block(
-                block.canonical_root(),
-                block,
-                CountUnrealized::True,
-                NotifyExecutionLayer::Yes,
-            )
+            .process_block(block.canonical_root(), block, NotifyExecutionLayer::Yes)
             .await
             .unwrap();
     }
@@ -1900,7 +1891,6 @@ async fn recover_from_invalid_head_by_importing_blocks() {
         .process_block(
             fork_block.canonical_root(),
             fork_block.clone(),
-            CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         )
         .await
@@ -12,7 +12,6 @@ use beacon_chain::{
     BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, NotifyExecutionLayer,
     ServerSentEventHandler, WhenSlotSkipped,
 };
-use fork_choice::CountUnrealized;
 use lazy_static::lazy_static;
 use logging::test_logger;
 use maplit::hashset;
@@ -2151,7 +2150,6 @@ async fn weak_subjectivity_sync() {
             .process_block(
                 full_block.canonical_root(),
                 Arc::new(full_block),
-                CountUnrealized::True,
                 NotifyExecutionLayer::Yes,
             )
             .await
@@ -8,7 +8,6 @@ use beacon_chain::{
     },
     BeaconChain, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped,
 };
-use fork_choice::CountUnrealized;
 use lazy_static::lazy_static;
 use operation_pool::PersistedOperationPool;
 use state_processing::{
@@ -687,7 +686,6 @@ async fn run_skip_slot_test(skip_slots: u64) {
             .process_block(
                 harness_a.chain.head_snapshot().beacon_block_root,
                 harness_a.chain.head_snapshot().beacon_block.clone(),
-                CountUnrealized::True,
                 NotifyExecutionLayer::Yes,
             )
             .await
@@ -1,8 +1,6 @@
 use crate::metrics;
 use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now};
-use beacon_chain::{
-    BeaconChain, BeaconChainTypes, BlockError, CountUnrealized, NotifyExecutionLayer,
-};
+use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, NotifyExecutionLayer};
 use execution_layer::ProvenancedPayload;
 use lighthouse_network::PubsubMessage;
 use network::NetworkMessage;
@@ -56,12 +54,7 @@ pub async fn publish_block<T: BeaconChainTypes>(
     let block_root = block_root.unwrap_or_else(|| block.canonical_root());
 
     match chain
-        .process_block(
-            block_root,
-            block.clone(),
-            CountUnrealized::True,
-            NotifyExecutionLayer::Yes,
-        )
+        .process_block(block_root, block.clone(), NotifyExecutionLayer::Yes)
         .await
     {
         Ok(root) => {
@@ -8,8 +8,8 @@ use beacon_chain::{
     observed_operations::ObservationOutcome,
     sync_committee_verification::{self, Error as SyncCommitteeError},
     validator_monitor::get_block_delay_ms,
-    BeaconChainError, BeaconChainTypes, BlockError, CountUnrealized, ForkChoiceError,
-    GossipVerifiedBlock, NotifyExecutionLayer,
+    BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, GossipVerifiedBlock,
+    NotifyExecutionLayer,
 };
 use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource};
 use operation_pool::ReceivedPreCapella;
@@ -949,12 +949,7 @@ impl<T: BeaconChainTypes> Worker<T> {
 
         let result = self
             .chain
-            .process_block(
-                block_root,
-                verified_block,
-                CountUnrealized::True,
-                NotifyExecutionLayer::Yes,
-            )
+            .process_block(block_root, verified_block, NotifyExecutionLayer::Yes)
             .await;
 
         match &result {
@@ -7,7 +7,6 @@ use crate::beacon_processor::DuplicateCache;
 use crate::metrics;
 use crate::sync::manager::{BlockProcessType, SyncMessage};
 use crate::sync::{BatchProcessResult, ChainId};
-use beacon_chain::CountUnrealized;
 use beacon_chain::{
     observed_block_producers::Error as ObserveError, validator_monitor::get_block_delay_ms,
     BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError,
@@ -25,7 +24,7 @@ use types::{Epoch, Hash256, SignedBeaconBlock};
 #[derive(Clone, Debug, PartialEq)]
 pub enum ChainSegmentProcessId {
     /// Processing Id of a range syncing batch.
-    RangeBatchId(ChainId, Epoch, CountUnrealized),
+    RangeBatchId(ChainId, Epoch),
     /// Processing ID for a backfill syncing batch.
     BackSyncBatchId(Epoch),
     /// Processing Id of the parent lookup of a block.
@@ -166,12 +165,7 @@ impl<T: BeaconChainTypes> Worker<T> {
         let parent_root = block.message().parent_root();
         let result = self
             .chain
-            .process_block(
-                block_root,
-                block,
-                CountUnrealized::True,
-                NotifyExecutionLayer::Yes,
-            )
+            .process_block(block_root, block, NotifyExecutionLayer::Yes)
             .await;
 
         metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL);
@@ -220,17 +214,13 @@ impl<T: BeaconChainTypes> Worker<T> {
     ) {
         let result = match sync_type {
             // this a request from the range sync
-            ChainSegmentProcessId::RangeBatchId(chain_id, epoch, count_unrealized) => {
+            ChainSegmentProcessId::RangeBatchId(chain_id, epoch) => {
                 let start_slot = downloaded_blocks.first().map(|b| b.slot().as_u64());
                 let end_slot = downloaded_blocks.last().map(|b| b.slot().as_u64());
                 let sent_blocks = downloaded_blocks.len();
 
                 match self
-                    .process_blocks(
-                        downloaded_blocks.iter(),
-                        count_unrealized,
-                        notify_execution_layer,
-                    )
+                    .process_blocks(downloaded_blocks.iter(), notify_execution_layer)
                     .await
                 {
                     (_, Ok(_)) => {
@@ -309,11 +299,7 @@ impl<T: BeaconChainTypes> Worker<T> {
                 // parent blocks are ordered from highest slot to lowest, so we need to process in
                 // reverse
                 match self
-                    .process_blocks(
-                        downloaded_blocks.iter().rev(),
-                        CountUnrealized::True,
-                        notify_execution_layer,
-                    )
+                    .process_blocks(downloaded_blocks.iter().rev(), notify_execution_layer)
                    .await
                {
                    (imported_blocks, Err(e)) => {
@@ -343,13 +329,12 @@ impl<T: BeaconChainTypes> Worker<T> {
     async fn process_blocks<'a>(
         &self,
         downloaded_blocks: impl Iterator<Item = &'a Arc<SignedBeaconBlock<T::EthSpec>>>,
-        count_unrealized: CountUnrealized,
         notify_execution_layer: NotifyExecutionLayer,
     ) -> (usize, Result<(), ChainSegmentFailed>) {
         let blocks: Vec<Arc<_>> = downloaded_blocks.cloned().collect();
         match self
             .chain
-            .process_chain_segment(blocks, count_unrealized, notify_execution_layer)
+            .process_chain_segment(blocks, notify_execution_layer)
             .await
         {
             ChainSegmentResult::Successful { imported_blocks } => {
@@ -556,7 +556,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
                     .parent_block_processed(chain_hash, result, &mut self.network),
             },
             SyncMessage::BatchProcessed { sync_type, result } => match sync_type {
-                ChainSegmentProcessId::RangeBatchId(chain_id, epoch, _) => {
+                ChainSegmentProcessId::RangeBatchId(chain_id, epoch) => {
                     self.range_sync.handle_block_process_result(
                         &mut self.network,
                         chain_id,
@@ -3,7 +3,7 @@ use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEven
 use crate::sync::{
     manager::Id, network_context::SyncNetworkContext, BatchOperationOutcome, BatchProcessResult,
 };
-use beacon_chain::{BeaconChainTypes, CountUnrealized};
+use beacon_chain::BeaconChainTypes;
 use fnv::FnvHashMap;
 use lighthouse_network::{PeerAction, PeerId};
 use rand::seq::SliceRandom;
@@ -101,8 +101,6 @@ pub struct SyncingChain<T: BeaconChainTypes> {
     /// Batches validated by this chain.
     validated_batches: u64,
 
-    is_finalized_segment: bool,
-
     /// The chain's log.
     log: slog::Logger,
 }
@@ -128,7 +126,6 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
         target_head_slot: Slot,
         target_head_root: Hash256,
         peer_id: PeerId,
-        is_finalized_segment: bool,
         log: &slog::Logger,
     ) -> Self {
         let mut peers = FnvHashMap::default();
@@ -150,7 +147,6 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
             state: ChainSyncingState::Stopped,
             current_processing_batch: None,
             validated_batches: 0,
-            is_finalized_segment,
             log: log.new(o!("chain" => id)),
         }
     }
@@ -318,12 +314,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
         // for removing chains and checking completion is in the callback.
 
         let blocks = batch.start_processing()?;
-        let count_unrealized = if self.is_finalized_segment {
-            CountUnrealized::False
-        } else {
-            CountUnrealized::True
-        };
-        let process_id = ChainSegmentProcessId::RangeBatchId(self.id, batch_id, count_unrealized);
+        let process_id = ChainSegmentProcessId::RangeBatchId(self.id, batch_id);
         self.current_processing_batch = Some(batch_id);
 
         if let Err(e) =
@@ -465,10 +465,10 @@ impl<T: BeaconChainTypes, C: BlockStorage> ChainCollection<T, C> {
         network: &mut SyncNetworkContext<T>,
     ) {
         let id = SyncingChain::<T>::id(&target_head_root, &target_head_slot);
-        let (collection, is_finalized) = if let RangeSyncType::Finalized = sync_type {
-            (&mut self.finalized_chains, true)
+        let collection = if let RangeSyncType::Finalized = sync_type {
+            &mut self.finalized_chains
         } else {
-            (&mut self.head_chains, false)
+            &mut self.head_chains
         };
         match collection.entry(id) {
             Entry::Occupied(mut entry) => {
@@ -493,7 +493,6 @@ impl<T: BeaconChainTypes, C: BlockStorage> ChainCollection<T, C> {
                     target_head_slot,
                     target_head_root,
                     peer,
-                    is_finalized,
                     &self.log,
                 );
                 debug_assert_eq!(new_chain.get_id(), id);
@@ -174,21 +174,6 @@ impl<T> From<proto_array::Error> for Error<T> {
     }
 }
 
-/// Indicates whether the unrealized justification of a block should be calculated and tracked.
-/// If a block has been finalized, this can be set to false. This is useful when syncing finalized
-/// portions of the chain. Otherwise this should always be set to true.
-#[derive(Clone, Copy, Debug, PartialEq)]
-pub enum CountUnrealized {
-    True,
-    False,
-}
-
-impl CountUnrealized {
-    pub fn is_true(&self) -> bool {
-        matches!(self, CountUnrealized::True)
-    }
-}
-
 /// Indicates if a block has been verified by an execution payload.
 ///
 /// There is no variant for "invalid", since such a block should never be added to fork choice.
@@ -659,8 +644,14 @@ where
         state: &BeaconState<E>,
         payload_verification_status: PayloadVerificationStatus,
         spec: &ChainSpec,
-        count_unrealized: CountUnrealized,
     ) -> Result<(), Error<T::Error>> {
+        // If this block has already been processed we do not need to reprocess it.
+        // We check this immediately in case re-processing the block mutates some property of the
+        // global fork choice store, e.g. the justified checkpoints or the proposer boost root.
+        if self.proto_array.contains_block(&block_root) {
+            return Ok(());
+        }
+
         // Provide the slot (as per the system clock) to the `fc_store` and then return its view of
         // the current slot. The `fc_store` will ensure that the `current_slot` is never
         // decreasing, a property which we must maintain.
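The comment added in the hunk above spells out the rationale for the TODO item in the commit message: the duplicate check has to run before `on_block` touches any global fork choice state, otherwise a re-import could still change the store. A small, hypothetical illustration of that ordering constraint follows (a simplified store, not the real `ProtoArray`/`ForkChoiceStore` API).

```rust
use std::collections::HashSet;

// Hypothetical, simplified stand-in for the fork choice store.
#[derive(Default)]
struct Store {
    known_blocks: HashSet<u64>,
    proposer_boost_root: u64,
}

impl Store {
    fn on_block(&mut self, block_root: u64, is_timely: bool) {
        // The duplicate check comes first: a re-imported block must not mutate the store.
        if self.known_blocks.contains(&block_root) {
            return;
        }
        // Only reached for genuinely new blocks.
        if is_timely {
            self.proposer_boost_root = block_root;
        }
        self.known_blocks.insert(block_root);
    }
}

fn main() {
    let mut store = Store::default();
    store.on_block(1, true);
    store.proposer_boost_root = 0; // the boost is later cleared at the slot boundary
    store.on_block(1, true); // duplicate import: leaves the cleared boost untouched
    assert_eq!(store.proposer_boost_root, 0);
}
```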
@@ -726,96 +717,84 @@ where
         )?;
 
         // Update unrealized justified/finalized checkpoints.
-        let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = if count_unrealized
-            .is_true()
-        {
-            let block_epoch = block.slot().epoch(E::slots_per_epoch());
+        let block_epoch = block.slot().epoch(E::slots_per_epoch());
 
-            // If the parent checkpoints are already at the same epoch as the block being imported,
-            // it's impossible for the unrealized checkpoints to differ from the parent's. This
-            // holds true because:
-            //
-            // 1. A child block cannot have lower FFG checkpoints than its parent.
-            // 2. A block in epoch `N` cannot contain attestations which would justify an epoch higher than `N`.
-            // 3. A block in epoch `N` cannot contain attestations which would finalize an epoch higher than `N - 1`.
-            //
-            // This is an optimization. It should reduce the amount of times we run
-            // `process_justification_and_finalization` by approximately 1/3rd when the chain is
-            // performing optimally.
-            let parent_checkpoints = parent_block
-                .unrealized_justified_checkpoint
-                .zip(parent_block.unrealized_finalized_checkpoint)
-                .filter(|(parent_justified, parent_finalized)| {
-                    parent_justified.epoch == block_epoch
-                        && parent_finalized.epoch + 1 >= block_epoch
-                });
+        // If the parent checkpoints are already at the same epoch as the block being imported,
+        // it's impossible for the unrealized checkpoints to differ from the parent's. This
+        // holds true because:
+        //
+        // 1. A child block cannot have lower FFG checkpoints than its parent.
+        // 2. A block in epoch `N` cannot contain attestations which would justify an epoch higher than `N`.
+        // 3. A block in epoch `N` cannot contain attestations which would finalize an epoch higher than `N - 1`.
+        //
+        // This is an optimization. It should reduce the amount of times we run
+        // `process_justification_and_finalization` by approximately 1/3rd when the chain is
+        // performing optimally.
+        let parent_checkpoints = parent_block
+            .unrealized_justified_checkpoint
+            .zip(parent_block.unrealized_finalized_checkpoint)
+            .filter(|(parent_justified, parent_finalized)| {
+                parent_justified.epoch == block_epoch && parent_finalized.epoch + 1 >= block_epoch
+            });
 
-            let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) =
-                if let Some((parent_justified, parent_finalized)) = parent_checkpoints {
-                    (parent_justified, parent_finalized)
-                } else {
-                    let justification_and_finalization_state = match block {
-                        BeaconBlockRef::Capella(_)
-                        | BeaconBlockRef::Merge(_)
-                        | BeaconBlockRef::Altair(_) => {
-                            let participation_cache =
-                                per_epoch_processing::altair::ParticipationCache::new(state, spec)
-                                    .map_err(Error::ParticipationCacheBuild)?;
-                            per_epoch_processing::altair::process_justification_and_finalization(
-                                state,
-                                &participation_cache,
-                            )?
-                        }
-                        BeaconBlockRef::Base(_) => {
-                            let mut validator_statuses =
-                                per_epoch_processing::base::ValidatorStatuses::new(state, spec)
-                                    .map_err(Error::ValidatorStatuses)?;
-                            validator_statuses
-                                .process_attestations(state)
-                                .map_err(Error::ValidatorStatuses)?;
-                            per_epoch_processing::base::process_justification_and_finalization(
-                                state,
-                                &validator_statuses.total_balances,
-                                spec,
-                            )?
-                        }
-                    };
+        let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) =
+            if let Some((parent_justified, parent_finalized)) = parent_checkpoints {
+                (parent_justified, parent_finalized)
+            } else {
+                let justification_and_finalization_state = match block {
+                    BeaconBlockRef::Capella(_)
+                    | BeaconBlockRef::Merge(_)
+                    | BeaconBlockRef::Altair(_) => {
+                        let participation_cache =
+                            per_epoch_processing::altair::ParticipationCache::new(state, spec)
+                                .map_err(Error::ParticipationCacheBuild)?;
+                        per_epoch_processing::altair::process_justification_and_finalization(
+                            state,
+                            &participation_cache,
+                        )?
+                    }
+                    BeaconBlockRef::Base(_) => {
+                        let mut validator_statuses =
+                            per_epoch_processing::base::ValidatorStatuses::new(state, spec)
+                                .map_err(Error::ValidatorStatuses)?;
+                        validator_statuses
+                            .process_attestations(state)
+                            .map_err(Error::ValidatorStatuses)?;
+                        per_epoch_processing::base::process_justification_and_finalization(
+                            state,
+                            &validator_statuses.total_balances,
+                            spec,
+                        )?
+                    }
+                };
 
-                    (
-                        justification_and_finalization_state.current_justified_checkpoint(),
-                        justification_and_finalization_state.finalized_checkpoint(),
-                    )
-                };
+                (
+                    justification_and_finalization_state.current_justified_checkpoint(),
+                    justification_and_finalization_state.finalized_checkpoint(),
+                )
+            };
 
-            // Update best known unrealized justified & finalized checkpoints
-            if unrealized_justified_checkpoint.epoch
-                > self.fc_store.unrealized_justified_checkpoint().epoch
-            {
-                self.fc_store
-                    .set_unrealized_justified_checkpoint(unrealized_justified_checkpoint);
-            }
-            if unrealized_finalized_checkpoint.epoch
-                > self.fc_store.unrealized_finalized_checkpoint().epoch
-            {
-                self.fc_store
-                    .set_unrealized_finalized_checkpoint(unrealized_finalized_checkpoint);
-            }
+        // Update best known unrealized justified & finalized checkpoints
+        if unrealized_justified_checkpoint.epoch
+            > self.fc_store.unrealized_justified_checkpoint().epoch
+        {
+            self.fc_store
+                .set_unrealized_justified_checkpoint(unrealized_justified_checkpoint);
+        }
+        if unrealized_finalized_checkpoint.epoch
+            > self.fc_store.unrealized_finalized_checkpoint().epoch
+        {
+            self.fc_store
+                .set_unrealized_finalized_checkpoint(unrealized_finalized_checkpoint);
+        }
 
-            // If block is from past epochs, try to update store's justified & finalized checkpoints right away
-            if block.slot().epoch(E::slots_per_epoch()) < current_slot.epoch(E::slots_per_epoch()) {
-                self.pull_up_store_checkpoints(
-                    unrealized_justified_checkpoint,
-                    unrealized_finalized_checkpoint,
-                )?;
-            }
-
-            (
-                Some(unrealized_justified_checkpoint),
-                Some(unrealized_finalized_checkpoint),
-            )
-        } else {
-            (None, None)
-        };
+        // If block is from past epochs, try to update store's justified & finalized checkpoints right away
+        if block.slot().epoch(E::slots_per_epoch()) < current_slot.epoch(E::slots_per_epoch()) {
+            self.pull_up_store_checkpoints(
+                unrealized_justified_checkpoint,
+                unrealized_finalized_checkpoint,
+            )?;
+        }
 
         let target_slot = block
             .slot()
@@ -886,8 +865,8 @@ where
                 justified_checkpoint: state.current_justified_checkpoint(),
                 finalized_checkpoint: state.finalized_checkpoint(),
                 execution_status,
-                unrealized_justified_checkpoint,
-                unrealized_finalized_checkpoint,
+                unrealized_justified_checkpoint: Some(unrealized_justified_checkpoint),
+                unrealized_finalized_checkpoint: Some(unrealized_finalized_checkpoint),
             },
             current_slot,
         )?;
@@ -2,9 +2,9 @@ mod fork_choice;
 mod fork_choice_store;
 
 pub use crate::fork_choice::{
-    AttestationFromBlock, CountUnrealized, Error, ForkChoice, ForkChoiceView,
-    ForkchoiceUpdateParameters, InvalidAttestation, InvalidBlock, PayloadVerificationStatus,
-    PersistedForkChoice, QueuedAttestation, ResetPayloadStatuses,
+    AttestationFromBlock, Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters,
+    InvalidAttestation, InvalidBlock, PayloadVerificationStatus, PersistedForkChoice,
+    QueuedAttestation, ResetPayloadStatuses,
 };
 pub use fork_choice_store::ForkChoiceStore;
 pub use proto_array::{Block as ProtoBlock, ExecutionStatus, InvalidationOperation};
@@ -12,8 +12,7 @@ use beacon_chain::{
     StateSkipConfig, WhenSlotSkipped,
 };
 use fork_choice::{
-    CountUnrealized, ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus,
-    QueuedAttestation,
+    ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, QueuedAttestation,
 };
 use store::MemoryStore;
 use types::{
@@ -288,7 +287,6 @@ impl ForkChoiceTest {
                 &state,
                 PayloadVerificationStatus::Verified,
                 &self.harness.chain.spec,
-                CountUnrealized::True,
             )
             .unwrap();
         self
@@ -331,7 +329,6 @@ impl ForkChoiceTest {
                 &state,
                 PayloadVerificationStatus::Verified,
                 &self.harness.chain.spec,
-                CountUnrealized::True,
             )
             .err()
             .expect("on_block did not return an error");
@@ -7,7 +7,7 @@ use beacon_chain::{
         obtain_indexed_attestation_and_committees_per_slot, VerifiedAttestation,
     },
     test_utils::{BeaconChainHarness, EphemeralHarnessType},
-    BeaconChainTypes, CachedHead, CountUnrealized, NotifyExecutionLayer,
+    BeaconChainTypes, CachedHead, NotifyExecutionLayer,
 };
 use execution_layer::{json_structures::JsonPayloadStatusV1Status, PayloadStatusV1};
 use serde::Deserialize;
@@ -381,7 +381,6 @@ impl<E: EthSpec> Tester<E> {
         let result = self.block_on_dangerous(self.harness.chain.process_block(
             block_root,
             block.clone(),
-            CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         ))?;
         if result.is_ok() != valid {
@@ -441,7 +440,6 @@ impl<E: EthSpec> Tester<E> {
             &state,
             PayloadVerificationStatus::Irrelevant,
             &self.harness.chain.spec,
-            CountUnrealized::True,
         );
 
         if result.is_ok() {