merge with capella

This commit is contained in:
realbigsean 2022-12-15 09:33:18 -05:00
commit d893706e0e
No known key found for this signature in database
GPG Key ID: B372B64D866BF8CC
95 changed files with 3783 additions and 539 deletions

View File

@ -16,6 +16,7 @@ async-wrapper-methods = [
"task_executor::TaskExecutor::spawn_blocking_handle",
"warp_utils::task::blocking_task",
"warp_utils::task::blocking_json_task",
"beacon_chain::beacon_chain::BeaconChain::spawn_blocking_handle",
"validator_client::http_api::blocking_signed_json_task",
"execution_layer::test_utils::MockServer::new",
"execution_layer::test_utils::MockServer::new_with_config",

View File

@ -5,6 +5,7 @@ on:
branches:
- unstable
- stable
- capella
tags:
- v*
@ -34,6 +35,11 @@ jobs:
run: |
echo "VERSION=latest" >> $GITHUB_ENV
echo "VERSION_SUFFIX=-unstable" >> $GITHUB_ENV
- name: Extract version (if capella)
if: github.event.ref == 'refs/heads/capella'
run: |
echo "VERSION=capella" >> $GITHUB_ENV
echo "VERSION_SUFFIX=" >> $GITHUB_ENV
- name: Extract version (if tagged release)
if: startsWith(github.event.ref, 'refs/tags')
run: |
@ -60,6 +66,7 @@ jobs:
DOCKER_CLI_EXPERIMENTAL: enabled
VERSION: ${{ needs.extract-version.outputs.VERSION }}
VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
CROSS_FEATURES: withdrawals,withdrawals-processing
steps:
- uses: actions/checkout@v3
- name: Update Rust

Cargo.lock generated
View File

@ -721,7 +721,7 @@ dependencies = [
[[package]]
name = "c-kzg"
version = "0.1.0"
source = "git+https://github.com/pawanjay176/c-kzg-4844?rev=669a13800a8a0d094c5387db58e06936ef194a25#669a13800a8a0d094c5387db58e06936ef194a25"
source = "git+https://github.com/pawanjay176/c-kzg-4844?rev=48c048b12a7d29ae3e7bf09000e07870da4cb701#48c048b12a7d29ae3e7bf09000e07870da4cb701"
dependencies = [
"hex",
"libc",
@ -4998,6 +4998,7 @@ version = "0.2.0"
dependencies = [
"eth2_ssz",
"eth2_ssz_derive",
"safe_arith",
"serde",
"serde_derive",
"serde_yaml",

View File

@ -1,5 +1,5 @@
FROM rust:1.62.1-bullseye AS builder
RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake clang libclang-dev protobuf-compiler
FROM rust:1.65.0-bullseye AS builder
RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler
COPY . lighthouse
ARG FEATURES
ENV FEATURES $FEATURES

View File

@ -24,6 +24,12 @@ use crate::execution_payload::{get_execution_payload, NotifyExecutionLayer, Prep
use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult};
use crate::head_tracker::HeadTracker;
use crate::historical_blocks::HistoricalBlockError;
use crate::light_client_finality_update_verification::{
Error as LightClientFinalityUpdateError, VerifiedLightClientFinalityUpdate,
};
use crate::light_client_optimistic_update_verification::{
Error as LightClientOptimisticUpdateError, VerifiedLightClientOptimisticUpdate,
};
use crate::migrate::BackgroundMigrator;
use crate::naive_aggregation_pool::{
AggregatedAttestationMap, Error as NaiveAggregationError, NaiveAggregationPool,
@ -40,9 +46,8 @@ use crate::observed_operations::{ObservationOutcome, ObservedOperations};
use crate::persisted_beacon_chain::{PersistedBeaconChain, DUMMY_CANONICAL_HEAD_BLOCK_ROOT};
use crate::persisted_fork_choice::PersistedForkChoice;
use crate::pre_finalization_cache::PreFinalizationBlockCache;
use crate::proposer_prep_service::PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR;
use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache};
use crate::snapshot_cache::SnapshotCache;
use crate::snapshot_cache::{BlockProductionPreState, SnapshotCache};
use crate::sync_committee_verification::{
Error as SyncCommitteeError, VerifiedSyncCommitteeMessage, VerifiedSyncContribution,
};
@ -52,9 +57,8 @@ use crate::validator_monitor::{
HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS,
};
use crate::validator_pubkey_cache::ValidatorPubkeyCache;
use crate::BeaconSnapshot;
use crate::{kzg_utils, BeaconForkChoiceStore};
use crate::{metrics, BeaconChainError};
use crate::kzg_utils;
use crate::{metrics, BeaconChainError, BeaconForkChoiceStore, BeaconSnapshot, CachedHead};
use eth2::types::{EventKind, SseBlock, SyncDuty};
use execution_layer::{
BlockProposalContents, BuilderParams, ChainHealth, ExecutionLayer, FailedCondition,
@ -70,7 +74,7 @@ use itertools::process_results;
use itertools::Itertools;
use operation_pool::{AttestationRef, OperationPool, PersistedOperationPool};
use parking_lot::{Mutex, RwLock};
use proto_array::CountUnrealizedFull;
use proto_array::{CountUnrealizedFull, DoNotReOrg, ProposerHeadError};
use safe_arith::SafeArith;
use slasher::Slasher;
use slog::{crit, debug, error, info, trace, warn, Logger};
@ -105,6 +109,7 @@ use tree_hash::TreeHash;
use types::beacon_state::CloneConfig;
use types::consts::eip4844::MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS;
use types::signed_block_and_blobs::BlockWrapper;
use types::consts::merge::INTERVALS_PER_SLOT;
use types::*;
pub type ForkChoiceError = fork_choice::Error<crate::ForkChoiceStoreError>;
@ -126,6 +131,12 @@ pub const VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1)
/// The timeout for the eth1 finalization cache
pub const ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_millis(200);
/// The latest delay from the start of the slot at which to attempt a 1-slot re-org.
fn max_re_org_slot_delay(seconds_per_slot: u64) -> Duration {
// Allow at least half of the attestation deadline for the block to propagate.
Duration::from_secs(seconds_per_slot) / INTERVALS_PER_SLOT as u32 / 2
}
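For intuition, a quick check of the arithmetic above (a hedged sketch, not part of this diff; it assumes mainnet's 12-second slots and the spec constant INTERVALS_PER_SLOT = 3):
use std::time::Duration;
// Assumed spec value; on mainnet there are three intervals per 12-second slot.
const INTERVALS_PER_SLOT: u64 = 3;
fn max_re_org_slot_delay(seconds_per_slot: u64) -> Duration {
    // Half of the one-third-slot attestation deadline.
    Duration::from_secs(seconds_per_slot) / INTERVALS_PER_SLOT as u32 / 2
}
fn main() {
    // 12s / 3 / 2 = 2s: a re-org proposal is only attempted within the first
    // two seconds of the slot, leaving the block time to propagate.
    assert_eq!(max_re_org_slot_delay(12), Duration::from_secs(2));
}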
// These keys are all zero because they get stored in different columns, see `DBColumn` type.
pub const BEACON_CHAIN_DB_KEY: Hash256 = Hash256::zero();
pub const OP_POOL_DB_KEY: Hash256 = Hash256::zero();
@ -187,6 +198,21 @@ pub enum ProduceBlockVerification {
NoVerification,
}
/// Payload attributes for which the `beacon_chain` crate is responsible.
pub struct PrePayloadAttributes {
pub proposer_index: u64,
pub prev_randao: Hash256,
}
/// Define whether a forkchoiceUpdate needs to be checked for an override (`Yes`) or has already
/// been checked (`AlreadyApplied`). It is safe to specify `Yes` even if re-orgs are disabled.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
pub enum OverrideForkchoiceUpdate {
#[default]
Yes,
AlreadyApplied,
}
/// The accepted clock drift for nodes gossiping blocks and attestations. See:
///
/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/p2p-interface.md#configuration
@ -346,6 +372,10 @@ pub struct BeaconChain<T: BeaconChainTypes> {
#[cfg(feature = "withdrawals-processing")]
pub(crate) observed_bls_to_execution_changes:
Mutex<ObservedOperations<SignedBlsToExecutionChange, T::EthSpec>>,
/// The most recently validated light client finality update received on gossip.
pub latest_seen_finality_update: Mutex<Option<LightClientFinalityUpdate<T::EthSpec>>>,
/// The most recently validated light client optimistic update received on gossip.
pub latest_seen_optimistic_update: Mutex<Option<LightClientOptimisticUpdate<T::EthSpec>>>,
/// Provides information from the Ethereum 1 (PoW) chain.
pub eth1_chain: Option<Eth1Chain<T::Eth1Chain, T::EthSpec>>,
/// Interfaces with the execution client.
@ -965,6 +995,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
Some(DatabaseBlock::Blinded(block)) => block,
None => return Ok(None),
};
let fork = blinded_block.fork_name(&self.spec)?;
// If we only have a blinded block, load the execution payload from the EL.
let block_message = blinded_block.message();
@ -979,7 +1010,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.execution_layer
.as_ref()
.ok_or(Error::ExecutionLayerMissing)?
.get_payload_by_block_hash(exec_block_hash)
.get_payload_by_block_hash(exec_block_hash, fork)
.await
.map_err(|e| Error::ExecutionLayerErrorPayloadReconstruction(exec_block_hash, e))?
.ok_or(Error::BlockHashMissingFromExecutionLayer(exec_block_hash))?;
@ -1829,6 +1860,40 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
})
}
/// Accepts some `LightClientFinalityUpdate` from the network and attempts to verify it
pub fn verify_finality_update_for_gossip(
self: &Arc<Self>,
light_client_finality_update: LightClientFinalityUpdate<T::EthSpec>,
seen_timestamp: Duration,
) -> Result<VerifiedLightClientFinalityUpdate<T>, LightClientFinalityUpdateError> {
VerifiedLightClientFinalityUpdate::verify(
light_client_finality_update,
self,
seen_timestamp,
)
.map(|v| {
metrics::inc_counter(&metrics::FINALITY_UPDATE_PROCESSING_SUCCESSES);
v
})
}
/// Accepts some `LightClientOptimisticUpdate` from the network and attempts to verify it
pub fn verify_optimistic_update_for_gossip(
self: &Arc<Self>,
light_client_optimistic_update: LightClientOptimisticUpdate<T::EthSpec>,
seen_timestamp: Duration,
) -> Result<VerifiedLightClientOptimisticUpdate<T>, LightClientOptimisticUpdateError> {
VerifiedLightClientOptimisticUpdate::verify(
light_client_optimistic_update,
self,
seen_timestamp,
)
.map(|v| {
metrics::inc_counter(&metrics::OPTIMISTIC_UPDATE_PROCESSING_SUCCESSES);
v
})
}
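A hedged sketch of how a gossip handler might drive these two entry points; the handler name and surrounding plumbing are illustrative, not part of this diff:
use std::{sync::Arc, time::Duration};
use beacon_chain::{BeaconChain, BeaconChainTypes};
use types::LightClientFinalityUpdate;
// Hypothetical network-side caller.
fn on_gossip_finality_update<T: BeaconChainTypes>(
    chain: &Arc<BeaconChain<T>>,
    update: LightClientFinalityUpdate<T::EthSpec>,
    seen_timestamp: Duration,
) {
    match chain.verify_finality_update_for_gossip(update, seen_timestamp) {
        // A verified update is safe to (re)publish on gossip.
        Ok(_verified) => { /* propagate and cache */ }
        // Each error variant maps to a peer-scoring or logging decision.
        Err(_e) => { /* penalize peer or ignore */ }
    }
}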
/// Accepts some attestation-type object and attempts to verify it in the context of fork
/// choice. If it is valid it is applied to `self.fork_choice`.
///
@ -2212,15 +2277,24 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
) -> Result<ObservationOutcome<SignedBlsToExecutionChange, T::EthSpec>, Error> {
#[cfg(feature = "withdrawals-processing")]
{
let current_fork = self.spec.fork_name_at_slot::<T::EthSpec>(self.slot()?);
if let ForkName::Base | ForkName::Altair | ForkName::Merge = current_fork {
// Disallow BLS to execution changes prior to the Capella fork.
return Err(Error::BlsToExecutionChangeBadFork(current_fork));
}
let wall_clock_state = self.wall_clock_state()?;
Ok(self
.observed_bls_to_execution_changes
.lock()
.verify_and_observe(bls_to_execution_change, &wall_clock_state, &self.spec)?)
}
// TODO: remove this whole block once withdrawals-processing is removed
#[cfg(not(feature = "withdrawals-processing"))]
{
#[allow(clippy::drop_non_drop)]
drop(bls_to_execution_change);
Ok(ObservationOutcome::AlreadyKnown)
}
@ -2798,6 +2872,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
if !payload_verification_status.is_optimistic()
&& block.slot() + EARLY_ATTESTER_CACHE_HISTORIC_SLOTS >= current_slot
{
let fork_choice_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_FORK_CHOICE);
match fork_choice.get_head(current_slot, &self.spec) {
// This block became the head, add it to the early attester cache.
Ok(new_head_root) if new_head_root == block_root => {
@ -2831,6 +2906,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
"error" => ?e
),
}
drop(fork_choice_timer);
}
drop(post_exec_timer);
@ -3525,6 +3601,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// signed. If we miss the cache or we're producing a block that conflicts with the head,
// fall back to getting the head from `slot - 1`.
let state_load_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_STATE_LOAD_TIMES);
// Atomically read some values from the head whilst avoiding holding cached head `Arc` any
// longer than necessary.
let (head_slot, head_block_root) = {
@ -3532,8 +3609,19 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
(head.head_slot(), head.head_block_root())
};
let (state, state_root_opt) = if head_slot < slot {
// Attempt an aggressive re-org if configured and the conditions are right.
if let Some(re_org_state) = self.get_state_for_re_org(slot, head_slot, head_block_root)
{
info!(
self.log,
"Proposing block to re-org current head";
"slot" => slot,
"head_to_reorg" => %head_block_root,
);
(re_org_state.pre_state, re_org_state.state_root)
}
// Normal case: proposing a block atop the current head. Use the snapshot cache.
if let Some(pre_state) = self
else if let Some(pre_state) = self
.snapshot_cache
.try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
.and_then(|snapshot_cache| {
@ -3573,6 +3661,400 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
Ok((state, state_root_opt))
}
/// Fetch the beacon state to use for producing a block if a 1-slot proposer re-org is viable.
///
/// This function will return `None` if proposer re-orgs are disabled.
fn get_state_for_re_org(
&self,
slot: Slot,
head_slot: Slot,
canonical_head: Hash256,
) -> Option<BlockProductionPreState<T::EthSpec>> {
let re_org_threshold = self.config.re_org_threshold?;
if self.spec.proposer_score_boost.is_none() {
warn!(
self.log,
"Ignoring proposer re-org configuration";
"reason" => "this network does not have proposer boost enabled"
);
return None;
}
let slot_delay = self
.slot_clock
.seconds_from_current_slot_start(self.spec.seconds_per_slot)
.or_else(|| {
warn!(
self.log,
"Not attempting re-org";
"error" => "unable to read slot clock"
);
None
})?;
// Attempt a proposer re-org if:
//
// 1. It seems we have time to propagate and still receive the proposer boost.
// 2. The current head block was seen late.
// 3. The `get_proposer_head` conditions from fork choice pass.
let proposing_on_time = slot_delay < max_re_org_slot_delay(self.spec.seconds_per_slot);
if !proposing_on_time {
debug!(
self.log,
"Not attempting re-org";
"reason" => "not proposing on time",
);
return None;
}
let head_late = self.block_observed_after_attestation_deadline(canonical_head, head_slot);
if !head_late {
debug!(
self.log,
"Not attempting re-org";
"reason" => "head not late"
);
return None;
}
// Is the current head weak and appropriate for re-orging?
let proposer_head_timer =
metrics::start_timer(&metrics::BLOCK_PRODUCTION_GET_PROPOSER_HEAD_TIMES);
let proposer_head = self
.canonical_head
.fork_choice_read_lock()
.get_proposer_head(
slot,
canonical_head,
re_org_threshold,
self.config.re_org_max_epochs_since_finalization,
)
.map_err(|e| match e {
ProposerHeadError::DoNotReOrg(reason) => {
debug!(
self.log,
"Not attempting re-org";
"reason" => %reason,
);
}
ProposerHeadError::Error(e) => {
warn!(
self.log,
"Not attempting re-org";
"error" => ?e,
);
}
})
.ok()?;
drop(proposer_head_timer);
let re_org_parent_block = proposer_head.parent_node.root;
// Only attempt a re-org if we hit the snapshot cache.
let pre_state = self
.snapshot_cache
.try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
.and_then(|snapshot_cache| {
snapshot_cache.get_state_for_block_production(re_org_parent_block)
})
.or_else(|| {
debug!(
self.log,
"Not attempting re-org";
"reason" => "missed snapshot cache",
"parent_block" => ?re_org_parent_block,
);
None
})?;
info!(
self.log,
"Attempting re-org due to weak head";
"weak_head" => ?canonical_head,
"parent" => ?re_org_parent_block,
"head_weight" => proposer_head.head_node.weight,
"threshold_weight" => proposer_head.re_org_weight_threshold
);
Some(pre_state)
}
/// Get the proposer index and `prev_randao` value for a proposal at slot `proposal_slot`.
///
/// The `proposer_head` may be the head block of `cached_head` or its parent. An error will
/// be returned for any other value.
pub fn get_pre_payload_attributes(
&self,
proposal_slot: Slot,
proposer_head: Hash256,
cached_head: &CachedHead<T::EthSpec>,
) -> Result<Option<PrePayloadAttributes>, Error> {
let proposal_epoch = proposal_slot.epoch(T::EthSpec::slots_per_epoch());
let head_block_root = cached_head.head_block_root();
let parent_block_root = cached_head.parent_block_root();
// The proposer head must be equal to the canonical head or its parent.
if proposer_head != head_block_root && proposer_head != parent_block_root {
warn!(
self.log,
"Unable to compute payload attributes";
"block_root" => ?proposer_head,
"head_block_root" => ?head_block_root,
);
return Ok(None);
}
// Compute the proposer index.
let head_epoch = cached_head.head_slot().epoch(T::EthSpec::slots_per_epoch());
let shuffling_decision_root = if head_epoch == proposal_epoch {
cached_head
.snapshot
.beacon_state
.proposer_shuffling_decision_root(proposer_head)?
} else {
proposer_head
};
let cached_proposer = self
.beacon_proposer_cache
.lock()
.get_slot::<T::EthSpec>(shuffling_decision_root, proposal_slot);
let proposer_index = if let Some(proposer) = cached_proposer {
proposer.index as u64
} else {
if head_epoch + 2 < proposal_epoch {
warn!(
self.log,
"Skipping proposer preparation";
"msg" => "this is a non-critical issue that can happen on unhealthy nodes or \
networks.",
"proposal_epoch" => proposal_epoch,
"head_epoch" => head_epoch,
);
// Don't skip the head forward more than two epochs. This avoids burdening an
// unhealthy node.
//
// Although this node might miss out on preparing for a proposal, they should still
// be able to propose. This will prioritise beacon chain health over efficient
// packing of execution blocks.
return Ok(None);
}
let (proposers, decision_root, _, fork) =
compute_proposer_duties_from_head(proposal_epoch, self)?;
let proposer_offset = (proposal_slot % T::EthSpec::slots_per_epoch()).as_usize();
let proposer = *proposers
.get(proposer_offset)
.ok_or(BeaconChainError::NoProposerForSlot(proposal_slot))?;
self.beacon_proposer_cache.lock().insert(
proposal_epoch,
decision_root,
proposers,
fork,
)?;
// It's possible that the head changes whilst computing these duties. If so, abandon
// this routine since the change of head would have also spawned another instance of
// this routine.
//
// Exit now, after updating the cache.
if decision_root != shuffling_decision_root {
warn!(
self.log,
"Head changed during proposer preparation";
);
return Ok(None);
}
proposer as u64
};
// Get the `prev_randao` value.
let prev_randao = if proposer_head == parent_block_root {
cached_head.parent_random()
} else {
cached_head.head_random()
}?;
Ok(Some(PrePayloadAttributes {
proposer_index,
prev_randao,
}))
}
/// Determine whether a fork choice update to the execution layer should be overridden.
///
/// This is *only* necessary when proposer re-orgs are enabled, because we have to prevent the
/// execution layer from enshrining the block we want to re-org as the head.
///
/// This function uses heuristics that align quite closely but not exactly with the re-org
/// conditions set out in `get_state_for_re_org` and `get_proposer_head`. The differences are
/// documented below.
fn overridden_forkchoice_update_params(
&self,
canonical_forkchoice_params: ForkchoiceUpdateParameters,
) -> Result<ForkchoiceUpdateParameters, Error> {
self.overridden_forkchoice_update_params_or_failure_reason(&canonical_forkchoice_params)
.or_else(|e| match e {
ProposerHeadError::DoNotReOrg(reason) => {
trace!(
self.log,
"Not suppressing fork choice update";
"reason" => %reason,
);
Ok(canonical_forkchoice_params)
}
ProposerHeadError::Error(e) => Err(e),
})
}
fn overridden_forkchoice_update_params_or_failure_reason(
&self,
canonical_forkchoice_params: &ForkchoiceUpdateParameters,
) -> Result<ForkchoiceUpdateParameters, ProposerHeadError<Error>> {
let _timer = metrics::start_timer(&metrics::FORK_CHOICE_OVERRIDE_FCU_TIMES);
// Never override if proposer re-orgs are disabled.
let re_org_threshold = self
.config
.re_org_threshold
.ok_or(DoNotReOrg::ReOrgsDisabled)?;
let head_block_root = canonical_forkchoice_params.head_root;
// Perform initial checks and load the relevant info from fork choice.
let info = self
.canonical_head
.fork_choice_read_lock()
.get_preliminary_proposer_head(
head_block_root,
re_org_threshold,
self.config.re_org_max_epochs_since_finalization,
)
.map_err(|e| e.map_inner_error(Error::ProposerHeadForkChoiceError))?;
// The slot of our potential re-org block is always 1 greater than the head block because we
// only attempt single-slot re-orgs.
let head_slot = info.head_node.slot;
let re_org_block_slot = head_slot + 1;
let fork_choice_slot = info.current_slot;
// If a re-orging proposal isn't made by the `max_re_org_slot_delay` then we give up
// and allow the fork choice update for the canonical head through so that we may attest
// correctly.
let current_slot_ok = if head_slot == fork_choice_slot {
true
} else if re_org_block_slot == fork_choice_slot {
self.slot_clock
.start_of(re_org_block_slot)
.and_then(|slot_start| {
let now = self.slot_clock.now_duration()?;
let slot_delay = now.saturating_sub(slot_start);
Some(slot_delay <= max_re_org_slot_delay(self.spec.seconds_per_slot))
})
.unwrap_or(false)
} else {
false
};
if !current_slot_ok {
return Err(DoNotReOrg::HeadDistance.into());
}
// Only attempt a re-org if we have a proposer registered for the re-org slot.
let proposing_at_re_org_slot = {
// The proposer shuffling has the same decision root as the next epoch attestation
// shuffling. We know our re-org block is not on the epoch boundary, so it has the
// same proposer shuffling as the head (but not necessarily the parent which may lie
// in the previous epoch).
let shuffling_decision_root = info
.head_node
.next_epoch_shuffling_id
.shuffling_decision_block;
let proposer_index = self
.beacon_proposer_cache
.lock()
.get_slot::<T::EthSpec>(shuffling_decision_root, re_org_block_slot)
.ok_or_else(|| {
debug!(
self.log,
"Fork choice override proposer shuffling miss";
"slot" => re_org_block_slot,
"decision_root" => ?shuffling_decision_root,
);
DoNotReOrg::NotProposing
})?
.index as u64;
self.execution_layer
.as_ref()
.ok_or(ProposerHeadError::Error(Error::ExecutionLayerMissing))?
.has_proposer_preparation_data_blocking(proposer_index)
};
if !proposing_at_re_org_slot {
return Err(DoNotReOrg::NotProposing.into());
}
// If the current slot is already equal to the proposal slot (or we are in the tail end of
// the prior slot), then check the actual weight of the head against the re-org threshold.
let head_weak = if fork_choice_slot == re_org_block_slot {
info.head_node.weight < info.re_org_weight_threshold
} else {
true
};
if !head_weak {
return Err(DoNotReOrg::HeadNotWeak {
head_weight: info.head_node.weight,
re_org_weight_threshold: info.re_org_weight_threshold,
}
.into());
}
// Check that the head block arrived late and is vulnerable to a re-org. This check is only
// a heuristic compared to the proper weight check in `get_state_for_re_org`, the reason
// being that we may have only *just* received the block and not yet processed any
// attestations for it. We also can't dequeue attestations for the block during the
// current slot, which would be necessary for determining its weight.
let head_block_late =
self.block_observed_after_attestation_deadline(head_block_root, head_slot);
if !head_block_late {
return Err(DoNotReOrg::HeadNotLate.into());
}
let parent_head_hash = info.parent_node.execution_status.block_hash();
let forkchoice_update_params = ForkchoiceUpdateParameters {
head_root: info.parent_node.root,
head_hash: parent_head_hash,
justified_hash: canonical_forkchoice_params.justified_hash,
finalized_hash: canonical_forkchoice_params.finalized_hash,
};
debug!(
self.log,
"Fork choice update overridden";
"canonical_head" => ?head_block_root,
"override" => ?info.parent_node.root,
"slot" => fork_choice_slot,
);
Ok(forkchoice_update_params)
}
/// Check if the block with `block_root` was observed after the attestation deadline of `slot`.
fn block_observed_after_attestation_deadline(&self, block_root: Hash256, slot: Slot) -> bool {
let block_delays = self.block_times_cache.read().get_block_delays(
block_root,
self.slot_clock
.start_of(slot)
.unwrap_or_else(|| Duration::from_secs(0)),
);
block_delays.observed.map_or(false, |delay| {
delay > self.slot_clock.unagg_attestation_production_delay()
})
}
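A worked example of the lateness heuristic (a sketch assuming the mainnet unaggregated attestation deadline of one third of a slot, i.e. 4 seconds):
use std::time::Duration;
fn main() {
    // Assumed mainnet values for illustration only.
    let attestation_deadline = Duration::from_secs(4); // 12s slot / 3
    let observed_delay = Duration::from_millis(4_500); // block first seen 4.5s into its slot
    // Mirrors `block_observed_after_attestation_deadline`: a block observed
    // after the deadline missed most attesters and is considered late.
    let head_late = observed_delay > attestation_deadline;
    assert!(head_late);
}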
/// Produce a block for some `slot` upon the given `state`.
///
/// Typically the `self.produce_block()` function should be used, instead of calling this
@ -4265,17 +4747,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// The `PayloadAttributes` are used by the EL to give it a look-ahead for preparing an optimal
/// set of transactions for a new `ExecutionPayload`.
///
/// This function will result in a call to `forkchoiceUpdated` on the EL if:
///
/// 1. We're in the tail-end of the slot (as defined by PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR)
/// 2. The head block is one slot (or less) behind the prepare slot (e.g., we're preparing for
/// the next slot and the block at the current slot is already known).
/// This function will result in a call to `forkchoiceUpdated` on the EL if we're in the
/// tail-end of the slot (as defined by `self.config.prepare_payload_lookahead`).
pub async fn prepare_beacon_proposer(
self: &Arc<Self>,
current_slot: Slot,
) -> Result<(), Error> {
let prepare_slot = current_slot + 1;
let prepare_epoch = prepare_slot.epoch(T::EthSpec::slots_per_epoch());
// There's no need to run the proposer preparation routine before the bellatrix fork.
if self.slot_is_prior_to_bellatrix(prepare_slot) {
@ -4293,127 +4771,68 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
return Ok(());
}
// Atomically read some values from the canonical head, whilst avoiding holding the cached
// head `Arc` any longer than necessary.
// Load the cached head and its forkchoice update parameters.
//
// Use a blocking task, since waiting on the canonical head read lock can block the
// core tokio executor.
let chain = self.clone();
let (head_slot, head_root, head_decision_root, head_random, forkchoice_update_params) =
self.spawn_blocking_handle(
let maybe_prep_data = self
.spawn_blocking_handle(
move || {
let cached_head = chain.canonical_head.cached_head();
let head_block_root = cached_head.head_block_root();
let decision_root = cached_head
.snapshot
.beacon_state
.proposer_shuffling_decision_root(head_block_root)?;
Ok::<_, Error>((
cached_head.head_slot(),
head_block_root,
decision_root,
cached_head.head_random()?,
cached_head.forkchoice_update_parameters(),
))
// Don't bother with proposer prep if the head is more than
// `PREPARE_PROPOSER_HISTORIC_EPOCHS` prior to the current slot.
//
// This prevents the routine from running during sync.
let head_slot = cached_head.head_slot();
if head_slot + T::EthSpec::slots_per_epoch() * PREPARE_PROPOSER_HISTORIC_EPOCHS
< current_slot
{
debug!(
chain.log,
"Head too old for proposer prep";
"head_slot" => head_slot,
"current_slot" => current_slot,
);
return Ok(None);
}
let canonical_fcu_params = cached_head.forkchoice_update_parameters();
let fcu_params =
chain.overridden_forkchoice_update_params(canonical_fcu_params)?;
let pre_payload_attributes = chain.get_pre_payload_attributes(
prepare_slot,
fcu_params.head_root,
&cached_head,
)?;
Ok::<_, Error>(Some((fcu_params, pre_payload_attributes)))
},
"prepare_beacon_proposer_fork_choice_read",
"prepare_beacon_proposer_head_read",
)
.await??;
let head_epoch = head_slot.epoch(T::EthSpec::slots_per_epoch());
// Don't bother with proposer prep if the head is more than
// `PREPARE_PROPOSER_HISTORIC_EPOCHS` prior to the current slot.
//
// This prevents the routine from running during sync.
if head_slot + T::EthSpec::slots_per_epoch() * PREPARE_PROPOSER_HISTORIC_EPOCHS
< current_slot
{
debug!(
self.log,
"Head too old for proposer prep";
"head_slot" => head_slot,
"current_slot" => current_slot,
);
return Ok(());
}
// Ensure that the shuffling decision root is correct relative to the epoch we wish to
// query.
let shuffling_decision_root = if head_epoch == prepare_epoch {
head_decision_root
} else {
head_root
};
// Read the proposer from the proposer cache.
let cached_proposer = self
.beacon_proposer_cache
.lock()
.get_slot::<T::EthSpec>(shuffling_decision_root, prepare_slot);
let proposer = if let Some(proposer) = cached_proposer {
proposer.index
} else {
if head_epoch + 2 < prepare_epoch {
warn!(
self.log,
"Skipping proposer preparation";
"msg" => "this is a non-critical issue that can happen on unhealthy nodes or \
networks.",
"prepare_epoch" => prepare_epoch,
"head_epoch" => head_epoch,
);
// Don't skip the head forward more than two epochs. This avoids burdening an
// unhealthy node.
//
// Although this node might miss out on preparing for a proposal, they should still
// be able to propose. This will prioritise beacon chain health over efficient
// packing of execution blocks.
let (forkchoice_update_params, pre_payload_attributes) =
if let Some((fcu, Some(pre_payload))) = maybe_prep_data {
(fcu, pre_payload)
} else {
// Appropriate log messages have already been logged above and in
// `get_pre_payload_attributes`.
return Ok(());
}
let (proposers, decision_root, _, fork) =
compute_proposer_duties_from_head(prepare_epoch, self)?;
let proposer_index = prepare_slot.as_usize() % (T::EthSpec::slots_per_epoch() as usize);
let proposer = *proposers
.get(proposer_index)
.ok_or(BeaconChainError::NoProposerForSlot(prepare_slot))?;
self.beacon_proposer_cache.lock().insert(
prepare_epoch,
decision_root,
proposers,
fork,
)?;
// It's possible that the head changes whilst computing these duties. If so, abandon
// this routine since the change of head would have also spawned another instance of
// this routine.
//
// Exit now, after updating the cache.
if decision_root != shuffling_decision_root {
warn!(
self.log,
"Head changed during proposer preparation";
);
return Ok(());
}
proposer
};
};
// If the execution layer doesn't have any proposer data for this validator then we assume
// it's not connected to this BN and no action is required.
let proposer = pre_payload_attributes.proposer_index;
if !execution_layer
.has_proposer_preparation_data(proposer as u64)
.has_proposer_preparation_data(proposer)
.await
{
return Ok(());
}
#[cfg(feature = "withdrawals")]
let withdrawals = match self.spec.fork_name_at_epoch(prepare_epoch) {
let withdrawals = match self.spec.fork_name_at_slot::<T::EthSpec>(prepare_slot) {
ForkName::Base | ForkName::Altair | ForkName::Merge => None,
ForkName::Capella | ForkName::Eip4844 => {
// We must use the advanced state because balances can change at epoch boundaries
@ -4422,27 +4841,28 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// Might implement caching here in the future..
let prepare_state = self
.state_at_slot(prepare_slot, StateSkipConfig::WithoutStateRoots)
.or_else(|e| {
.map_err(|e| {
error!(self.log, "State advance for withdrawals failed"; "error" => ?e);
Err(e)
e
})?;
Some(get_expected_withdrawals(&prepare_state, &self.spec))
}
}
.transpose()
.or_else(|e| {
.map_err(|e| {
error!(self.log, "Error preparing beacon proposer"; "error" => ?e);
Err(e)
e
})
.map(|withdrawals_opt| withdrawals_opt.map(|w| w.into()))
.map_err(Error::PrepareProposerFailed)?;
let head_root = forkchoice_update_params.head_root;
let payload_attributes = PayloadAttributes::new(
self.slot_clock
.start_of(prepare_slot)
.ok_or(Error::InvalidSlot(prepare_slot))?
.as_secs(),
head_random,
pre_payload_attributes.prev_randao,
execution_layer
.get_suggested_fee_recipient(proposer as u64)
.await,
@ -4456,23 +4876,24 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
self.log,
"Preparing beacon proposer";
"payload_attributes" => ?payload_attributes,
"head_root" => ?head_root,
"prepare_slot" => prepare_slot,
"validator" => proposer,
"parent_root" => ?head_root,
);
let already_known = execution_layer
.insert_proposer(prepare_slot, head_root, proposer as u64, payload_attributes)
.insert_proposer(prepare_slot, head_root, proposer, payload_attributes)
.await;
// Only push a log to the user if this is the first time we've seen this proposer for this
// slot.
if !already_known {
info!(
self.log,
"Prepared beacon proposer";
"already_known" => already_known,
"prepare_slot" => prepare_slot,
"validator" => proposer,
"parent_root" => ?head_root,
);
}
@ -4494,27 +4915,22 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
return Ok(());
};
// If either of the following are true, send a fork-choice update message to the
// EL:
//
// 1. We're in the tail-end of the slot (as defined by
// PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR)
// 2. The head block is one slot (or less) behind the prepare slot (e.g., we're
// preparing for the next slot and the block at the current slot is already
// known).
if till_prepare_slot
<= self.slot_clock.slot_duration() / PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR
|| head_slot + 1 >= prepare_slot
{
// If we are close enough to the proposal slot, send an fcU, which will have payload
// attributes filled in by the execution layer cache we just primed.
if till_prepare_slot <= self.config.prepare_payload_lookahead {
debug!(
self.log,
"Pushing update to prepare proposer";
"Sending forkchoiceUpdate for proposer prep";
"till_prepare_slot" => ?till_prepare_slot,
"prepare_slot" => prepare_slot
);
self.update_execution_engine_forkchoice(current_slot, forkchoice_update_params)
.await?;
self.update_execution_engine_forkchoice(
current_slot,
forkchoice_update_params,
OverrideForkchoiceUpdate::AlreadyApplied,
)
.await?;
}
Ok(())
@ -4523,7 +4939,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
pub async fn update_execution_engine_forkchoice(
self: &Arc<Self>,
current_slot: Slot,
params: ForkchoiceUpdateParameters,
input_params: ForkchoiceUpdateParameters,
override_forkchoice_update: OverrideForkchoiceUpdate,
) -> Result<(), Error> {
let next_slot = current_slot + 1;
@ -4545,6 +4962,19 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.as_ref()
.ok_or(Error::ExecutionLayerMissing)?;
// Determine whether to override the forkchoiceUpdated message if we want to re-org
// the current head at the next slot.
let params = if override_forkchoice_update == OverrideForkchoiceUpdate::Yes {
let chain = self.clone();
self.spawn_blocking_handle(
move || chain.overridden_forkchoice_update_params(input_params),
"update_execution_engine_forkchoice_override",
)
.await??
} else {
input_params
};
// Take the global lock for updating the execution engine fork choice.
//
// Whilst holding this lock we must:

View File

@ -7,6 +7,8 @@
use crate::{metrics, BeaconSnapshot};
use derivative::Derivative;
use fork_choice::ForkChoiceStore;
use proto_array::JustifiedBalances;
use safe_arith::ArithError;
use ssz_derive::{Decode, Encode};
use std::collections::BTreeSet;
use std::marker::PhantomData;
@ -31,6 +33,7 @@ pub enum Error {
MissingState(Hash256),
InvalidPersistedBytes(ssz::DecodeError),
BeaconStateError(BeaconStateError),
Arith(ArithError),
}
impl From<BeaconStateError> for Error {
@ -39,27 +42,15 @@ impl From<BeaconStateError> for Error {
}
}
impl From<ArithError> for Error {
fn from(e: ArithError) -> Self {
Error::Arith(e)
}
}
/// The number of validator balance sets that are cached within `BalancesCache`.
const MAX_BALANCE_CACHE_SIZE: usize = 4;
/// Returns the effective balances for every validator in the given `state`.
///
/// Any validator who is not active in the epoch of the given `state` is assigned a balance of
/// zero.
pub fn get_effective_balances<T: EthSpec>(state: &BeaconState<T>) -> Vec<u64> {
state
.validators()
.iter()
.map(|validator| {
if validator.is_active_at(state.current_epoch()) {
validator.effective_balance
} else {
0
}
})
.collect()
}
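The helper removed above is subsumed by `JustifiedBalances::from_justified_state`, which this commit pulls in from `proto_array`. A minimal sketch of the idea, assuming the struct caches the per-validator effective balances alongside derived totals (the field `effective_balances` appears later in this diff; the other fields and the stand-in `Validator` type are assumptions):
use safe_arith::{ArithError, SafeArith};
// Hypothetical mirror of `proto_array::JustifiedBalances`, illustrative only.
pub struct JustifiedBalances {
    pub effective_balances: Vec<u64>,
    pub total_effective_balance: u64,
    pub num_active_validators: u64,
}
// Stand-in for the relevant validator fields.
pub struct Validator {
    pub effective_balance: u64,
    pub is_active: bool,
}
pub fn from_justified_state(validators: &[Validator]) -> Result<JustifiedBalances, ArithError> {
    let mut total_effective_balance = 0u64;
    let mut num_active_validators = 0u64;
    let effective_balances = validators
        .iter()
        .map(|v| {
            if v.is_active {
                // `safe_arith` is the dependency this diff adds; overflow
                // surfaces as the new `Error::Arith` variant.
                total_effective_balance.safe_add_assign(v.effective_balance)?;
                num_active_validators.safe_add_assign(1)?;
                Ok(v.effective_balance)
            } else {
                // Inactive validators get a balance of zero, exactly as in
                // the removed helper above.
                Ok(0)
            }
        })
        .collect::<Result<Vec<u64>, ArithError>>()?;
    Ok(JustifiedBalances {
        effective_balances,
        total_effective_balance,
        num_active_validators,
    })
}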
#[superstruct(
variants(V8),
variant_attributes(derive(PartialEq, Clone, Debug, Encode, Decode)),
@ -113,7 +104,7 @@ impl BalancesCache {
let item = CacheItem {
block_root: epoch_boundary_root,
epoch,
balances: get_effective_balances(state),
balances: JustifiedBalances::from_justified_state(state)?.effective_balances,
};
if self.items.len() == MAX_BALANCE_CACHE_SIZE {
@ -152,7 +143,7 @@ pub struct BeaconForkChoiceStore<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<
time: Slot,
finalized_checkpoint: Checkpoint,
justified_checkpoint: Checkpoint,
justified_balances: Vec<u64>,
justified_balances: JustifiedBalances,
best_justified_checkpoint: Checkpoint,
unrealized_justified_checkpoint: Checkpoint,
unrealized_finalized_checkpoint: Checkpoint,
@ -181,7 +172,7 @@ where
pub fn get_forkchoice_store(
store: Arc<HotColdDB<E, Hot, Cold>>,
anchor: &BeaconSnapshot<E>,
) -> Self {
) -> Result<Self, Error> {
let anchor_state = &anchor.beacon_state;
let mut anchor_block_header = anchor_state.latest_block_header().clone();
if anchor_block_header.state_root == Hash256::zero() {
@ -194,13 +185,14 @@ where
root: anchor_root,
};
let finalized_checkpoint = justified_checkpoint;
let justified_balances = JustifiedBalances::from_justified_state(anchor_state)?;
Self {
Ok(Self {
store,
balances_cache: <_>::default(),
time: anchor_state.slot(),
justified_checkpoint,
justified_balances: anchor_state.balances().clone().into(),
justified_balances,
finalized_checkpoint,
best_justified_checkpoint: justified_checkpoint,
unrealized_justified_checkpoint: justified_checkpoint,
@ -208,7 +200,7 @@ where
proposer_boost_root: Hash256::zero(),
equivocating_indices: BTreeSet::new(),
_phantom: PhantomData,
}
})
}
/// Save the current state of `Self` to a `PersistedForkChoiceStore` which can be stored to the
@ -219,7 +211,7 @@ where
time: self.time,
finalized_checkpoint: self.finalized_checkpoint,
justified_checkpoint: self.justified_checkpoint,
justified_balances: self.justified_balances.clone(),
justified_balances: self.justified_balances.effective_balances.clone(),
best_justified_checkpoint: self.best_justified_checkpoint,
unrealized_justified_checkpoint: self.unrealized_justified_checkpoint,
unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint,
@ -233,13 +225,15 @@ where
persisted: PersistedForkChoiceStore,
store: Arc<HotColdDB<E, Hot, Cold>>,
) -> Result<Self, Error> {
let justified_balances =
JustifiedBalances::from_effective_balances(persisted.justified_balances)?;
Ok(Self {
store,
balances_cache: persisted.balances_cache,
time: persisted.time,
finalized_checkpoint: persisted.finalized_checkpoint,
justified_checkpoint: persisted.justified_checkpoint,
justified_balances: persisted.justified_balances,
justified_balances,
best_justified_checkpoint: persisted.best_justified_checkpoint,
unrealized_justified_checkpoint: persisted.unrealized_justified_checkpoint,
unrealized_finalized_checkpoint: persisted.unrealized_finalized_checkpoint,
@ -279,7 +273,7 @@ where
&self.justified_checkpoint
}
fn justified_balances(&self) -> &[u64] {
fn justified_balances(&self) -> &JustifiedBalances {
&self.justified_balances
}
@ -314,8 +308,9 @@ where
self.justified_checkpoint.root,
self.justified_checkpoint.epoch,
) {
// NOTE: could avoid this re-calculation by introducing a `PersistedCacheItem`.
metrics::inc_counter(&metrics::BALANCES_CACHE_HITS);
self.justified_balances = balances;
self.justified_balances = JustifiedBalances::from_effective_balances(balances)?;
} else {
metrics::inc_counter(&metrics::BALANCES_CACHE_MISSES);
let justified_block = self
@ -332,7 +327,7 @@ where
.map_err(Error::FailedToReadState)?
.ok_or_else(|| Error::MissingState(justified_block.state_root()))?;
self.justified_balances = get_effective_balances(&state);
self.justified_balances = JustifiedBalances::from_justified_state(&state)?;
}
Ok(())

View File

@ -24,6 +24,7 @@ use futures::channel::mpsc::Sender;
use kzg::Kzg;
use operation_pool::{OperationPool, PersistedOperationPool};
use parking_lot::RwLock;
use proto_array::ReOrgThreshold;
use slasher::Slasher;
use slog::{crit, error, info, Logger};
use slot_clock::{SlotClock, TestingSlotClock};
@ -34,8 +35,8 @@ use std::time::Duration;
use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp};
use task_executor::{ShutdownReason, TaskExecutor};
use types::{
BeaconBlock, BeaconState, ChainSpec, Checkpoint, EthSpec, Graffiti, Hash256, PublicKeyBytes,
Signature, SignedBeaconBlock, Slot,
BeaconBlock, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, Graffiti, Hash256,
PublicKeyBytes, Signature, SignedBeaconBlock, Slot,
};
/// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing
@ -164,6 +165,21 @@ where
self
}
/// Sets the proposer re-org threshold.
pub fn proposer_re_org_threshold(mut self, threshold: Option<ReOrgThreshold>) -> Self {
self.chain_config.re_org_threshold = threshold;
self
}
/// Sets the proposer re-org max epochs since finalization.
pub fn proposer_re_org_max_epochs_since_finalization(
mut self,
epochs_since_finalization: Epoch,
) -> Self {
self.chain_config.re_org_max_epochs_since_finalization = epochs_since_finalization;
self
}
/// Sets the store (database).
///
/// Should generally be called early in the build chain.
@ -363,7 +379,8 @@ where
let (genesis, updated_builder) = self.set_genesis_state(beacon_state)?;
self = updated_builder;
let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis);
let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis)
.map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?;
let current_slot = None;
let fork_choice = ForkChoice::from_anchor(
@ -481,7 +498,8 @@ where
beacon_state: weak_subj_state,
};
let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &snapshot);
let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &snapshot)
.map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?;
let current_slot = Some(snapshot.beacon_block.slot());
let fork_choice = ForkChoice::from_anchor(
@ -800,6 +818,8 @@ where
observed_attester_slashings: <_>::default(),
#[cfg(feature = "withdrawals-processing")]
observed_bls_to_execution_changes: <_>::default(),
latest_seen_finality_update: <_>::default(),
latest_seen_optimistic_update: <_>::default(),
eth1_chain: self.eth1_chain,
execution_layer: self.execution_layer,
genesis_validators_root,

View File

@ -34,7 +34,8 @@
use crate::persisted_fork_choice::PersistedForkChoice;
use crate::{
beacon_chain::{
BeaconForkChoice, BeaconStore, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, FORK_CHOICE_DB_KEY,
BeaconForkChoice, BeaconStore, OverrideForkchoiceUpdate,
BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, FORK_CHOICE_DB_KEY,
},
block_times_cache::BlockTimesCache,
events::ServerSentEventHandler,
@ -114,6 +115,11 @@ impl<E: EthSpec> CachedHead<E> {
self.snapshot.beacon_block_root
}
/// Returns the root of the parent of the head block.
pub fn parent_block_root(&self) -> Hash256 {
self.snapshot.beacon_block.parent_root()
}
/// Returns root of the `BeaconState` at the head of the beacon chain.
///
/// ## Note
@ -146,6 +152,21 @@ impl<E: EthSpec> CachedHead<E> {
Ok(root)
}
/// Returns the randao mix for the parent of the block at the head of the chain.
///
/// This is useful for re-orging the current head. The parent's RANDAO value is read from
/// the head's execution payload because it is unavailable in the beacon state's RANDAO mixes
/// array after being overwritten by the head block's RANDAO mix.
///
/// This will error if the head block is not execution-enabled (post Bellatrix).
pub fn parent_random(&self) -> Result<Hash256, BeaconStateError> {
self.snapshot
.beacon_block
.message()
.execution_payload()
.map(|payload| payload.prev_randao())
}
/// Returns the active validator count for the current epoch of the head state.
///
/// Should only return `None` if the caches have not been built on the head state (this should
@ -765,6 +786,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
new_cached_head: &CachedHead<T::EthSpec>,
new_head_proto_block: ProtoBlock,
) -> Result<(), Error> {
let _timer = metrics::start_timer(&metrics::FORK_CHOICE_AFTER_NEW_HEAD_TIMES);
let old_snapshot = &old_cached_head.snapshot;
let new_snapshot = &new_cached_head.snapshot;
let new_head_is_optimistic = new_head_proto_block
@ -902,6 +924,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
new_view: ForkChoiceView,
finalized_proto_block: ProtoBlock,
) -> Result<(), Error> {
let _timer = metrics::start_timer(&metrics::FORK_CHOICE_AFTER_FINALIZATION_TIMES);
let new_snapshot = &new_cached_head.snapshot;
let finalized_block_is_optimistic = finalized_proto_block
.execution_status
@ -1128,7 +1151,11 @@ fn spawn_execution_layer_updates<T: BeaconChainTypes>(
}
if let Err(e) = chain
.update_execution_engine_forkchoice(current_slot, forkchoice_update_params)
.update_execution_engine_forkchoice(
current_slot,
forkchoice_update_params,
OverrideForkchoiceUpdate::Yes,
)
.await
{
crit!(

View File

@ -1,9 +1,18 @@
pub use proto_array::CountUnrealizedFull;
pub use proto_array::{CountUnrealizedFull, ReOrgThreshold};
use serde_derive::{Deserialize, Serialize};
use types::Checkpoint;
use std::time::Duration;
use types::{Checkpoint, Epoch};
pub const DEFAULT_RE_ORG_THRESHOLD: ReOrgThreshold = ReOrgThreshold(20);
pub const DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION: Epoch = Epoch::new(2);
pub const DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT: u64 = 250;
/// Default fraction of a slot lookahead for payload preparation (12/3 = 4 seconds on mainnet).
pub const DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR: u32 = 3;
/// Fraction of a slot lookahead for fork choice in the state advance timer (500ms on mainnet).
pub const FORK_CHOICE_LOOKAHEAD_FACTOR: u32 = 24;
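Concretely, the mainnet arithmetic implied by the comments above (a quick hedged check, assuming 12-second slots):
use std::time::Duration;
fn main() {
    let slot_duration = Duration::from_secs(12);
    // DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR = 3 => 4s payload-prep lookahead.
    assert_eq!(slot_duration / 3, Duration::from_secs(4));
    // FORK_CHOICE_LOOKAHEAD_FACTOR = 24 => 500ms for the state advance timer.
    assert_eq!(slot_duration / 24, Duration::from_millis(500));
}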
#[derive(Debug, PartialEq, Eq, Clone, Deserialize, Serialize)]
pub struct ChainConfig {
/// Maximum number of slots to skip when importing a consensus message (e.g., block,
@ -21,6 +30,10 @@ pub struct ChainConfig {
pub enable_lock_timeouts: bool,
/// The max size of a message that can be sent over the network.
pub max_network_size: usize,
/// Maximum percentage of committee weight at which to attempt re-orging the canonical head.
pub re_org_threshold: Option<ReOrgThreshold>,
/// Maximum number of epochs since finalization for attempting a proposer re-org.
pub re_org_max_epochs_since_finalization: Epoch,
/// Number of milliseconds to wait for fork choice before proposing a block.
///
/// If set to 0 then block proposal will not wait for fork choice at all.
@ -47,6 +60,11 @@ pub struct ChainConfig {
pub count_unrealized_full: CountUnrealizedFull,
/// Optionally set timeout for calls to checkpoint sync endpoint.
pub checkpoint_sync_url_timeout: u64,
/// The offset before the start of a proposal slot at which payload attributes should be sent.
///
/// Low values are useful for execution engines which don't improve their payload after the
/// first call, and high values are useful for ensuring the EL is given ample notice.
pub prepare_payload_lookahead: Duration,
}
impl Default for ChainConfig {
@ -57,6 +75,8 @@ impl Default for ChainConfig {
reconstruct_historic_states: false,
enable_lock_timeouts: true,
max_network_size: 10 * 1_048_576, // 10M
re_org_threshold: Some(DEFAULT_RE_ORG_THRESHOLD),
re_org_max_epochs_since_finalization: DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION,
fork_choice_before_proposal_timeout_ms: DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT,
// Builder fallback configs that are set in `clap` will override these.
builder_fallback_skips: 3,
@ -68,6 +88,7 @@ impl Default for ChainConfig {
paranoid_block_proposal: false,
count_unrealized_full: CountUnrealizedFull::default(),
checkpoint_sync_url_timeout: 60,
prepare_payload_lookahead: Duration::from_secs(4),
}
}
}

View File

@ -206,6 +206,9 @@ pub enum BeaconChainError {
MissingPersistedForkChoice,
CommitteePromiseFailed(oneshot_broadcast::Error),
MaxCommitteePromises(usize),
BlsToExecutionChangeBadFork(ForkName),
InconsistentFork(InconsistentFork),
ProposerHeadForkChoiceError(fork_choice::Error<proto_array::Error>),
}
easy_from_to!(SlotProcessingError, BeaconChainError);
@ -229,6 +232,7 @@ easy_from_to!(ForkChoiceStoreError, BeaconChainError);
easy_from_to!(HistoricalBlockError, BeaconChainError);
easy_from_to!(StateAdvanceError, BeaconChainError);
easy_from_to!(BlockReplayError, BeaconChainError);
easy_from_to!(InconsistentFork, BeaconChainError);
#[derive(Debug)]
pub enum BlockProductionError {
@ -237,6 +241,7 @@ pub enum BlockProductionError {
UnableToProduceAtSlot(Slot),
SlotProcessingError(SlotProcessingError),
BlockProcessingError(BlockProcessingError),
ForkChoiceError(ForkChoiceError),
Eth1ChainError(Eth1ChainError),
BeaconStateError(BeaconStateError),
StateAdvanceError(StateAdvanceError),
@ -260,7 +265,6 @@ pub enum BlockProductionError {
FailedToReadFinalizedBlock(store::Error),
MissingFinalizedBlock(Hash256),
BlockTooLarge(usize),
ForkChoiceError(BeaconChainError),
ShuttingDown,
MissingSyncAggregate,
MissingExecutionPayload,
@ -277,3 +281,4 @@ easy_from_to!(BeaconStateError, BlockProductionError);
easy_from_to!(SlotProcessingError, BlockProductionError);
easy_from_to!(Eth1ChainError, BlockProductionError);
easy_from_to!(StateAdvanceError, BlockProductionError);
easy_from_to!(ForkChoiceError, BlockProductionError);

View File

@ -147,7 +147,8 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
beacon_state: finalized_state,
};
let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store.clone(), &finalized_snapshot);
let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store.clone(), &finalized_snapshot)
.map_err(|e| format!("Unable to reset fork choice store for revert: {e:?}"))?;
let mut fork_choice = ForkChoice::from_anchor(
fc_store,

View File

@ -23,9 +23,11 @@ pub mod fork_choice_signal;
pub mod fork_revert;
mod head_tracker;
pub mod historical_blocks;
pub mod light_client_finality_update_verification;
pub mod light_client_optimistic_update_verification;
pub mod kzg_utils;
pub mod merge_readiness;
mod metrics;
pub mod metrics;
pub mod migrate;
mod naive_aggregation_pool;
mod observed_aggregates;
@ -49,8 +51,8 @@ pub mod validator_pubkey_cache;
pub use self::beacon_chain::{
AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult,
CountUnrealized, ForkChoiceError, ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped,
INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON,
CountUnrealized, ForkChoiceError, OverrideForkchoiceUpdate, ProduceBlockVerification,
StateSkipConfig, WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON,
INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY,
};
pub use self::beacon_snapshot::BeaconSnapshot;

View File

@ -0,0 +1,135 @@
use crate::{
beacon_chain::MAXIMUM_GOSSIP_CLOCK_DISPARITY, BeaconChain, BeaconChainError, BeaconChainTypes,
};
use derivative::Derivative;
use slot_clock::SlotClock;
use std::time::Duration;
use strum::AsRefStr;
use types::{
light_client_update::Error as LightClientUpdateError, LightClientFinalityUpdate, Slot,
};
/// Returned when a light client finality update was not successfully verified. It might not have been verified for
/// one of two reasons:
///
/// - The light client finality message is malformed or inappropriate for the context (indicated by all variants
/// other than `BeaconChainError`).
/// - The application encountered an internal error whilst attempting to determine validity
/// (the `BeaconChainError` variant)
#[derive(Debug, AsRefStr)]
pub enum Error {
/// Light client finality update message with a lower or equal finalized_header slot already forwarded.
FinalityUpdateAlreadySeen,
/// The light client finality update was received before one-third of the slot duration had passed
/// (with respect to the gossip clock disparity and slot clock duration).
///
/// ## Peer scoring
///
/// Assuming the local clock is correct, the peer has sent an invalid message.
TooEarly,
/// Light client finality update message does not match the locally constructed one.
///
/// ## Peer Scoring
///
InvalidLightClientFinalityUpdate,
/// Signature slot start time is none.
SigSlotStartIsNone,
/// Failed to construct a LightClientFinalityUpdate from state.
FailedConstructingUpdate,
/// Beacon chain error occurred.
BeaconChainError(BeaconChainError),
LightClientUpdateError(LightClientUpdateError),
}
impl From<BeaconChainError> for Error {
fn from(e: BeaconChainError) -> Self {
Error::BeaconChainError(e)
}
}
impl From<LightClientUpdateError> for Error {
fn from(e: LightClientUpdateError) -> Self {
Error::LightClientUpdateError(e)
}
}
/// Wraps a `LightClientFinalityUpdate` that has been verified for propagation on the gossip network.
#[derive(Derivative)]
#[derivative(Clone(bound = "T: BeaconChainTypes"))]
pub struct VerifiedLightClientFinalityUpdate<T: BeaconChainTypes> {
light_client_finality_update: LightClientFinalityUpdate<T::EthSpec>,
seen_timestamp: Duration,
}
impl<T: BeaconChainTypes> VerifiedLightClientFinalityUpdate<T> {
/// Returns `Ok(Self)` if the `light_client_finality_update` is valid to be (re)published on the gossip
/// network.
pub fn verify(
light_client_finality_update: LightClientFinalityUpdate<T::EthSpec>,
chain: &BeaconChain<T>,
seen_timestamp: Duration,
) -> Result<Self, Error> {
let gossiped_finality_slot = light_client_finality_update.finalized_header.slot;
let one_third_slot_duration = Duration::new(chain.spec.seconds_per_slot / 3, 0);
let signature_slot = light_client_finality_update.signature_slot;
let start_time = chain.slot_clock.start_of(signature_slot);
let mut latest_seen_finality_update = chain.latest_seen_finality_update.lock();
let head = chain.canonical_head.cached_head();
let head_block = &head.snapshot.beacon_block;
let attested_block_root = head_block.message().parent_root();
let attested_block = chain
.get_blinded_block(&attested_block_root)?
.ok_or(Error::FailedConstructingUpdate)?;
let mut attested_state = chain
.get_state(&attested_block.state_root(), Some(attested_block.slot()))?
.ok_or(Error::FailedConstructingUpdate)?;
let finalized_block_root = attested_state.finalized_checkpoint().root;
let finalized_block = chain
.get_blinded_block(&finalized_block_root)?
.ok_or(Error::FailedConstructingUpdate)?;
let latest_seen_finality_update_slot = match latest_seen_finality_update.as_ref() {
Some(update) => update.finalized_header.slot,
None => Slot::new(0),
};
// verify that no other finality_update with a lower or equal
// finalized_header.slot was already forwarded on the network
if gossiped_finality_slot <= latest_seen_finality_update_slot {
return Err(Error::FinalityUpdateAlreadySeen);
}
// verify that enough time has passed for the block to have been propagated
match start_time {
Some(time) => {
if seen_timestamp + MAXIMUM_GOSSIP_CLOCK_DISPARITY < time + one_third_slot_duration
{
return Err(Error::TooEarly);
}
}
None => return Err(Error::SigSlotStartIsNone),
}
let head_state = &head.snapshot.beacon_state;
let finality_update = LightClientFinalityUpdate::new(
&chain.spec,
head_state,
head_block,
&mut attested_state,
&finalized_block,
)?;
// verify that the gossiped finality update is the same as the locally constructed one.
if finality_update != light_client_finality_update {
return Err(Error::InvalidLightClientFinalityUpdate);
}
*latest_seen_finality_update = Some(light_client_finality_update.clone());
Ok(Self {
light_client_finality_update,
seen_timestamp,
})
}
}
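The `TooEarly` gate above compares the receive time against one third of the signature slot. A worked example (hedged: it assumes 12-second mainnet slots and Lighthouse's 500ms MAXIMUM_GOSSIP_CLOCK_DISPARITY):
use std::time::Duration;
fn main() {
    let max_clock_disparity = Duration::from_millis(500); // assumed constant
    let one_third_slot = Duration::from_secs(12 / 3);
    let slot_start = Duration::from_secs(1_000); // arbitrary offset for illustration
    // Seen 3.4s into the signature slot: 3.9s < 4.0s, rejected as TooEarly.
    let seen = slot_start + Duration::from_millis(3_400);
    assert!(seen + max_clock_disparity < slot_start + one_third_slot);
    // Seen 3.6s in: 4.1s >= 4.0s, the timing check passes.
    let seen = slot_start + Duration::from_millis(3_600);
    assert!(seen + max_clock_disparity >= slot_start + one_third_slot);
}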

View File

@ -0,0 +1,125 @@
use crate::{
beacon_chain::MAXIMUM_GOSSIP_CLOCK_DISPARITY, BeaconChain, BeaconChainError, BeaconChainTypes,
};
use derivative::Derivative;
use slot_clock::SlotClock;
use std::time::Duration;
use strum::AsRefStr;
use types::{
light_client_update::Error as LightClientUpdateError, LightClientOptimisticUpdate, Slot,
};
/// Returned when a light client optimistic update was not successfully verified. It might not have been verified for
/// one of two reasons:
///
/// - The light client optimistic message is malformed or inappropriate for the context (indicated by all variants
/// other than `BeaconChainError`).
/// - The application encountered an internal error whilst attempting to determine validity
/// (the `BeaconChainError` variant)
#[derive(Debug, AsRefStr)]
pub enum Error {
/// Light client optimistic update message with a lower or equal optimistic_header slot already forwarded.
OptimisticUpdateAlreadySeen,
/// The light client optimistic update was received before one-third of the slot duration had passed
/// (with respect to the gossip clock disparity and slot clock duration).
///
/// ## Peer scoring
///
/// Assuming the local clock is correct, the peer has sent an invalid message.
TooEarly,
/// Light client optimistic update message does not match the locally constructed one.
///
/// ## Peer Scoring
///
InvalidLightClientOptimisticUpdate,
/// Signature slot start time is none.
SigSlotStartIsNone,
/// Failed to construct a LightClientOptimisticUpdate from state.
FailedConstructingUpdate,
/// Beacon chain error occurred.
BeaconChainError(BeaconChainError),
LightClientUpdateError(LightClientUpdateError),
}
impl From<BeaconChainError> for Error {
fn from(e: BeaconChainError) -> Self {
Error::BeaconChainError(e)
}
}
impl From<LightClientUpdateError> for Error {
fn from(e: LightClientUpdateError) -> Self {
Error::LightClientUpdateError(e)
}
}
/// Wraps a `LightClientOptimisticUpdate` that has been verified for propagation on the gossip network.
#[derive(Derivative)]
#[derivative(Clone(bound = "T: BeaconChainTypes"))]
pub struct VerifiedLightClientOptimisticUpdate<T: BeaconChainTypes> {
light_client_optimistic_update: LightClientOptimisticUpdate<T::EthSpec>,
seen_timestamp: Duration,
}
impl<T: BeaconChainTypes> VerifiedLightClientOptimisticUpdate<T> {
/// Returns `Ok(Self)` if the `light_client_optimistic_update` is valid to be (re)published on the gossip
/// network.
pub fn verify(
light_client_optimistic_update: LightClientOptimisticUpdate<T::EthSpec>,
chain: &BeaconChain<T>,
seen_timestamp: Duration,
) -> Result<Self, Error> {
let gossiped_optimistic_slot = light_client_optimistic_update.attested_header.slot;
let one_third_slot_duration = Duration::new(chain.spec.seconds_per_slot / 3, 0);
let signature_slot = light_client_optimistic_update.signature_slot;
let start_time = chain.slot_clock.start_of(signature_slot);
let mut latest_seen_optimistic_update = chain.latest_seen_optimistic_update.lock();
let head = chain.canonical_head.cached_head();
let head_block = &head.snapshot.beacon_block;
let attested_block_root = head_block.message().parent_root();
let attested_block = chain
.get_blinded_block(&attested_block_root)?
.ok_or(Error::FailedConstructingUpdate)?;
let attested_state = chain
.get_state(&attested_block.state_root(), Some(attested_block.slot()))?
.ok_or(Error::FailedConstructingUpdate)?;
let latest_seen_optimistic_update_slot = match latest_seen_optimistic_update.as_ref() {
Some(update) => update.attested_header.slot,
None => Slot::new(0),
};
// verify that no other optimistic_update with a lower or equal
// attested_header.slot was already forwarded on the network
if gossiped_optimistic_slot <= latest_seen_optimistic_update_slot {
return Err(Error::OptimisticUpdateAlreadySeen);
}
// verify that enough time has passed for the block to have been propagated
match start_time {
Some(time) => {
if seen_timestamp + MAXIMUM_GOSSIP_CLOCK_DISPARITY < time + one_third_slot_duration
{
return Err(Error::TooEarly);
}
}
None => return Err(Error::SigSlotStartIsNone),
}
let optimistic_update =
LightClientOptimisticUpdate::new(&chain.spec, head_block, &attested_state)?;
// verify that the gossiped optimistic update is the same as the locally constructed one.
if optimistic_update != light_client_optimistic_update {
return Err(Error::InvalidLightClientOptimisticUpdate);
}
*latest_seen_optimistic_update = Some(light_client_optimistic_update.clone());
Ok(Self {
light_client_optimistic_update,
seen_timestamp,
})
}
}
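Note the monotonicity rule enforced above: an optimistic update is only (re)published when its `attested_header.slot` strictly exceeds that of the latest update already seen, with "nothing seen yet" treated as slot 0. As a tiny sketch (illustrative names):

// Illustrative only: the de-duplication rule applied by the verifier above.
fn is_newer_than_latest_seen(gossiped_slot: u64, latest_seen_slot: Option<u64>) -> bool {
    gossiped_slot > latest_seen_slot.unwrap_or(0)
}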

View File

@ -77,6 +77,11 @@ lazy_static! {
"beacon_block_processing_attestation_observation_seconds",
"Time spent hashing and remembering all the attestations in the block"
);
pub static ref BLOCK_PROCESSING_FORK_CHOICE: Result<Histogram> = try_create_histogram_with_buckets(
"beacon_block_processing_fork_choice_seconds",
"Time spent running fork choice's `get_head` during block import",
exponential_buckets(1e-3, 2.0, 8)
);
pub static ref BLOCK_SYNC_AGGREGATE_SET_BITS: Result<IntGauge> = try_create_int_gauge(
"block_sync_aggregate_set_bits",
"The number of true bits in the last sync aggregate in a block"
@ -99,6 +104,11 @@ lazy_static! {
"beacon_block_production_fork_choice_seconds",
"Time taken to run fork choice before block production"
);
pub static ref BLOCK_PRODUCTION_GET_PROPOSER_HEAD_TIMES: Result<Histogram> = try_create_histogram_with_buckets(
"beacon_block_production_get_proposer_head_times",
"Time taken for fork choice to compute the proposer head before block production",
exponential_buckets(1e-3, 2.0, 8)
);
pub static ref BLOCK_PRODUCTION_STATE_LOAD_TIMES: Result<Histogram> = try_create_histogram(
"beacon_block_production_state_load_seconds",
"Time taken to load the base state for block production"
@ -322,10 +332,26 @@ lazy_static! {
"beacon_reorgs_total",
"Count of occasions fork choice has switched to a different chain"
);
pub static ref FORK_CHOICE_TIMES: Result<Histogram> =
try_create_histogram("beacon_fork_choice_seconds", "Full runtime of fork choice");
pub static ref FORK_CHOICE_FIND_HEAD_TIMES: Result<Histogram> =
try_create_histogram("beacon_fork_choice_find_head_seconds", "Full runtime of fork choice find_head function");
pub static ref FORK_CHOICE_TIMES: Result<Histogram> = try_create_histogram_with_buckets(
"beacon_fork_choice_seconds",
"Full runtime of fork choice",
linear_buckets(10e-3, 20e-3, 10)
);
pub static ref FORK_CHOICE_OVERRIDE_FCU_TIMES: Result<Histogram> = try_create_histogram_with_buckets(
"beacon_fork_choice_override_fcu_seconds",
"Time taken to compute the optional forkchoiceUpdated override",
exponential_buckets(1e-3, 2.0, 8)
);
pub static ref FORK_CHOICE_AFTER_NEW_HEAD_TIMES: Result<Histogram> = try_create_histogram_with_buckets(
"beacon_fork_choice_after_new_head_seconds",
"Time taken to run `after_new_head`",
exponential_buckets(1e-3, 2.0, 10)
);
pub static ref FORK_CHOICE_AFTER_FINALIZATION_TIMES: Result<Histogram> = try_create_histogram_with_buckets(
"beacon_fork_choice_after_finalization_seconds",
"Time taken to run `after_finalization`",
exponential_buckets(1e-3, 2.0, 10)
);
pub static ref FORK_CHOICE_PROCESS_BLOCK_TIMES: Result<Histogram> = try_create_histogram(
"beacon_fork_choice_process_block_seconds",
"Time taken to add a block and all attestations to fork choice"
@ -964,6 +990,24 @@ lazy_static! {
);
}
// Fifth lazy-static block is used to account for macro recursion limit.
lazy_static! {
/*
* Light server message verification
*/
pub static ref FINALITY_UPDATE_PROCESSING_SUCCESSES: Result<IntCounter> = try_create_int_counter(
"light_client_finality_update_verification_success_total",
"Number of light client finality updates verified for gossip"
);
pub static ref OPTIMISTIC_UPDATE_PROCESSING_SUCCESSES: Result<IntCounter> = try_create_int_counter(
"light_client_optimistic_update_verification_success_total",
"Number of light client optimistic updates verified for gossip"
);
}
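For reference, the bucket helpers used above expand as follows (values in seconds): `exponential_buckets(1e-3, 2.0, 8)` yields 0.001, 0.002, 0.004, …, 0.128 and `linear_buckets(10e-3, 20e-3, 10)` yields 0.010, 0.030, 0.050, …, 0.190. A sketch of the exponential layout, assuming the usual Prometheus semantics:

// Sketch: start * factor^i for i in 0..count, matching Prometheus'
// `exponential_buckets` (upper bounds of each histogram bucket).
fn exponential_buckets_sketch(start: f64, factor: f64, count: usize) -> Vec<f64> {
    (0..count).map(|i| start * factor.powi(i as i32)).collect()
}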
/// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot,
/// head state info, etc) and update the Prometheus `DEFAULT_REGISTRY`.
pub fn scrape_for_metrics<T: BeaconChainTypes>(beacon_chain: &BeaconChain<T>) {

View File

@ -5,13 +5,9 @@ use std::sync::Arc;
use task_executor::TaskExecutor;
use tokio::time::sleep;
/// At 12s slot times, this means that the payload preparation routine will run 4s before the start
/// of each slot (`12 / 3 = 4`).
pub const PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR: u32 = 3;
/// Spawns a routine which ensures the EL is provided advance notice of any block producers.
///
/// This routine will run once per slot, at `slot_duration / PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR`
/// This routine will run once per slot, at `chain.prepare_payload_lookahead()`
/// before the start of each slot.
///
/// The service will not be started if there is no `execution_layer` on the `chain`.
@ -38,8 +34,8 @@ async fn proposer_prep_service<T: BeaconChainTypes>(
loop {
match chain.slot_clock.duration_to_next_slot() {
Some(duration) => {
let additional_delay = slot_duration
- chain.slot_clock.slot_duration() / PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR;
let additional_delay =
slot_duration.saturating_sub(chain.config.prepare_payload_lookahead);
sleep(duration + additional_delay).await;
debug!(
@ -65,14 +61,11 @@ async fn proposer_prep_service<T: BeaconChainTypes>(
},
"proposer_prep_update",
);
continue;
}
None => {
error!(chain.log, "Failed to read slot clock");
// If we can't read the slot clock, just wait another slot.
sleep(slot_duration).await;
continue;
}
};
}
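The sleep arithmetic above is easiest to see with concrete numbers: if the loop wakes `T` seconds before the next slot boundary, sleeping `T + (slot_duration - lookahead)` wakes it exactly `lookahead` before the slot after next (e.g. 12 - 4 = 8 extra seconds for a 4 s lookahead on mainnet). A sketch with illustrative names:

use std::time::Duration;

// Illustrative: how long the proposer-prep loop sleeps so that it wakes
// `lookahead` before the start of the slot following the next one.
fn proposer_prep_sleep(
    duration_to_next_slot: Duration,
    slot_duration: Duration,
    lookahead: Duration,
) -> Duration {
    duration_to_next_slot + slot_duration.saturating_sub(lookahead)
}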

View File

@ -13,7 +13,10 @@ pub const DEFAULT_SNAPSHOT_CACHE_SIZE: usize = 4;
/// The minimum block delay to clone the state in the cache instead of removing it.
/// This helps keep block processing fast during re-orgs from late blocks.
const MINIMUM_BLOCK_DELAY_FOR_CLONE: Duration = Duration::from_secs(6);
fn minimum_block_delay_for_clone(seconds_per_slot: u64) -> Duration {
// If the block arrived at the attestation deadline or later, it might get re-orged.
Duration::from_secs(seconds_per_slot) / 3
}
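Under mainnet's 12-second slots this threshold works out to 4 s, i.e. any block arriving at or after the attestation deadline is cloned rather than evicted. A worked example as a test sketch:

#[test]
fn clone_threshold_worked_example() {
    // Illustrative: 12s slots give a 4s clone threshold; together with the
    // condition below, blocks delayed by 4s..=48s keep a cached snapshot.
    assert_eq!(
        minimum_block_delay_for_clone(12),
        std::time::Duration::from_secs(4)
    );
}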
/// This snapshot is to be used for verifying a child of `self.beacon_block`.
#[derive(Debug)]
@ -256,7 +259,7 @@ impl<T: EthSpec> SnapshotCache<T> {
if let Some(cache) = self.snapshots.get(i) {
// Avoid cloning the block during sync (when the `block_delay` is `None`).
if let Some(delay) = block_delay {
if delay >= MINIMUM_BLOCK_DELAY_FOR_CLONE
if delay >= minimum_block_delay_for_clone(spec.seconds_per_slot)
&& delay <= Duration::from_secs(spec.seconds_per_slot) * 4
|| block_slot > cache.beacon_block.slot() + 1
{

View File

@ -16,6 +16,7 @@
use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS;
use crate::{
beacon_chain::{ATTESTATION_CACHE_LOCK_TIMEOUT, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT},
chain_config::FORK_CHOICE_LOOKAHEAD_FACTOR,
snapshot_cache::StateAdvance,
BeaconChain, BeaconChainError, BeaconChainTypes,
};
@ -133,7 +134,7 @@ async fn state_advance_timer<T: BeaconChainTypes>(
// Run fork choice 23/24s of the way through the slot (11.5s on mainnet).
// We need to run after the state advance, so use the same condition as above.
let fork_choice_offset = slot_duration / 24;
let fork_choice_offset = slot_duration / FORK_CHOICE_LOOKAHEAD_FACTOR;
let fork_choice_instant = if duration_to_next_slot > state_advance_offset {
Instant::now() + duration_to_next_slot - fork_choice_offset
} else {
@ -224,8 +225,20 @@ async fn state_advance_timer<T: BeaconChainTypes>(
return;
}
// Re-compute the head, dequeuing attestations for the current slot early.
beacon_chain.recompute_head_at_slot(next_slot).await;
// Prepare proposers so that the node can send payload attributes in the case where
// it decides to abandon a proposer boost re-org.
if let Err(e) = beacon_chain.prepare_beacon_proposer(current_slot).await {
warn!(
log,
"Unable to prepare proposer with lookahead";
"error" => ?e,
"slot" => next_slot,
);
}
// Use a blocking task to avoid blocking the core executor whilst waiting for locks
// in `ForkChoiceSignalTx`.
beacon_chain.task_executor.clone().spawn_blocking(

View File

@ -32,7 +32,7 @@ use rand::SeedableRng;
use rayon::prelude::*;
use sensitive_url::SensitiveUrl;
use slog::Logger;
use slot_clock::TestingSlotClock;
use slot_clock::{SlotClock, TestingSlotClock};
use state_processing::per_block_processing::compute_timestamp_at_slot;
use state_processing::{
state_advance::{complete_state_advance, partial_state_advance},
@ -319,6 +319,12 @@ where
self
}
pub fn logger(mut self, log: Logger) -> Self {
self.log = log.clone();
self.runtime.set_logger(log);
self
}
/// This mutator will be run before the `store_mutator`.
pub fn initial_mutator(mut self, mutator: BoxedMutator<E, Hot, Cold>) -> Self {
assert!(
@ -524,10 +530,9 @@ pub struct BeaconChainHarness<T: BeaconChainTypes> {
pub rng: Mutex<StdRng>,
}
pub type HarnessAttestations<E> = Vec<(
Vec<(Attestation<E>, SubnetId)>,
Option<SignedAggregateAndProof<E>>,
)>;
pub type CommitteeAttestations<E> = Vec<(Attestation<E>, SubnetId)>;
pub type HarnessAttestations<E> =
Vec<(CommitteeAttestations<E>, Option<SignedAggregateAndProof<E>>)>;
pub type HarnessSyncContributions<E> = Vec<(
Vec<(SyncCommitteeMessage, usize)>,
@ -778,6 +783,21 @@ where
sk.sign(message)
}
/// Sign a beacon block using the proposer's key.
pub fn sign_beacon_block(
&self,
block: BeaconBlock<E>,
state: &BeaconState<E>,
) -> SignedBeaconBlock<E> {
let proposer_index = block.proposer_index() as usize;
block.sign(
&self.validator_keypairs[proposer_index].sk,
&state.fork(),
state.genesis_validators_root(),
&self.spec,
)
}
/// Produces an "unaggregated" attestation for the given `slot` and `index` that attests to
/// `beacon_block_root`. The provided `state` should match the `block.state_root` for the
/// `block` identified by `beacon_block_root`.
@ -851,13 +871,35 @@ where
state_root: Hash256,
head_block_root: SignedBeaconBlockHash,
attestation_slot: Slot,
) -> Vec<Vec<(Attestation<E>, SubnetId)>> {
) -> Vec<CommitteeAttestations<E>> {
self.make_unaggregated_attestations_with_limit(
attesting_validators,
state,
state_root,
head_block_root,
attestation_slot,
None,
)
.0
}
pub fn make_unaggregated_attestations_with_limit(
&self,
attesting_validators: &[usize],
state: &BeaconState<E>,
state_root: Hash256,
head_block_root: SignedBeaconBlockHash,
attestation_slot: Slot,
limit: Option<usize>,
) -> (Vec<CommitteeAttestations<E>>, Vec<usize>) {
let committee_count = state.get_committee_count_at_slot(state.slot()).unwrap();
let fork = self
.spec
.fork_at_epoch(attestation_slot.epoch(E::slots_per_epoch()));
state
let attesters = Mutex::new(vec![]);
let attestations = state
.get_beacon_committees_at_slot(attestation_slot)
.expect("should get committees")
.iter()
@ -869,6 +911,15 @@ where
if !attesting_validators.contains(validator_index) {
return None;
}
let mut attesters = attesters.lock();
if let Some(limit) = limit {
if attesters.len() >= limit {
return None;
}
}
attesters.push(*validator_index);
let mut attestation = self
.produce_unaggregated_attestation_for_block(
attestation_slot,
@ -909,9 +960,19 @@ where
Some((attestation, subnet_id))
})
.collect()
.collect::<Vec<_>>()
})
.collect()
.collect::<Vec<_>>();
let attesters = attesters.into_inner();
if let Some(limit) = limit {
assert_eq!(
limit,
attesters.len(),
"failed to generate `limit` attestations"
);
}
(attestations, attesters)
}
/// A list of sync messages for the given state.
@ -1004,13 +1065,38 @@ where
block_hash: SignedBeaconBlockHash,
slot: Slot,
) -> HarnessAttestations<E> {
let unaggregated_attestations = self.make_unaggregated_attestations(
self.make_attestations_with_limit(
attesting_validators,
state,
state_root,
block_hash,
slot,
);
None,
)
.0
}
/// Produce exactly `limit` attestations.
///
/// Return attestations and vec of validator indices that attested.
pub fn make_attestations_with_limit(
&self,
attesting_validators: &[usize],
state: &BeaconState<E>,
state_root: Hash256,
block_hash: SignedBeaconBlockHash,
slot: Slot,
limit: Option<usize>,
) -> (HarnessAttestations<E>, Vec<usize>) {
let (unaggregated_attestations, attesters) = self
.make_unaggregated_attestations_with_limit(
attesting_validators,
state,
state_root,
block_hash,
slot,
limit,
);
let fork = self.spec.fork_at_epoch(slot.epoch(E::slots_per_epoch()));
let aggregated_attestations: Vec<Option<SignedAggregateAndProof<E>>> =
@ -1029,7 +1115,7 @@ where
.committee
.iter()
.find(|&validator_index| {
if !attesting_validators.contains(validator_index) {
if !attesters.contains(validator_index) {
return false;
}
@ -1080,10 +1166,13 @@ where
})
.collect();
unaggregated_attestations
.into_iter()
.zip(aggregated_attestations)
.collect()
(
unaggregated_attestations
.into_iter()
.zip(aggregated_attestations)
.collect(),
attesters,
)
}
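A hypothetical use of the limited variant from a test (the harness and variable names here are assumed, not from this diff):

// Produce at most 10 attestations for the head block and record who attested.
let (attestations, attesters) = harness.make_attestations_with_limit(
    &all_validators,
    &state,
    state_root,
    head_block_root,
    slot,
    Some(10),
);
assert_eq!(attesters.len(), 10); // the helper also asserts this internally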
pub fn make_sync_contributions(
@ -1736,6 +1825,12 @@ where
self.chain.slot_clock.advance_slot();
}
/// Advance the clock to `lookahead` before the start of `slot`.
pub fn advance_to_slot_lookahead(&self, slot: Slot, lookahead: Duration) {
let time = self.chain.slot_clock.start_of(slot).unwrap() - lookahead;
self.chain.slot_clock.set_current_time(time);
}
/// Deprecated: Use make_block() instead
///
/// Returns a newly created block, signed by the proposer for the given slot.

View File

@ -4,7 +4,7 @@
use crate::metrics;
use parking_lot::RwLock;
use slog::{crit, debug, error, info, warn, Logger};
use slog::{crit, debug, info, Logger};
use slot_clock::SlotClock;
use state_processing::per_epoch_processing::{
errors::EpochProcessingError, EpochProcessingSummary,
@ -580,7 +580,7 @@ impl<T: EthSpec> ValidatorMonitor<T> {
);
}
if !attestation_miss.is_empty() {
error!(
info!(
self.log,
"Previous epoch attestation(s) missing";
"epoch" => prev_epoch,
@ -589,7 +589,7 @@ impl<T: EthSpec> ValidatorMonitor<T> {
}
if !head_miss.is_empty() {
warn!(
info!(
self.log,
"Previous epoch attestation(s) failed to match head";
"epoch" => prev_epoch,
@ -598,7 +598,7 @@ impl<T: EthSpec> ValidatorMonitor<T> {
}
if !target_miss.is_empty() {
warn!(
info!(
self.log,
"Previous epoch attestation(s) failed to match target";
"epoch" => prev_epoch,
@ -607,7 +607,7 @@ impl<T: EthSpec> ValidatorMonitor<T> {
}
if !suboptimal_inclusion.is_empty() {
warn!(
info!(
self.log,
"Previous epoch attestation(s) had sub-optimal inclusion delay";
"epoch" => prev_epoch,
@ -629,6 +629,14 @@ impl<T: EthSpec> ValidatorMonitor<T> {
self.validators.len()
}
/// Returns the `id`s of all monitored validators.
pub fn get_all_monitored_validators(&self) -> Vec<String> {
self.validators
.iter()
.map(|(_, val)| val.id.clone())
.collect()
}
/// If `self.auto_register == true`, add the `validator_index` to `self.monitored_validators`.
/// Otherwise, do nothing.
pub fn auto_register_local_validator(&mut self, validator_index: u64) {

View File

@ -53,6 +53,7 @@ async fn merge_with_terminal_block_hash_override() {
let harness = BeaconChainHarness::builder(E::default())
.spec(spec)
.logger(logging::test_logger())
.deterministic_keypairs(VALIDATOR_COUNT)
.fresh_ephemeral_store()
.mock_execution_layer()
@ -109,6 +110,7 @@ async fn base_altair_merge_with_terminal_block_after_fork() {
let harness = BeaconChainHarness::builder(E::default())
.spec(spec)
.logger(logging::test_logger())
.deterministic_keypairs(VALIDATOR_COUNT)
.fresh_ephemeral_store()
.mock_execution_layer()

View File

@ -7,8 +7,9 @@ use beacon_chain::otb_verification_service::{
use beacon_chain::{
canonical_head::{CachedHead, CanonicalHead},
test_utils::{BeaconChainHarness, EphemeralHarnessType},
BeaconChainError, BlockError, ExecutionPayloadError, NotifyExecutionLayer, StateSkipConfig,
WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON,
BeaconChainError, BlockError, ExecutionPayloadError, NotifyExecutionLayer,
OverrideForkchoiceUpdate, StateSkipConfig, WhenSlotSkipped,
INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON,
INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON,
};
use execution_layer::{
@ -19,6 +20,7 @@ use execution_layer::{
use fork_choice::{
CountUnrealized, Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus,
};
use logging::test_logger;
use proto_array::{Error as ProtoArrayError, ExecutionStatus};
use slot_clock::SlotClock;
use std::collections::HashMap;
@ -59,6 +61,7 @@ impl InvalidPayloadRig {
let harness = BeaconChainHarness::builder(MainnetEthSpec)
.spec(spec)
.logger(test_logger())
.deterministic_keypairs(VALIDATOR_COUNT)
.mock_execution_layer()
.fresh_ephemeral_store()
@ -386,7 +389,7 @@ impl InvalidPayloadRig {
.fork_choice_write_lock()
.get_head(self.harness.chain.slot().unwrap(), &self.harness.chain.spec)
{
Err(ForkChoiceError::ProtoArrayError(e)) if e.contains(s) => (),
Err(ForkChoiceError::ProtoArrayStringError(e)) if e.contains(s) => (),
other => panic!("expected {} error, got {:?}", s, other),
};
}
@ -981,6 +984,10 @@ async fn payload_preparation() {
)
.await;
rig.harness.advance_to_slot_lookahead(
next_slot,
rig.harness.chain.config.prepare_payload_lookahead,
);
rig.harness
.chain
.prepare_beacon_proposer(rig.harness.chain.slot().unwrap())
@ -1059,7 +1066,7 @@ async fn invalid_parent() {
&rig.harness.chain.spec,
CountUnrealized::True,
),
Err(ForkChoiceError::ProtoArrayError(message))
Err(ForkChoiceError::ProtoArrayStringError(message))
if message.contains(&format!(
"{:?}",
ProtoArrayError::ParentExecutionStatusIsInvalid {
@ -1126,7 +1133,11 @@ async fn payload_preparation_before_transition_block() {
.get_forkchoice_update_parameters();
rig.harness
.chain
.update_execution_engine_forkchoice(current_slot, forkchoice_update_params)
.update_execution_engine_forkchoice(
current_slot,
forkchoice_update_params,
OverrideForkchoiceUpdate::Yes,
)
.await
.unwrap();

View File

@ -775,7 +775,11 @@ where
runtime_context.executor.spawn(
async move {
let result = inner_chain
.update_execution_engine_forkchoice(current_slot, params)
.update_execution_engine_forkchoice(
current_slot,
params,
Default::default(),
)
.await;
// No need to exit early if setting the head fails. It will be set again if/when the

View File

@ -1,10 +1,11 @@
use crate::engines::ForkchoiceState;
pub use ethers_core::types::Transaction;
use ethers_core::utils::rlp::{Decodable, Rlp};
use ethers_core::utils::rlp::{self, Decodable, Rlp};
use http::deposit_methods::RpcError;
pub use json_structures::TransitionConfigurationV1;
pub use json_structures::{JsonWithdrawal, TransitionConfigurationV1};
use reqwest::StatusCode;
use serde::{Deserialize, Serialize};
use std::convert::TryFrom;
use strum::IntoStaticStr;
use superstruct::superstruct;
pub use types::{
@ -46,6 +47,7 @@ pub enum Error {
RequiredMethodUnsupported(&'static str),
UnsupportedForkVariant(String),
BadConversion(String),
RlpDecoderError(rlp::DecoderError),
}
impl From<reqwest::Error> for Error {
@ -79,6 +81,12 @@ impl From<builder_client::Error> for Error {
}
}
impl From<rlp::DecoderError> for Error {
fn from(e: rlp::DecoderError) -> Self {
Error::RlpDecoderError(e)
}
}
#[derive(Clone, Copy, Debug, PartialEq, IntoStaticStr)]
#[strum(serialize_all = "snake_case")]
pub enum PayloadStatusV1Status {
@ -159,12 +167,14 @@ pub struct ExecutionBlockWithTransactions<T: EthSpec> {
pub transactions: Vec<Transaction>,
#[cfg(feature = "withdrawals")]
#[superstruct(only(Capella, Eip4844))]
pub withdrawals: Vec<Withdrawal>,
pub withdrawals: Vec<JsonWithdrawal>,
}
impl<T: EthSpec> From<ExecutionPayload<T>> for ExecutionBlockWithTransactions<T> {
fn from(payload: ExecutionPayload<T>) -> Self {
match payload {
impl<T: EthSpec> TryFrom<ExecutionPayload<T>> for ExecutionBlockWithTransactions<T> {
type Error = Error;
fn try_from(payload: ExecutionPayload<T>) -> Result<Self, Error> {
let json_payload = match payload {
ExecutionPayload::Merge(block) => Self::Merge(ExecutionBlockWithTransactionsMerge {
parent_hash: block.parent_hash,
fee_recipient: block.fee_recipient,
@ -183,8 +193,7 @@ impl<T: EthSpec> From<ExecutionPayload<T>> for ExecutionBlockWithTransactions<T>
.transactions
.iter()
.map(|tx| Transaction::decode(&Rlp::new(tx)))
.collect::<Result<Vec<_>, _>>()
.unwrap_or_else(|_| Vec::new()),
.collect::<Result<Vec<_>, _>>()?,
}),
ExecutionPayload::Capella(block) => {
Self::Capella(ExecutionBlockWithTransactionsCapella {
@ -205,10 +214,12 @@ impl<T: EthSpec> From<ExecutionPayload<T>> for ExecutionBlockWithTransactions<T>
.transactions
.iter()
.map(|tx| Transaction::decode(&Rlp::new(tx)))
.collect::<Result<Vec<_>, _>>()
.unwrap_or_else(|_| Vec::new()),
.collect::<Result<Vec<_>, _>>()?,
#[cfg(feature = "withdrawals")]
withdrawals: block.withdrawals.into(),
withdrawals: Vec::from(block.withdrawals)
.into_iter()
.map(|withdrawal| withdrawal.into())
.collect(),
})
}
ExecutionPayload::Eip4844(block) => {
@ -231,13 +242,16 @@ impl<T: EthSpec> From<ExecutionPayload<T>> for ExecutionBlockWithTransactions<T>
.transactions
.iter()
.map(|tx| Transaction::decode(&Rlp::new(tx)))
.collect::<Result<Vec<_>, _>>()
.unwrap_or_else(|_| Vec::new()),
.collect::<Result<Vec<_>, _>>()?,
#[cfg(feature = "withdrawals")]
withdrawals: block.withdrawals.into(),
withdrawals: Vec::from(block.withdrawals)
.into_iter()
.map(|withdrawal| withdrawal.into())
.collect(),
})
}
}
};
Ok(json_payload)
}
}

View File

@ -666,14 +666,41 @@ impl HttpJsonRpc {
pub async fn get_block_by_hash_with_txns<T: EthSpec>(
&self,
block_hash: ExecutionBlockHash,
fork: ForkName,
) -> Result<Option<ExecutionBlockWithTransactions<T>>, Error> {
let params = json!([block_hash, true]);
self.rpc_request(
ETH_GET_BLOCK_BY_HASH,
params,
ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier,
)
.await
Ok(Some(match fork {
ForkName::Merge => ExecutionBlockWithTransactions::Merge(
self.rpc_request(
ETH_GET_BLOCK_BY_HASH,
params,
ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier,
)
.await?,
),
ForkName::Capella => ExecutionBlockWithTransactions::Capella(
self.rpc_request(
ETH_GET_BLOCK_BY_HASH,
params,
ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier,
)
.await?,
),
ForkName::Eip4844 => ExecutionBlockWithTransactions::Eip4844(
self.rpc_request(
ETH_GET_BLOCK_BY_HASH,
params,
ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier,
)
.await?,
),
ForkName::Base | ForkName::Altair => {
return Err(Error::UnsupportedForkVariant(format!(
"called get_block_by_hash_with_txns with fork {:?}",
fork
)))
}
}))
}
pub async fn new_payload_v1<T: EthSpec>(

View File

@ -36,7 +36,7 @@ pub struct JsonResponseBody {
pub id: serde_json::Value,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
#[serde(transparent)]
pub struct TransparentJsonPayloadId(#[serde(with = "eth2_serde_utils::bytes_8_hex")] pub PayloadId);
@ -174,7 +174,7 @@ impl<T: EthSpec> JsonExecutionPayload<T> {
.collect::<Vec<_>>()
.into()
})
.ok_or(Error::BadConversion("Null withdrawal field converting JsonExecutionPayloadV2 -> ExecutionPayloadCapella".to_string()))?
.ok_or_else(|| Error::BadConversion("Null withdrawal field converting JsonExecutionPayloadV2 -> ExecutionPayloadCapella".to_string()))?
})),
ForkName::Eip4844 => Ok(ExecutionPayload::Eip4844(ExecutionPayloadEip4844 {
parent_hash: v2.parent_hash,
@ -189,7 +189,7 @@ impl<T: EthSpec> JsonExecutionPayload<T> {
timestamp: v2.timestamp,
extra_data: v2.extra_data,
base_fee_per_gas: v2.base_fee_per_gas,
excess_data_gas: v2.excess_data_gas.ok_or(Error::BadConversion("Null `excess_data_gas` field converting JsonExecutionPayloadV2 -> ExecutionPayloadEip4844".to_string()))?,
excess_data_gas: v2.excess_data_gas.ok_or_else(|| Error::BadConversion("Null `excess_data_gas` field converting JsonExecutionPayloadV2 -> ExecutionPayloadEip4844".to_string()))?,
block_hash: v2.block_hash,
transactions: v2.transactions,
#[cfg(feature = "withdrawals")]
@ -202,7 +202,7 @@ impl<T: EthSpec> JsonExecutionPayload<T> {
.collect::<Vec<_>>()
.into()
})
.ok_or(Error::BadConversion("Null withdrawal field converting JsonExecutionPayloadV2 -> ExecutionPayloadEip4844".to_string()))?
.ok_or_else(|| Error::BadConversion("Null withdrawal field converting JsonExecutionPayloadV2 -> ExecutionPayloadEip4844".to_string()))?
})),
_ => Err(Error::UnsupportedForkVariant(format!("Unsupported conversion from JsonExecutionPayloadV2 for {}", fork_name))),
}
@ -322,7 +322,7 @@ impl<T: EthSpec> TryFrom<ExecutionPayload<T>> for JsonExecutionPayloadV2<T> {
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct JsonWithdrawal {
#[serde(with = "eth2_serde_utils::u64_hex_be")]
@ -360,13 +360,13 @@ impl From<JsonWithdrawal> for Withdrawal {
#[superstruct(
variants(V1, V2),
variant_attributes(
derive(Clone, Debug, PartialEq, Serialize, Deserialize),
derive(Debug, Clone, PartialEq, Serialize, Deserialize),
serde(rename_all = "camelCase")
),
cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"),
partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant")
)]
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(untagged)]
pub struct JsonPayloadAttributes {
#[serde(with = "eth2_serde_utils::u64_hex_be")]
@ -428,7 +428,7 @@ pub struct JsonBlobsBundle<T: EthSpec> {
pub blobs: VariableList<Blob<T>, T::MaxBlobsPerBlock>,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct JsonForkchoiceStateV1 {
pub head_block_hash: ExecutionBlockHash,
@ -481,7 +481,7 @@ pub enum JsonPayloadStatusV1Status {
InvalidBlockHash,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct JsonPayloadStatusV1 {
pub status: JsonPayloadStatusV1Status,
@ -546,7 +546,7 @@ impl From<JsonPayloadStatusV1> for PayloadStatusV1 {
}
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct JsonForkchoiceUpdatedV1Response {
pub payload_status: JsonPayloadStatusV1,

View File

@ -349,7 +349,7 @@ impl Engine {
impl PayloadIdCacheKey {
fn new(head_block_hash: &ExecutionBlockHash, attributes: &PayloadAttributes) -> Self {
Self {
head_block_hash: head_block_hash.clone(),
head_block_hash: *head_block_hash,
payload_attributes: attributes.clone(),
}
}

View File

@ -584,6 +584,16 @@ impl<T: EthSpec> ExecutionLayer<T> {
.contains_key(&proposer_index)
}
/// Check if a proposer is registered as a local validator, *from a synchronous context*.
///
/// This method MUST NOT be called from an async task.
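/// (Tokio's `blocking_lock` panics if invoked from within an async execution context.)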
pub fn has_proposer_preparation_data_blocking(&self, proposer_index: u64) -> bool {
self.inner
.proposer_preparation_data
.blocking_lock()
.contains_key(&proposer_index)
}
/// Returns the fee-recipient address that should be used to build a block
pub async fn get_suggested_fee_recipient(&self, proposer_index: u64) -> Address {
if let Some(preparation_data_entry) =
@ -1232,12 +1242,14 @@ impl<T: EthSpec> ExecutionLayer<T> {
&[metrics::FORKCHOICE_UPDATED],
);
trace!(
debug!(
self.log(),
"Issuing engine_forkchoiceUpdated";
"finalized_block_hash" => ?finalized_block_hash,
"justified_block_hash" => ?justified_block_hash,
"head_block_hash" => ?head_block_hash,
"head_block_root" => ?head_block_root,
"current_slot" => current_slot,
);
let next_slot = current_slot + 1;
@ -1559,10 +1571,11 @@ impl<T: EthSpec> ExecutionLayer<T> {
pub async fn get_payload_by_block_hash(
&self,
hash: ExecutionBlockHash,
fork: ForkName,
) -> Result<Option<ExecutionPayload<T>>, Error> {
self.engine()
.request(|engine| async move {
self.get_payload_by_block_hash_from_engine(engine, hash)
self.get_payload_by_block_hash_from_engine(engine, hash, fork)
.await
})
.await
@ -1574,15 +1587,26 @@ impl<T: EthSpec> ExecutionLayer<T> {
&self,
engine: &Engine,
hash: ExecutionBlockHash,
fork: ForkName,
) -> Result<Option<ExecutionPayload<T>>, ApiError> {
let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_GET_PAYLOAD_BY_BLOCK_HASH);
if hash == ExecutionBlockHash::zero() {
// FIXME: how to handle forks properly here?
return Ok(Some(ExecutionPayloadMerge::default().into()));
return match fork {
ForkName::Merge => Ok(Some(ExecutionPayloadMerge::default().into())),
ForkName::Capella => Ok(Some(ExecutionPayloadCapella::default().into())),
ForkName::Eip4844 => Ok(Some(ExecutionPayloadEip4844::default().into())),
ForkName::Base | ForkName::Altair => Err(ApiError::UnsupportedForkVariant(
format!("called get_payload_by_block_hash_from_engine with {}", fork),
)),
};
}
let block = if let Some(block) = engine.api.get_block_by_hash_with_txns::<T>(hash).await? {
let block = if let Some(block) = engine
.api
.get_block_by_hash_with_txns::<T>(hash, fork)
.await?
{
block
} else {
return Ok(None);
@ -1591,7 +1615,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
let transactions = VariableList::new(
block
.transactions()
.into_iter()
.iter()
.map(|transaction| VariableList::new(transaction.rlp().to_vec()))
.collect::<Result<_, _>>()
.map_err(ApiError::DeserializeTransaction)?,
@ -1619,8 +1643,14 @@ impl<T: EthSpec> ExecutionLayer<T> {
}
ExecutionBlockWithTransactions::Capella(capella_block) => {
#[cfg(feature = "withdrawals")]
let withdrawals = VariableList::new(capella_block.withdrawals.clone())
.map_err(ApiError::DeserializeWithdrawals)?;
let withdrawals = VariableList::new(
capella_block
.withdrawals
.into_iter()
.map(|w| w.into())
.collect(),
)
.map_err(ApiError::DeserializeWithdrawals)?;
ExecutionPayload::Capella(ExecutionPayloadCapella {
parent_hash: capella_block.parent_hash,
@ -1643,8 +1673,14 @@ impl<T: EthSpec> ExecutionLayer<T> {
}
ExecutionBlockWithTransactions::Eip4844(eip4844_block) => {
#[cfg(feature = "withdrawals")]
let withdrawals = VariableList::new(eip4844_block.withdrawals.clone())
.map_err(ApiError::DeserializeWithdrawals)?;
let withdrawals = VariableList::new(
eip4844_block
.withdrawals
.into_iter()
.map(|w| w.into())
.collect(),
)
.map_err(ApiError::DeserializeWithdrawals)?;
ExecutionPayload::Eip4844(ExecutionPayloadEip4844 {
parent_hash: eip4844_block.parent_hash,
@ -1714,7 +1750,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
&metrics::EXECUTION_LAYER_BUILDER_REVEAL_PAYLOAD_OUTCOME,
&[metrics::FAILURE],
);
crit!(
error!(
self.log(),
"Builder failed to reveal payload";
"info" => "this relay failure may cause a missed proposal",
@ -1920,6 +1956,20 @@ mod test {
.await;
}
#[tokio::test]
async fn test_forked_terminal_block() {
let runtime = TestRuntime::default();
let (mock, block_hash) = MockExecutionLayer::default_params(runtime.task_executor.clone())
.move_to_terminal_block()
.produce_forked_pow_block();
assert!(mock
.el
.is_valid_terminal_pow_block_hash(block_hash, &mock.spec)
.await
.unwrap()
.unwrap());
}
#[tokio::test]
async fn finds_valid_terminal_block_hash() {
let runtime = TestRuntime::default();

View File

@ -76,7 +76,7 @@ impl<T: EthSpec> Block<T> {
pub fn as_execution_block_with_tx(&self) -> Option<ExecutionBlockWithTransactions<T>> {
match self {
Block::PoS(payload) => Some(payload.clone().into()),
Block::PoS(payload) => Some(payload.clone().try_into().unwrap()),
Block::PoW(_) => None,
}
}
@ -92,13 +92,15 @@ pub struct PoWBlock {
pub timestamp: u64,
}
#[derive(Clone)]
#[derive(Debug, Clone)]
pub struct ExecutionBlockGenerator<T: EthSpec> {
/*
* Common database
*/
head_block: Option<Block<T>>,
finalized_block_hash: Option<ExecutionBlockHash>,
blocks: HashMap<ExecutionBlockHash, Block<T>>,
block_hashes: HashMap<u64, ExecutionBlockHash>,
block_hashes: HashMap<u64, Vec<ExecutionBlockHash>>,
/*
* PoW block parameters
*/
@ -120,6 +122,8 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
terminal_block_hash: ExecutionBlockHash,
) -> Self {
let mut gen = Self {
head_block: <_>::default(),
finalized_block_hash: <_>::default(),
blocks: <_>::default(),
block_hashes: <_>::default(),
terminal_total_difficulty,
@ -136,13 +140,7 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
}
pub fn latest_block(&self) -> Option<Block<T>> {
let hash = *self
.block_hashes
.iter()
.max_by_key(|(number, _)| *number)
.map(|(_, hash)| hash)?;
self.block_by_hash(hash)
self.head_block.clone()
}
pub fn latest_execution_block(&self) -> Option<ExecutionBlock> {
@ -151,8 +149,18 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
}
pub fn block_by_number(&self, number: u64) -> Option<Block<T>> {
let hash = *self.block_hashes.get(&number)?;
self.block_by_hash(hash)
// Get the latest canonical head block
let mut latest_block = self.latest_block()?;
loop {
let block_number = latest_block.block_number();
if block_number < number {
return None;
}
if block_number == number {
return Some(latest_block);
}
latest_block = self.block_by_hash(latest_block.parent_hash())?;
}
}
pub fn execution_block_by_number(&self, number: u64) -> Option<ExecutionBlock> {
@ -213,10 +221,16 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
}
pub fn insert_pow_block(&mut self, block_number: u64) -> Result<(), String> {
if let Some(finalized_block_hash) = self.finalized_block_hash {
return Err(format!(
"terminal block {} has been finalized. PoW chain has stopped building",
finalized_block_hash
));
}
let parent_hash = if block_number == 0 {
ExecutionBlockHash::zero()
} else if let Some(hash) = self.block_hashes.get(&(block_number - 1)) {
*hash
} else if let Some(block) = self.block_by_number(block_number - 1) {
block.block_hash()
} else {
return Err(format!(
"parent with block number {} not found",
@ -231,42 +245,102 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
parent_hash,
)?;
self.insert_block(Block::PoW(block))
}
// Insert block into block tree
self.insert_block(Block::PoW(block))?;
pub fn insert_block(&mut self, block: Block<T>) -> Result<(), String> {
if self.blocks.contains_key(&block.block_hash()) {
return Err(format!("{:?} is already known", block.block_hash()));
} else if self.block_hashes.contains_key(&block.block_number()) {
return Err(format!(
"block {} is already known, forking is not supported",
block.block_number()
));
} else if block.block_number() != 0 && !self.blocks.contains_key(&block.parent_hash()) {
return Err(format!("parent block {:?} is unknown", block.parent_hash()));
// Set head
if let Some(head_total_difficulty) =
self.head_block.as_ref().and_then(|b| b.total_difficulty())
{
if block.total_difficulty >= head_total_difficulty {
self.head_block = Some(Block::PoW(block));
}
} else {
self.head_block = Some(Block::PoW(block));
}
self.insert_block_without_checks(block)
}
pub fn insert_block_without_checks(&mut self, block: Block<T>) -> Result<(), String> {
self.block_hashes
.insert(block.block_number(), block.block_hash());
self.blocks.insert(block.block_hash(), block);
Ok(())
}
/// Insert a PoW block given the parent hash.
///
/// Returns `Ok(hash)` of the inserted block.
/// Returns an error if the `parent_hash` does not exist in the block tree or
/// if the parent block is the terminal block.
pub fn insert_pow_block_by_hash(
&mut self,
parent_hash: ExecutionBlockHash,
unique_id: u64,
) -> Result<ExecutionBlockHash, String> {
let parent_block = self.block_by_hash(parent_hash).ok_or_else(|| {
format!(
"Block corresponding to parent hash does not exist: {}",
parent_hash
)
})?;
let mut block = generate_pow_block(
self.terminal_total_difficulty,
self.terminal_block_number,
parent_block.block_number() + 1,
parent_hash,
)?;
// Hack the block hash to make this block distinct from any other block with a different
// `unique_id` (the default is 0).
block.block_hash = ExecutionBlockHash::from_root(Hash256::from_low_u64_be(unique_id));
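// Recompute the final hash over the whole block: the seeded `block_hash`
// field feeds into `tree_hash_root`, so different `unique_id`s still yield
// distinct final hashes.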
block.block_hash = ExecutionBlockHash::from_root(block.tree_hash_root());
let hash = self.insert_block(Block::PoW(block))?;
// Set head
if let Some(head_total_difficulty) =
self.head_block.as_ref().and_then(|b| b.total_difficulty())
{
if block.total_difficulty >= head_total_difficulty {
self.head_block = Some(Block::PoW(block));
}
} else {
self.head_block = Some(Block::PoW(block));
}
Ok(hash)
}
pub fn insert_block(&mut self, block: Block<T>) -> Result<ExecutionBlockHash, String> {
if self.blocks.contains_key(&block.block_hash()) {
return Err(format!("{:?} is already known", block.block_hash()));
} else if block.parent_hash() != ExecutionBlockHash::zero()
&& !self.blocks.contains_key(&block.parent_hash())
{
return Err(format!("parent block {:?} is unknown", block.parent_hash()));
}
Ok(self.insert_block_without_checks(block))
}
pub fn insert_block_without_checks(&mut self, block: Block<T>) -> ExecutionBlockHash {
let block_hash = block.block_hash();
self.block_hashes
.entry(block.block_number())
.or_insert_with(Vec::new)
.push(block_hash);
self.blocks.insert(block_hash, block);
block_hash
}
pub fn modify_last_block(&mut self, block_modifier: impl FnOnce(&mut Block<T>)) {
if let Some((last_block_hash, block_number)) =
self.block_hashes.keys().max().and_then(|block_number| {
self.block_hashes
.get(block_number)
.map(|block| (block, *block_number))
if let Some(last_block_hash) = self
.block_hashes
.iter_mut()
.max_by_key(|(block_number, _)| *block_number)
.and_then(|(_, block_hashes)| {
// Remove block hash, we will re-insert with the new block hash after modifying it.
block_hashes.pop()
})
{
let mut block = self.blocks.remove(last_block_hash).unwrap();
let mut block = self.blocks.remove(&last_block_hash).unwrap();
block_modifier(&mut block);
// Update the block hash after modifying the block
match &mut block {
Block::PoW(b) => b.block_hash = ExecutionBlockHash::from_root(b.tree_hash_root()),
@ -274,8 +348,17 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
*b.block_hash_mut() = ExecutionBlockHash::from_root(b.tree_hash_root())
}
}
self.block_hashes.insert(block_number, block.block_hash());
self.blocks.insert(block.block_hash(), block);
// Update head.
if self
.head_block
.as_ref()
.map_or(true, |head| head.block_hash() == last_block_hash)
{
self.head_block = Some(block.clone());
}
self.insert_block_without_checks(block);
}
}
@ -415,6 +498,17 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
}
};
self.head_block = Some(
self.blocks
.get(&forkchoice_state.head_block_hash)
.unwrap()
.clone(),
);
if forkchoice_state.finalized_block_hash != ExecutionBlockHash::zero() {
self.finalized_block_hash = Some(forkchoice_state.finalized_block_hash);
}
Ok(JsonForkchoiceUpdatedV1Response {
payload_status: JsonPayloadStatusV1 {
status: JsonPayloadStatusV1Status::Valid,

View File

@ -75,10 +75,28 @@ pub async fn handle_rpc<T: EthSpec>(
}
}
ENGINE_NEW_PAYLOAD_V1 | ENGINE_NEW_PAYLOAD_V2 => {
let request: JsonExecutionPayload<T> = get_param(params, 0)?;
let request = match method {
ENGINE_NEW_PAYLOAD_V1 => {
JsonExecutionPayload::V1(get_param::<JsonExecutionPayloadV1<T>>(params, 0)?)
}
ENGINE_NEW_PAYLOAD_V2 => {
JsonExecutionPayload::V2(get_param::<JsonExecutionPayloadV2<T>>(params, 0)?)
}
_ => unreachable!(),
};
let fork = match request {
JsonExecutionPayload::V1(_) => ForkName::Merge,
JsonExecutionPayload::V2(ref payload) => {
if payload.withdrawals.is_none() {
ForkName::Merge
} else {
ForkName::Capella
}
}
};
// Canned responses set by block hash take priority.
if let Some(status) = ctx.get_new_payload_status(&request.block_hash()) {
if let Some(status) = ctx.get_new_payload_status(request.block_hash()) {
return Ok(serde_json::to_value(JsonPayloadStatusV1::from(status)).unwrap());
}
@ -97,8 +115,7 @@ pub async fn handle_rpc<T: EthSpec>(
Some(
ctx.execution_block_generator
.write()
// FIXME: should this worry about other forks?
.new_payload(request.try_into_execution_payload(ForkName::Merge).unwrap()),
.new_payload(request.try_into_execution_payload(fork).unwrap()),
)
} else {
None
@ -120,10 +137,19 @@ pub async fn handle_rpc<T: EthSpec>(
Ok(serde_json::to_value(JsonExecutionPayloadV1::try_from(response).unwrap()).unwrap())
}
ENGINE_FORKCHOICE_UPDATED_V1 | ENGINE_FORKCHOICE_UPDATED_V2 => {
// FIXME(capella): handle fcu version 2
ENGINE_FORKCHOICE_UPDATED_V1 => {
let forkchoice_state: JsonForkchoiceStateV1 = get_param(params, 0)?;
let payload_attributes: Option<JsonPayloadAttributes> = get_param(params, 1)?;
if let Some(hook_response) = ctx
.hook
.lock()
.on_forkchoice_updated(forkchoice_state.clone(), payload_attributes.clone())
{
return Ok(serde_json::to_value(hook_response).unwrap());
}
let head_block_hash = forkchoice_state.head_block_hash;
// Canned responses set by block hash take priority.
@ -153,19 +179,6 @@ pub async fn handle_rpc<T: EthSpec>(
Ok(serde_json::to_value(response).unwrap())
}
ENGINE_GET_PAYLOAD_V2 => {
let request: JsonPayloadIdRequest = get_param(params, 0)?;
let id = request.into();
let response = ctx
.execution_block_generator
.write()
.get_payload(&id)
.ok_or_else(|| format!("no payload for id {:?}", id))?;
Ok(serde_json::to_value(JsonExecutionPayloadV2::try_from(response).unwrap()).unwrap())
}
ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1 => {
let block_generator = ctx.execution_block_generator.read();
let transition_config: TransitionConfigurationV1 = TransitionConfigurationV1 {

View File

@ -0,0 +1,27 @@
use crate::json_structures::*;
type ForkChoiceUpdatedHook = dyn Fn(
JsonForkchoiceStateV1,
Option<JsonPayloadAttributes>,
) -> Option<JsonForkchoiceUpdatedV1Response>
+ Send
+ Sync;
#[derive(Default)]
pub struct Hook {
forkchoice_updated: Option<Box<ForkChoiceUpdatedHook>>,
}
impl Hook {
pub fn on_forkchoice_updated(
&self,
state: JsonForkchoiceStateV1,
payload_attributes: Option<JsonPayloadAttributes>,
) -> Option<JsonForkchoiceUpdatedV1Response> {
(self.forkchoice_updated.as_ref()?)(state, payload_attributes)
}
pub fn set_forkchoice_updated_hook(&mut self, f: Box<ForkChoiceUpdatedHook>) {
self.forkchoice_updated = Some(f);
}
}
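A hypothetical registration of the hook from a test (closure body illustrative; returning `None` falls through to the mock's default forkchoice handling):

let mut hook = Hook::default();
hook.set_forkchoice_updated_hook(Box::new(|state, payload_attributes| {
    println!(
        "fcU: head={:?}, attrs={}",
        state.head_block_hash,
        payload_attributes.is_some()
    );
    None
}));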

View File

@ -244,6 +244,21 @@ impl<T: EthSpec> MockExecutionLayer<T> {
self
}
pub fn produce_forked_pow_block(self) -> (Self, ExecutionBlockHash) {
let head_block = self
.server
.execution_block_generator()
.latest_block()
.unwrap();
let block_hash = self
.server
.execution_block_generator()
.insert_pow_block_by_hash(head_block.parent_hash(), 1)
.unwrap();
(self, block_hash)
}
pub async fn with_terminal_block<'a, U, V>(self, func: U) -> Self
where
U: Fn(ChainSpec, ExecutionLayer<T>, Option<ExecutionBlock>) -> V,

View File

@ -23,6 +23,7 @@ use types::{EthSpec, ExecutionBlockHash, Uint256};
use warp::{http::StatusCode, Filter, Rejection};
pub use execution_block_generator::{generate_pow_block, Block, ExecutionBlockGenerator};
pub use hook::Hook;
pub use mock_builder::{Context as MockBuilderContext, MockBuilder, Operation, TestingBuilder};
pub use mock_execution_layer::MockExecutionLayer;
@ -33,6 +34,7 @@ pub const DEFAULT_BUILDER_THRESHOLD_WEI: u128 = 1_000_000_000_000_000_000;
mod execution_block_generator;
mod handle_rpc;
mod hook;
mod mock_builder;
mod mock_execution_layer;
@ -99,6 +101,7 @@ impl<T: EthSpec> MockServer<T> {
static_new_payload_response: <_>::default(),
static_forkchoice_updated_response: <_>::default(),
static_get_block_by_hash_response: <_>::default(),
hook: <_>::default(),
new_payload_statuses: <_>::default(),
fcu_payload_statuses: <_>::default(),
_phantom: PhantomData,
@ -359,8 +362,7 @@ impl<T: EthSpec> MockServer<T> {
.write()
// The EF tests supply blocks out of order, so we must import them "without checks" and
// trust they form valid chains.
.insert_block_without_checks(block)
.unwrap()
.insert_block_without_checks(block);
}
pub fn get_block(&self, block_hash: ExecutionBlockHash) -> Option<Block<T>> {
@ -441,6 +443,7 @@ pub struct Context<T: EthSpec> {
pub static_new_payload_response: Arc<Mutex<Option<StaticNewPayloadResponse>>>,
pub static_forkchoice_updated_response: Arc<Mutex<Option<PayloadStatusV1>>>,
pub static_get_block_by_hash_response: Arc<Mutex<Option<Option<ExecutionBlock>>>>,
pub hook: Arc<Mutex<Hook>>,
// Canned responses by block hash.
//

View File

@ -1670,36 +1670,62 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::path::end())
.and(warp::body::json())
.and(network_tx_filter.clone())
.and(log_filter.clone())
.and_then(
|chain: Arc<BeaconChain<T>>,
address_change: SignedBlsToExecutionChange,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>| {
address_changes: Vec<SignedBlsToExecutionChange>,
#[allow(unused)] network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
blocking_json_task(move || {
let outcome = chain
.verify_bls_to_execution_change_for_gossip(address_change)
.map_err(|e| {
warp_utils::reject::object_invalid(format!(
"gossip verification failed: {:?}",
e
))
})?;
let mut failures = vec![];
if let ObservationOutcome::New(address_change) = outcome {
#[cfg(feature = "withdrawals-processing")]
{
publish_pubsub_message(
&network_tx,
PubsubMessage::BlsToExecutionChange(Box::new(
address_change.as_inner().clone(),
)),
)?;
for (index, address_change) in address_changes.into_iter().enumerate() {
let validator_index = address_change.message.validator_index;
match chain.verify_bls_to_execution_change_for_gossip(address_change) {
Ok(ObservationOutcome::New(verified_address_change)) => {
#[cfg(feature = "withdrawals-processing")]
{
publish_pubsub_message(
&network_tx,
PubsubMessage::BlsToExecutionChange(Box::new(
verified_address_change.as_inner().clone(),
)),
)?;
}
chain.import_bls_to_execution_change(verified_address_change);
}
Ok(ObservationOutcome::AlreadyKnown) => {
debug!(
log,
"BLS to execution change already known";
"validator_index" => validator_index,
);
}
Err(e) => {
error!(
log,
"Invalid BLS to execution change";
"validator_index" => validator_index,
"reason" => ?e,
"source" => "HTTP API",
);
failures.push(api_types::Failure::new(
index,
format!("invalid: {e:?}"),
));
}
}
drop(network_tx);
chain.import_bls_to_execution_change(address_change);
}
Ok(())
if failures.is_empty() {
Ok(())
} else {
Err(warp_utils::reject::indexed_bad_request(
"some BLS to execution changes failed to verify".into(),
failures,
))
}
})
},
);
@ -2951,6 +2977,22 @@ pub fn serve<T: BeaconChainTypes>(
})
});
// POST lighthouse/ui/validator_metrics
let post_lighthouse_ui_validator_metrics = warp::path("lighthouse")
.and(warp::path("ui"))
.and(warp::path("validator_metrics"))
.and(warp::path::end())
.and(warp::body::json())
.and(chain_filter.clone())
.and_then(
|request_data: ui::ValidatorMetricsRequestData, chain: Arc<BeaconChain<T>>| {
blocking_json_task(move || {
ui::post_validator_monitor_metrics(request_data, chain)
.map(api_types::GenericResponse::from)
})
},
);
// GET lighthouse/syncing
let get_lighthouse_syncing = warp::path("lighthouse")
.and(warp::path("syncing"))
@ -3402,6 +3444,7 @@ pub fn serve<T: BeaconChainTypes>(
.or(get_validator_sync_committee_contribution.boxed())
.or(get_lighthouse_health.boxed())
.or(get_lighthouse_ui_health.boxed())
.or(get_lighthouse_ui_validator_count.boxed())
.or(get_lighthouse_syncing.boxed())
.or(get_lighthouse_nat.boxed())
.or(get_lighthouse_peers.boxed())
@ -3419,7 +3462,6 @@ pub fn serve<T: BeaconChainTypes>(
.or(get_lighthouse_attestation_performance.boxed())
.or(get_lighthouse_block_packing_efficiency.boxed())
.or(get_lighthouse_merge_readiness.boxed())
.or(get_lighthouse_ui_validator_count.boxed())
.or(get_events.boxed()),
)
.boxed()
@ -3444,7 +3486,8 @@ pub fn serve<T: BeaconChainTypes>(
.or(post_lighthouse_liveness.boxed())
.or(post_lighthouse_database_reconstruct.boxed())
.or(post_lighthouse_database_historical_blocks.boxed())
.or(post_lighthouse_block_rewards.boxed()),
.or(post_lighthouse_block_rewards.boxed())
.or(post_lighthouse_ui_validator_metrics.boxed()),
))
.recover(warp_utils::reject::handle_rejection)
.with(slog_logging(log.clone()))

View File

@ -4,7 +4,7 @@ use beacon_chain::NotifyExecutionLayer;
use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, CountUnrealized};
use lighthouse_network::PubsubMessage;
use network::NetworkMessage;
use slog::{crit, error, info, warn, Logger};
use slog::{error, info, warn, Logger};
use slot_clock::SlotClock;
use std::sync::Arc;
use tokio::sync::mpsc::UnboundedSender;
@ -89,10 +89,10 @@ pub async fn publish_block<T: BeaconChainTypes>(
//
// Check to see the thresholds are non-zero to avoid logging errors with small
// slot times (e.g., during testing)
let crit_threshold = chain.slot_clock.unagg_attestation_production_delay();
let error_threshold = crit_threshold / 2;
if delay >= crit_threshold {
crit!(
let too_late_threshold = chain.slot_clock.unagg_attestation_production_delay();
let delayed_threshold = too_late_threshold / 2;
if delay >= too_late_threshold {
error!(
log,
"Block was broadcast too late";
"msg" => "system may be overloaded, block likely to be orphaned",
@ -100,7 +100,7 @@ pub async fn publish_block<T: BeaconChainTypes>(
"slot" => block.slot(),
"root" => ?root,
)
} else if delay >= error_threshold {
} else if delay >= delayed_threshold {
error!(
log,
"Block broadcast was delayed";

View File

@ -1,10 +1,11 @@
use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes};
use beacon_chain::{metrics, BeaconChain, BeaconChainError, BeaconChainTypes};
use eth2::types::ValidatorStatus;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use warp_utils::reject::beacon_chain_error;
#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)]
#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
pub struct ValidatorCountResponse {
pub active_ongoing: u64,
pub active_exiting: u64,
@ -69,3 +70,126 @@ pub fn get_validator_count<T: BeaconChainTypes>(
exited_slashed,
})
}
#[derive(PartialEq, Serialize, Deserialize)]
pub struct ValidatorMetricsRequestData {
indices: Vec<u64>,
}
#[derive(PartialEq, Serialize, Deserialize)]
pub struct ValidatorMetrics {
attestation_hits: u64,
attestation_misses: u64,
attestation_hit_percentage: f64,
attestation_head_hits: u64,
attestation_head_misses: u64,
attestation_head_hit_percentage: f64,
attestation_target_hits: u64,
attestation_target_misses: u64,
attestation_target_hit_percentage: f64,
}
#[derive(PartialEq, Serialize, Deserialize)]
pub struct ValidatorMetricsResponse {
validators: HashMap<String, ValidatorMetrics>,
}
pub fn post_validator_monitor_metrics<T: BeaconChainTypes>(
request_data: ValidatorMetricsRequestData,
chain: Arc<BeaconChain<T>>,
) -> Result<ValidatorMetricsResponse, warp::Rejection> {
let validator_ids = chain
.validator_monitor
.read()
.get_all_monitored_validators()
.into_iter()
.collect::<HashSet<String>>();
let indices = request_data
.indices
.iter()
.map(|index| index.to_string())
.collect::<HashSet<String>>();
let ids = validator_ids
.intersection(&indices)
.collect::<HashSet<&String>>();
let mut validators = HashMap::new();
for id in ids {
let attestation_hits = metrics::get_int_counter(
&metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_HIT,
&[id],
)
.map(|counter| counter.get())
.unwrap_or(0);
let attestation_misses = metrics::get_int_counter(
&metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_MISS,
&[id],
)
.map(|counter| counter.get())
.unwrap_or(0);
let attestations = attestation_hits + attestation_misses;
let attestation_hit_percentage: f64 = if attestations == 0 {
0.0
} else {
// Use floating-point division so the percentage keeps its fraction.
100.0 * attestation_hits as f64 / attestations as f64
};
let attestation_head_hits = metrics::get_int_counter(
&metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_HIT,
&[id],
)
.map(|counter| counter.get())
.unwrap_or(0);
let attestation_head_misses = metrics::get_int_counter(
&metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_MISS,
&[id],
)
.map(|counter| counter.get())
.unwrap_or(0);
let head_attestations = attestation_head_hits + attestation_head_misses;
let attestation_head_hit_percentage: f64 = if head_attestations == 0 {
0.0
} else {
100.0 * attestation_head_hits as f64 / head_attestations as f64
};
let attestation_target_hits = metrics::get_int_counter(
&metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_HIT,
&[id],
)
.map(|counter| counter.get())
.unwrap_or(0);
let attestation_target_misses = metrics::get_int_counter(
&metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_MISS,
&[id],
)
.map(|counter| counter.get())
.unwrap_or(0);
let target_attestations = attestation_target_hits + attestation_target_misses;
let attestation_target_hit_percentage: f64 = if target_attestations == 0 {
0.0
} else {
100.0 * attestation_target_hits as f64 / target_attestations as f64
};
let metrics = ValidatorMetrics {
attestation_hits,
attestation_misses,
attestation_hit_percentage,
attestation_head_hits,
attestation_head_misses,
attestation_head_hit_percentage,
attestation_target_hits,
attestation_target_misses,
attestation_target_hit_percentage,
};
validators.insert(id.clone(), metrics);
}
Ok(ValidatorMetricsResponse { validators })
}
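Illustrative request and response shapes for the new endpoint (assumed from the structs above; the response is wrapped by `GenericResponse` and percentages are floats):

// POST /lighthouse/ui/validator_metrics
// request body:  {"indices": [0, 1]}
// response body: {"data": {"validators": {"0": {
//     "attestation_hits": 100, "attestation_misses": 0,
//     "attestation_hit_percentage": 100.0, ... }}}}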

View File

@ -1,5 +1,5 @@
use beacon_chain::{
test_utils::{BeaconChainHarness, EphemeralHarnessType},
test_utils::{BeaconChainHarness, BoxedMutator, EphemeralHarnessType},
BeaconChain, BeaconChainTypes,
};
use directory::DEFAULT_ROOT_DIR;
@ -12,6 +12,7 @@ use lighthouse_network::{
types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState},
ConnectedPoint, Enr, NetworkGlobals, PeerId, PeerManager,
};
use logging::test_logger;
use network::{NetworkReceivers, NetworkSenders};
use sensitive_url::SensitiveUrl;
use slog::Logger;
@ -19,6 +20,7 @@ use std::future::Future;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::Arc;
use std::time::Duration;
use store::MemoryStore;
use tokio::sync::oneshot;
use types::{ChainSpec, EthSpec};
@ -47,13 +49,30 @@ pub struct ApiServer<E: EthSpec, SFut: Future<Output = ()>> {
pub external_peer_id: PeerId,
}
type Mutator<E> = BoxedMutator<E, MemoryStore<E>, MemoryStore<E>>;
impl<E: EthSpec> InteractiveTester<E> {
pub async fn new(spec: Option<ChainSpec>, validator_count: usize) -> Self {
let harness = BeaconChainHarness::builder(E::default())
Self::new_with_mutator(spec, validator_count, None).await
}
pub async fn new_with_mutator(
spec: Option<ChainSpec>,
validator_count: usize,
mutator: Option<Mutator<E>>,
) -> Self {
let mut harness_builder = BeaconChainHarness::builder(E::default())
.spec_or_default(spec)
.deterministic_keypairs(validator_count)
.fresh_ephemeral_store()
.build();
.logger(test_logger())
.mock_execution_layer()
.fresh_ephemeral_store();
if let Some(mutator) = mutator {
harness_builder = harness_builder.initial_mutator(mutator);
}
let harness = harness_builder.build();
let ApiServer {
server,

View File

@ -1,9 +1,22 @@
//! Generic tests that make use of the (newer) `InteractiveTester`
use crate::common::*;
use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy};
use beacon_chain::{
chain_config::ReOrgThreshold,
test_utils::{AttestationStrategy, BlockStrategy},
};
use eth2::types::DepositContractData;
use execution_layer::{ForkChoiceState, PayloadAttributes};
use parking_lot::Mutex;
use slot_clock::SlotClock;
use state_processing::state_advance::complete_state_advance;
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
use tree_hash::TreeHash;
use types::{EthSpec, FullPayload, MainnetEthSpec, Slot};
use types::{
Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, FullPayload,
MainnetEthSpec, ProposerPreparationData, Slot,
};
type E = MainnetEthSpec;
@ -33,6 +46,495 @@ async fn deposit_contract_custom_network() {
assert_eq!(result, expected);
}
/// Data structure for tracking fork choice updates received by the mock execution layer.
#[derive(Debug, Default)]
struct ForkChoiceUpdates {
updates: HashMap<ExecutionBlockHash, Vec<ForkChoiceUpdateMetadata>>,
}
#[derive(Debug, Clone)]
struct ForkChoiceUpdateMetadata {
received_at: Duration,
state: ForkChoiceState,
payload_attributes: Option<PayloadAttributes>,
}
impl ForkChoiceUpdates {
fn insert(&mut self, update: ForkChoiceUpdateMetadata) {
self.updates
.entry(update.state.head_block_hash)
.or_insert_with(Vec::new)
.push(update);
}
fn contains_update_for(&self, block_hash: ExecutionBlockHash) -> bool {
self.updates.contains_key(&block_hash)
}
/// Find the first fork choice update for `head_block_hash` with payload attributes for a
/// block proposal at `proposal_timestamp`.
fn first_update_with_payload_attributes(
&self,
head_block_hash: ExecutionBlockHash,
proposal_timestamp: u64,
) -> Option<ForkChoiceUpdateMetadata> {
self.updates
.get(&head_block_hash)?
.iter()
.find(|update| {
update
.payload_attributes
.as_ref()
.map_or(false, |payload_attributes| {
payload_attributes.timestamp == proposal_timestamp
})
})
.cloned()
}
}
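// Note: these records are populated by the mock EL's `set_forkchoice_updated_hook`
// (installed further down in the test) and queried at the end of the test to
// check fcU timing.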
pub struct ReOrgTest {
head_slot: Slot,
/// Number of slots between parent block and canonical head.
parent_distance: u64,
/// Number of slots between head block and block proposal slot.
head_distance: u64,
re_org_threshold: u64,
max_epochs_since_finalization: u64,
percent_parent_votes: usize,
percent_empty_votes: usize,
percent_head_votes: usize,
should_re_org: bool,
misprediction: bool,
}
impl Default for ReOrgTest {
/// Default config represents a regular easy re-org.
fn default() -> Self {
Self {
head_slot: Slot::new(30),
parent_distance: 1,
head_distance: 1,
re_org_threshold: 20,
max_epochs_since_finalization: 2,
percent_parent_votes: 100,
percent_empty_votes: 100,
percent_head_votes: 0,
should_re_org: true,
misprediction: false,
}
}
}
// Test that the beacon node will try to perform proposer boost re-orgs on late blocks when
// configured.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
pub async fn proposer_boost_re_org_zero_weight() {
proposer_boost_re_org_test(ReOrgTest::default()).await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
pub async fn proposer_boost_re_org_epoch_boundary() {
proposer_boost_re_org_test(ReOrgTest {
head_slot: Slot::new(31),
should_re_org: false,
..Default::default()
})
.await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
pub async fn proposer_boost_re_org_slot_after_epoch_boundary() {
proposer_boost_re_org_test(ReOrgTest {
head_slot: Slot::new(33),
..Default::default()
})
.await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
pub async fn proposer_boost_re_org_bad_ffg() {
proposer_boost_re_org_test(ReOrgTest {
head_slot: Slot::new(64 + 22),
should_re_org: false,
..Default::default()
})
.await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
pub async fn proposer_boost_re_org_no_finality() {
proposer_boost_re_org_test(ReOrgTest {
head_slot: Slot::new(96),
percent_parent_votes: 100,
percent_empty_votes: 0,
percent_head_votes: 100,
should_re_org: false,
..Default::default()
})
.await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
pub async fn proposer_boost_re_org_finality() {
proposer_boost_re_org_test(ReOrgTest {
head_slot: Slot::new(129),
..Default::default()
})
.await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
pub async fn proposer_boost_re_org_parent_distance() {
proposer_boost_re_org_test(ReOrgTest {
head_slot: Slot::new(30),
parent_distance: 2,
should_re_org: false,
..Default::default()
})
.await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
pub async fn proposer_boost_re_org_head_distance() {
proposer_boost_re_org_test(ReOrgTest {
head_slot: Slot::new(29),
head_distance: 2,
should_re_org: false,
..Default::default()
})
.await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
pub async fn proposer_boost_re_org_very_unhealthy() {
proposer_boost_re_org_test(ReOrgTest {
head_slot: Slot::new(31),
parent_distance: 2,
head_distance: 2,
percent_parent_votes: 10,
percent_empty_votes: 10,
percent_head_votes: 10,
should_re_org: false,
..Default::default()
})
.await;
}
/// The head block is late but still receives 30% of the committee vote, leading to a misprediction.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
pub async fn proposer_boost_re_org_weight_misprediction() {
proposer_boost_re_org_test(ReOrgTest {
head_slot: Slot::new(30),
percent_empty_votes: 70,
percent_head_votes: 30,
should_re_org: false,
misprediction: true,
..Default::default()
})
.await;
}
/// Run a proposer boost re-org test.
///
/// - `head_slot`: the slot of the canonical head to be re-orged
/// - `re_org_threshold`: committee percentage value below which a re-org is attempted
/// - `percent_empty_votes`: percentage of the committee attesting to the parent block (leaving the head slot empty)
/// - `percent_head_votes`: percentage of the committee attesting to the head block
/// - `should_re_org`: whether the proposer should build on the parent rather than the head
pub async fn proposer_boost_re_org_test(
ReOrgTest {
head_slot,
parent_distance,
head_distance,
re_org_threshold,
max_epochs_since_finalization,
percent_parent_votes,
percent_empty_votes,
percent_head_votes,
should_re_org,
misprediction,
}: ReOrgTest,
) {
assert!(head_slot > 0);
// We require a network with execution enabled so we can check EL message timings.
let mut spec = ForkName::Merge.make_genesis_spec(E::default_spec());
spec.terminal_total_difficulty = 1.into();
// Ensure there are enough validators for `attesters_per_slot` attesters in each slot's committee.
let attesters_per_slot = 10;
let validator_count = E::slots_per_epoch() as usize * attesters_per_slot;
let all_validators = (0..validator_count).collect::<Vec<usize>>();
let num_initial = head_slot.as_u64().checked_sub(parent_distance + 1).unwrap();
// Check that the required vote percentages can be satisfied exactly using `attesters_per_slot`.
assert_eq!(100 % attesters_per_slot, 0);
let percent_per_attester = 100 / attesters_per_slot;
assert_eq!(percent_parent_votes % percent_per_attester, 0);
assert_eq!(percent_empty_votes % percent_per_attester, 0);
assert_eq!(percent_head_votes % percent_per_attester, 0);
let num_parent_votes = Some(attesters_per_slot * percent_parent_votes / 100);
let num_empty_votes = Some(attesters_per_slot * percent_empty_votes / 100);
let num_head_votes = Some(attesters_per_slot * percent_head_votes / 100);
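// Worked example: with `attesters_per_slot = 10` each attester accounts for 10%
// of the committee, so e.g. `percent_empty_votes = 70` and `percent_head_votes = 30`
// yield `num_empty_votes = Some(7)` and `num_head_votes = Some(3)`.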
let tester = InteractiveTester::<E>::new_with_mutator(
Some(spec),
validator_count,
Some(Box::new(move |builder| {
builder
.proposer_re_org_threshold(Some(ReOrgThreshold(re_org_threshold)))
.proposer_re_org_max_epochs_since_finalization(Epoch::new(
max_epochs_since_finalization,
))
})),
)
.await;
let harness = &tester.harness;
let mock_el = harness.mock_execution_layer.as_ref().unwrap();
let execution_ctx = mock_el.server.ctx.clone();
let slot_clock = &harness.chain.slot_clock;
// Move to terminal block.
mock_el.server.all_payloads_valid();
execution_ctx
.execution_block_generator
.write()
.move_to_terminal_block()
.unwrap();
// Send proposer preparation data for all validators.
let proposer_preparation_data = all_validators
.iter()
.map(|i| ProposerPreparationData {
validator_index: *i as u64,
fee_recipient: Address::from_low_u64_be(*i as u64),
})
.collect::<Vec<_>>();
harness
.chain
.execution_layer
.as_ref()
.unwrap()
.update_proposer_preparation(
head_slot.epoch(E::slots_per_epoch()) + 1,
&proposer_preparation_data,
)
.await;
// Create some chain depth.
harness.advance_slot();
harness
.extend_chain(
num_initial as usize,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
)
.await;
// Start collecting fork choice updates.
let forkchoice_updates = Arc::new(Mutex::new(ForkChoiceUpdates::default()));
let forkchoice_updates_inner = forkchoice_updates.clone();
let chain_inner = harness.chain.clone();
execution_ctx
.hook
.lock()
.set_forkchoice_updated_hook(Box::new(move |state, payload_attributes| {
let received_at = chain_inner.slot_clock.now_duration().unwrap();
let state = ForkChoiceState::from(state);
let payload_attributes = payload_attributes.map(Into::into);
let update = ForkChoiceUpdateMetadata {
received_at,
state,
payload_attributes,
};
forkchoice_updates_inner.lock().insert(update);
None
}));
// We set up the following block graph, where B is a block that arrives late and is re-orged
// by C.
//
// A | B | - |
// ^ | - | C |
let slot_a = Slot::new(num_initial + 1);
let slot_b = slot_a + parent_distance;
let slot_c = slot_b + head_distance;
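// Worked example with the default `ReOrgTest` config (head_slot = 30,
// parent_distance = 1, head_distance = 1): num_initial = 28, so slot_a = 29,
// slot_b = 30 (the late head) and slot_c = 31 (the proposal slot).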
harness.advance_slot();
let (block_a_root, block_a, state_a) = harness
.add_block_at_slot(slot_a, harness.get_current_state())
.await
.unwrap();
// Attest to block A during slot A.
let (block_a_parent_votes, _) = harness.make_attestations_with_limit(
&all_validators,
&state_a,
state_a.canonical_root(),
block_a_root,
slot_a,
num_parent_votes,
);
harness.process_attestations(block_a_parent_votes);
// Attest to block A during slot B.
for _ in 0..parent_distance {
harness.advance_slot();
}
let (block_a_empty_votes, block_a_attesters) = harness.make_attestations_with_limit(
&all_validators,
&state_a,
state_a.canonical_root(),
block_a_root,
slot_b,
num_empty_votes,
);
harness.process_attestations(block_a_empty_votes);
let remaining_attesters = all_validators
.iter()
.copied()
.filter(|index| !block_a_attesters.contains(index))
.collect::<Vec<_>>();
// Produce block B and process it halfway through the slot.
let (block_b, mut state_b) = harness.make_block(state_a.clone(), slot_b).await;
let block_b_root = block_b.canonical_root();
let obs_time = slot_clock.start_of(slot_b).unwrap() + slot_clock.slot_duration() / 2;
slot_clock.set_current_time(obs_time);
harness.chain.block_times_cache.write().set_time_observed(
block_b_root,
slot_b,
obs_time,
None,
None,
);
harness.process_block_result(block_b.clone()).await.unwrap();
// Add attestations to block B.
let (block_b_head_votes, _) = harness.make_attestations_with_limit(
&remaining_attesters,
&state_b,
state_b.canonical_root(),
block_b_root.into(),
slot_b,
num_head_votes,
);
harness.process_attestations(block_b_head_votes);
let payload_lookahead = harness.chain.config.prepare_payload_lookahead;
let fork_choice_lookahead = Duration::from_millis(500);
while harness.get_current_slot() != slot_c {
let current_slot = harness.get_current_slot();
let next_slot = current_slot + 1;
// Simulate the scheduled call to prepare proposers at 8 seconds into the slot.
harness.advance_to_slot_lookahead(next_slot, payload_lookahead);
harness
.chain
.prepare_beacon_proposer(current_slot)
.await
.unwrap();
// Simulate the scheduled call to fork choice + prepare proposers 500ms before the
// next slot.
harness.advance_to_slot_lookahead(next_slot, fork_choice_lookahead);
harness.chain.recompute_head_at_slot(next_slot).await;
harness
.chain
.prepare_beacon_proposer(current_slot)
.await
.unwrap();
harness.advance_slot();
harness.chain.per_slot_task().await;
}
// Produce block C.
// Advance state_b so we can get the proposer.
complete_state_advance(&mut state_b, None, slot_c, &harness.chain.spec).unwrap();
let proposer_index = state_b
.get_beacon_proposer_index(slot_c, &harness.chain.spec)
.unwrap();
let randao_reveal = harness
.sign_randao_reveal(&state_b, proposer_index, slot_c)
.into();
let unsigned_block_c = tester
.client
.get_validator_blocks(slot_c, &randao_reveal, None)
.await
.unwrap()
.data;
let block_c = harness.sign_beacon_block(unsigned_block_c, &state_b);
if should_re_org {
// Block C should build on A.
assert_eq!(block_c.parent_root(), block_a_root.into());
} else {
// Block C should build on B.
assert_eq!(block_c.parent_root(), block_b_root);
}
// Applying block C should cause it to become head regardless (re-org or continuation).
let block_root_c = harness
.process_block_result(block_c.clone())
.await
.unwrap()
.into();
assert_eq!(harness.head_block_root(), block_root_c);
// Check the fork choice updates that were sent.
let forkchoice_updates = forkchoice_updates.lock();
let block_a_exec_hash = block_a.message().execution_payload().unwrap().block_hash();
let block_b_exec_hash = block_b.message().execution_payload().unwrap().block_hash();
let block_c_timestamp = block_c.message().execution_payload().unwrap().timestamp();
// If we re-orged then no fork choice update for B should have been sent.
assert_eq!(
should_re_org,
!forkchoice_updates.contains_update_for(block_b_exec_hash),
"{block_b_exec_hash:?}"
);
// Check the timing of the first fork choice update with payload attributes for block C.
let c_parent_hash = if should_re_org {
block_a_exec_hash
} else {
block_b_exec_hash
};
let first_update = forkchoice_updates
.first_update_with_payload_attributes(c_parent_hash, block_c_timestamp)
.unwrap();
let payload_attribs = first_update.payload_attributes.as_ref().unwrap();
let lookahead = slot_clock
.start_of(slot_c)
.unwrap()
.checked_sub(first_update.received_at)
.unwrap();
if !misprediction {
assert_eq!(
lookahead, payload_lookahead,
"lookahead={lookahead:?}, timestamp={}, prev_randao={:?}",
payload_attribs.timestamp, payload_attribs.prev_randao,
);
} else {
// On a misprediction we issue the first fcU 500ms before creating a block!
assert_eq!(
lookahead, fork_choice_lookahead,
"timestamp={}, prev_randao={:?}",
payload_attribs.timestamp, payload_attribs.prev_randao,
);
}
}
// Test that running fork choice before proposing results in selection of the correct head.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
pub async fn fork_choice_before_proposal() {

View File

@ -38,6 +38,10 @@ pub struct GossipCache {
sync_committee_message: Option<Duration>,
/// Timeout for signed BLS to execution changes.
bls_to_execution_change: Option<Duration>,
/// Timeout for light client finality updates.
light_client_finality_update: Option<Duration>,
/// Timeout for light client optimistic updates.
light_client_optimistic_update: Option<Duration>,
}
#[derive(Default)]
@ -63,6 +67,10 @@ pub struct GossipCacheBuilder {
sync_committee_message: Option<Duration>,
/// Timeout for signed BLS to execution changes.
bls_to_execution_change: Option<Duration>,
/// Timeout for light client finality updates.
light_client_finality_update: Option<Duration>,
/// Timeout for light client optimistic updates.
light_client_optimistic_update: Option<Duration>,
}
#[allow(dead_code)]
@ -127,6 +135,18 @@ impl GossipCacheBuilder {
self
}
/// Timeout for light client finality update messages.
pub fn light_client_finality_update_timeout(mut self, timeout: Duration) -> Self {
self.light_client_finality_update = Some(timeout);
self
}
/// Timeout for light client optimistic update messages.
pub fn light_client_optimistic_update_timeout(mut self, timeout: Duration) -> Self {
self.light_client_optimistic_update = Some(timeout);
self
}
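// Hedged usage sketch (the durations are illustrative, not defaults):
//
//     let cache = GossipCacheBuilder::default()
//         .light_client_finality_update_timeout(Duration::from_secs(12))
//         .light_client_optimistic_update_timeout(Duration::from_secs(12))
//         .build();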
pub fn build(self) -> GossipCache {
let GossipCacheBuilder {
default_timeout,
@ -140,6 +160,8 @@ impl GossipCacheBuilder {
signed_contribution_and_proof,
sync_committee_message,
bls_to_execution_change,
light_client_finality_update,
light_client_optimistic_update,
} = self;
GossipCache {
expirations: DelayQueue::default(),
@ -154,6 +176,8 @@ impl GossipCacheBuilder {
signed_contribution_and_proof: signed_contribution_and_proof.or(default_timeout),
sync_committee_message: sync_committee_message.or(default_timeout),
bls_to_execution_change: bls_to_execution_change.or(default_timeout),
light_client_finality_update: light_client_finality_update.or(default_timeout),
light_client_optimistic_update: light_client_optimistic_update.or(default_timeout),
}
}
}
@ -178,6 +202,8 @@ impl GossipCache {
GossipKind::SignedContributionAndProof => self.signed_contribution_and_proof,
GossipKind::SyncCommitteeMessage(_) => self.sync_committee_message,
GossipKind::BlsToExecutionChange => self.bls_to_execution_change,
GossipKind::LightClientFinalityUpdate => self.light_client_finality_update,
GossipKind::LightClientOptimisticUpdate => self.light_client_optimistic_update,
};
let expire_timeout = match expire_timeout {
Some(expire_timeout) => expire_timeout,

View File

@ -254,6 +254,8 @@ pub(crate) fn create_whitelist_filter(
add(AttesterSlashing);
add(SignedContributionAndProof);
add(BlsToExecutionChange);
add(LightClientFinalityUpdate);
add(LightClientOptimisticUpdate);
add(BeaconBlocksAndBlobsSidecar);
for id in 0..attestation_subnet_count {
add(Attestation(SubnetId::new(id)));

View File

@ -16,4 +16,7 @@ pub use globals::NetworkGlobals;
pub use pubsub::{PubsubMessage, SnappyTransform};
pub use subnet::{Subnet, SubnetDiscovery};
pub use sync_state::{BackFillState, SyncState};
pub use topics::{subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, CORE_TOPICS};
pub use topics::{
subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, CORE_TOPICS,
LIGHT_CLIENT_GOSSIP_TOPICS,
};

View File

@ -12,11 +12,13 @@ use std::io::{Error, ErrorKind};
use std::sync::Arc;
use tree_hash_derive::TreeHash;
use types::{
Attestation, AttesterSlashing, BlobsSidecar, EthSpec, ForkContext, ForkName, ProposerSlashing,
Attestation, AttesterSlashing, BlobsSidecar, EthSpec, ForkContext, ForkName,
SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair,
SignedBeaconBlockAndBlobsSidecar, SignedBeaconBlockBase, SignedBeaconBlockCapella,
SignedBeaconBlockEip4844, SignedBeaconBlockMerge, SignedBlsToExecutionChange,
SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId,
SignedBeaconBlockEip4844, SignedBeaconBlockMerge,
LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing,
SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SubnetId,
SyncCommitteeMessage, SyncSubnetId,
};
#[derive(Debug, Clone, PartialEq)]
@ -41,6 +43,10 @@ pub enum PubsubMessage<T: EthSpec> {
SyncCommitteeMessage(Box<(SyncSubnetId, SyncCommitteeMessage)>),
/// Gossipsub message for BLS to execution change messages.
BlsToExecutionChange(Box<SignedBlsToExecutionChange>),
/// Gossipsub message providing notification of a light client finality update.
LightClientFinalityUpdate(Box<LightClientFinalityUpdate<T>>),
/// Gossipsub message providing notification of a light client optimistic update.
LightClientOptimisticUpdate(Box<LightClientOptimisticUpdate<T>>),
}
// Implements the `DataTransform` trait of gossipsub to employ snappy compression
@ -127,6 +133,10 @@ impl<T: EthSpec> PubsubMessage<T> {
PubsubMessage::SignedContributionAndProof(_) => GossipKind::SignedContributionAndProof,
PubsubMessage::SyncCommitteeMessage(data) => GossipKind::SyncCommitteeMessage(data.0),
PubsubMessage::BlsToExecutionChange(_) => GossipKind::BlsToExecutionChange,
PubsubMessage::LightClientFinalityUpdate(_) => GossipKind::LightClientFinalityUpdate,
PubsubMessage::LightClientOptimisticUpdate(_) => {
GossipKind::LightClientOptimisticUpdate
}
}
}
@ -214,12 +224,10 @@ impl<T: EthSpec> PubsubMessage<T> {
| ForkName::Merge
| ForkName::Capella,
)
| None => {
return Err(format!(
| None => Err(format!(
"beacon_blobs_and_sidecar topic invalid for given fork digest {:?}",
gossip_topic.fork_digest
))
}
)),
}
}
GossipKind::VoluntaryExit => {
@ -260,6 +268,22 @@ impl<T: EthSpec> PubsubMessage<T> {
bls_to_execution_change,
)))
}
GossipKind::LightClientFinalityUpdate => {
let light_client_finality_update =
LightClientFinalityUpdate::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?;
Ok(PubsubMessage::LightClientFinalityUpdate(Box::new(
light_client_finality_update,
)))
}
GossipKind::LightClientOptimisticUpdate => {
let light_client_optimistic_update =
LightClientOptimisticUpdate::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?;
Ok(PubsubMessage::LightClientOptimisticUpdate(Box::new(
light_client_optimistic_update,
)))
}
}
}
}
@ -283,6 +307,8 @@ impl<T: EthSpec> PubsubMessage<T> {
PubsubMessage::SignedContributionAndProof(data) => data.as_ssz_bytes(),
PubsubMessage::SyncCommitteeMessage(data) => data.1.as_ssz_bytes(),
PubsubMessage::BlsToExecutionChange(data) => data.as_ssz_bytes(),
PubsubMessage::LightClientFinalityUpdate(data) => data.as_ssz_bytes(),
PubsubMessage::LightClientOptimisticUpdate(data) => data.as_ssz_bytes(),
}
}
}
@ -330,6 +356,12 @@ impl<T: EthSpec> std::fmt::Display for PubsubMessage<T> {
data.message.validator_index, data.message.to_execution_address
)
}
PubsubMessage::LightClientFinalityUpdate(_data) => {
write!(f, "Light CLient Finality Update")
}
PubsubMessage::LightClientOptimisticUpdate(_data) => {
write!(f, "Light CLient Optimistic Update")
}
}
}
}

View File

@ -20,6 +20,8 @@ pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing";
pub const SIGNED_CONTRIBUTION_AND_PROOF_TOPIC: &str = "sync_committee_contribution_and_proof";
pub const SYNC_COMMITTEE_PREFIX_TOPIC: &str = "sync_committee_";
pub const BLS_TO_EXECUTION_CHANGE_TOPIC: &str = "bls_to_execution_change";
pub const LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_update";
pub const LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update";
pub const CORE_TOPICS: [GossipKind; 8] = [
GossipKind::BeaconBlock,
@ -32,6 +34,11 @@ pub const CORE_TOPICS: [GossipKind; 8] = [
GossipKind::BlsToExecutionChange,
];
pub const LIGHT_CLIENT_GOSSIP_TOPICS: [GossipKind; 2] = [
GossipKind::LightClientFinalityUpdate,
GossipKind::LightClientOptimisticUpdate,
];
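// For reference: each kind above maps to a gossipsub topic string of the form
// `/eth2/{fork_digest}/light_client_finality_update/ssz_snappy` via the
// `Display` impl on `GossipTopic` below.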
/// A gossipsub topic which encapsulates the type of messages that should be sent and received over
/// the pubsub protocol and the way the messages should be encoded.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
@ -71,6 +78,10 @@ pub enum GossipKind {
SyncCommitteeMessage(SyncSubnetId),
/// Topic for validator messages which change their withdrawal address.
BlsToExecutionChange,
/// Topic for publishing finality updates for light clients.
LightClientFinalityUpdate,
/// Topic for publishing optimistic updates for light clients.
LightClientOptimisticUpdate,
}
impl std::fmt::Display for GossipKind {
@ -146,6 +157,8 @@ impl GossipTopic {
PROPOSER_SLASHING_TOPIC => GossipKind::ProposerSlashing,
ATTESTER_SLASHING_TOPIC => GossipKind::AttesterSlashing,
BLS_TO_EXECUTION_CHANGE_TOPIC => GossipKind::BlsToExecutionChange,
LIGHT_CLIENT_FINALITY_UPDATE => GossipKind::LightClientFinalityUpdate,
LIGHT_CLIENT_OPTIMISTIC_UPDATE => GossipKind::LightClientOptimisticUpdate,
topic => match committee_topic_index(topic) {
Some(subnet) => match subnet {
Subnet::Attestation(s) => GossipKind::Attestation(s),
@ -206,6 +219,8 @@ impl std::fmt::Display for GossipTopic {
format!("{}{}", SYNC_COMMITTEE_PREFIX_TOPIC, *index)
}
GossipKind::BlsToExecutionChange => BLS_TO_EXECUTION_CHANGE_TOPIC.into(),
GossipKind::LightClientFinalityUpdate => LIGHT_CLIENT_FINALITY_UPDATE.into(),
GossipKind::LightClientOptimisticUpdate => LIGHT_CLIENT_OPTIMISTIC_UPDATE.into(),
};
write!(
f,

View File

@ -64,7 +64,8 @@ use task_executor::TaskExecutor;
use tokio::sync::mpsc;
use types::signed_block_and_blobs::BlockWrapper;
use types::{
Attestation, AttesterSlashing, Hash256, ProposerSlashing, SignedAggregateAndProof,
Attestation, AttesterSlashing, Hash256, LightClientFinalityUpdate, LightClientOptimisticUpdate,
ProposerSlashing, SignedAggregateAndProof,
SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar, SignedBlsToExecutionChange,
SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId,
};
@ -134,6 +135,14 @@ const MAX_GOSSIP_PROPOSER_SLASHING_QUEUE_LEN: usize = 4_096;
/// before we start dropping them.
const MAX_GOSSIP_ATTESTER_SLASHING_QUEUE_LEN: usize = 4_096;
/// The maximum number of queued `LightClientFinalityUpdate` objects received on gossip that will be stored
/// before we start dropping them.
const MAX_GOSSIP_FINALITY_UPDATE_QUEUE_LEN: usize = 1_024;
/// The maximum number of queued `LightClientOptimisticUpdate` objects received on gossip that will be stored
/// before we start dropping them.
const MAX_GOSSIP_OPTIMISTIC_UPDATE_QUEUE_LEN: usize = 1_024;
/// The maximum number of queued `SyncCommitteeMessage` objects that will be stored before we start dropping
/// them.
const MAX_SYNC_MESSAGE_QUEUE_LEN: usize = 2048;
@ -211,6 +220,8 @@ pub const GOSSIP_PROPOSER_SLASHING: &str = "gossip_proposer_slashing";
pub const GOSSIP_ATTESTER_SLASHING: &str = "gossip_attester_slashing";
pub const GOSSIP_SYNC_SIGNATURE: &str = "gossip_sync_signature";
pub const GOSSIP_SYNC_CONTRIBUTION: &str = "gossip_sync_contribution";
pub const GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_update";
pub const GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update";
pub const RPC_BLOCK: &str = "rpc_block";
pub const CHAIN_SEGMENT: &str = "chain_segment";
pub const STATUS_PROCESSING: &str = "status_processing";
@ -515,6 +526,42 @@ impl<T: BeaconChainTypes> WorkEvent<T> {
}
}
/// Create a new `Work` event for some light client finality update.
pub fn gossip_light_client_finality_update(
message_id: MessageId,
peer_id: PeerId,
light_client_finality_update: Box<LightClientFinalityUpdate<T::EthSpec>>,
seen_timestamp: Duration,
) -> Self {
Self {
drop_during_sync: true,
work: Work::GossipLightClientFinalityUpdate {
message_id,
peer_id,
light_client_finality_update,
seen_timestamp,
},
}
}
/// Create a new `Work` event for some light client optimistic update.
pub fn gossip_light_client_optimistic_update(
message_id: MessageId,
peer_id: PeerId,
light_client_optimistic_update: Box<LightClientOptimisticUpdate<T::EthSpec>>,
seen_timestamp: Duration,
) -> Self {
Self {
drop_during_sync: true,
work: Work::GossipLightClientOptimisticUpdate {
message_id,
peer_id,
light_client_optimistic_update,
seen_timestamp,
},
}
}
/// Create a new `Work` event for some attester slashing.
pub fn gossip_attester_slashing(
message_id: MessageId,
@ -822,6 +869,18 @@ pub enum Work<T: BeaconChainTypes> {
sync_contribution: Box<SignedContributionAndProof<T::EthSpec>>,
seen_timestamp: Duration,
},
GossipLightClientFinalityUpdate {
message_id: MessageId,
peer_id: PeerId,
light_client_finality_update: Box<LightClientFinalityUpdate<T::EthSpec>>,
seen_timestamp: Duration,
},
GossipLightClientOptimisticUpdate {
message_id: MessageId,
peer_id: PeerId,
light_client_optimistic_update: Box<LightClientOptimisticUpdate<T::EthSpec>>,
seen_timestamp: Duration,
},
RpcBlock {
block_root: Hash256,
block: BlockWrapper<T::EthSpec>,
@ -885,6 +944,8 @@ impl<T: BeaconChainTypes> Work<T> {
Work::GossipAttesterSlashing { .. } => GOSSIP_ATTESTER_SLASHING,
Work::GossipSyncSignature { .. } => GOSSIP_SYNC_SIGNATURE,
Work::GossipSyncContribution { .. } => GOSSIP_SYNC_CONTRIBUTION,
Work::GossipLightClientFinalityUpdate { .. } => GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE,
Work::GossipLightClientOptimisticUpdate { .. } => GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE,
Work::RpcBlock { .. } => RPC_BLOCK,
Work::ChainSegment { .. } => CHAIN_SEGMENT,
Work::Status { .. } => STATUS_PROCESSING,
@ -1027,6 +1088,10 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
let mut gossip_attester_slashing_queue =
FifoQueue::new(MAX_GOSSIP_ATTESTER_SLASHING_QUEUE_LEN);
// Using a FIFO queue for light client updates to maintain sequence order.
let mut finality_update_queue = FifoQueue::new(MAX_GOSSIP_FINALITY_UPDATE_QUEUE_LEN);
let mut optimistic_update_queue = FifoQueue::new(MAX_GOSSIP_OPTIMISTIC_UPDATE_QUEUE_LEN);
// Using a FIFO queue since blocks need to be imported sequentially.
let mut rpc_block_queue = FifoQueue::new(MAX_RPC_BLOCK_QUEUE_LEN);
let mut chain_segment_queue = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN);
@ -1383,6 +1448,12 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
Work::GossipSyncContribution { .. } => {
sync_contribution_queue.push(work)
}
Work::GossipLightClientFinalityUpdate { .. } => {
finality_update_queue.push(work, work_id, &self.log)
}
Work::GossipLightClientOptimisticUpdate { .. } => {
optimistic_update_queue.push(work, work_id, &self.log)
}
Work::RpcBlock { .. } => rpc_block_queue.push(work, work_id, &self.log),
Work::ChainSegment { ref process_id, .. } => match process_id {
ChainSegmentProcessId::RangeBatchId { .. }
@ -1719,7 +1790,7 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
)
}),
/*
* Syn contribution verification.
* Sync contribution verification.
*/
Work::GossipSyncContribution {
message_id,
@ -1748,6 +1819,38 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
*bls_to_execution_change,
)
}),
/*
* Light client finality update verification.
*/
Work::GossipLightClientFinalityUpdate {
message_id,
peer_id,
light_client_finality_update,
seen_timestamp,
} => task_spawner.spawn_blocking(move || {
worker.process_gossip_finality_update(
message_id,
peer_id,
*light_client_finality_update,
seen_timestamp,
)
}),
/*
* Light client optimistic update verification.
*/
Work::GossipLightClientOptimisticUpdate {
message_id,
peer_id,
light_client_optimistic_update,
seen_timestamp,
} => task_spawner.spawn_blocking(move || {
worker.process_gossip_optimistic_update(
message_id,
peer_id,
*light_client_optimistic_update,
seen_timestamp,
)
}),
/*
* Verification for beacon blocks received during syncing via RPC.
*/

View File

@ -3,6 +3,8 @@ use crate::{metrics, service::NetworkMessage, sync::SyncMessage};
use beacon_chain::store::Error;
use beacon_chain::{
attestation_verification::{self, Error as AttnError, VerifiedAttestation},
light_client_finality_update_verification::Error as LightClientFinalityUpdateError,
light_client_optimistic_update_verification::Error as LightClientOptimisticUpdateError,
observed_operations::ObservationOutcome,
sync_committee_verification::{self, Error as SyncCommitteeError},
validator_monitor::get_block_delay_ms,
@ -19,9 +21,11 @@ use store::hot_cold_store::HotColdDBError;
use tokio::sync::mpsc;
use types::signed_block_and_blobs::BlockWrapper;
use types::{
Attestation, AttesterSlashing, BlobsSidecar, EthSpec, Hash256, IndexedAttestation,
LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing,
SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar,
SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId,
SyncCommitteeMessage, SyncSubnetId,
};
@ -1376,6 +1380,138 @@ impl<T: BeaconChainTypes> Worker<T> {
metrics::inc_counter(&metrics::BEACON_PROCESSOR_SYNC_CONTRIBUTION_IMPORTED_TOTAL);
}
pub fn process_gossip_finality_update(
self,
message_id: MessageId,
peer_id: PeerId,
light_client_finality_update: LightClientFinalityUpdate<T::EthSpec>,
seen_timestamp: Duration,
) {
match self
.chain
.verify_finality_update_for_gossip(light_client_finality_update, seen_timestamp)
{
Ok(_verified_light_client_finality_update) => {
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept);
}
Err(e) => {
metrics::register_finality_update_error(&e);
match e {
LightClientFinalityUpdateError::InvalidLightClientFinalityUpdate => {
debug!(
self.log,
"LC invalid finality update";
"peer" => %peer_id,
"error" => ?e,
);
self.gossip_penalize_peer(
peer_id,
PeerAction::LowToleranceError,
"light_client_gossip_error",
);
}
LightClientFinalityUpdateError::TooEarly => {
debug!(
self.log,
"LC finality update too early";
"peer" => %peer_id,
"error" => ?e,
);
self.gossip_penalize_peer(
peer_id,
PeerAction::HighToleranceError,
"light_client_gossip_error",
);
}
LightClientFinalityUpdateError::FinalityUpdateAlreadySeen => debug!(
self.log,
"LC finality update already seen";
"peer" => %peer_id,
"error" => ?e,
),
LightClientFinalityUpdateError::BeaconChainError(_)
| LightClientFinalityUpdateError::LightClientUpdateError(_)
| LightClientFinalityUpdateError::SigSlotStartIsNone
| LightClientFinalityUpdateError::FailedConstructingUpdate => debug!(
self.log,
"LC error constructing finality update";
"peer" => %peer_id,
"error" => ?e,
),
}
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
}
};
}
pub fn process_gossip_optimistic_update(
self,
message_id: MessageId,
peer_id: PeerId,
light_client_optimistic_update: LightClientOptimisticUpdate<T::EthSpec>,
seen_timestamp: Duration,
) {
match self
.chain
.verify_optimistic_update_for_gossip(light_client_optimistic_update, seen_timestamp)
{
Ok(_verified_light_client_optimistic_update) => {
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept);
}
Err(e) => {
metrics::register_optimistic_update_error(&e);
match e {
LightClientOptimisticUpdateError::InvalidLightClientOptimisticUpdate => {
debug!(
self.log,
"LC invalid optimistic update";
"peer" => %peer_id,
"error" => ?e,
);
self.gossip_penalize_peer(
peer_id,
PeerAction::HighToleranceError,
"light_client_gossip_error",
)
}
LightClientOptimisticUpdateError::TooEarly => {
debug!(
self.log,
"LC optimistic update too early";
"peer" => %peer_id,
"error" => ?e,
);
self.gossip_penalize_peer(
peer_id,
PeerAction::HighToleranceError,
"light_client_gossip_error",
);
}
LightClientOptimisticUpdateError::OptimisticUpdateAlreadySeen => debug!(
self.log,
"LC optimistic update already seen";
"peer" => %peer_id,
"error" => ?e,
),
LightClientOptimisticUpdateError::BeaconChainError(_)
| LightClientOptimisticUpdateError::LightClientUpdateError(_)
| LightClientOptimisticUpdateError::SigSlotStartIsNone
| LightClientOptimisticUpdateError::FailedConstructingUpdate => debug!(
self.log,
"LC error constructing optimistic update";
"peer" => %peer_id,
"error" => ?e,
),
}
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
}
};
}
/// Handle an error whilst verifying an `Attestation` or `SignedAggregateAndProof` from the
/// network.
fn handle_attestation_verification_failure(

View File

@ -1,5 +1,7 @@
use beacon_chain::{
attestation_verification::Error as AttnError,
light_client_finality_update_verification::Error as LightClientFinalityUpdateError,
light_client_optimistic_update_verification::Error as LightClientOptimisticUpdateError,
sync_committee_verification::Error as SyncCommitteeError,
};
use fnv::FnvHashMap;
@ -274,6 +276,19 @@ lazy_static! {
"Gossipsub sync_committee errors per error type",
&["type"]
);
pub static ref GOSSIP_FINALITY_UPDATE_ERRORS_PER_TYPE: Result<IntCounterVec> =
try_create_int_counter_vec(
"gossipsub_light_client_finality_update_errors_per_type",
"Gossipsub light_client_finality_update errors per error type",
&["type"]
);
pub static ref GOSSIP_OPTIMISTIC_UPDATE_ERRORS_PER_TYPE: Result<IntCounterVec> =
try_create_int_counter_vec(
"gossipsub_light_client_optimistic_update_errors_per_type",
"Gossipsub light_client_optimistic_update errors per error type",
&["type"]
);
/*
* Network queue metrics
@ -380,6 +395,14 @@ pub fn update_bandwidth_metrics(bandwidth: Arc<BandwidthSinks>) {
);
}
pub fn register_finality_update_error(error: &LightClientFinalityUpdateError) {
inc_counter_vec(&GOSSIP_FINALITY_UPDATE_ERRORS_PER_TYPE, &[error.as_ref()]);
}
pub fn register_optimistic_update_error(error: &LightClientOptimisticUpdateError) {
inc_counter_vec(&GOSSIP_OPTIMISTIC_UPDATE_ERRORS_PER_TYPE, &[error.as_ref()]);
}
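// These are invoked from the gossip worker on verification failure, e.g.
// `register_finality_update_error(&e)` bumps the counter labelled with the
// error's variant name (via `AsRef<str>`).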
pub fn register_attestation_error(error: &AttnError) {
inc_counter_vec(&GOSSIP_ATTESTATION_ERRORS_PER_TYPE, &[error.as_ref()]);
}

View File

@ -314,6 +314,30 @@ impl<T: BeaconChainTypes> Router<T> {
bls_to_execution_change,
);
}
PubsubMessage::LightClientFinalityUpdate(light_client_finality_update) => {
trace!(
self.log,
"Received light client finality update";
"peer_id" => %peer_id
);
self.processor.on_light_client_finality_update_gossip(
id,
peer_id,
light_client_finality_update,
);
}
PubsubMessage::LightClientOptimisticUpdate(light_client_optimistic_update) => {
trace!(
self.log,
"Received light client optimistic update";
"peer_id" => %peer_id
);
self.processor.on_light_client_optimistic_update_gossip(
id,
peer_id,
light_client_optimistic_update,
);
}
}
}
}

View File

@ -18,10 +18,11 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH};
use store::SyncCommitteeMessage;
use tokio::sync::mpsc;
use types::{
Attestation, AttesterSlashing, BlobsSidecar, EthSpec, ProposerSlashing,
SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar,
Attestation, AttesterSlashing, BlobsSidecar, EthSpec, LightClientFinalityUpdate,
LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock,
SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SubnetId,
SyncSubnetId,
SignedBeaconBlockAndBlobsSidecar,
};
/// Processes validated messages from the network. It relays necessary data to the syncing thread
@ -488,6 +489,34 @@ impl<T: BeaconChainTypes> Processor<T> {
))
}
pub fn on_light_client_finality_update_gossip(
&mut self,
message_id: MessageId,
peer_id: PeerId,
light_client_finality_update: Box<LightClientFinalityUpdate<T::EthSpec>>,
) {
self.send_beacon_processor_work(BeaconWorkEvent::gossip_light_client_finality_update(
message_id,
peer_id,
light_client_finality_update,
timestamp_now(),
))
}
pub fn on_light_client_optimistic_update_gossip(
&mut self,
message_id: MessageId,
peer_id: PeerId,
light_client_optimistic_update: Box<LightClientOptimisticUpdate<T::EthSpec>>,
) {
self.send_beacon_processor_work(BeaconWorkEvent::gossip_light_client_optimistic_update(
message_id,
peer_id,
light_client_optimistic_update,
timestamp_now(),
))
}
fn send_beacon_processor_work(&mut self, work: BeaconWorkEvent<T>) {
self.beacon_processor_send
.try_send(work)

View File

@ -208,6 +208,8 @@ pub struct NetworkService<T: BeaconChainTypes> {
metrics_update: tokio::time::Interval,
/// gossipsub_parameter_update timer
gossipsub_parameter_update: tokio::time::Interval,
/// Whether the light client server is enabled.
enable_light_client_server: bool,
fork_context: Arc<ForkContext>,
/// The logger for the network service.
log: slog::Logger,
@ -345,6 +347,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
gossipsub_parameter_update,
fork_context,
log: network_log,
enable_light_client_server: config.enable_light_client_server,
};
network_service.spawn_service(executor);
@ -679,6 +682,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
}
return;
}
let mut subscribed_topics: Vec<GossipTopic> = vec![];
for topic_kind in lighthouse_network::types::CORE_TOPICS.iter() {
for fork_digest in self.required_gossip_fork_digests() {
@ -695,6 +699,25 @@ impl<T: BeaconChainTypes> NetworkService<T> {
}
}
if self.enable_light_client_server {
for light_client_topic_kind in
lighthouse_network::types::LIGHT_CLIENT_GOSSIP_TOPICS.iter()
{
for fork_digest in self.required_gossip_fork_digests() {
let light_client_topic = GossipTopic::new(
light_client_topic_kind.clone(),
GossipEncoding::default(),
fork_digest,
);
if self.libp2p.subscribe(light_client_topic.clone()) {
subscribed_topics.push(light_client_topic);
} else {
warn!(self.log, "Could not subscribe to topic"; "topic" => %light_client_topic);
}
}
}
}
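// i.e. with the light client server enabled we subscribe to two additional
// topic kinds (finality + optimistic updates) per required fork digest.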
// If we are to subscribe to all subnets we do it here
if self.subscribe_all_subnets {
for subnet_id in 0..<<T as BeaconChainTypes>::EthSpec as EthSpec>::SubnetBitfieldLength::to_u64() {

View File

@ -558,8 +558,10 @@ impl<T: EthSpec> OperationPool<T> {
)
}
// TODO: remove this whole block once withdrawals-processing is removed
#[cfg(not(feature = "withdrawals-processing"))]
{
#[allow(clippy::drop_copy)]
drop((state, spec));
vec![]
}
@ -597,8 +599,10 @@ impl<T: EthSpec> OperationPool<T> {
);
}
// TODO: remove this whole block once withdrawals-processing is removed
#[cfg(not(feature = "withdrawals-processing"))]
{
#[allow(clippy::drop_copy)]
drop((head_block, head_state, spec));
}
}

View File

@ -772,6 +772,38 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
experimental as it may obscure performance issues.")
.takes_value(false)
)
.arg(
Arg::with_name("disable-proposer-reorgs")
.long("disable-proposer-reorgs")
.help("Do not attempt to reorg late blocks from other validators when proposing.")
.takes_value(false)
)
.arg(
Arg::with_name("proposer-reorg-threshold")
.long("proposer-reorg-threshold")
.value_name("PERCENT")
.help("Percentage of vote weight below which to attempt a proposer reorg. \
Default: 20%")
.conflicts_with("disable-proposer-reorgs")
)
.arg(
Arg::with_name("proposer-reorg-epochs-since-finalization")
.long("proposer-reorg-epochs-since-finalization")
.value_name("EPOCHS")
.help("Maximum number of epochs since finalization at which proposer reorgs are \
allowed. Default: 2")
.conflicts_with("disable-proposer-reorgs")
)
.arg(
Arg::with_name("prepare-payload-lookahead")
.long("prepare-payload-lookahead")
.value_name("MILLISECONDS")
.help("The time before the start of a proposal slot at which payload attributes \
should be sent. Low values are useful for execution nodes which don't \
improve their payload after the first call, and high values are useful \
for ensuring the EL is given ample notice. Default: 1/3 of a slot.")
.takes_value(true)
)
.arg(
Arg::with_name("fork-choice-before-proposal-timeout")
.long("fork-choice-before-proposal-timeout")

View File

@ -1,3 +1,7 @@
use beacon_chain::chain_config::{
ReOrgThreshold, DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR,
DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_THRESHOLD,
};
use clap::ArgMatches;
use clap_utils::flags::DISABLE_MALLOC_TUNING_FLAG;
use client::{ClientConfig, ClientGenesis};
@ -18,6 +22,7 @@ use std::net::Ipv6Addr;
use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs};
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::time::Duration;
use types::{Checkpoint, Epoch, EthSpec, Hash256, PublicKeyBytes, GRAFFITI_BYTES_LEN};
use unused_port::{unused_tcp_port, unused_udp_port};
@ -680,11 +685,32 @@ pub fn get_config<E: EthSpec>(
client_config.chain.enable_lock_timeouts = false;
}
if cli_args.is_present("disable-proposer-reorgs") {
client_config.chain.re_org_threshold = None;
} else {
client_config.chain.re_org_threshold = Some(
clap_utils::parse_optional(cli_args, "proposer-reorg-threshold")?
.map(ReOrgThreshold)
.unwrap_or(DEFAULT_RE_ORG_THRESHOLD),
);
client_config.chain.re_org_max_epochs_since_finalization =
clap_utils::parse_optional(cli_args, "proposer-reorg-epochs-since-finalization")?
.unwrap_or(DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION);
}
// Note: This overrides any previous flags that enable this option.
if cli_args.is_present("disable-deposit-contract-sync") {
client_config.sync_eth1_chain = false;
}
client_config.chain.prepare_payload_lookahead =
clap_utils::parse_optional(cli_args, "prepare-payload-lookahead")?
.map(Duration::from_millis)
.unwrap_or_else(|| {
Duration::from_secs(spec.seconds_per_slot)
/ DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR
});
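// e.g. on mainnet (12s slots) this default works out to a 4s lookahead,
// matching the "1/3 of a slot" default in the CLI help (assuming
// DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR == 3).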
if let Some(timeout) =
clap_utils::parse_optional(cli_args, "fork-choice-before-proposal-timeout")?
{

View File

@ -47,6 +47,7 @@
* [Release Candidates](./advanced-release-candidates.md)
* [MEV and Lighthouse](./builders.md)
* [Merge Migration](./merge-migration.md)
* [Late Block Re-orgs](./late-block-re-orgs.md)
* [Contributing](./contributing.md)
* [Development Environment](./setup.md)
* [FAQs](./faq.md)

View File

@ -121,6 +121,33 @@ curl -X GET "http://localhost:5052/lighthouse/ui/validator_count" -H "accept: ap
}
```
### `/lighthouse/ui/validator_metrics`
Re-exposes certain metrics from the validator monitor to the HTTP API.
Metrics will only be returned for validators that are currently being monitored and are present in the POST request body.
```bash
curl -X POST "http://localhost:5052/lighthouse/ui/validator_metrics" -d '{"indices": [12345]}' -H "Content-Type: application/json" | jq
```
```json
{
"data": {
"validators": {
"12345": {
"attestation_hits": 10,
"attestation_misses": 0,
"attestation_hit_percentage": 100,
"attestation_head_hits": 10,
"attestation_head_misses": 0,
"attestation_head_hit_percentage": 100,
"attestation_target_hits": 5,
"attestation_target_misses": 5,
"attestation_target_hit_percentage": 50
}
}
}
}
```
### `/lighthouse/syncing`
```bash
@ -474,6 +501,102 @@ The endpoint will return immediately. See the beacon node logs for an indication
Manually provide `SignedBeaconBlock`s to backfill the database. This is intended
for use by Lighthouse developers during testing only.
### `/lighthouse/merge_readiness`
```bash
curl -X GET "http://localhost:5052/lighthouse/merge_readiness" | jq
```
```
{
"data":{
"type":"ready",
"config":{
"terminal_total_difficulty":"6400"
},
"current_difficulty":"4800"
}
}
```
### `/lighthouse/analysis/attestation_performance/{index}`
Fetch information about the attestation performance of a validator index or all validators for a
range of consecutive epochs.
Two query parameters are required:
* `start_epoch` (inclusive): the first epoch to compute attestation performance for.
* `end_epoch` (inclusive): the final epoch to compute attestation performance for.
Example:
```bash
curl -X GET "http://localhost:5052/lighthouse/analysis/attestation_performance/1?start_epoch=1&end_epoch=1" | jq
```
```json
[
{
"index": 1,
"epochs": {
"1": {
"active": true,
"head": true,
"target": true,
"source": true,
"delay": 1
}
}
}
]
```
Instead of specifying a validator index, you can specify the entire validator set by using `global`:
```bash
curl -X GET "http://localhost:5052/lighthouse/analysis/attestation_performance/global?start_epoch=1&end_epoch=1" | jq
```
```json
[
{
"index": 0,
"epochs": {
"1": {
"active": true,
"head": true,
"target": true,
"source": true,
"delay": 1
}
}
},
{
"index": 1,
"epochs": {
"1": {
"active": true,
"head": true,
"target": true,
"source": true,
"delay": 1
}
}
},
{
..
}
]
```
Caveats:
* For maximum efficiency the `start_epoch` should satisfy `(start_epoch * slots_per_epoch) % slots_per_restore_point == 1`.
This is because the state _prior_ to the `start_epoch` needs to be loaded from the database,
and loading a state on a boundary is most efficient.
### `/lighthouse/analysis/block_rewards`
Fetch information about the block rewards paid to proposers for a range of consecutive blocks.
@ -486,7 +609,7 @@ Two query parameters are required:
Example:
```bash
curl "http://localhost:5052/lighthouse/analysis/block_rewards?start_slot=1&end_slot=32" | jq
curl -X GET "http://localhost:5052/lighthouse/analysis/block_rewards?start_slot=1&end_slot=32" | jq
```
```json
@ -514,21 +637,43 @@ Caveats:
[block_reward_src]:
https://github.com/sigp/lighthouse/tree/unstable/common/eth2/src/lighthouse/block_rewards.rs
### `/lighthouse/analysis/block_packing`
### `/lighthouse/merge_readiness`
Fetch information about the block packing efficiency of blocks for a range of consecutive
epochs.
Two query parameters are required:
* `start_epoch` (inclusive): the epoch of the first block to compute packing efficiency for.
* `end_epoch` (inclusive): the epoch of the last block to compute packing efficiency for.
```bash
curl -X GET "http://localhost:5052/lighthouse/merge_readiness"
curl -X GET "http://localhost:5052/lighthouse/analysis/block_packing_efficiency?start_epoch=1&end_epoch=1" | jq
```
```json
[
{
"slot": "33",
"block_hash": "0xb20970bb97c6c6de6b1e2b689d6381dd15b3d3518fbaee032229495f963bd5da",
"proposer_info": {
"validator_index": 855,
"graffiti": "poapZoJ7zWNfK7F3nWjEausWVBvKa6gA"
},
"available_attestations": 3805,
"included_attestations": 1143,
"prior_skip_slots": 1
},
{
..
}
]
```
{
"data":{
"type":"ready",
"config":{
"terminal_total_difficulty":"6400"
},
"current_difficulty":"4800"
}
}
```
Caveats:
* `start_epoch` must not be `0`.
* For maximum efficiency the `start_epoch` should satisfy `(start_epoch * slots_per_epoch) % slots_per_restore_point == 1`.
This is because the state _prior_ to the `start_epoch` needs to be loaded from the database, and
loading a state on a boundary is most efficient.

View File

@ -117,6 +117,31 @@ Returns information regarding the health of the host machine.
}
```
## `GET /lighthouse/ui/graffiti`
Returns the graffiti that will be used for the next block proposal of each validator.
### HTTP Specification
| Property | Specification |
|-------------------|--------------------------------------------|
| Path | `/lighthouse/ui/graffiti` |
| Method | GET |
| Required Headers | [`Authorization`](./api-vc-auth-header.md) |
| Typical Responses | 200 |
### Example Response Body
```json
{
"data": {
"0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e": "mr f was here",
"0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b": "mr v was here",
"0x872c61b4a7f8510ec809e5b023f5fdda2105d024c470ddbbeca4bc74e8280af0d178d749853e8f6a841083ac1b4db98f": null
}
}
```
## `GET /lighthouse/spec`
Returns the Ethereum proof-of-stake consensus specification loaded for this validator.

View File

@ -6,4 +6,4 @@ RESTful HTTP/JSON APIs.
There are two APIs served by Lighthouse:
- [Beacon Node API](./api-bn.md)
- [Validator Client API](./api-vc.md) (not yet released).
- [Validator Client API](./api-vc.md)

View File

@ -200,19 +200,23 @@ for `INFO` and `WARN` messages indicating why the builder was not used.
Examples of messages indicating fallback to a locally produced block are:
```
INFO No payload provided by connected builder.
INFO Builder did not return a payload
```
```
WARN Unable to retrieve a payload from a connected builder
WARN Builder error when requesting payload
```
```
INFO The value offered by the connected builder does not meet the configured profit threshold.
WARN Builder returned invalid payload
```
```
INFO Due to poor chain health the local execution engine will be used for payload construction.
INFO Builder payload ignored
```
```
INFO Chain is unhealthy, using local payload
```
In case of fallback you should see a log indicating that the locally produced payload was

View File

@ -0,0 +1,60 @@
# Late Block Re-orgs
Since v3.4.0 Lighthouse will opportunistically re-org late blocks when proposing.
This feature is intended to disincentivise late blocks and improve network health. Proposing a
re-orging block is also more profitable for the proposer because it increases the number of
attestations and transactions that can be included.
## Command line flags
There are three flags which control the re-orging behaviour:
* `--disable-proposer-reorgs`: turn re-orging off (it's on by default).
* `--proposer-reorg-threshold N`: attempt to orphan blocks with less than N% of the committee vote. If this parameter isn't set then N defaults to 20% when the feature is enabled.
* `--proposer-reorg-epochs-since-finalization N`: only attempt to re-org late blocks when the number of epochs since finalization is less than or equal to N. The default is 2 epochs,
meaning re-orgs will only be attempted when the chain is finalizing optimally.
All flags should be applied to `lighthouse bn`. The default configuration is recommended as it
balances the chance of the re-org succeeding against the chance of failure due to attestations
arriving late and making the re-org block non-viable.
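For example, to run with re-orgs enabled and the documented defaults made
explicit (a sketch; the values shown are just the defaults described above):

```bash
lighthouse bn \
  --proposer-reorg-threshold 20 \
  --proposer-reorg-epochs-since-finalization 2
```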
## Safeguards
To prevent excessive re-orgs there are several safeguards in place that limit when a re-org
will be attempted.
The full conditions are described in [the spec][] but the most important ones are:
* Only single-slot re-orgs: Lighthouse will build a block at N + 1 to re-org N by building on the
parent N - 1. The result is a chain with exactly one skipped slot.
* No epoch boundaries: to ensure that the selected proposer does not change, Lighthouse will
not propose a re-orging block in the 0th slot of an epoch.
## Logs
You can track the reasons for re-orgs being attempted (or not) via Lighthouse's logs.
A pair of messages at `INFO` level will be logged if a re-org opportunity is detected:
> INFO Attempting re-org due to weak head threshold_weight: 45455983852725, head_weight: 0, parent: 0x09d953b69041f280758400c671130d174113bbf57c2d26553a77fb514cad4890, weak_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49
> INFO Proposing block to re-org current head head_to_reorg: 0xf64f…2b49, slot: 1105320
This should be followed shortly after by a `WARN` log indicating that a re-org occurred. This is
expected and normal:
> WARN Beacon chain re-org reorg_distance: 1, new_slot: 1105320, new_head: 0x72791549e4ca792f91053bc7cf1e55c6fbe745f78ce7a16fc3acb6f09161becd, previous_slot: 1105319, previous_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49
In case a re-org is not viable (which should be most of the time), Lighthouse will just propose a
block as normal and log the reason the re-org was not attempted at debug level:
> DEBG Not attempting re-org reason: head not late
If you are interested in digging into the timing of `forkchoiceUpdated` messages sent to the
execution layer, there is also a debug log for the suppression of `forkchoiceUpdated` messages
when Lighthouse thinks that a re-org is likely:
> DEBG Fork choice update overridden slot: 1105320, override: 0x09d953b69041f280758400c671130d174113bbf57c2d26553a77fb514cad4890, canonical_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49
[the spec]: https://github.com/ethereum/consensus-specs/pull/3034

View File

@ -60,6 +60,13 @@ impl Drop for TestRuntime {
}
}
impl TestRuntime {
pub fn set_logger(&mut self, log: Logger) {
self.log = log.clone();
self.task_executor.log = log;
}
}
pub fn null_logger() -> Result<Logger, String> {
let log_builder = NullLoggerBuilder;
log_builder

View File

@ -1,6 +1,7 @@
use crate::{ForkChoiceStore, InvalidationOperation};
use proto_array::{
Block as ProtoBlock, CountUnrealizedFull, ExecutionStatus, ProtoArrayForkChoice,
Block as ProtoBlock, CountUnrealizedFull, ExecutionStatus, ProposerHeadError, ProposerHeadInfo,
ProtoArrayForkChoice, ReOrgThreshold,
};
use slog::{crit, debug, warn, Logger};
use ssz_derive::{Decode, Encode};
@ -23,7 +24,8 @@ pub enum Error<T> {
InvalidAttestation(InvalidAttestation),
InvalidAttesterSlashing(AttesterSlashingValidationError),
InvalidBlock(InvalidBlock),
ProtoArrayError(String),
ProtoArrayStringError(String),
ProtoArrayError(proto_array::Error),
InvalidProtoArrayBytes(String),
InvalidLegacyProtoArrayBytes(String),
FailedToProcessInvalidExecutionPayload(String),
@ -45,6 +47,7 @@ pub enum Error<T> {
ForkChoiceStoreError(T),
UnableToSetJustifiedCheckpoint(T),
AfterBlockFailed(T),
ProposerHeadError(T),
InvalidAnchor {
block_slot: Slot,
state_slot: Slot,
@ -60,6 +63,13 @@ pub enum Error<T> {
MissingFinalizedBlock {
finalized_checkpoint: Checkpoint,
},
WrongSlotForGetProposerHead {
current_slot: Slot,
fc_store_slot: Slot,
},
ProposerBoostNotExpiredForGetProposerHead {
proposer_boost_root: Hash256,
},
UnrealizedVoteProcessing(state_processing::EpochProcessingError),
ParticipationCacheBuild(BeaconStateError),
ValidatorStatuses(BeaconStateError),
@ -154,6 +164,12 @@ pub enum InvalidAttestation {
impl<T> From<String> for Error<T> {
fn from(e: String) -> Self {
Error::ProtoArrayStringError(e)
}
}
impl<T> From<proto_array::Error> for Error<T> {
fn from(e: proto_array::Error) -> Self {
Error::ProtoArrayError(e)
}
}
@ -555,6 +571,69 @@ where
Ok(head_root)
}
/// Get the block to build on as proposer, taking into account proposer re-orgs.
///
/// You *must* call `get_head` for the proposal slot prior to calling this function and pass
/// in the result of `get_head` as `canonical_head`.
pub fn get_proposer_head(
&self,
current_slot: Slot,
canonical_head: Hash256,
re_org_threshold: ReOrgThreshold,
max_epochs_since_finalization: Epoch,
) -> Result<ProposerHeadInfo, ProposerHeadError<Error<proto_array::Error>>> {
// Ensure that fork choice has already been updated for the current slot. This prevents
// us from having to take a write lock or do any dequeueing of attestations in this
// function.
let fc_store_slot = self.fc_store.get_current_slot();
if current_slot != fc_store_slot {
return Err(ProposerHeadError::Error(
Error::WrongSlotForGetProposerHead {
current_slot,
fc_store_slot,
},
));
}
// Similarly, the proposer boost for the previous head should already have expired.
let proposer_boost_root = self.fc_store.proposer_boost_root();
if !proposer_boost_root.is_zero() {
return Err(ProposerHeadError::Error(
Error::ProposerBoostNotExpiredForGetProposerHead {
proposer_boost_root,
},
));
}
self.proto_array
.get_proposer_head::<E>(
current_slot,
canonical_head,
self.fc_store.justified_balances(),
re_org_threshold,
max_epochs_since_finalization,
)
.map_err(ProposerHeadError::convert_inner_error)
}
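// Hedged call-pattern sketch: callers are expected to do roughly
//
//     let head = fork_choice.get_head(current_slot, ...)?;
//     let info = fork_choice.get_proposer_head(current_slot, head, threshold, max_epochs)?;
//
// so that the slot and proposer-boost checks above pass in the normal case.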
pub fn get_preliminary_proposer_head(
&self,
canonical_head: Hash256,
re_org_threshold: ReOrgThreshold,
max_epochs_since_finalization: Epoch,
) -> Result<ProposerHeadInfo, ProposerHeadError<Error<proto_array::Error>>> {
let current_slot = self.fc_store.get_current_slot();
self.proto_array
.get_proposer_head_info::<E>(
current_slot,
canonical_head,
self.fc_store.justified_balances(),
re_org_threshold,
max_epochs_since_finalization,
)
.map_err(ProposerHeadError::convert_inner_error)
}
/// Return information about:
///
/// - The LMD head of the chain.

View File

@ -1,3 +1,4 @@
use proto_array::JustifiedBalances;
use std::collections::BTreeSet;
use std::fmt::Debug;
use types::{AbstractExecPayload, BeaconBlockRef, BeaconState, Checkpoint, EthSpec, Hash256, Slot};
@ -44,7 +45,7 @@ pub trait ForkChoiceStore<T: EthSpec>: Sized {
fn justified_checkpoint(&self) -> &Checkpoint;
/// Returns balances from the `state` identified by `justified_checkpoint.root`.
fn justified_balances(&self) -> &[u64];
fn justified_balances(&self) -> &JustifiedBalances;
/// Returns the `best_justified_checkpoint`.
fn best_justified_checkpoint(&self) -> &Checkpoint;

View File

@ -378,9 +378,13 @@ impl ForkChoiceTest {
assert_eq!(
&balances[..],
fc.fc_store().justified_balances(),
&fc.fc_store().justified_balances().effective_balances,
"balances should match"
)
);
assert_eq!(
balances.iter().sum::<u64>(),
fc.fc_store().justified_balances().total_effective_balance
);
}
/// Returns an attestation that is valid for some slot in the given `chain`.

View File

@@ -15,3 +15,4 @@ eth2_ssz_derive = "0.3.1"
serde = "1.0.116"
serde_derive = "1.0.116"
serde_yaml = "0.8.13"
safe_arith = { path = "../safe_arith" }

View File

@@ -1,3 +1,4 @@
use safe_arith::ArithError;
use types::{Checkpoint, Epoch, ExecutionBlockHash, Hash256, Slot};
#[derive(Clone, PartialEq, Debug)]
@@ -15,6 +16,7 @@ pub enum Error {
InvalidNodeDelta(usize),
DeltaOverflow(usize),
ProposerBoostOverflow(usize),
ReOrgThresholdOverflow,
IndexOverflow(&'static str),
InvalidExecutionDeltaOverflow(usize),
InvalidDeltaLen {
@@ -48,6 +50,13 @@ pub enum Error {
block_root: Hash256,
parent_root: Hash256,
},
Arith(ArithError),
}
impl From<ArithError> for Error {
fn from(e: ArithError) -> Self {
Error::Arith(e)
}
}
#[derive(Clone, PartialEq, Debug)]

View File

@@ -5,7 +5,7 @@ mod votes;
use crate::proto_array::CountUnrealizedFull;
use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice};
use crate::InvalidationOperation;
use crate::{InvalidationOperation, JustifiedBalances};
use serde_derive::{Deserialize, Serialize};
use std::collections::BTreeSet;
use types::{
@@ -101,11 +101,14 @@ impl ForkChoiceTestDefinition {
justified_state_balances,
expected_head,
} => {
let justified_balances =
JustifiedBalances::from_effective_balances(justified_state_balances)
.unwrap();
let head = fork_choice
.find_head::<MainnetEthSpec>(
justified_checkpoint,
finalized_checkpoint,
&justified_state_balances,
&justified_balances,
Hash256::zero(),
&equivocating_indices,
Slot::new(0),
@@ -129,11 +132,14 @@ impl ForkChoiceTestDefinition {
expected_head,
proposer_boost_root,
} => {
let justified_balances =
JustifiedBalances::from_effective_balances(justified_state_balances)
.unwrap();
let head = fork_choice
.find_head::<MainnetEthSpec>(
justified_checkpoint,
finalized_checkpoint,
&justified_state_balances,
&justified_balances,
proposer_boost_root,
&equivocating_indices,
Slot::new(0),
@@ -155,10 +161,13 @@ impl ForkChoiceTestDefinition {
finalized_checkpoint,
justified_state_balances,
} => {
let justified_balances =
JustifiedBalances::from_effective_balances(justified_state_balances)
.unwrap();
let result = fork_choice.find_head::<MainnetEthSpec>(
justified_checkpoint,
finalized_checkpoint,
&justified_state_balances,
&justified_balances,
Hash256::zero(),
&equivocating_indices,
Slot::new(0),

View File

@@ -999,7 +999,7 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition {
});
ops.push(Operation::AssertWeight {
block_root: get_root(3),
// This is a "magic number" generated from `calculate_proposer_boost`.
// This is a "magic number" generated from `calculate_committee_fraction`.
weight: 31_000,
});

View File

@@ -0,0 +1,62 @@
use safe_arith::{ArithError, SafeArith};
use types::{BeaconState, EthSpec};
#[derive(Debug, PartialEq, Clone, Default)]
pub struct JustifiedBalances {
/// The effective balances for every validator in a given justified state.
///
/// Any validator who is not active in the epoch of the justified state is assigned a balance of
/// zero.
pub effective_balances: Vec<u64>,
/// The sum of `self.effective_balances`.
pub total_effective_balance: u64,
/// The number of active validators included in `self.effective_balances`.
pub num_active_validators: u64,
}
impl JustifiedBalances {
pub fn from_justified_state<T: EthSpec>(state: &BeaconState<T>) -> Result<Self, ArithError> {
let current_epoch = state.current_epoch();
let mut total_effective_balance = 0u64;
let mut num_active_validators = 0u64;
let effective_balances = state
.validators()
.iter()
.map(|validator| {
if validator.is_active_at(current_epoch) {
total_effective_balance.safe_add_assign(validator.effective_balance)?;
num_active_validators.safe_add_assign(1)?;
Ok(validator.effective_balance)
} else {
Ok(0)
}
})
.collect::<Result<Vec<_>, _>>()?;
Ok(Self {
effective_balances,
total_effective_balance,
num_active_validators,
})
}
pub fn from_effective_balances(effective_balances: Vec<u64>) -> Result<Self, ArithError> {
let mut total_effective_balance = 0;
let mut num_active_validators = 0;
for &balance in &effective_balances {
if balance != 0 {
total_effective_balance.safe_add_assign(balance)?;
num_active_validators.safe_add_assign(1)?;
}
}
Ok(Self {
effective_balances,
total_effective_balance,
num_active_validators,
})
}
}
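
// A small sketch of the invariant both constructors maintain: zero balances mark
// inactive validators and are excluded from the aggregates.
//
//     let balances = JustifiedBalances::from_effective_balances(vec![32, 0, 31, 0])?;
//     assert_eq!(balances.effective_balances.len(), 4);
//     assert_eq!(balances.total_effective_balance, 63);
//     assert_eq!(balances.num_active_validators, 2);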

View File

@@ -1,11 +1,18 @@
mod error;
pub mod fork_choice_test_definition;
mod justified_balances;
mod proto_array;
mod proto_array_fork_choice;
mod ssz_container;
pub use crate::proto_array::{CountUnrealizedFull, InvalidationOperation};
pub use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice};
pub use crate::justified_balances::JustifiedBalances;
pub use crate::proto_array::{
calculate_committee_fraction, CountUnrealizedFull, InvalidationOperation,
};
pub use crate::proto_array_fork_choice::{
Block, DoNotReOrg, ExecutionStatus, ProposerHeadError, ProposerHeadInfo, ProtoArrayForkChoice,
ReOrgThreshold,
};
pub use error::Error;
pub mod core {

View File

@@ -1,5 +1,5 @@
use crate::error::InvalidBestNodeInfo;
use crate::{error::Error, Block, ExecutionStatus};
use crate::{error::Error, Block, ExecutionStatus, JustifiedBalances};
use serde_derive::{Deserialize, Serialize};
use ssz::four_byte_option_impl;
use ssz::Encode;
@@ -169,7 +169,7 @@ impl ProtoArray {
mut deltas: Vec<i64>,
justified_checkpoint: Checkpoint,
finalized_checkpoint: Checkpoint,
new_balances: &[u64],
new_justified_balances: &JustifiedBalances,
proposer_boost_root: Hash256,
current_slot: Slot,
spec: &ChainSpec,
@@ -241,9 +241,11 @@ impl ProtoArray {
// Invalid nodes (or their ancestors) should not receive a proposer boost.
&& !execution_status_is_invalid
{
proposer_score =
calculate_proposer_boost::<E>(new_balances, proposer_score_boost)
.ok_or(Error::ProposerBoostOverflow(node_index))?;
proposer_score = calculate_committee_fraction::<E>(
new_justified_balances,
proposer_score_boost,
)
.ok_or(Error::ProposerBoostOverflow(node_index))?;
node_delta = node_delta
.checked_add(proposer_score as i64)
.ok_or(Error::DeltaOverflow(node_index))?;
@@ -1006,32 +1008,19 @@ impl ProtoArray {
}
}
/// A helper method to calculate the proposer boost based on the given `validator_balances`.
/// This does *not* do any verification about whether a boost should or should not be applied.
/// The `validator_balances` array used here is assumed to be structured like the one stored in
/// the `BalancesCache`, where *effective* balances are stored and inactive balances are defaulted
/// to zero.
///
/// Returns `None` if there is an overflow or underflow when calculating the score.
/// A helper method to calculate the proposer boost based on the given `justified_balances`.
///
/// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#get_latest_attesting_balance
pub fn calculate_proposer_boost<E: EthSpec>(
validator_balances: &[u64],
pub fn calculate_committee_fraction<E: EthSpec>(
justified_balances: &JustifiedBalances,
proposer_score_boost: u64,
) -> Option<u64> {
let mut total_balance: u64 = 0;
let mut num_validators: u64 = 0;
for &balance in validator_balances {
// We need to filter zero balances here to get an accurate active validator count.
// This is because we default inactive validator balances to zero when creating
// this balances array.
if balance != 0 {
total_balance = total_balance.checked_add(balance)?;
num_validators = num_validators.checked_add(1)?;
}
}
let average_balance = total_balance.checked_div(num_validators)?;
let committee_size = num_validators.checked_div(E::slots_per_epoch())?;
let average_balance = justified_balances
.total_effective_balance
.checked_div(justified_balances.num_active_validators)?;
let committee_size = justified_balances
.num_active_validators
.checked_div(E::slots_per_epoch())?;
let committee_weight = committee_size.checked_mul(average_balance)?;
committee_weight
.checked_mul(proposer_score_boost)?

View File

@@ -1,9 +1,12 @@
use crate::error::Error;
use crate::proto_array::CountUnrealizedFull;
use crate::proto_array::{
calculate_proposer_boost, InvalidationOperation, Iter, ProposerBoost, ProtoArray, ProtoNode,
use crate::{
error::Error,
proto_array::{
calculate_committee_fraction, CountUnrealizedFull, InvalidationOperation, Iter,
ProposerBoost, ProtoArray, ProtoNode,
},
ssz_container::SszContainer,
JustifiedBalances,
};
use crate::ssz_container::SszContainer;
use serde_derive::{Deserialize, Serialize};
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
@@ -170,11 +173,128 @@ where
}
}
/// Information about the proposer head used for opportunistic re-orgs.
#[derive(Clone)]
pub struct ProposerHeadInfo {
/// Information about the *current* head block, which may be re-orged.
pub head_node: ProtoNode,
/// Information about the parent of the current head, which should be selected as the parent
/// for a new proposal *if* a re-org is decided on.
pub parent_node: ProtoNode,
/// The computed fraction of the active committee balance below which we can re-org.
pub re_org_weight_threshold: u64,
    /// The current slot from fork choice's point of view; it may lead the wall-clock slot by up
    /// to 500ms.
pub current_slot: Slot,
}
/// Error type to enable short-circuiting checks in `get_proposer_head`.
///
/// This type intentionally does not implement `Debug` so that callers are forced to handle the
/// enum.
#[derive(Clone, PartialEq)]
pub enum ProposerHeadError<E> {
DoNotReOrg(DoNotReOrg),
Error(E),
}
impl<E> From<DoNotReOrg> for ProposerHeadError<E> {
fn from(e: DoNotReOrg) -> ProposerHeadError<E> {
Self::DoNotReOrg(e)
}
}
impl From<Error> for ProposerHeadError<Error> {
fn from(e: Error) -> Self {
Self::Error(e)
}
}
impl<E1> ProposerHeadError<E1> {
pub fn convert_inner_error<E2>(self) -> ProposerHeadError<E2>
where
E2: From<E1>,
{
self.map_inner_error(E2::from)
}
pub fn map_inner_error<E2>(self, f: impl FnOnce(E1) -> E2) -> ProposerHeadError<E2> {
match self {
ProposerHeadError::DoNotReOrg(reason) => ProposerHeadError::DoNotReOrg(reason),
ProposerHeadError::Error(error) => ProposerHeadError::Error(f(error)),
}
}
}
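// A usage sketch: lifting the inner error into a wrapping error type, assuming
// a caller-side `OuterError: From<Error>`:
//
//     result.map_err(ProposerHeadError::convert_inner_error::<OuterError>)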
/// Reasons why a re-org should not be attempted.
///
/// This type intentionally does not implement `Debug` so that the `Display` impl must be used.
#[derive(Clone, PartialEq)]
pub enum DoNotReOrg {
MissingHeadOrParentNode,
MissingHeadFinalizedCheckpoint,
ParentDistance,
HeadDistance,
ShufflingUnstable,
JustificationAndFinalizationNotCompetitive,
ChainNotFinalizing {
epochs_since_finalization: u64,
},
HeadNotWeak {
head_weight: u64,
re_org_weight_threshold: u64,
},
HeadNotLate,
NotProposing,
ReOrgsDisabled,
}
impl std::fmt::Display for DoNotReOrg {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Self::MissingHeadOrParentNode => write!(f, "unknown head or parent"),
Self::MissingHeadFinalizedCheckpoint => write!(f, "finalized checkpoint missing"),
Self::ParentDistance => write!(f, "parent too far from head"),
Self::HeadDistance => write!(f, "head too far from current slot"),
Self::ShufflingUnstable => write!(f, "shuffling unstable at epoch boundary"),
Self::JustificationAndFinalizationNotCompetitive => {
write!(f, "justification or finalization not competitive")
}
Self::ChainNotFinalizing {
epochs_since_finalization,
} => write!(
f,
"chain not finalizing ({epochs_since_finalization} epochs since finalization)"
),
Self::HeadNotWeak {
head_weight,
re_org_weight_threshold,
} => {
write!(f, "head not weak ({head_weight}/{re_org_weight_threshold})")
}
Self::HeadNotLate => {
write!(f, "head arrived on time")
}
Self::NotProposing => {
write!(f, "not proposing at next slot")
}
Self::ReOrgsDisabled => {
write!(f, "re-orgs disabled in config")
}
}
}
}
/// New-type for the re-org threshold percentage.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(transparent)]
pub struct ReOrgThreshold(pub u64);
#[derive(PartialEq)]
pub struct ProtoArrayForkChoice {
pub(crate) proto_array: ProtoArray,
pub(crate) votes: ElasticList<VoteTracker>,
pub(crate) balances: Vec<u64>,
pub(crate) balances: JustifiedBalances,
}
impl ProtoArrayForkChoice {
@@ -223,7 +343,7 @@ impl ProtoArrayForkChoice {
Ok(Self {
proto_array,
votes: ElasticList::default(),
balances: vec![],
balances: JustifiedBalances::default(),
})
}
@@ -282,21 +402,20 @@ impl ProtoArrayForkChoice {
&mut self,
justified_checkpoint: Checkpoint,
finalized_checkpoint: Checkpoint,
justified_state_balances: &[u64],
justified_state_balances: &JustifiedBalances,
proposer_boost_root: Hash256,
equivocating_indices: &BTreeSet<u64>,
current_slot: Slot,
spec: &ChainSpec,
) -> Result<Hash256, String> {
let old_balances = &mut self.balances;
let new_balances = justified_state_balances;
let deltas = compute_deltas(
&self.proto_array.indices,
&mut self.votes,
old_balances,
new_balances,
&old_balances.effective_balances,
&new_balances.effective_balances,
equivocating_indices,
)
.map_err(|e| format!("find_head compute_deltas failed: {:?}", e))?;
@@ -313,13 +432,129 @@
)
.map_err(|e| format!("find_head apply_score_changes failed: {:?}", e))?;
*old_balances = new_balances.to_vec();
*old_balances = new_balances.clone();
self.proto_array
.find_head::<E>(&justified_checkpoint.root, current_slot)
.map_err(|e| format!("find_head failed: {:?}", e))
}
/// Get the block to propose on during `current_slot`.
///
/// This function returns a *definitive* result which should be acted on.
pub fn get_proposer_head<E: EthSpec>(
&self,
current_slot: Slot,
canonical_head: Hash256,
justified_balances: &JustifiedBalances,
re_org_threshold: ReOrgThreshold,
max_epochs_since_finalization: Epoch,
) -> Result<ProposerHeadInfo, ProposerHeadError<Error>> {
let info = self.get_proposer_head_info::<E>(
current_slot,
canonical_head,
justified_balances,
re_org_threshold,
max_epochs_since_finalization,
)?;
// Only re-org a single slot. This prevents cascading failures during asynchrony.
let head_slot_ok = info.head_node.slot + 1 == current_slot;
if !head_slot_ok {
return Err(DoNotReOrg::HeadDistance.into());
}
// Only re-org if the head's weight is less than the configured committee fraction.
let head_weight = info.head_node.weight;
let re_org_weight_threshold = info.re_org_weight_threshold;
let weak_head = head_weight < re_org_weight_threshold;
if !weak_head {
return Err(DoNotReOrg::HeadNotWeak {
head_weight,
re_org_weight_threshold,
}
.into());
}
// All checks have passed, build upon the parent to re-org the head.
Ok(info)
}
/// Get information about the block to propose on during `current_slot`.
///
/// This function returns a *partial* result which must be processed further.
pub fn get_proposer_head_info<E: EthSpec>(
&self,
current_slot: Slot,
canonical_head: Hash256,
justified_balances: &JustifiedBalances,
re_org_threshold: ReOrgThreshold,
max_epochs_since_finalization: Epoch,
) -> Result<ProposerHeadInfo, ProposerHeadError<Error>> {
let mut nodes = self
.proto_array
.iter_nodes(&canonical_head)
.take(2)
.cloned()
.collect::<Vec<_>>();
let parent_node = nodes.pop().ok_or(DoNotReOrg::MissingHeadOrParentNode)?;
let head_node = nodes.pop().ok_or(DoNotReOrg::MissingHeadOrParentNode)?;
let parent_slot = parent_node.slot;
let head_slot = head_node.slot;
let re_org_block_slot = head_slot + 1;
// Check finalization distance.
let proposal_epoch = re_org_block_slot.epoch(E::slots_per_epoch());
let finalized_epoch = head_node
.unrealized_finalized_checkpoint
.ok_or(DoNotReOrg::MissingHeadFinalizedCheckpoint)?
.epoch;
let epochs_since_finalization = proposal_epoch.saturating_sub(finalized_epoch).as_u64();
if epochs_since_finalization > max_epochs_since_finalization.as_u64() {
return Err(DoNotReOrg::ChainNotFinalizing {
epochs_since_finalization,
}
.into());
}
// Check parent distance from head.
// Do not check head distance from current slot, as that condition needs to be
// late-evaluated and is elided when `current_slot == head_slot`.
let parent_slot_ok = parent_slot + 1 == head_slot;
if !parent_slot_ok {
return Err(DoNotReOrg::ParentDistance.into());
}
// Check shuffling stability.
let shuffling_stable = re_org_block_slot % E::slots_per_epoch() != 0;
if !shuffling_stable {
return Err(DoNotReOrg::ShufflingUnstable.into());
}
// Check FFG.
let ffg_competitive = parent_node.unrealized_justified_checkpoint
== head_node.unrealized_justified_checkpoint
&& parent_node.unrealized_finalized_checkpoint
== head_node.unrealized_finalized_checkpoint;
if !ffg_competitive {
return Err(DoNotReOrg::JustificationAndFinalizationNotCompetitive.into());
}
// Compute re-org weight threshold.
let re_org_weight_threshold =
calculate_committee_fraction::<E>(justified_balances, re_org_threshold.0)
.ok_or(Error::ReOrgThresholdOverflow)?;
Ok(ProposerHeadInfo {
head_node,
parent_node,
re_org_weight_threshold,
current_slot,
})
}
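    // An illustration of the distance and stability checks (mainnet, 32 slots per
    // epoch): head at slot 62 with parent at slot 61 gives `re_org_block_slot = 63`,
    // which passes both `parent_slot + 1 == head_slot` and `63 % 32 != 0`; head at
    // slot 63 gives `re_org_block_slot = 64`, the first slot of an epoch, so
    // `ShufflingUnstable` is returned instead.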
/// Returns `true` if there are any blocks in `self` with an `INVALID` execution payload status.
///
/// This will operate on *all* blocks, even those that do not descend from the finalized
@@ -368,7 +603,7 @@ impl ProtoArrayForkChoice {
if vote.current_root == node.root {
// Any voting validator that does not have a balance should be
// ignored. This is consistent with `compute_deltas`.
self.balances.get(validator_index)
self.balances.effective_balances.get(validator_index)
} else {
None
}
@@ -382,9 +617,11 @@ impl ProtoArrayForkChoice {
// Compute the score based upon the current balances. We can't rely on
            // the `previous_proposer_boost.score` since it is set to zero with an
// invalid node.
let proposer_score =
calculate_proposer_boost::<E>(&self.balances, proposer_score_boost)
.ok_or("Failed to compute proposer boost")?;
let proposer_score = calculate_committee_fraction::<E>(
&self.balances,
proposer_score_boost,
)
.ok_or("Failed to compute proposer boost")?;
// Store the score we've applied here so it can be removed in
// a later call to `apply_score_changes`.
self.proto_array.previous_proposer_boost.score = proposer_score;
@@ -538,10 +775,11 @@ impl ProtoArrayForkChoice {
bytes: &[u8],
count_unrealized_full: CountUnrealizedFull,
) -> Result<Self, String> {
SszContainer::from_ssz_bytes(bytes)
.map(|container| (container, count_unrealized_full))
.map(Into::into)
.map_err(|e| format!("Failed to decode ProtoArrayForkChoice: {:?}", e))
let container = SszContainer::from_ssz_bytes(bytes)
.map_err(|e| format!("Failed to decode ProtoArrayForkChoice: {:?}", e))?;
(container, count_unrealized_full)
.try_into()
.map_err(|e| format!("Failed to initialize ProtoArrayForkChoice: {e:?}"))
}
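    // Decoding now re-derives the balance aggregates via
    // `JustifiedBalances::from_effective_balances`, so a persisted `balances`
    // vector that would overflow surfaces as a decode-time error; hypothetical
    // usage:
    //
    //     let fc = ProtoArrayForkChoice::from_ssz_bytes(&bytes, count_unrealized_full)?;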
/// Returns a read-lock to core `ProtoArray` struct.

View File

@@ -2,10 +2,12 @@ use crate::proto_array::ProposerBoost;
use crate::{
proto_array::{CountUnrealizedFull, ProtoArray, ProtoNode},
proto_array_fork_choice::{ElasticList, ProtoArrayForkChoice, VoteTracker},
Error, JustifiedBalances,
};
use ssz::{four_byte_option_impl, Encode};
use ssz_derive::{Decode, Encode};
use std::collections::HashMap;
use std::convert::TryFrom;
use types::{Checkpoint, Hash256};
// Define a "legacy" implementation of `Option<usize>` which uses four bytes for encoding the union
@@ -30,7 +32,7 @@ impl From<&ProtoArrayForkChoice> for SszContainer {
Self {
votes: from.votes.0.clone(),
balances: from.balances.clone(),
balances: from.balances.effective_balances.clone(),
prune_threshold: proto_array.prune_threshold,
justified_checkpoint: proto_array.justified_checkpoint,
finalized_checkpoint: proto_array.finalized_checkpoint,
@@ -41,8 +43,12 @@
}
}
impl From<(SszContainer, CountUnrealizedFull)> for ProtoArrayForkChoice {
fn from((from, count_unrealized_full): (SszContainer, CountUnrealizedFull)) -> Self {
impl TryFrom<(SszContainer, CountUnrealizedFull)> for ProtoArrayForkChoice {
type Error = Error;
fn try_from(
(from, count_unrealized_full): (SszContainer, CountUnrealizedFull),
) -> Result<Self, Error> {
let proto_array = ProtoArray {
prune_threshold: from.prune_threshold,
justified_checkpoint: from.justified_checkpoint,
@@ -53,10 +59,10 @@ impl From<(SszContainer, CountUnrealizedFull)> for ProtoArrayForkChoice {
count_unrealized_full,
};
Self {
Ok(Self {
proto_array,
votes: ElasticList(from.votes),
balances: from.balances,
}
balances: JustifiedBalances::from_effective_balances(from.balances)?,
})
}
}

View File

@@ -51,6 +51,7 @@ pub mod graffiti;
pub mod historical_batch;
pub mod indexed_attestation;
pub mod light_client_bootstrap;
pub mod light_client_finality_update;
pub mod light_client_optimistic_update;
pub mod light_client_update;
pub mod pending_attestation;
@@ -149,6 +150,10 @@ pub use crate::free_attestation::FreeAttestation;
pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN};
pub use crate::historical_batch::HistoricalBatch;
pub use crate::indexed_attestation::IndexedAttestation;
pub use crate::kzg_commitment::KzgCommitment;
pub use crate::kzg_proof::KzgProof;
pub use crate::light_client_finality_update::LightClientFinalityUpdate;
pub use crate::light_client_optimistic_update::LightClientOptimisticUpdate;
pub use crate::participation_flags::ParticipationFlags;
pub use crate::participation_list::ParticipationList;
pub use crate::payload::{

View File

@@ -1,10 +1,10 @@
use super::{BeaconBlockHeader, EthSpec, FixedVector, Hash256, Slot, SyncAggregate, SyncCommittee};
use crate::{light_client_update::*, test_utils::TestRandom, BeaconBlock, BeaconState, ChainSpec};
use safe_arith::ArithError;
use super::{
BeaconBlockHeader, EthSpec, FixedVector, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock,
Slot, SyncAggregate,
};
use crate::{light_client_update::*, test_utils::TestRandom, BeaconState, ChainSpec};
use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode};
use ssz_types::typenum::{U5, U6};
use std::sync::Arc;
use test_random_derive::TestRandom;
use tree_hash::TreeHash;
@@ -28,43 +28,38 @@ pub struct LightClientFinalityUpdate<T: EthSpec> {
impl<T: EthSpec> LightClientFinalityUpdate<T> {
pub fn new(
chain_spec: ChainSpec,
beacon_state: BeaconState<T>,
block: BeaconBlock<T>,
chain_spec: &ChainSpec,
beacon_state: &BeaconState<T>,
block: &SignedBeaconBlock<T>,
attested_state: &mut BeaconState<T>,
finalized_block: BeaconBlock<T>,
finalized_block: &SignedBlindedBeaconBlock<T>,
) -> Result<Self, Error> {
let altair_fork_epoch = chain_spec
.altair_fork_epoch
.ok_or(Error::AltairForkNotActive)?;
if attested_state.slot().epoch(T::slots_per_epoch()) < altair_fork_epoch {
if beacon_state.slot().epoch(T::slots_per_epoch()) < altair_fork_epoch {
return Err(Error::AltairForkNotActive);
}
let sync_aggregate = block.body().sync_aggregate()?;
let sync_aggregate = block.message().body().sync_aggregate()?;
if sync_aggregate.num_set_bits() < chain_spec.min_sync_committee_participants as usize {
return Err(Error::NotEnoughSyncCommitteeParticipants);
}
// Compute and validate attested header.
let mut attested_header = attested_state.latest_block_header().clone();
attested_header.state_root = attested_state.tree_hash_root();
attested_header.state_root = attested_state.update_tree_hash_cache()?;
// Build finalized header from finalized block
let finalized_header = BeaconBlockHeader {
slot: finalized_block.slot(),
proposer_index: finalized_block.proposer_index(),
parent_root: finalized_block.parent_root(),
state_root: finalized_block.state_root(),
body_root: finalized_block.body_root(),
};
let finalized_header = finalized_block.message().block_header();
if finalized_header.tree_hash_root() != beacon_state.finalized_checkpoint().root {
return Err(Error::InvalidFinalizedBlock);
}
let finality_branch = attested_state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?;
Ok(Self {
attested_header: attested_header,
finalized_header: finalized_header,
attested_header,
finalized_header,
finality_branch: FixedVector::new(finality_branch)?,
sync_aggregate: sync_aggregate.clone(),
signature_slot: block.slot(),
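
// A hypothetical call sketch (names are illustrative, not from this diff),
// assuming the caller holds the relevant chain data:
//
//     let update = LightClientFinalityUpdate::new(
//         &spec,               // &ChainSpec
//         &head_state,         // &BeaconState<T> identifying the finalized root
//         &head_block,         // &SignedBeaconBlock<T> carrying the sync aggregate
//         &mut attested_state, // state whose header becomes `attested_header`
//         &finalized_block,    // &SignedBlindedBeaconBlock<T> at the finalized checkpoint
//     )?;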

View File

@@ -1,6 +1,6 @@
use super::{BeaconBlockHeader, EthSpec, Slot, SyncAggregate};
use crate::{
light_client_update::Error, test_utils::TestRandom, BeaconBlock, BeaconState, ChainSpec,
light_client_update::Error, test_utils::TestRandom, BeaconState, ChainSpec, SignedBeaconBlock,
};
use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode};
@@ -23,9 +23,9 @@ pub struct LightClientOptimisticUpdate<T: EthSpec> {
impl<T: EthSpec> LightClientOptimisticUpdate<T> {
pub fn new(
chain_spec: ChainSpec,
block: BeaconBlock<T>,
attested_state: BeaconState<T>,
chain_spec: &ChainSpec,
block: &SignedBeaconBlock<T>,
attested_state: &BeaconState<T>,
) -> Result<Self, Error> {
let altair_fork_epoch = chain_spec
.altair_fork_epoch
@@ -34,7 +34,7 @@ impl<T: EthSpec> LightClientOptimisticUpdate<T> {
return Err(Error::AltairForkNotActive);
}
let sync_aggregate = block.body().sync_aggregate()?;
let sync_aggregate = block.message().body().sync_aggregate()?;
if sync_aggregate.num_set_bits() < chain_spec.min_sync_committee_participants as usize {
return Err(Error::NotEnoughSyncCommitteeParticipants);
}

View File

@@ -261,7 +261,7 @@ impl<T: EthSpec> ExecPayload<T> for FullPayload<T> {
})
}
fn is_default_with_empty_roots<'a>(&'a self) -> bool {
fn is_default_with_empty_roots(&self) -> bool {
// For full payloads the empty/zero distinction does not exist.
self.is_default_with_zero_roots()
}
@@ -536,7 +536,7 @@ impl<T: EthSpec> ExecPayload<T> for BlindedPayload<T> {
}
}
fn is_default_with_zero_roots<'a>(&'a self) -> bool {
fn is_default_with_zero_roots(&self) -> bool {
self.to_ref().is_default_with_zero_roots()
}
@@ -643,13 +643,13 @@ impl<'b, T: EthSpec> ExecPayload<T> for BlindedPayloadRef<'b, T> {
}
macro_rules! impl_exec_payload_common {
($wrapper_type:ident,
$wrapped_type:ident,
$wrapped_type_full:ident,
$wrapped_type_header:ident,
$wrapped_field:ident,
$fork_variant:ident,
$block_type_variant:ident,
($wrapper_type:ident, // BlindedPayloadMerge | FullPayloadMerge
$wrapped_type:ident, // ExecutionPayloadHeaderMerge | ExecutionPayloadMerge
$wrapped_type_full:ident, // ExecutionPayloadMerge | ExecutionPayloadMerge
$wrapped_type_header:ident, // ExecutionPayloadHeaderMerge | ExecutionPayloadHeaderMerge
$wrapped_field:ident, // execution_payload_header | execution_payload
$fork_variant:ident, // Merge | Merge
$block_type_variant:ident, // Blinded | Full
$f:block,
$g:block) => {
impl<T: EthSpec> ExecPayload<T> for $wrapper_type<T> {
@@ -696,7 +696,15 @@ macro_rules! impl_exec_payload_common {
}
fn is_default_with_empty_roots(&self) -> bool {
self.$wrapped_field == $wrapped_type::from($wrapped_type_full::default())
// FIXME: is there a better way than ignoring this lint?
// This is necessary because the first invocation of this macro might expand to:
// self.execution_payload_header == ExecutionPayloadHeaderMerge::from(ExecutionPayloadMerge::default())
// but the second invocation might expand to:
// self.execution_payload == ExecutionPayloadMerge::from(ExecutionPayloadMerge::default())
#[allow(clippy::cmp_owned)]
{
self.$wrapped_field == $wrapped_type::from($wrapped_type_full::default())
}
}
fn transactions(&self) -> Option<&Transactions<T>> {
@@ -720,16 +728,17 @@
}
macro_rules! impl_exec_payload_for_fork {
// BlindedPayloadMerge, FullPayloadMerge, ExecutionPayloadHeaderMerge, ExecutionPayloadMerge, Merge
($wrapper_type_header:ident, $wrapper_type_full:ident, $wrapped_type_header:ident, $wrapped_type_full:ident, $fork_variant:ident) => {
//*************** Blinded payload implementations ******************//
impl_exec_payload_common!(
$wrapper_type_header,
$wrapped_type_header,
$wrapped_type_full,
$wrapped_type_header,
$wrapper_type_header, // BlindedPayloadMerge
$wrapped_type_header, // ExecutionPayloadHeaderMerge
$wrapped_type_full, // ExecutionPayloadMerge
$wrapped_type_header, // ExecutionPayloadHeaderMerge
execution_payload_header,
$fork_variant,
$fork_variant, // Merge
Blinded,
{ |_| { None } },
{
@@ -794,12 +803,12 @@ macro_rules! impl_exec_payload_for_fork {
//*************** Full payload implementations ******************//
impl_exec_payload_common!(
$wrapper_type_full,
$wrapped_type_full,
$wrapped_type_full,
$wrapped_type_header,
$wrapper_type_full, // FullPayloadMerge
$wrapped_type_full, // ExecutionPayloadMerge
$wrapped_type_full, // ExecutionPayloadMerge
$wrapped_type_header, // ExecutionPayloadHeaderMerge
execution_payload,
$fork_variant,
$fork_variant, // Merge
Full,
{
let c: for<'a> fn(&'a $wrapper_type_full<T>) -> Option<&'a Transactions<T>> =

View File

@@ -12,8 +12,10 @@ use tree_hash_derive::TreeHash;
pub struct Withdrawal {
#[serde(with = "eth2_serde_utils::quoted_u64")]
pub index: u64,
#[serde(with = "eth2_serde_utils::quoted_u64")]
pub validator_index: u64,
pub address: Address,
#[serde(with = "eth2_serde_utils::quoted_u64")]
pub amount: u64,
}
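
// With `quoted_u64`, each u64 field serializes as a decimal string; an
// illustrative JSON encoding (hypothetical values, zero address):
//
//     {"index": "42", "validator_index": "1000",
//      "address": "0x0000000000000000000000000000000000000000",
//      "amount": "32000000000"}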

View File

@@ -1,7 +1,7 @@
# `lcli` requires the full project to be in scope, so this should be built either:
# - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerfile .`
# - from the current directory with the command: `docker build -f ./Dockerfile ../`
FROM rust:1.62.1-bullseye AS builder
FROM rust:1.65.0-bullseye AS builder
RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler
COPY . lighthouse
ARG PORTABLE

View File

@@ -624,6 +624,14 @@ fn main() {
.takes_value(true)
.help("The genesis time when generating a genesis state."),
)
.arg(
Arg::with_name("proposer-score-boost")
.long("proposer-score-boost")
.value_name("INTEGER")
.takes_value(true)
.help("The proposer score boost to apply as a percentage, e.g. 70 = 70%"),
)
)
.subcommand(
SubCommand::with_name("check-deposit-data")

View File

@@ -67,6 +67,10 @@ pub fn run<T: EthSpec>(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul
spec.genesis_fork_version = v;
}
if let Some(proposer_score_boost) = parse_optional(matches, "proposer-score-boost")? {
spec.proposer_score_boost = Some(proposer_score_boost);
}
if let Some(fork_epoch) = parse_optional(matches, "altair-fork-epoch")? {
spec.altair_fork_epoch = Some(fork_epoch);
}
@@ -107,9 +111,9 @@ pub fn run<T: EthSpec>(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul
execution_payload_header.as_ref()
{
let eth1_block_hash =
parse_optional(matches, "eth1-block-hash")?.unwrap_or(payload.block_hash());
parse_optional(matches, "eth1-block-hash")?.unwrap_or_else(|| payload.block_hash());
let genesis_time =
parse_optional(matches, "genesis-time")?.unwrap_or(payload.timestamp());
parse_optional(matches, "genesis-time")?.unwrap_or_else(|| payload.timestamp());
(eth1_block_hash, genesis_time)
} else {
let eth1_block_hash = parse_required(matches, "eth1-block-hash").map_err(|_| {

View File

@@ -1,6 +1,9 @@
use beacon_node::{beacon_chain::CountUnrealizedFull, ClientConfig as Config};
use crate::exec::{CommandLineTestExec, CompletedTest};
use beacon_node::beacon_chain::chain_config::{
DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_THRESHOLD,
};
use eth1::Eth1Endpoint;
use lighthouse_network::PeerId;
use std::fs::File;
@@ -10,6 +13,7 @@ use std::path::PathBuf;
use std::process::Command;
use std::str::FromStr;
use std::string::ToString;
use std::time::Duration;
use tempfile::TempDir;
use types::{Address, Checkpoint, Epoch, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec};
use unused_port::{unused_tcp_port, unused_udp_port};
@@ -153,6 +157,31 @@ fn checkpoint_sync_url_timeout_default() {
});
}
#[test]
fn prepare_payload_lookahead_default() {
CommandLineTest::new()
.run_with_zero_port()
.with_config(|config| {
assert_eq!(
config.chain.prepare_payload_lookahead,
Duration::from_secs(4),
)
});
}
#[test]
fn prepare_payload_lookahead_shorter() {
CommandLineTest::new()
.flag("prepare-payload-lookahead", Some("1500"))
.run_with_zero_port()
.with_config(|config| {
assert_eq!(
config.chain.prepare_payload_lookahead,
Duration::from_millis(1500)
)
});
}
#[test]
fn paranoid_block_proposal_default() {
CommandLineTest::new()
@@ -1500,6 +1529,49 @@ fn ensure_panic_on_failed_launch() {
});
}
#[test]
fn enable_proposer_re_orgs_default() {
CommandLineTest::new().run().with_config(|config| {
assert_eq!(
config.chain.re_org_threshold,
Some(DEFAULT_RE_ORG_THRESHOLD)
);
assert_eq!(
config.chain.re_org_max_epochs_since_finalization,
DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION,
);
});
}
#[test]
fn disable_proposer_re_orgs() {
CommandLineTest::new()
.flag("disable-proposer-reorgs", None)
.run()
.with_config(|config| assert_eq!(config.chain.re_org_threshold, None));
}
#[test]
fn proposer_re_org_threshold() {
CommandLineTest::new()
.flag("proposer-reorg-threshold", Some("90"))
.run()
.with_config(|config| assert_eq!(config.chain.re_org_threshold.unwrap().0, 90));
}
#[test]
fn proposer_re_org_max_epochs_since_finalization() {
CommandLineTest::new()
.flag("proposer-reorg-epochs-since-finalization", Some("8"))
.run()
.with_config(|config| {
assert_eq!(
config.chain.re_org_max_epochs_since_finalization.as_u64(),
8
)
});
}
#[test]
fn monitoring_endpoint() {
CommandLineTest::new()

View File

@@ -389,6 +389,24 @@ fn no_doppelganger_protection_flag() {
.with_config(|config| assert!(!config.enable_doppelganger_protection));
}
#[test]
fn block_delay_ms() {
CommandLineTest::new()
.flag("block-delay-ms", Some("2000"))
.run()
.with_config(|config| {
assert_eq!(
config.block_delay,
Some(std::time::Duration::from_millis(2000))
)
});
}
#[test]
fn no_block_delay_ms() {
CommandLineTest::new()
.run()
.with_config(|config| assert_eq!(config.block_delay, None));
}
#[test]
fn no_gas_limit_flag() {
CommandLineTest::new()
.run()

View File

@@ -36,6 +36,7 @@ lcli \
--eth1-follow-distance 1 \
--seconds-per-slot $SECONDS_PER_SLOT \
--seconds-per-eth1-block $SECONDS_PER_ETH1_BLOCK \
--proposer-score-boost "$PROPOSER_SCORE_BOOST" \
--validator-count $GENESIS_VALIDATOR_COUNT \
--interop-genesis-state \
--force

View File

@@ -53,5 +53,8 @@ SECONDS_PER_SLOT=3
# Seconds per Eth1 block
SECONDS_PER_ETH1_BLOCK=1
# Proposer score boost percentage
PROPOSER_SCORE_BOOST=70
# Command line arguments for validator client
VC_ARGS=""

View File

@@ -29,7 +29,7 @@ pub struct MerkleProofValidity<E: EthSpec> {
impl<E: EthSpec> LoadCase for MerkleProofValidity<E> {
fn load_from_dir(path: &Path, fork_name: ForkName) -> Result<Self, Error> {
let spec = &testing_spec::<E>(fork_name);
let state = ssz_decode_state(&path.join("state.ssz_snappy"), spec)?;
let state = ssz_decode_state(&path.join("object.ssz_snappy"), spec)?;
let merkle_proof = yaml_decode_file(&path.join("proof.yaml"))?;
// Metadata does not exist in these tests but it is left like this just in case.
let meta_path = path.join("meta.yaml");

View File

@@ -20,8 +20,6 @@ use state_processing::{
ConsensusContext,
};
use std::fmt::Debug;
#[cfg(not(all(feature = "withdrawals", feature = "withdrawals-processing")))]
use std::marker::PhantomData;
use std::path::Path;
#[cfg(feature = "withdrawals")]
use types::SignedBlsToExecutionChange;
@@ -43,6 +41,7 @@ struct ExecutionMetadata {
}
/// Newtype for testing withdrawals.
#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))]
#[derive(Debug, Clone, Deserialize)]
pub struct WithdrawalsPayload<T: EthSpec> {
payload: FullPayload<T>,

View File

@@ -1,9 +1,11 @@
pub use case_result::CaseResult;
#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))]
pub use cases::WithdrawalsPayload;
pub use cases::{
Case, EffectiveBalanceUpdates, Eth1DataReset, HistoricalRootsUpdate, InactivityUpdates,
JustificationAndFinalization, ParticipationFlagUpdates, ParticipationRecordUpdates,
RandaoMixesReset, RegistryUpdates, RewardsAndPenalties, Slashings, SlashingsReset,
SyncCommitteeUpdates, WithdrawalsPayload,
SyncCommitteeUpdates,
};
pub use decode::log_file_access;
pub use error::Error;

View File

@@ -82,12 +82,14 @@ fn operations_execution_payload_blinded() {
OperationsHandler::<MainnetEthSpec, BlindedPayload<_>>::default().run();
}
#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))]
#[test]
fn operations_withdrawals() {
OperationsHandler::<MinimalEthSpec, WithdrawalsPayload<_>>::default().run();
OperationsHandler::<MainnetEthSpec, WithdrawalsPayload<_>>::default().run();
}
#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))]
#[test]
fn operations_bls_to_execution_change() {
OperationsHandler::<MinimalEthSpec, SignedBlsToExecutionChange>::default().run();

View File

@@ -616,7 +616,8 @@ async fn check_payload_reconstruction<E: GenericExecutionEngine>(
) {
let reconstructed = ee
.execution_layer
.get_payload_by_block_hash(payload.block_hash())
// FIXME: handle other forks here?
.get_payload_by_block_hash(payload.block_hash(), ForkName::Merge)
.await
.unwrap()
.unwrap();

View File

@@ -1,6 +1,7 @@
use crate::beacon_node_fallback::{Error as FallbackError, Errors};
use crate::{
beacon_node_fallback::{BeaconNodeFallback, RequireSynced},
determine_graffiti,
graffiti_file::GraffitiFile,
OfflineOnFailure,
};
@@ -10,7 +11,9 @@ use slog::{crit, debug, error, info, trace, warn};
use slot_clock::SlotClock;
use std::ops::Deref;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc;
use tokio::time::sleep;
use types::{
AbstractExecPayload, BlindedPayload, BlockType, EthSpec, FullPayload, Graffiti, PublicKeyBytes,
Slot,
@@ -45,6 +48,7 @@ pub struct BlockServiceBuilder<T, E: EthSpec> {
context: Option<RuntimeContext<E>>,
graffiti: Option<Graffiti>,
graffiti_file: Option<GraffitiFile>,
block_delay: Option<Duration>,
}
impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> {
@@ -56,6 +60,7 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> {
context: None,
graffiti: None,
graffiti_file: None,
block_delay: None,
}
}
@@ -89,6 +94,11 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> {
self
}
pub fn block_delay(mut self, block_delay: Option<Duration>) -> Self {
self.block_delay = block_delay;
self
}
pub fn build(self) -> Result<BlockService<T, E>, String> {
Ok(BlockService {
inner: Arc::new(Inner {
@@ -106,6 +116,7 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> {
.ok_or("Cannot build BlockService without runtime_context")?,
graffiti: self.graffiti,
graffiti_file: self.graffiti_file,
block_delay: self.block_delay,
}),
})
}
@@ -119,6 +130,7 @@ pub struct Inner<T, E: EthSpec> {
context: RuntimeContext<E>,
graffiti: Option<Graffiti>,
graffiti_file: Option<GraffitiFile>,
block_delay: Option<Duration>,
}
/// Attempts to produce attestations for any block producer(s) at the start of the epoch.
@@ -163,6 +175,16 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
async move {
while let Some(notif) = notification_rx.recv().await {
let service = self.clone();
if let Some(delay) = service.block_delay {
debug!(
service.context.log(),
"Delaying block production by {}ms",
delay.as_millis()
);
sleep(delay).await;
}
service.do_update(notif).await.ok();
}
debug!(log, "Block service shutting down");
@@ -300,18 +322,13 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
})?
.into();
let graffiti = self
.graffiti_file
.clone()
.and_then(|mut g| match g.load_graffiti(&validator_pubkey) {
Ok(g) => g,
Err(e) => {
warn!(log, "Failed to read graffiti file"; "error" => ?e);
None
}
})
.or_else(|| self.validator_store.graffiti(&validator_pubkey))
.or(self.graffiti);
let graffiti = determine_graffiti(
&validator_pubkey,
log,
self.graffiti_file.clone(),
self.validator_store.graffiti(&validator_pubkey),
self.graffiti,
);
let randao_reveal_ref = &randao_reveal;
let self_ref = &self;

View File

@@ -308,5 +308,17 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
by this validator client. Note this will not necessarily be used if the gas limit \
set here moves too far from the previous block's gas limit. [default: 30,000,000]")
.requires("builder-proposals"),
)
)
/*
* Experimental/development options.
*/
.arg(
Arg::with_name("block-delay-ms")
.long("block-delay-ms")
.value_name("MILLIS")
.hidden(true)
.help("Time to delay block production from the start of the slot. Should only be \
used for testing.")
.takes_value(true),
)
}

View File

@@ -13,6 +13,7 @@ use slog::{info, warn, Logger};
use std::fs;
use std::net::IpAddr;
use std::path::PathBuf;
use std::time::Duration;
use types::{Address, GRAFFITI_BYTES_LEN};
pub const DEFAULT_BEACON_NODE: &str = "http://localhost:5052/";
@@ -61,6 +62,10 @@ pub struct Config {
/// A list of custom certificates that the validator client will additionally use when
/// connecting to a beacon node over SSL/TLS.
pub beacon_nodes_tls_certs: Option<Vec<PathBuf>>,
/// Delay from the start of the slot to wait before publishing a block.
///
/// This is *not* recommended in prod and should only be used for testing.
pub block_delay: Option<Duration>,
/// Disables publishing http api requests to all beacon nodes for select api calls.
pub disable_run_on_all: bool,
}
@@ -95,6 +100,7 @@ impl Default for Config {
monitoring_api: None,
enable_doppelganger_protection: false,
beacon_nodes_tls_certs: None,
block_delay: None,
builder_proposals: false,
builder_registration_timestamp_override: None,
gas_limit: None,
@@ -341,6 +347,13 @@ impl Config {
);
}
/*
* Experimental
*/
if let Some(delay_ms) = parse_optional::<u64>(cli_args, "block-delay-ms")? {
config.block_delay = Some(Duration::from_millis(delay_ms));
}
Ok(config)
}
}

View File

@@ -4,7 +4,7 @@ mod keystores;
mod remotekeys;
mod tests;
use crate::ValidatorStore;
use crate::{determine_graffiti, GraffitiFile, ValidatorStore};
use account_utils::{
mnemonic_from_phrase,
validator_definitions::{SigningDefinition, ValidatorDefinition, Web3SignerDefinition},
@@ -13,13 +13,14 @@ pub use api_secret::ApiSecret;
use create_validator::{create_validators_mnemonic, create_validators_web3signer};
use eth2::lighthouse_vc::{
std_types::{AuthResponse, GetFeeRecipientResponse, GetGasLimitResponse},
types::{self as api_types, GenericResponse, PublicKey, PublicKeyBytes},
types::{self as api_types, GenericResponse, Graffiti, PublicKey, PublicKeyBytes},
};
use lighthouse_version::version_with_platform;
use parking_lot::RwLock;
use serde::{Deserialize, Serialize};
use slog::{crit, info, warn, Logger};
use slot_clock::SlotClock;
use std::collections::HashMap;
use std::future::Future;
use std::marker::PhantomData;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
@@ -65,6 +66,8 @@ pub struct Context<T: SlotClock, E: EthSpec> {
pub api_secret: ApiSecret,
pub validator_store: Option<Arc<ValidatorStore<T, E>>>,
pub validator_dir: Option<PathBuf>,
pub graffiti_file: Option<GraffitiFile>,
pub graffiti_flag: Option<Graffiti>,
pub spec: ChainSpec,
pub config: Config,
pub log: Logger,
@@ -177,6 +180,12 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
})
});
let inner_graffiti_file = ctx.graffiti_file.clone();
let graffiti_file_filter = warp::any().map(move || inner_graffiti_file.clone());
let inner_graffiti_flag = ctx.graffiti_flag;
let graffiti_flag_filter = warp::any().map(move || inner_graffiti_flag);
let inner_ctx = ctx.clone();
let log_filter = warp::any().map(move || inner_ctx.log.clone());
@@ -329,6 +338,42 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
})
});
let get_lighthouse_ui_graffiti = warp::path("lighthouse")
.and(warp::path("ui"))
.and(warp::path("graffiti"))
.and(warp::path::end())
.and(validator_store_filter.clone())
.and(graffiti_file_filter)
.and(graffiti_flag_filter)
.and(signer.clone())
.and(log_filter.clone())
.and_then(
|validator_store: Arc<ValidatorStore<T, E>>,
graffiti_file: Option<GraffitiFile>,
graffiti_flag: Option<Graffiti>,
signer,
log| {
blocking_signed_json_task(signer, move || {
let mut result = HashMap::new();
for (key, graffiti_definition) in validator_store
.initialized_validators()
.read()
.get_all_validators_graffiti()
{
let graffiti = determine_graffiti(
key,
&log,
graffiti_file.clone(),
graffiti_definition,
graffiti_flag,
);
result.insert(key.to_string(), graffiti.map(|g| g.as_utf8_lossy()));
}
Ok(api_types::GenericResponse::from(result))
})
},
);
// POST lighthouse/validators/
let post_validators = warp::path("lighthouse")
.and(warp::path("validators"))
@@ -945,6 +990,7 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
.or(get_lighthouse_validators)
.or(get_lighthouse_validators_pubkey)
.or(get_lighthouse_ui_health)
.or(get_lighthouse_ui_graffiti)
.or(get_fee_recipient)
.or(get_gas_limit)
.or(get_std_keystores)
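
// The new route is served at GET /lighthouse/ui/graffiti and returns one entry
// per initialized validator; an illustrative (hypothetical) response body:
//
//     {"data": {"0x8f3a…": "example-graffiti", "0xa1b2…": null}}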

View File

@@ -120,6 +120,8 @@ impl ApiTester {
api_secret,
validator_dir: Some(validator_dir.path().into()),
validator_store: Some(validator_store.clone()),
graffiti_file: None,
graffiti_flag: Some(Graffiti::default()),
spec: E::default_spec(),
config: HttpConfig {
enabled: true,

View File

@@ -634,6 +634,15 @@ impl InitializedValidators {
self.validators.get(public_key).and_then(|v| v.graffiti)
}
/// Returns a `HashMap` of `public_key` -> `graffiti` for all initialized validators.
pub fn get_all_validators_graffiti(&self) -> HashMap<&PublicKeyBytes, Option<Graffiti>> {
let mut result = HashMap::new();
for public_key in self.validators.keys() {
result.insert(public_key, self.graffiti(public_key));
}
result
}
/// Returns the `suggested_fee_recipient` for a given public key specified in the
/// `ValidatorDefinitions`.
pub fn suggested_fee_recipient(&self, public_key: &PublicKeyBytes) -> Option<Address> {

View File

@@ -30,13 +30,14 @@ use crate::beacon_node_fallback::{
RequireSynced,
};
use crate::doppelganger_service::DoppelgangerService;
use crate::graffiti_file::GraffitiFile;
use account_utils::validator_definitions::ValidatorDefinitions;
use attestation_service::{AttestationService, AttestationServiceBuilder};
use block_service::{BlockService, BlockServiceBuilder};
use clap::ArgMatches;
use duties_service::DutiesService;
use environment::RuntimeContext;
use eth2::{reqwest::ClientBuilder, BeaconNodeHttpClient, StatusCode, Timeouts};
use eth2::{reqwest::ClientBuilder, types::Graffiti, BeaconNodeHttpClient, StatusCode, Timeouts};
use http_api::ApiSecret;
use notifier::spawn_notifier;
use parking_lot::RwLock;
@@ -57,7 +58,7 @@ use tokio::{
sync::mpsc,
time::{sleep, Duration},
};
use types::{EthSpec, Hash256};
use types::{EthSpec, Hash256, PublicKeyBytes};
use validator_store::ValidatorStore;
/// The interval between attempts to contact the beacon node during startup.
@@ -426,6 +427,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
.runtime_context(context.service_context("block".into()))
.graffiti(config.graffiti)
.graffiti_file(config.graffiti_file.clone())
.block_delay(config.block_delay)
.build()?;
let attestation_service = AttestationServiceBuilder::new()
@@ -526,6 +528,8 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
api_secret,
validator_store: Some(self.validator_store.clone()),
validator_dir: Some(self.config.validator_dir.clone()),
graffiti_file: self.config.graffiti_file.clone(),
graffiti_flag: self.config.graffiti,
spec: self.context.eth2_config.spec.clone(),
config: self.config.http_api.clone(),
log: log.clone(),
@@ -726,3 +730,24 @@ pub fn load_pem_certificate<P: AsRef<Path>>(pem_path: P) -> Result<Certificate,
.map_err(|e| format!("Unable to read certificate file: {}", e))?;
Certificate::from_pem(&buf).map_err(|e| format!("Unable to parse certificate: {}", e))
}
// Given the various graffiti control methods, determine the graffiti that will be used for
// the next block produced by the validator with the given public key.
pub fn determine_graffiti(
validator_pubkey: &PublicKeyBytes,
log: &Logger,
graffiti_file: Option<GraffitiFile>,
validator_definition_graffiti: Option<Graffiti>,
graffiti_flag: Option<Graffiti>,
) -> Option<Graffiti> {
graffiti_file
.and_then(|mut g| match g.load_graffiti(validator_pubkey) {
Ok(g) => g,
Err(e) => {
warn!(log, "Failed to read graffiti file"; "error" => ?e);
None
}
})
.or(validator_definition_graffiti)
.or(graffiti_flag)
}
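
// A sketch of the resulting precedence (hypothetical values): the graffiti file
// is consulted first, then the per-validator definition, then the `--graffiti`
// CLI flag:
//
//     let graffiti = determine_graffiti(&pubkey, &log, maybe_file, maybe_definition, maybe_flag);
//     // file hit          -> graffiti from the file
//     // file miss / error -> maybe_definition, else maybe_flag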