Merge Deneb (#4054)
commit 8b0545da12

.github/workflows/docker.yml (vendored, 6 lines changed)
@@ -5,6 +5,7 @@ on:
   branches:
     - unstable
     - stable
+    - deneb-free-blobs
   tags:
     - v*

@@ -40,6 +41,11 @@ jobs:
       run: |
        echo "VERSION=latest" >> $GITHUB_ENV
        echo "VERSION_SUFFIX=-unstable" >> $GITHUB_ENV
+    - name: Extract version (if deneb)
+      if: github.event.ref == 'refs/heads/deneb-free-blobs'
+      run: |
+       echo "VERSION=deneb" >> $GITHUB_ENV
+       echo "VERSION_SUFFIX=" >> $GITHUB_ENV
     - name: Extract version (if tagged release)
       if: startsWith(github.event.ref, 'refs/tags')
       run: |
.github/workflows/test-suite.yml (vendored, 24 lines changed)
@@ -128,6 +128,21 @@ jobs:
           bins: cargo-nextest
       - name: Run operation_pool tests for all known forks
         run: make test-op-pool
+  network-tests:
+    name: network-tests
+    runs-on: ubuntu-latest
+    env:
+      GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+    steps:
+      - uses: actions/checkout@v3
+      - name: Get latest version of stable Rust
+        uses: moonrepo/setup-rust@v1
+        with:
+          channel: stable
+          cache-target: release
+          bins: cargo-nextest
+      - name: Run network tests for all known forks
+        run: make test-network
   slasher-tests:
     name: slasher-tests
     runs-on: ubuntu-latest

@@ -289,7 +304,8 @@ jobs:
         run: |
           make
       - name: Install lcli
-        if: env.SELF_HOSTED_RUNNERS == 'false'
+        # TODO(jimmy): re-enable this once we merge deneb into unstable
+        # if: env.SELF_HOSTED_RUNNERS == 'false'
         run: make install-lcli
       - name: Run the doppelganger protection failure test script
         run: |

@@ -340,8 +356,10 @@ jobs:
         run: make arbitrary-fuzz
       - name: Run cargo audit
         run: make audit-CI
-      - name: Run cargo vendor to make sure dependencies can be vendored for packaging, reproducibility and archival purpose
-        run: CARGO_HOME=$(readlink -f $HOME) make vendor
+      # TODO(sean): re-enable this when we can figure it out with c-kzg
+      # Issue: https://github.com/sigp/lighthouse/issues/4440
+      # - name: Run cargo vendor to make sure dependencies can be vendored for packaging, reproducibility and archival purpose
+      #   run: CARGO_HOME=$(readlink -f $HOME) make vendor
   check-msrv:
     name: check-msrv
     runs-on: ubuntu-latest
.gitignore (vendored, 6 lines changed)
@@ -9,7 +9,11 @@ perf.data*
 /bin
 genesis.ssz
 /clippy.toml
+/.cargo
 
 # IntelliJ
 /*.iml
-.idea
+.idea
+
+# VSCode
+/.vscode
Cargo.lock (generated, 405 lines changed): file diff suppressed because it is too large.
@@ -59,6 +59,7 @@ members = [
     "consensus/swap_or_not_shuffle",
 
     "crypto/bls",
+    "crypto/kzg",
     "crypto/eth2_key_derivation",
     "crypto/eth2_keystore",
     "crypto/eth2_wallet",

@@ -144,7 +145,7 @@ serde_json = "1"
 serde_repr = "0.1"
 serde_yaml = "0.8"
 sha2 = "0.9"
-slog = { version = "2", features = ["max_level_trace", "release_max_level_trace"] }
+slog = { version = "2", features = ["max_level_trace", "release_max_level_trace", "nested-values"] }
 slog-async = "2"
 slog-term = "2"
 sloggers = { version = "2", features = ["json"] }

@@ -194,6 +195,7 @@ fork_choice = { path = "consensus/fork_choice" }
 genesis = { path = "beacon_node/genesis" }
 http_api = { path = "beacon_node/http_api" }
 int_to_bytes = { path = "consensus/int_to_bytes" }
+kzg = { path = "crypto/kzg" }
 lighthouse_metrics = { path = "common/lighthouse_metrics" }
 lighthouse_network = { path = "beacon_node/lighthouse_network" }
 lighthouse_version = { path = "common/lighthouse_version" }
@@ -1,5 +1,5 @@
 [target.x86_64-unknown-linux-gnu]
-pre-build = ["apt-get install -y cmake clang-3.9"]
+pre-build = ["apt-get install -y cmake clang-5.0"]
 
 [target.aarch64-unknown-linux-gnu]
-pre-build = ["apt-get install -y cmake clang-3.9"]
+pre-build = ["apt-get install -y cmake clang-5.0"]
Makefile (18 lines changed)
@@ -36,7 +36,7 @@ PROFILE ?= release
 
 # List of all hard forks. This list is used to set env variables for several tests so that
 # they run for different forks.
-FORKS=phase0 altair merge capella
+FORKS=phase0 altair merge capella deneb
 
 # Extra flags for Cargo
 CARGO_INSTALL_EXTRA_FLAGS?=

@@ -106,22 +106,22 @@ build-release-tarballs:
 # Runs the full workspace tests in **release**, without downloading any additional
 # test vectors.
 test-release:
-	cargo test --workspace --release --exclude ef_tests --exclude beacon_chain --exclude slasher
+	cargo test --workspace --release --exclude ef_tests --exclude beacon_chain --exclude slasher --exclude network
 
 # Runs the full workspace tests in **release**, without downloading any additional
 # test vectors, using nextest.
 nextest-release:
-	cargo nextest run --workspace --release --exclude ef_tests --exclude beacon_chain --exclude slasher
+	cargo nextest run --workspace --release --exclude ef_tests --exclude beacon_chain --exclude slasher --exclude network
 
 # Runs the full workspace tests in **debug**, without downloading any additional test
 # vectors.
 test-debug:
-	cargo test --workspace --exclude ef_tests --exclude beacon_chain
+	cargo test --workspace --exclude ef_tests --exclude beacon_chain --exclude network
 
 # Runs the full workspace tests in **debug**, without downloading any additional test
 # vectors, using nextest.
 nextest-debug:
-	cargo nextest run --workspace --exclude ef_tests --exclude beacon_chain
+	cargo nextest run --workspace --exclude ef_tests --exclude beacon_chain --exclude network
 
 # Runs cargo-fmt (linter).
 cargo-fmt:

@@ -161,6 +161,14 @@ test-op-pool-%:
 	--features 'beacon_chain/fork_from_env'\
 	-p operation_pool
 
+# Run the tests in the `network` crate for all known forks.
+test-network: $(patsubst %,test-network-%,$(FORKS))
+
+test-network-%:
+	env FORK_NAME=$* cargo nextest run --release \
+		--features 'fork_from_env' \
+		-p network
+
 # Run the tests in the `slasher` crate for all supported database backends.
 test-slasher:
 	cargo nextest run --release -p slasher --features lmdb
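The new test-network target follows the same per-fork pattern as test-op-pool: the `%` stem becomes the fork name and is exported as FORK_NAME, which tests built with the `fork_from_env` feature read at runtime. A minimal, standalone sketch of that environment-variable handshake (the default fork and the message text below are illustrative, not Lighthouse's actual test harness):

use std::env;

fn main() {
    // `make test-network-deneb` effectively runs
    // `env FORK_NAME=deneb cargo nextest run --features fork_from_env -p network`,
    // so the test binary only has to read the variable.
    let fork = env::var("FORK_NAME").unwrap_or_else(|_| "deneb".to_string());
    let known = ["phase0", "altair", "merge", "capella", "deneb"];
    assert!(known.contains(&fork.as_str()), "unknown fork: {fork}");
    println!("running network tests against the {fork} fork");
}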
@@ -14,7 +14,7 @@ use slot_clock::{SlotClock, SystemTimeSlotClock};
 use std::path::{Path, PathBuf};
 use std::time::Duration;
 use tokio::time::sleep;
-use types::{ChainSpec, Epoch, EthSpec, Fork, VoluntaryExit};
+use types::{ChainSpec, Epoch, EthSpec, VoluntaryExit};
 
 pub const CMD: &str = "exit";
 pub const KEYSTORE_FLAG: &str = "keystore";

@@ -146,7 +146,6 @@ async fn publish_voluntary_exit<E: EthSpec>(
         .ok_or("Failed to get current epoch. Please check your system time")?;
     let validator_index = get_validator_index_for_exit(client, &keypair.pk, epoch, spec).await?;
 
-    let fork = get_beacon_state_fork(client).await?;
     let voluntary_exit = VoluntaryExit {
         epoch,
         validator_index,

@@ -173,12 +172,8 @@ async fn publish_voluntary_exit<E: EthSpec>(
 
     if confirmation == CONFIRMATION_PHRASE {
         // Sign and publish the voluntary exit to network
-        let signed_voluntary_exit = voluntary_exit.sign(
-            &keypair.sk,
-            &fork,
-            genesis_data.genesis_validators_root,
-            spec,
-        );
+        let signed_voluntary_exit =
+            voluntary_exit.sign(&keypair.sk, genesis_data.genesis_validators_root, spec);
         client
             .post_beacon_pool_voluntary_exits(&signed_voluntary_exit)
             .await

@@ -316,16 +311,6 @@ async fn is_syncing(client: &BeaconNodeHttpClient) -> Result<bool, String> {
         .is_syncing)
 }
 
-/// Get fork object for the current state by querying the beacon node client.
-async fn get_beacon_state_fork(client: &BeaconNodeHttpClient) -> Result<Fork, String> {
-    Ok(client
-        .get_beacon_states_fork(StateId::Head)
-        .await
-        .map_err(|e| format!("Failed to get get fork: {:?}", e))?
-        .ok_or("Failed to get fork, state not found")?
-        .data)
-}
-
 /// Calculates the current epoch from the genesis time and current time.
 fn get_current_epoch<E: EthSpec>(genesis_time: u64, spec: &ChainSpec) -> Option<Epoch> {
     let slot_clock = SystemTimeSlotClock::new(
@@ -37,6 +37,7 @@ eth2_network_config = { workspace = true }
 execution_layer = { workspace = true }
 lighthouse_network = { workspace = true }
 serde = { workspace = true }
+serde_json = { workspace = true }
 clap_utils = { workspace = true }
 hyper = { workspace = true }
 lighthouse_version = { workspace = true }
@@ -17,6 +17,8 @@ environment = { workspace = true }
 serde_json = { workspace = true }
 
 [dependencies]
+serde_json = { workspace = true }
+eth2_network_config = { workspace = true }
 merkle_proof = { workspace = true }
 store = { workspace = true }
 parking_lot = { workspace = true }

@@ -50,6 +52,7 @@ lru = { workspace = true }
 tempfile = { workspace = true }
 bitvec = { workspace = true }
 bls = { workspace = true }
+kzg = { workspace = true }
 safe_arith = { workspace = true }
 fork_choice = { workspace = true }
 task_executor = { workspace = true }

@@ -65,6 +68,8 @@ superstruct = { workspace = true }
 hex = { workspace = true }
 exit-future = { workspace = true }
 oneshot_broadcast = { path = "../../common/oneshot_broadcast/" }
+slog-term = { workspace = true }
+slog-async = { workspace = true }
 
 [[test]]
 name = "beacon_chain_tests"
@@ -50,9 +50,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
 
         match state {
             BeaconState::Base(_) => self.compute_attestation_rewards_base(state, validators),
-            BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => {
-                self.compute_attestation_rewards_altair(state, validators)
-            }
+            BeaconState::Altair(_)
+            | BeaconState::Merge(_)
+            | BeaconState::Capella(_)
+            | BeaconState::Deneb(_) => self.compute_attestation_rewards_altair(state, validators),
         }
     }
 
@@ -55,7 +55,7 @@ use std::borrow::Cow;
 use strum::AsRefStr;
 use tree_hash::TreeHash;
 use types::{
-    Attestation, BeaconCommittee, ChainSpec, CommitteeIndex, Epoch, EthSpec, Hash256,
+    Attestation, BeaconCommittee, ChainSpec, CommitteeIndex, Epoch, EthSpec, ForkName, Hash256,
     IndexedAttestation, SelectionProof, SignedAggregateAndProof, Slot, SubnetId,
 };
 

@@ -1049,10 +1049,21 @@ pub fn verify_propagation_slot_range<S: SlotClock, E: EthSpec>(
     }
 
     // Taking advantage of saturating subtraction on `Slot`.
-    let earliest_permissible_slot = slot_clock
+    let one_epoch_prior = slot_clock
         .now_with_past_tolerance(spec.maximum_gossip_clock_disparity())
         .ok_or(BeaconChainError::UnableToReadSlot)?
         - E::slots_per_epoch();
+
+    let current_fork =
+        spec.fork_name_at_slot::<E>(slot_clock.now().ok_or(BeaconChainError::UnableToReadSlot)?);
+    let earliest_permissible_slot = match current_fork {
+        ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => one_epoch_prior,
+        // EIP-7045
+        ForkName::Deneb => one_epoch_prior
+            .epoch(E::slots_per_epoch())
+            .start_slot(E::slots_per_epoch()),
+    };
+
     if attestation_slot < earliest_permissible_slot {
         return Err(Error::PastSlot {
             attestation_slot,
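Before Deneb an attestation may propagate on gossip only if its slot is no more than one epoch old; EIP-7045 widens the window to the start of the previous epoch, so every slot of the current and previous epochs remains acceptable. A standalone sketch of the two bounds, using plain u64 slots and the mainnet 32-slot epoch instead of Lighthouse's Slot and Epoch types:

const SLOTS_PER_EPOCH: u64 = 32;

/// Pre-Deneb bound: exactly one epoch behind `now` (saturating at genesis).
fn earliest_permissible_pre_deneb(now: u64) -> u64 {
    now.saturating_sub(SLOTS_PER_EPOCH)
}

/// EIP-7045 bound: round the pre-Deneb bound down to the start of its epoch,
/// admitting every slot of the current and previous epochs.
fn earliest_permissible_deneb(now: u64) -> u64 {
    let one_epoch_prior = now.saturating_sub(SLOTS_PER_EPOCH);
    (one_epoch_prior / SLOTS_PER_EPOCH) * SLOTS_PER_EPOCH
}

fn main() {
    let now = 100; // slot 100 lies in epoch 3 (slots 96..=127)
    assert_eq!(earliest_permissible_pre_deneb(now), 68);
    assert_eq!(earliest_permissible_deneb(now), 64); // start of epoch 2
    println!("pre-Deneb bound: 68, Deneb (EIP-7045) bound: 64");
}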
@@ -64,19 +64,19 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             self.compute_beacon_block_attestation_reward_base(block, block_root, state)
                 .map_err(|e| {
                     error!(
-                        self.log,
-                        "Error calculating base block attestation reward";
-                        "error" => ?e
+                    self.log,
+                    "Error calculating base block attestation reward";
+                    "error" => ?e
                     );
                     BeaconChainError::BlockRewardAttestationError
                 })?
         } else {
-            self.compute_beacon_block_attestation_reward_altair(block, state)
+            self.compute_beacon_block_attestation_reward_altair_deneb(block, state)
                 .map_err(|e| {
                     error!(
-                        self.log,
-                        "Error calculating altair block attestation reward";
-                        "error" => ?e
+                    self.log,
+                    "Error calculating altair block attestation reward";
+                    "error" => ?e
                     );
                     BeaconChainError::BlockRewardAttestationError
                 })?

@@ -173,7 +173,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         Ok(block_attestation_reward)
     }
 
-    fn compute_beacon_block_attestation_reward_altair<Payload: AbstractExecPayload<T::EthSpec>>(
+    fn compute_beacon_block_attestation_reward_altair_deneb<
+        Payload: AbstractExecPayload<T::EthSpec>,
+    >(
         &self,
         block: BeaconBlockRef<'_, T::EthSpec, Payload>,
         state: &mut BeaconState<T::EthSpec>,

@@ -192,6 +194,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         for attestation in block.body().attestations() {
             let data = &attestation.data;
             let inclusion_delay = state.slot().safe_sub(data.slot)?.as_u64();
+            // [Modified in Deneb:EIP7045]
             let participation_flag_indices = get_attestation_participation_flag_indices(
                 state,
                 data,
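The `[Modified in Deneb:EIP7045]` marker refers to how participation flags are derived for rewards: under EIP-7045 the timely-target flag no longer carries a one-epoch inclusion-delay cap, which matters precisely because Deneb also lets attestations be included more than an epoch late. A simplified, hedged sketch of that difference (not Lighthouse's get_attestation_participation_flag_indices; the matching_* inputs are assumed to be computed elsewhere):

const SLOTS_PER_EPOCH: u64 = 32;
const SQRT_SLOTS_PER_EPOCH: u64 = 5; // integer_sqrt(32)

struct Flags {
    timely_source: bool,
    timely_target: bool,
    timely_head: bool,
}

fn participation_flags(
    inclusion_delay: u64,
    matching_source: bool,
    matching_target: bool,
    matching_head: bool,
    is_deneb: bool,
) -> Flags {
    Flags {
        timely_source: matching_source && inclusion_delay <= SQRT_SLOTS_PER_EPOCH,
        // EIP-7045: Deneb drops the one-epoch inclusion-delay bound on the target flag.
        timely_target: matching_target && (is_deneb || inclusion_delay <= SLOTS_PER_EPOCH),
        timely_head: matching_head && inclusion_delay == 1,
    }
}

fn main() {
    // An attestation included 40 slots late (only possible once EIP-7045 extends the
    // inclusion window) earns the target flag under Deneb rules but not the older formula.
    let pre = participation_flags(40, true, true, true, false);
    let post = participation_flags(40, true, true, true, true);
    assert!(!pre.timely_target && post.timely_target);
    println!(
        "delay 40: source={}, head={}, target pre-Deneb={}, target Deneb={}",
        post.timely_source, post.timely_head, pre.timely_target, post.timely_target
    );
}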
@@ -3,7 +3,7 @@ use execution_layer::{ExecutionLayer, ExecutionPayloadBodyV1};
 use slog::{crit, debug, Logger};
 use std::collections::HashMap;
 use std::sync::Arc;
-use store::DatabaseBlock;
+use store::{DatabaseBlock, ExecutionPayloadDeneb};
 use task_executor::TaskExecutor;
 use tokio::sync::{
     mpsc::{self, UnboundedSender},

@@ -97,6 +97,7 @@ fn reconstruct_default_header_block<E: EthSpec>(
     let payload: ExecutionPayload<E> = match fork {
         ForkName::Merge => ExecutionPayloadMerge::default().into(),
         ForkName::Capella => ExecutionPayloadCapella::default().into(),
+        ForkName::Deneb => ExecutionPayloadDeneb::default().into(),
         ForkName::Base | ForkName::Altair => {
             return Err(Error::PayloadReconstruction(format!(
                 "Block with fork variant {} has execution payload",

@@ -714,19 +715,21 @@ mod tests {
     }
 
     #[tokio::test]
-    async fn check_all_blocks_from_altair_to_capella() {
+    async fn check_all_blocks_from_altair_to_deneb() {
         let slots_per_epoch = MinimalEthSpec::slots_per_epoch() as usize;
         let num_epochs = 8;
         let bellatrix_fork_epoch = 2usize;
         let capella_fork_epoch = 4usize;
+        let deneb_fork_epoch = 6usize;
         let num_blocks_produced = num_epochs * slots_per_epoch;
 
         let mut spec = test_spec::<MinimalEthSpec>();
         spec.altair_fork_epoch = Some(Epoch::new(0));
         spec.bellatrix_fork_epoch = Some(Epoch::new(bellatrix_fork_epoch as u64));
         spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64));
+        spec.deneb_fork_epoch = Some(Epoch::new(deneb_fork_epoch as u64));
 
-        let harness = get_harness(VALIDATOR_COUNT, spec);
+        let harness = get_harness(VALIDATOR_COUNT, spec.clone());
         // go to bellatrix fork
         harness
             .extend_slots(bellatrix_fork_epoch * slots_per_epoch)

@@ -833,17 +836,19 @@ mod tests {
     }
 
     #[tokio::test]
-    async fn check_fallback_altair_to_capella() {
+    async fn check_fallback_altair_to_deneb() {
         let slots_per_epoch = MinimalEthSpec::slots_per_epoch() as usize;
         let num_epochs = 8;
         let bellatrix_fork_epoch = 2usize;
         let capella_fork_epoch = 4usize;
+        let deneb_fork_epoch = 6usize;
         let num_blocks_produced = num_epochs * slots_per_epoch;
 
         let mut spec = test_spec::<MinimalEthSpec>();
         spec.altair_fork_epoch = Some(Epoch::new(0));
         spec.bellatrix_fork_epoch = Some(Epoch::new(bellatrix_fork_epoch as u64));
         spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64));
+        spec.deneb_fork_epoch = Some(Epoch::new(deneb_fork_epoch as u64));
 
         let harness = get_harness(VALIDATOR_COUNT, spec);
 
File diff suppressed because it is too large.

beacon_node/beacon_chain/src/blob_verification.rs (new file, 554 lines)
@@ -0,0 +1,554 @@
use derivative::Derivative;
use slot_clock::SlotClock;
use std::sync::Arc;

use crate::beacon_chain::{
    BeaconChain, BeaconChainTypes, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT,
    VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT,
};
use crate::block_verification::cheap_state_advance_to_obtain_committees;
use crate::data_availability_checker::AvailabilityCheckError;
use crate::kzg_utils::{validate_blob, validate_blobs};
use crate::{metrics, BeaconChainError};
use kzg::{Kzg, KzgCommitment};
use slog::{debug, warn};
use ssz_derive::{Decode, Encode};
use ssz_types::VariableList;
use tree_hash::TreeHash;
use types::blob_sidecar::BlobIdentifier;
use types::{
    BeaconStateError, BlobSidecar, BlobSidecarList, CloneConfig, EthSpec, Hash256,
    SignedBlobSidecar, Slot,
};

/// An error occurred while validating a gossip blob.
#[derive(Debug)]
pub enum GossipBlobError<T: EthSpec> {
    /// The blob sidecar is from a slot that is later than the current slot (with respect to the
    /// gossip clock disparity).
    ///
    /// ## Peer scoring
    ///
    /// Assuming the local clock is correct, the peer has sent an invalid message.
    FutureSlot {
        message_slot: Slot,
        latest_permissible_slot: Slot,
    },

    /// There was an error whilst processing the blob. It is not known if it is
    /// valid or invalid.
    ///
    /// ## Peer scoring
    ///
    /// We were unable to process this blob due to an internal error. It's
    /// unclear if the blob is valid.
    BeaconChainError(BeaconChainError),

    /// The `BlobSidecar` was gossiped over an incorrect subnet.
    ///
    /// ## Peer scoring
    ///
    /// The blob is invalid or the peer is faulty.
    InvalidSubnet { expected: u64, received: u64 },

    /// The sidecar corresponds to a slot older than the finalized head slot.
    ///
    /// ## Peer scoring
    ///
    /// It's unclear if this blob is valid, but this blob is for a finalized slot and is
    /// therefore useless to us.
    PastFinalizedSlot {
        blob_slot: Slot,
        finalized_slot: Slot,
    },

    /// The proposer index specified in the sidecar does not match the locally computed
    /// proposer index.
    ///
    /// ## Peer scoring
    ///
    /// The blob is invalid and the peer is faulty.
    ProposerIndexMismatch { sidecar: usize, local: usize },

    /// The proposal signature is invalid.
    ///
    /// ## Peer scoring
    ///
    /// The blob is invalid and the peer is faulty.
    ProposerSignatureInvalid,

    /// The proposal_index corresponding to blob.beacon_block_root is not known.
    ///
    /// ## Peer scoring
    ///
    /// The blob is invalid and the peer is faulty.
    UnknownValidator(u64),

    /// The provided blob is not from a later slot than its parent.
    ///
    /// ## Peer scoring
    ///
    /// The blob is invalid and the peer is faulty.
    BlobIsNotLaterThanParent { blob_slot: Slot, parent_slot: Slot },

    /// The provided blob's parent block is unknown.
    ///
    /// ## Peer scoring
    ///
    /// We cannot process the blob without validating its parent, the peer isn't necessarily faulty.
    BlobParentUnknown(Arc<BlobSidecar<T>>),

    /// A blob has already been seen for the given `(sidecar.block_root, sidecar.index)` tuple
    /// over gossip or non-gossip sources.
    ///
    /// ## Peer scoring
    ///
    /// The peer isn't faulty, but we do not forward it over gossip.
    RepeatBlob {
        proposer: u64,
        slot: Slot,
        index: u64,
    },
}

impl<T: EthSpec> std::fmt::Display for GossipBlobError<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            GossipBlobError::BlobParentUnknown(blob_sidecar) => {
                write!(
                    f,
                    "BlobParentUnknown(parent_root:{})",
                    blob_sidecar.block_parent_root
                )
            }
            other => write!(f, "{:?}", other),
        }
    }
}

impl<T: EthSpec> From<BeaconChainError> for GossipBlobError<T> {
    fn from(e: BeaconChainError) -> Self {
        GossipBlobError::BeaconChainError(e)
    }
}

impl<T: EthSpec> From<BeaconStateError> for GossipBlobError<T> {
    fn from(e: BeaconStateError) -> Self {
        GossipBlobError::BeaconChainError(BeaconChainError::BeaconStateError(e))
    }
}
|
||||
pub type GossipVerifiedBlobList<T> = VariableList<
|
||||
GossipVerifiedBlob<T>,
|
||||
<<T as BeaconChainTypes>::EthSpec as EthSpec>::MaxBlobsPerBlock,
|
||||
>;
|
||||
|
||||
/// A wrapper around a `BlobSidecar` that indicates it has been approved for re-gossiping on
|
||||
/// the p2p network.
|
||||
#[derive(Debug)]
|
||||
pub struct GossipVerifiedBlob<T: BeaconChainTypes> {
|
||||
blob: SignedBlobSidecar<T::EthSpec>,
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> GossipVerifiedBlob<T> {
|
||||
pub fn new(
|
||||
blob: SignedBlobSidecar<T::EthSpec>,
|
||||
chain: &BeaconChain<T>,
|
||||
) -> Result<Self, GossipBlobError<T::EthSpec>> {
|
||||
let blob_index = blob.message.index;
|
||||
validate_blob_sidecar_for_gossip(blob, blob_index, chain)
|
||||
}
|
||||
/// Construct a `GossipVerifiedBlob` that is assumed to be valid.
|
||||
///
|
||||
/// This should ONLY be used for testing.
|
||||
pub fn __assumed_valid(blob: SignedBlobSidecar<T::EthSpec>) -> Self {
|
||||
Self { blob }
|
||||
}
|
||||
pub fn id(&self) -> BlobIdentifier {
|
||||
self.blob.message.id()
|
||||
}
|
||||
pub fn block_root(&self) -> Hash256 {
|
||||
self.blob.message.block_root
|
||||
}
|
||||
pub fn to_blob(self) -> Arc<BlobSidecar<T::EthSpec>> {
|
||||
self.blob.message
|
||||
}
|
||||
pub fn as_blob(&self) -> &BlobSidecar<T::EthSpec> {
|
||||
&self.blob.message
|
||||
}
|
||||
pub fn signed_blob(&self) -> SignedBlobSidecar<T::EthSpec> {
|
||||
self.blob.clone()
|
||||
}
|
||||
pub fn slot(&self) -> Slot {
|
||||
self.blob.message.slot
|
||||
}
|
||||
pub fn index(&self) -> u64 {
|
||||
self.blob.message.index
|
||||
}
|
||||
pub fn kzg_commitment(&self) -> KzgCommitment {
|
||||
self.blob.message.kzg_commitment
|
||||
}
|
||||
pub fn proposer_index(&self) -> u64 {
|
||||
self.blob.message.proposer_index
|
||||
}
|
||||
}
|
||||
|
||||
pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
|
||||
signed_blob_sidecar: SignedBlobSidecar<T::EthSpec>,
|
||||
subnet: u64,
|
||||
chain: &BeaconChain<T>,
|
||||
) -> Result<GossipVerifiedBlob<T>, GossipBlobError<T::EthSpec>> {
|
||||
let blob_slot = signed_blob_sidecar.message.slot;
|
||||
let blob_index = signed_blob_sidecar.message.index;
|
||||
let block_parent_root = signed_blob_sidecar.message.block_parent_root;
|
||||
let blob_proposer_index = signed_blob_sidecar.message.proposer_index;
|
||||
let block_root = signed_blob_sidecar.message.block_root;
|
||||
let blob_epoch = blob_slot.epoch(T::EthSpec::slots_per_epoch());
|
||||
|
||||
// Verify that the blob_sidecar was received on the correct subnet.
|
||||
if blob_index != subnet {
|
||||
return Err(GossipBlobError::InvalidSubnet {
|
||||
expected: blob_index,
|
||||
received: subnet,
|
||||
});
|
||||
}
|
||||
|
||||
let blob_root = get_blob_root(&signed_blob_sidecar);
|
||||
|
||||
// Verify that the sidecar is not from a future slot.
|
||||
let latest_permissible_slot = chain
|
||||
.slot_clock
|
||||
.now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity())
|
||||
.ok_or(BeaconChainError::UnableToReadSlot)?;
|
||||
if blob_slot > latest_permissible_slot {
|
||||
return Err(GossipBlobError::FutureSlot {
|
||||
message_slot: blob_slot,
|
||||
latest_permissible_slot,
|
||||
});
|
||||
}
|
||||
|
||||
// Verify that the sidecar slot is greater than the latest finalized slot
|
||||
let latest_finalized_slot = chain
|
||||
.head()
|
||||
.finalized_checkpoint()
|
||||
.epoch
|
||||
.start_slot(T::EthSpec::slots_per_epoch());
|
||||
if blob_slot <= latest_finalized_slot {
|
||||
return Err(GossipBlobError::PastFinalizedSlot {
|
||||
blob_slot,
|
||||
finalized_slot: latest_finalized_slot,
|
||||
});
|
||||
}
|
||||
|
||||
// Verify that this is the first blob sidecar received for the (sidecar.block_root, sidecar.index) tuple
|
||||
if chain
|
||||
.observed_blob_sidecars
|
||||
.read()
|
||||
.is_known(&signed_blob_sidecar.message)
|
||||
.map_err(|e| GossipBlobError::BeaconChainError(e.into()))?
|
||||
{
|
||||
return Err(GossipBlobError::RepeatBlob {
|
||||
proposer: blob_proposer_index,
|
||||
slot: blob_slot,
|
||||
index: blob_index,
|
||||
});
|
||||
}
|
||||
|
||||
// We have already verified that the blob is past finalization, so we can
|
||||
// just check fork choice for the block's parent.
|
||||
let Some(parent_block) = chain
|
||||
.canonical_head
|
||||
.fork_choice_read_lock()
|
||||
.get_block(&block_parent_root)
|
||||
else {
|
||||
return Err(GossipBlobError::BlobParentUnknown(
|
||||
signed_blob_sidecar.message,
|
||||
));
|
||||
};
|
||||
|
||||
if parent_block.slot >= blob_slot {
|
||||
return Err(GossipBlobError::BlobIsNotLaterThanParent {
|
||||
blob_slot,
|
||||
parent_slot: parent_block.slot,
|
||||
});
|
||||
}
|
||||
|
||||
// Note: We check that the proposer_index matches against the shuffling first to avoid
|
||||
// signature verification against an invalid proposer_index.
|
||||
let proposer_shuffling_root =
|
||||
if parent_block.slot.epoch(T::EthSpec::slots_per_epoch()) == blob_epoch {
|
||||
parent_block
|
||||
.next_epoch_shuffling_id
|
||||
.shuffling_decision_block
|
||||
} else {
|
||||
parent_block.root
|
||||
};
|
||||
|
||||
let proposer_opt = chain
|
||||
.beacon_proposer_cache
|
||||
.lock()
|
||||
.get_slot::<T::EthSpec>(proposer_shuffling_root, blob_slot);
|
||||
|
||||
let (proposer_index, fork) = if let Some(proposer) = proposer_opt {
|
||||
(proposer.index, proposer.fork)
|
||||
} else {
|
||||
debug!(
|
||||
chain.log,
|
||||
"Proposer shuffling cache miss for blob verification";
|
||||
"block_root" => %block_root,
|
||||
"index" => %blob_index,
|
||||
);
|
||||
if let Some(mut snapshot) = chain
|
||||
.snapshot_cache
|
||||
.try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
|
||||
.and_then(|snapshot_cache| {
|
||||
snapshot_cache.get_cloned(block_parent_root, CloneConfig::committee_caches_only())
|
||||
})
|
||||
{
|
||||
if snapshot.beacon_state.slot() == blob_slot {
|
||||
debug!(
|
||||
chain.log,
|
||||
"Cloning snapshot cache state for blob verification";
|
||||
"block_root" => %block_root,
|
||||
"index" => %blob_index,
|
||||
);
|
||||
(
|
||||
snapshot
|
||||
.beacon_state
|
||||
.get_beacon_proposer_index(blob_slot, &chain.spec)?,
|
||||
snapshot.beacon_state.fork(),
|
||||
)
|
||||
} else {
|
||||
debug!(
|
||||
chain.log,
|
||||
"Cloning and advancing snapshot cache state for blob verification";
|
||||
"block_root" => %block_root,
|
||||
"index" => %blob_index,
|
||||
);
|
||||
let state =
|
||||
cheap_state_advance_to_obtain_committees::<_, GossipBlobError<T::EthSpec>>(
|
||||
&mut snapshot.beacon_state,
|
||||
Some(snapshot.beacon_block_root),
|
||||
blob_slot,
|
||||
&chain.spec,
|
||||
)?;
|
||||
(
|
||||
state.get_beacon_proposer_index(blob_slot, &chain.spec)?,
|
||||
state.fork(),
|
||||
)
|
||||
}
|
||||
}
|
||||
// Need to advance the state to get the proposer index
|
||||
else {
|
||||
warn!(
|
||||
chain.log,
|
||||
"Snapshot cache miss for blob verification";
|
||||
"block_root" => %block_root,
|
||||
"index" => %blob_index,
|
||||
);
|
||||
|
||||
let parent_block = chain
|
||||
.get_blinded_block(&block_parent_root)
|
||||
.map_err(GossipBlobError::BeaconChainError)?
|
||||
.ok_or_else(|| {
|
||||
GossipBlobError::from(BeaconChainError::MissingBeaconBlock(block_parent_root))
|
||||
})?;
|
||||
|
||||
let mut parent_state = chain
|
||||
.get_state(&parent_block.state_root(), Some(parent_block.slot()))?
|
||||
.ok_or_else(|| {
|
||||
BeaconChainError::DBInconsistent(format!(
|
||||
"Missing state {:?}",
|
||||
parent_block.state_root()
|
||||
))
|
||||
})?;
|
||||
let state = cheap_state_advance_to_obtain_committees::<_, GossipBlobError<T::EthSpec>>(
|
||||
&mut parent_state,
|
||||
Some(parent_block.state_root()),
|
||||
blob_slot,
|
||||
&chain.spec,
|
||||
)?;
|
||||
|
||||
let proposers = state.get_beacon_proposer_indices(&chain.spec)?;
|
||||
let proposer_index = *proposers
|
||||
.get(blob_slot.as_usize() % T::EthSpec::slots_per_epoch() as usize)
|
||||
.ok_or_else(|| BeaconChainError::NoProposerForSlot(blob_slot))?;
|
||||
|
||||
// Prime the proposer shuffling cache with the newly-learned value.
|
||||
chain.beacon_proposer_cache.lock().insert(
|
||||
blob_epoch,
|
||||
proposer_shuffling_root,
|
||||
proposers,
|
||||
state.fork(),
|
||||
)?;
|
||||
(proposer_index, state.fork())
|
||||
}
|
||||
};
|
||||
|
||||
if proposer_index != blob_proposer_index as usize {
|
||||
return Err(GossipBlobError::ProposerIndexMismatch {
|
||||
sidecar: blob_proposer_index as usize,
|
||||
local: proposer_index,
|
||||
});
|
||||
}
|
||||
|
||||
// Signature verification
|
||||
let signature_is_valid = {
|
||||
let pubkey_cache = chain
|
||||
.validator_pubkey_cache
|
||||
.try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
|
||||
.ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)
|
||||
.map_err(GossipBlobError::BeaconChainError)?;
|
||||
|
||||
let pubkey = pubkey_cache
|
||||
.get(proposer_index)
|
||||
.ok_or_else(|| GossipBlobError::UnknownValidator(proposer_index as u64))?;
|
||||
|
||||
signed_blob_sidecar.verify_signature(
|
||||
Some(blob_root),
|
||||
pubkey,
|
||||
&fork,
|
||||
chain.genesis_validators_root,
|
||||
&chain.spec,
|
||||
)
|
||||
};
|
||||
|
||||
if !signature_is_valid {
|
||||
return Err(GossipBlobError::ProposerSignatureInvalid);
|
||||
}
|
||||
|
||||
// Now the signature is valid, store the proposal so we don't accept another blob sidecar
|
||||
// with the same `BlobIdentifier`.
|
||||
// It's important to double-check that the proposer still hasn't been observed so we don't
|
||||
// have a race-condition when verifying two blocks simultaneously.
|
||||
//
|
||||
// Note: If this BlobSidecar goes on to fail full verification, we do not evict it from the seen_cache
|
||||
// as alternate blob_sidecars for the same identifier can still be retrieved
|
||||
// over rpc. Evicting them from this cache would allow faster propagation over gossip. So we allow
|
||||
// retrieval of potentially valid blocks over rpc, but try to punish the proposer for signing
|
||||
// invalid messages. Issue for more background
|
||||
// https://github.com/ethereum/consensus-specs/issues/3261
|
||||
if chain
|
||||
.observed_blob_sidecars
|
||||
.write()
|
||||
.observe_sidecar(&signed_blob_sidecar.message)
|
||||
.map_err(|e| GossipBlobError::BeaconChainError(e.into()))?
|
||||
{
|
||||
return Err(GossipBlobError::RepeatBlob {
|
||||
proposer: proposer_index as u64,
|
||||
slot: blob_slot,
|
||||
index: blob_index,
|
||||
});
|
||||
}
|
||||
|
||||
Ok(GossipVerifiedBlob {
|
||||
blob: signed_blob_sidecar,
|
||||
})
|
||||
}
|
||||
|
||||
/// Wrapper over a `BlobSidecar` for which we have completed kzg verification.
|
||||
/// i.e. `verify_blob_kzg_proof(blob, commitment, proof) == true`.
|
||||
#[derive(Debug, Derivative, Clone, Encode, Decode)]
|
||||
#[derivative(PartialEq, Eq)]
|
||||
#[ssz(struct_behaviour = "transparent")]
|
||||
pub struct KzgVerifiedBlob<T: EthSpec> {
|
||||
blob: Arc<BlobSidecar<T>>,
|
||||
}
|
||||
|
||||
impl<T: EthSpec> PartialOrd for KzgVerifiedBlob<T> {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: EthSpec> Ord for KzgVerifiedBlob<T> {
|
||||
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
|
||||
self.blob.cmp(&other.blob)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: EthSpec> KzgVerifiedBlob<T> {
|
||||
pub fn to_blob(self) -> Arc<BlobSidecar<T>> {
|
||||
self.blob
|
||||
}
|
||||
pub fn as_blob(&self) -> &BlobSidecar<T> {
|
||||
&self.blob
|
||||
}
|
||||
pub fn clone_blob(&self) -> Arc<BlobSidecar<T>> {
|
||||
self.blob.clone()
|
||||
}
|
||||
pub fn block_root(&self) -> Hash256 {
|
||||
self.blob.block_root
|
||||
}
|
||||
pub fn blob_index(&self) -> u64 {
|
||||
self.blob.index
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
impl<T: EthSpec> KzgVerifiedBlob<T> {
|
||||
pub fn new(blob: BlobSidecar<T>) -> Self {
|
||||
Self {
|
||||
blob: Arc::new(blob),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Complete kzg verification for a `GossipVerifiedBlob`.
|
||||
///
|
||||
/// Returns an error if the kzg verification check fails.
|
||||
pub fn verify_kzg_for_blob<T: EthSpec>(
|
||||
blob: Arc<BlobSidecar<T>>,
|
||||
kzg: &Kzg<T::Kzg>,
|
||||
) -> Result<KzgVerifiedBlob<T>, AvailabilityCheckError> {
|
||||
let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_SINGLE_TIMES);
|
||||
//TODO(sean) remove clone
|
||||
if validate_blob::<T>(kzg, blob.blob.clone(), blob.kzg_commitment, blob.kzg_proof)
|
||||
.map_err(AvailabilityCheckError::Kzg)?
|
||||
{
|
||||
Ok(KzgVerifiedBlob { blob })
|
||||
} else {
|
||||
Err(AvailabilityCheckError::KzgVerificationFailed)
|
||||
}
|
||||
}
|
||||
|
||||
/// Complete kzg verification for a list of `BlobSidecar`s.
|
||||
/// Returns an error if any of the `BlobSidecar`s fails kzg verification.
|
||||
///
|
||||
/// Note: This function should be preferred over calling `verify_kzg_for_blob`
|
||||
/// in a loop since this function kzg verifies a list of blobs more efficiently.
|
||||
pub fn verify_kzg_for_blob_list<T: EthSpec>(
|
||||
blob_list: &BlobSidecarList<T>,
|
||||
kzg: &Kzg<T::Kzg>,
|
||||
) -> Result<(), AvailabilityCheckError> {
|
||||
let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_BATCH_TIMES);
|
||||
let (blobs, (commitments, proofs)): (Vec<_>, (Vec<_>, Vec<_>)) = blob_list
|
||||
.iter()
|
||||
.map(|blob| (blob.blob.clone(), (blob.kzg_commitment, blob.kzg_proof)))
|
||||
.unzip();
|
||||
if validate_blobs::<T>(
|
||||
kzg,
|
||||
commitments.as_slice(),
|
||||
blobs.as_slice(),
|
||||
proofs.as_slice(),
|
||||
)
|
||||
.map_err(AvailabilityCheckError::Kzg)?
|
||||
{
|
||||
Ok(())
|
||||
} else {
|
||||
Err(AvailabilityCheckError::KzgVerificationFailed)
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the canonical root of the given `blob`.
|
||||
///
|
||||
/// Use this function to ensure that we report the blob hashing time Prometheus metric.
|
||||
pub fn get_blob_root<E: EthSpec>(blob: &SignedBlobSidecar<E>) -> Hash256 {
|
||||
let blob_root_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_BLOB_ROOT);
|
||||
|
||||
let blob_root = blob.message.tree_hash_root();
|
||||
|
||||
metrics::stop_timer(blob_root_timer);
|
||||
|
||||
blob_root
|
||||
}
|
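The gossip validation above deliberately runs the cheap structural checks (subnet index, slot window, finalization, duplicate detection, proposer lookup) before the comparatively expensive BLS signature verification, so an obviously bad sidecar never costs a signature check. A standalone sketch of that ordering with simplified stand-in types (the SidecarSummary struct and the verify_signature closure are hypothetical, not Lighthouse's API):

/// Hypothetical, simplified view of a gossiped blob sidecar.
struct SidecarSummary {
    slot: u64,
    index: u64,
    finalized_slot: u64,
    latest_permissible_slot: u64,
}

#[derive(Debug, PartialEq)]
enum RejectReason {
    WrongSubnet,
    FutureSlot,
    AlreadyFinalized,
    BadSignature,
}

/// Run the cheap checks first; only call the expensive signature check last.
fn validate(
    sidecar: &SidecarSummary,
    subnet: u64,
    verify_signature: impl Fn() -> bool,
) -> Result<(), RejectReason> {
    if sidecar.index != subnet {
        return Err(RejectReason::WrongSubnet);
    }
    if sidecar.slot > sidecar.latest_permissible_slot {
        return Err(RejectReason::FutureSlot);
    }
    if sidecar.slot <= sidecar.finalized_slot {
        return Err(RejectReason::AlreadyFinalized);
    }
    if !verify_signature() {
        return Err(RejectReason::BadSignature);
    }
    Ok(())
}

fn main() {
    let sidecar = SidecarSummary {
        slot: 101,
        index: 2,
        finalized_slot: 64,
        latest_permissible_slot: 102,
    };
    // The signature closure is never invoked when a cheaper check already fails.
    assert_eq!(
        validate(&sidecar, 3, || unreachable!()),
        Err(RejectReason::WrongSubnet)
    );
    assert_eq!(validate(&sidecar, 2, || true), Ok(()));
    println!("cheap checks run before signature verification");
}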
@ -23,6 +23,7 @@
|
||||
//! |
|
||||
//! ▼
|
||||
//! SignedBeaconBlock
|
||||
//! |
|
||||
//! |---------------
|
||||
//! | |
|
||||
//! | ▼
|
||||
@ -47,6 +48,11 @@
|
||||
// returned alongside.
|
||||
#![allow(clippy::result_large_err)]
|
||||
|
||||
use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob};
|
||||
use crate::block_verification_types::{
|
||||
AsBlock, BlockContentsError, BlockImportData, GossipVerifiedBlockContents, RpcBlock,
|
||||
};
|
||||
use crate::data_availability_checker::{AvailabilityCheckError, MaybeAvailableBlock};
|
||||
use crate::eth1_finalization_cache::Eth1FinalizationData;
|
||||
use crate::execution_payload::{
|
||||
is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block,
|
||||
@ -64,15 +70,17 @@ use crate::{
|
||||
metrics, BeaconChain, BeaconChainError, BeaconChainTypes,
|
||||
};
|
||||
use derivative::Derivative;
|
||||
use eth2::types::EventKind;
|
||||
use eth2::types::{EventKind, SignedBlockContents};
|
||||
use execution_layer::PayloadStatus;
|
||||
use fork_choice::{AttestationFromBlock, PayloadVerificationStatus};
|
||||
pub use fork_choice::{AttestationFromBlock, PayloadVerificationStatus};
|
||||
use parking_lot::RwLockReadGuard;
|
||||
use proto_array::Block as ProtoBlock;
|
||||
use safe_arith::ArithError;
|
||||
use slog::{debug, error, warn, Logger};
|
||||
use slot_clock::SlotClock;
|
||||
use ssz::Encode;
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use ssz_types::VariableList;
|
||||
use state_processing::per_block_processing::{errors::IntoWithIndex, is_merge_transition_block};
|
||||
use state_processing::{
|
||||
block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError},
|
||||
@ -82,18 +90,19 @@ use state_processing::{
|
||||
StateProcessingStrategy, VerifyBlockRoot,
|
||||
};
|
||||
use std::borrow::Cow;
|
||||
use std::fmt::Debug;
|
||||
use std::fs;
|
||||
use std::io::Write;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp};
|
||||
use store::{Error as DBError, HotStateSummary, KeyValueStore, SignedBlobSidecarList, StoreOp};
|
||||
use task_executor::JoinHandle;
|
||||
use tree_hash::TreeHash;
|
||||
use types::ExecPayload;
|
||||
use types::{
|
||||
BeaconBlockRef, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, CloneConfig, Epoch,
|
||||
EthSpec, ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes,
|
||||
RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot,
|
||||
BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec,
|
||||
ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch,
|
||||
SignedBeaconBlock, SignedBeaconBlockHeader, Slot,
|
||||
};
|
||||
|
||||
pub const POS_PANDA_BANNER: &str = r#"
|
||||
@@ -141,7 +150,7 @@ pub enum BlockError<T: EthSpec> {
     ///
     /// It's unclear if this block is valid, but it cannot be processed without already knowing
     /// its parent.
-    ParentUnknown(Arc<SignedBeaconBlock<T>>),
+    ParentUnknown(RpcBlock<T>),
     /// The block slot is greater than the present slot.
     ///
     /// ## Peer scoring

@@ -215,7 +224,7 @@ pub enum BlockError<T: EthSpec> {
     ///
     /// The block is invalid and the peer is faulty.
     InvalidSignature,
-    /// The provided block is from an later slot than its parent.
+    /// The provided block is not from a later slot than its parent.
     ///
     /// ## Peer scoring
     ///

@@ -284,6 +293,27 @@ pub enum BlockError<T: EthSpec> {
     /// Honest peers shouldn't forward more than 1 equivocating block from the same proposer, so
     /// we penalise them with a mid-tolerance error.
     Slashable,
+    /// The block and blob together failed validation.
+    ///
+    /// ## Peer scoring
+    ///
+    /// This error implies that the block satisfied all block validity conditions except consistency
+    /// with the corresponding blob that we received over gossip/rpc. This is because availability
+    /// checks are always done after all other checks are completed.
+    /// This implies that either:
+    /// 1. The block proposer is faulty
+    /// 2. We received the blob over rpc and it is invalid (inconsistent w.r.t the block).
+    /// 3. It is an internal error
+    /// For all these cases, we cannot penalize the peer that gave us the block.
+    /// TODO: We may need to penalize the peer that gave us a potentially invalid rpc blob.
+    /// https://github.com/sigp/lighthouse/issues/4546
+    AvailabilityCheck(AvailabilityCheckError),
 }
 
+impl<T: EthSpec> From<AvailabilityCheckError> for BlockError<T> {
+    fn from(e: AvailabilityCheckError) -> Self {
+        Self::AvailabilityCheck(e)
+    }
+}
+
 /// Returned when block validation failed due to some issue verifying
@ -459,6 +489,7 @@ impl<T: EthSpec> From<ArithError> for BlockError<T> {
|
||||
}
|
||||
|
||||
/// Stores information about verifying a payload against an execution engine.
|
||||
#[derive(Debug, PartialEq, Clone, Encode, Decode)]
|
||||
pub struct PayloadVerificationOutcome {
|
||||
pub payload_verification_status: PayloadVerificationStatus,
|
||||
pub is_valid_merge_transition_block: bool,
|
||||
@ -528,7 +559,7 @@ fn process_block_slash_info<T: BeaconChainTypes>(
|
||||
/// The given `chain_segment` must contain only blocks from the same epoch, otherwise an error
|
||||
/// will be returned.
|
||||
pub fn signature_verify_chain_segment<T: BeaconChainTypes>(
|
||||
mut chain_segment: Vec<(Hash256, Arc<SignedBeaconBlock<T::EthSpec>>)>,
|
||||
mut chain_segment: Vec<(Hash256, RpcBlock<T::EthSpec>)>,
|
||||
chain: &BeaconChain<T>,
|
||||
) -> Result<Vec<SignatureVerifiedBlock<T>>, BlockError<T::EthSpec>> {
|
||||
if chain_segment.is_empty() {
|
||||
@ -545,7 +576,7 @@ pub fn signature_verify_chain_segment<T: BeaconChainTypes>(
|
||||
.map(|(_, block)| block.slot())
|
||||
.unwrap_or_else(|| slot);
|
||||
|
||||
let state = cheap_state_advance_to_obtain_committees(
|
||||
let state = cheap_state_advance_to_obtain_committees::<_, BlockError<T::EthSpec>>(
|
||||
&mut parent.pre_state,
|
||||
parent.beacon_state_root,
|
||||
highest_slot,
|
||||
@ -561,12 +592,16 @@ pub fn signature_verify_chain_segment<T: BeaconChainTypes>(
|
||||
let mut consensus_context =
|
||||
ConsensusContext::new(block.slot()).set_current_block_root(*block_root);
|
||||
|
||||
signature_verifier.include_all_signatures(block, &mut consensus_context)?;
|
||||
signature_verifier.include_all_signatures(block.as_block(), &mut consensus_context)?;
|
||||
|
||||
let maybe_available_block = chain
|
||||
.data_availability_checker
|
||||
.check_rpc_block_availability(block.clone())?;
|
||||
|
||||
// Save the block and its consensus context. The context will have had its proposer index
|
||||
// and attesting indices filled in, which can be used to accelerate later block processing.
|
||||
signature_verified_blocks.push(SignatureVerifiedBlock {
|
||||
block: block.clone(),
|
||||
block: maybe_available_block,
|
||||
block_root: *block_root,
|
||||
parent: None,
|
||||
consensus_context,
|
||||
@ -600,7 +635,7 @@ pub struct GossipVerifiedBlock<T: BeaconChainTypes> {
|
||||
/// A wrapper around a `SignedBeaconBlock` that indicates that all signatures (except the deposit
|
||||
/// signatures) have been verified.
|
||||
pub struct SignatureVerifiedBlock<T: BeaconChainTypes> {
|
||||
block: Arc<SignedBeaconBlock<T::EthSpec>>,
|
||||
block: MaybeAvailableBlock<T::EthSpec>,
|
||||
block_root: Hash256,
|
||||
parent: Option<PreProcessingSnapshot<T::EthSpec>>,
|
||||
consensus_context: ConsensusContext<T::EthSpec>,
|
||||
@ -617,52 +652,74 @@ type PayloadVerificationHandle<E> =
|
||||
/// - Signatures
|
||||
/// - State root check
|
||||
/// - Per block processing
|
||||
/// - Blobs sidecar has been validated if present
|
||||
///
|
||||
/// Note: a `ExecutionPendingBlock` is not _forever_ valid to be imported, it may later become invalid
|
||||
/// due to finality or some other event. A `ExecutionPendingBlock` should be imported into the
|
||||
/// `BeaconChain` immediately after it is instantiated.
|
||||
pub struct ExecutionPendingBlock<T: BeaconChainTypes> {
|
||||
pub block: Arc<SignedBeaconBlock<T::EthSpec>>,
|
||||
pub block_root: Hash256,
|
||||
pub state: BeaconState<T::EthSpec>,
|
||||
pub parent_block: SignedBeaconBlock<T::EthSpec, BlindedPayload<T::EthSpec>>,
|
||||
pub parent_eth1_finalization_data: Eth1FinalizationData,
|
||||
pub confirmed_state_roots: Vec<Hash256>,
|
||||
pub consensus_context: ConsensusContext<T::EthSpec>,
|
||||
pub block: MaybeAvailableBlock<T::EthSpec>,
|
||||
pub import_data: BlockImportData<T::EthSpec>,
|
||||
pub payload_verification_handle: PayloadVerificationHandle<T::EthSpec>,
|
||||
}
|
||||
|
||||
pub trait IntoGossipVerifiedBlock<T: BeaconChainTypes>: Sized {
|
||||
pub trait IntoGossipVerifiedBlockContents<T: BeaconChainTypes>: Sized {
|
||||
fn into_gossip_verified_block(
|
||||
self,
|
||||
chain: &BeaconChain<T>,
|
||||
) -> Result<GossipVerifiedBlock<T>, BlockError<T::EthSpec>>;
|
||||
fn inner(&self) -> Arc<SignedBeaconBlock<T::EthSpec>>;
|
||||
) -> Result<GossipVerifiedBlockContents<T>, BlockContentsError<T::EthSpec>>;
|
||||
fn inner_block(&self) -> &SignedBeaconBlock<T::EthSpec>;
|
||||
fn inner_blobs(&self) -> Option<SignedBlobSidecarList<T::EthSpec>>;
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> IntoGossipVerifiedBlock<T> for GossipVerifiedBlock<T> {
|
||||
impl<T: BeaconChainTypes> IntoGossipVerifiedBlockContents<T> for GossipVerifiedBlockContents<T> {
|
||||
fn into_gossip_verified_block(
|
||||
self,
|
||||
_chain: &BeaconChain<T>,
|
||||
) -> Result<GossipVerifiedBlock<T>, BlockError<T::EthSpec>> {
|
||||
) -> Result<GossipVerifiedBlockContents<T>, BlockContentsError<T::EthSpec>> {
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
fn inner(&self) -> Arc<SignedBeaconBlock<T::EthSpec>> {
|
||||
self.block.clone()
|
||||
fn inner_block(&self) -> &SignedBeaconBlock<T::EthSpec> {
|
||||
self.0.block.as_block()
|
||||
}
|
||||
fn inner_blobs(&self) -> Option<SignedBlobSidecarList<T::EthSpec>> {
|
||||
self.1.as_ref().map(|blobs| {
|
||||
VariableList::from(
|
||||
blobs
|
||||
.into_iter()
|
||||
.map(GossipVerifiedBlob::signed_blob)
|
||||
.collect::<Vec<_>>(),
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> IntoGossipVerifiedBlock<T> for Arc<SignedBeaconBlock<T::EthSpec>> {
|
||||
impl<T: BeaconChainTypes> IntoGossipVerifiedBlockContents<T> for SignedBlockContents<T::EthSpec> {
|
||||
fn into_gossip_verified_block(
|
||||
self,
|
||||
chain: &BeaconChain<T>,
|
||||
) -> Result<GossipVerifiedBlock<T>, BlockError<T::EthSpec>> {
|
||||
GossipVerifiedBlock::new(self, chain)
|
||||
) -> Result<GossipVerifiedBlockContents<T>, BlockContentsError<T::EthSpec>> {
|
||||
let (block, blobs) = self.deconstruct();
|
||||
let gossip_verified_block = GossipVerifiedBlock::new(Arc::new(block), chain)?;
|
||||
let gossip_verified_blobs = blobs
|
||||
.map(|blobs| {
|
||||
Ok::<_, GossipBlobError<T::EthSpec>>(VariableList::from(
|
||||
blobs
|
||||
.into_iter()
|
||||
.map(|blob| GossipVerifiedBlob::new(blob, chain))
|
||||
.collect::<Result<Vec<_>, GossipBlobError<T::EthSpec>>>()?,
|
||||
))
|
||||
})
|
||||
.transpose()?;
|
||||
Ok((gossip_verified_block, gossip_verified_blobs))
|
||||
}
|
||||
|
||||
fn inner(&self) -> Arc<SignedBeaconBlock<T::EthSpec>> {
|
||||
self.clone()
|
||||
fn inner_block(&self) -> &SignedBeaconBlock<T::EthSpec> {
|
||||
self.signed_block()
|
||||
}
|
||||
|
||||
fn inner_blobs(&self) -> Option<SignedBlobSidecarList<T::EthSpec>> {
|
||||
self.blobs_cloned()
|
||||
}
|
||||
}
|
||||
|
||||
@ -762,11 +819,15 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
|
||||
// Do not process a block that doesn't descend from the finalized root.
|
||||
//
|
||||
// We check this *before* we load the parent so that we can return a more detailed error.
|
||||
check_block_is_finalized_checkpoint_or_descendant(chain, &fork_choice_read_lock, &block)?;
|
||||
let block = check_block_is_finalized_checkpoint_or_descendant(
|
||||
chain,
|
||||
&fork_choice_read_lock,
|
||||
block,
|
||||
)?;
|
||||
drop(fork_choice_read_lock);
|
||||
|
||||
let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch());
|
||||
let (parent_block, block) = verify_parent_block_is_known(chain, block)?;
|
||||
let (parent_block, block) = verify_parent_block_is_known(block_root, chain, block)?;
|
||||
|
||||
// Track the number of skip slots between the block and its parent.
|
||||
metrics::set_gauge(
|
||||
@ -825,7 +886,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
|
||||
);
|
||||
|
||||
// The state produced is only valid for determining proposer/attester shuffling indices.
|
||||
let state = cheap_state_advance_to_obtain_committees(
|
||||
let state = cheap_state_advance_to_obtain_committees::<_, BlockError<T::EthSpec>>(
|
||||
&mut parent.pre_state,
|
||||
parent.beacon_state_root,
|
||||
block.slot(),
|
||||
@ -877,7 +938,9 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
|
||||
.observe_proposal(block_root, block.message())
|
||||
.map_err(|e| BlockError::BeaconChainError(e.into()))?
|
||||
{
|
||||
SeenBlock::Slashable => return Err(BlockError::Slashable),
|
||||
SeenBlock::Slashable => {
|
||||
return Err(BlockError::Slashable);
|
||||
}
|
||||
SeenBlock::Duplicate => return Err(BlockError::BlockIsAlreadyKnown),
|
||||
SeenBlock::UniqueNonSlashable => {}
|
||||
};
|
||||
@ -895,7 +958,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
|
||||
// Having checked the proposer index and the block root we can cache them.
|
||||
let consensus_context = ConsensusContext::new(block.slot())
|
||||
.set_current_block_root(block_root)
|
||||
.set_proposer_index(block.message().proposer_index());
|
||||
.set_proposer_index(block.as_block().message().proposer_index());
|
||||
|
||||
Ok(Self {
|
||||
block,
|
||||
@ -928,7 +991,7 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for GossipVerifiedBlock<T
|
||||
}
|
||||
|
||||
fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
|
||||
&self.block
|
||||
self.block.as_block()
|
||||
}
|
||||
}
|
||||
|
||||
@ -938,12 +1001,13 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
|
||||
///
|
||||
/// Returns an error if the block is invalid, or if the block was unable to be verified.
|
||||
pub fn new(
|
||||
block: Arc<SignedBeaconBlock<T::EthSpec>>,
|
||||
block: MaybeAvailableBlock<T::EthSpec>,
|
||||
block_root: Hash256,
|
||||
chain: &BeaconChain<T>,
|
||||
) -> Result<Self, BlockError<T::EthSpec>> {
|
||||
// Ensure the block is the correct structure for the fork at `block.slot()`.
|
||||
block
|
||||
.as_block()
|
||||
.fork_name(&chain.spec)
|
||||
.map_err(BlockError::InconsistentFork)?;
|
||||
|
||||
@ -952,7 +1016,7 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
|
||||
|
||||
let (mut parent, block) = load_parent(block_root, block, chain)?;
|
||||
|
||||
let state = cheap_state_advance_to_obtain_committees(
|
||||
let state = cheap_state_advance_to_obtain_committees::<_, BlockError<T::EthSpec>>(
|
||||
&mut parent.pre_state,
|
||||
parent.beacon_state_root,
|
||||
block.slot(),
|
||||
@ -966,7 +1030,7 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
|
||||
let mut consensus_context =
|
||||
ConsensusContext::new(block.slot()).set_current_block_root(block_root);
|
||||
|
||||
signature_verifier.include_all_signatures(&block, &mut consensus_context)?;
|
||||
signature_verifier.include_all_signatures(block.as_block(), &mut consensus_context)?;
|
||||
|
||||
if signature_verifier.verify().is_ok() {
|
||||
Ok(Self {
|
||||
@ -982,7 +1046,7 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
|
||||
|
||||
/// As for `new` above but producing `BlockSlashInfo`.
|
||||
pub fn check_slashable(
|
||||
block: Arc<SignedBeaconBlock<T::EthSpec>>,
|
||||
block: MaybeAvailableBlock<T::EthSpec>,
|
||||
block_root: Hash256,
|
||||
chain: &BeaconChain<T>,
|
||||
) -> Result<Self, BlockSlashInfo<BlockError<T::EthSpec>>> {
|
||||
@ -1002,7 +1066,7 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
|
||||
load_parent(from.block_root, from.block, chain)?
|
||||
};
|
||||
|
||||
let state = cheap_state_advance_to_obtain_committees(
|
||||
let state = cheap_state_advance_to_obtain_committees::<_, BlockError<T::EthSpec>>(
|
||||
&mut parent.pre_state,
|
||||
parent.beacon_state_root,
|
||||
block.slot(),
|
||||
@ -1017,11 +1081,14 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
|
||||
// signature.
|
||||
let mut consensus_context = from.consensus_context;
|
||||
signature_verifier
|
||||
.include_all_signatures_except_proposal(&block, &mut consensus_context)?;
|
||||
.include_all_signatures_except_proposal(block.as_ref(), &mut consensus_context)?;
|
||||
|
||||
if signature_verifier.verify().is_ok() {
|
||||
Ok(Self {
|
||||
block,
|
||||
block: MaybeAvailableBlock::AvailabilityPending {
|
||||
block_root: from.block_root,
|
||||
block,
|
||||
},
|
||||
block_root: from.block_root,
|
||||
parent: Some(parent),
|
||||
consensus_context,
|
||||
@ -1074,7 +1141,7 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for SignatureVerifiedBloc
|
||||
}
|
||||
|
||||
fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
|
||||
&self.block
|
||||
self.block.as_block()
|
||||
}
|
||||
}
|
||||
|
||||
@ -1090,8 +1157,19 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for Arc<SignedBeaconBlock
|
||||
// Perform an early check to prevent wasting time on irrelevant blocks.
|
||||
let block_root = check_block_relevancy(&self, block_root, chain)
|
||||
.map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?;
|
||||
|
||||
SignatureVerifiedBlock::check_slashable(self, block_root, chain)?
|
||||
let maybe_available = chain
|
||||
.data_availability_checker
|
||||
.check_rpc_block_availability(RpcBlock::new_without_blobs(
|
||||
Some(block_root),
|
||||
self.clone(),
|
||||
))
|
||||
.map_err(|e| {
|
||||
BlockSlashInfo::SignatureNotChecked(
|
||||
self.signed_block_header(),
|
||||
BlockError::AvailabilityCheck(e),
|
||||
)
|
||||
})?;
|
||||
SignatureVerifiedBlock::check_slashable(maybe_available, block_root, chain)?
|
||||
.into_execution_pending_block_slashable(block_root, chain, notify_execution_layer)
|
||||
}
|
||||
|
||||
@ -1100,6 +1178,36 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for Arc<SignedBeaconBlock
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for RpcBlock<T::EthSpec> {
|
||||
/// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock`
|
||||
/// and then using that implementation of `IntoExecutionPendingBlock` to complete verification.
|
||||
fn into_execution_pending_block_slashable(
|
||||
self,
|
||||
block_root: Hash256,
|
||||
chain: &Arc<BeaconChain<T>>,
|
||||
notify_execution_layer: NotifyExecutionLayer,
|
||||
) -> Result<ExecutionPendingBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>> {
|
||||
// Perform an early check to prevent wasting time on irrelevant blocks.
|
||||
let block_root = check_block_relevancy(self.as_block(), block_root, chain)
|
||||
.map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?;
|
||||
let maybe_available = chain
|
||||
.data_availability_checker
|
||||
.check_rpc_block_availability(self.clone())
|
||||
.map_err(|e| {
|
||||
BlockSlashInfo::SignatureNotChecked(
|
||||
self.signed_block_header(),
|
||||
BlockError::AvailabilityCheck(e),
|
||||
)
|
||||
})?;
|
||||
SignatureVerifiedBlock::check_slashable(maybe_available, block_root, chain)?
|
||||
.into_execution_pending_block_slashable(block_root, chain, notify_execution_layer)
|
||||
}
|
||||
|
||||
fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
|
||||
self.as_block()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
|
||||
/// Instantiates `Self`, a wrapper that indicates that the given `block` is fully valid. See
|
||||
/// the struct-level documentation for more information.
|
||||
@ -1109,7 +1217,7 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
|
||||
///
|
||||
/// Returns an error if the block is invalid, or if the block was unable to be verified.
|
||||
pub fn from_signature_verified_components(
|
||||
block: Arc<SignedBeaconBlock<T::EthSpec>>,
|
||||
block: MaybeAvailableBlock<T::EthSpec>,
|
||||
block_root: Hash256,
|
||||
parent: PreProcessingSnapshot<T::EthSpec>,
|
||||
mut consensus_context: ConsensusContext<T::EthSpec>,
|
||||
@ -1145,14 +1253,14 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
|
||||
// because it will revert finalization. Note that the finalized block is stored in fork
|
||||
// choice, so we will not reject any child of the finalized block (this is relevant during
|
||||
// genesis).
|
||||
return Err(BlockError::ParentUnknown(block));
|
||||
return Err(BlockError::ParentUnknown(block.into_rpc_block()));
|
||||
}
|
||||
|
||||
/*
|
||||
* Perform cursory checks to see if the block is even worth processing.
|
||||
*/
|
||||
|
||||
check_block_relevancy(&block, block_root, chain)?;
|
||||
check_block_relevancy(block.as_block(), block_root, chain)?;
|
||||
|
||||
// Define a future that will verify the execution payload with an execution engine.
|
||||
//
|
||||
@ -1160,7 +1268,7 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
|
||||
// with the payload verification.
|
||||
let payload_notifier = PayloadNotifier::new(
|
||||
chain.clone(),
|
||||
block.clone(),
|
||||
block.block_cloned(),
|
||||
&parent.pre_state,
|
||||
notify_execution_layer,
|
||||
)?;
|
||||
@ -1310,7 +1418,9 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
|
||||
StoreOp::PutStateTemporaryFlag(state_root),
|
||||
]
|
||||
};
|
||||
chain.store.do_atomically(state_batch)?;
|
||||
chain
|
||||
.store
|
||||
.do_atomically_with_block_and_blobs_cache(state_batch)?;
|
||||
drop(txn_lock);
|
||||
|
||||
confirmed_state_roots.push(state_root);
|
||||
@ -1401,13 +1511,13 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
|
||||
&state,
|
||||
&chain.log,
|
||||
);
|
||||
write_block(&block, block_root, &chain.log);
|
||||
write_block(block.as_block(), block_root, &chain.log);
|
||||
|
||||
let core_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CORE);
|
||||
|
||||
if let Err(err) = per_block_processing(
|
||||
&mut state,
|
||||
&block,
|
||||
block.as_block(),
|
||||
// Signatures were verified earlier in this function.
|
||||
BlockSignatureStrategy::NoVerification,
|
||||
StateProcessingStrategy::Accurate,
|
||||
@ -1491,12 +1601,14 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
|
||||
|
||||
Ok(Self {
|
||||
block,
|
||||
block_root,
|
||||
state,
|
||||
parent_block: parent.beacon_block,
|
||||
parent_eth1_finalization_data,
|
||||
confirmed_state_roots,
|
||||
consensus_context,
|
||||
import_data: BlockImportData {
|
||||
block_root,
|
||||
state,
|
||||
parent_block: parent.beacon_block,
|
||||
parent_eth1_finalization_data,
|
||||
confirmed_state_roots,
|
||||
consensus_context,
|
||||
},
|
||||
payload_verification_handle,
|
||||
})
|
||||
}
|
||||
@ -1551,13 +1663,16 @@ fn check_block_against_finalized_slot<T: BeaconChainTypes>(
|
||||
/// ## Warning
|
||||
///
|
||||
/// Taking a lock on the `chain.canonical_head.fork_choice` might cause a deadlock here.
|
||||
pub fn check_block_is_finalized_checkpoint_or_descendant<T: BeaconChainTypes>(
|
||||
pub fn check_block_is_finalized_checkpoint_or_descendant<
|
||||
T: BeaconChainTypes,
|
||||
B: AsBlock<T::EthSpec>,
|
||||
>(
|
||||
chain: &BeaconChain<T>,
|
||||
fork_choice: &BeaconForkChoice<T>,
|
||||
block: &Arc<SignedBeaconBlock<T::EthSpec>>,
|
||||
) -> Result<(), BlockError<T::EthSpec>> {
|
||||
block: B,
|
||||
) -> Result<B, BlockError<T::EthSpec>> {
|
||||
if fork_choice.is_finalized_checkpoint_or_descendant(block.parent_root()) {
|
||||
Ok(())
|
||||
Ok(block)
|
||||
} else {
|
||||
// If fork choice does *not* consider the parent to be a descendant of the finalized block,
|
||||
// then there are two more cases:
|
||||
@ -1576,7 +1691,7 @@ pub fn check_block_is_finalized_checkpoint_or_descendant<T: BeaconChainTypes>(
|
||||
block_parent_root: block.parent_root(),
|
||||
})
|
||||
} else {
|
||||
Err(BlockError::ParentUnknown(block.clone()))
|
||||
Err(BlockError::ParentUnknown(block.into_rpc_block()))
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1647,17 +1762,21 @@ pub fn get_block_root<E: EthSpec>(block: &SignedBeaconBlock<E>) -> Hash256 {
|
||||
/// fork choice.
|
||||
#[allow(clippy::type_complexity)]
|
||||
fn verify_parent_block_is_known<T: BeaconChainTypes>(
|
||||
block_root: Hash256,
|
||||
chain: &BeaconChain<T>,
|
||||
block: Arc<SignedBeaconBlock<T::EthSpec>>,
|
||||
) -> Result<(ProtoBlock, Arc<SignedBeaconBlock<T::EthSpec>>), BlockError<T::EthSpec>> {
|
||||
if let Some(proto_block) = chain
|
||||
.canonical_head
|
||||
.fork_choice_read_lock()
|
||||
.get_block(&block.message().parent_root())
|
||||
.get_block(&block.parent_root())
|
||||
{
|
||||
Ok((proto_block, block))
|
||||
} else {
|
||||
Err(BlockError::ParentUnknown(block))
|
||||
Err(BlockError::ParentUnknown(RpcBlock::new_without_blobs(
|
||||
Some(block_root),
|
||||
block,
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
@ -1666,17 +1785,11 @@ fn verify_parent_block_is_known<T: BeaconChainTypes>(
|
||||
/// Returns `Err(BlockError::ParentUnknown)` if the parent is not found, or if an error occurs
|
||||
/// whilst attempting the operation.
|
||||
#[allow(clippy::type_complexity)]
|
||||
fn load_parent<T: BeaconChainTypes>(
|
||||
fn load_parent<T: BeaconChainTypes, B: AsBlock<T::EthSpec>>(
|
||||
block_root: Hash256,
|
||||
block: Arc<SignedBeaconBlock<T::EthSpec>>,
|
||||
block: B,
|
||||
chain: &BeaconChain<T>,
|
||||
) -> Result<
|
||||
(
|
||||
PreProcessingSnapshot<T::EthSpec>,
|
||||
Arc<SignedBeaconBlock<T::EthSpec>>,
|
||||
),
|
||||
BlockError<T::EthSpec>,
|
||||
> {
|
||||
) -> Result<(PreProcessingSnapshot<T::EthSpec>, B), BlockError<T::EthSpec>> {
|
||||
let spec = &chain.spec;
|
||||
|
||||
// Reject any block if its parent is not known to fork choice.
|
||||
@ -1694,7 +1807,7 @@ fn load_parent<T: BeaconChainTypes>(
|
||||
.fork_choice_read_lock()
|
||||
.contains_block(&block.parent_root())
|
||||
{
|
||||
return Err(BlockError::ParentUnknown(block));
|
||||
return Err(BlockError::ParentUnknown(block.into_rpc_block()));
|
||||
}
|
||||
|
||||
let block_delay = chain
|
||||
@ -1794,6 +1907,30 @@ fn load_parent<T: BeaconChainTypes>(
|
||||
result
|
||||
}
|
||||
|
||||
/// This trait is used to unify `BlockError` and `BlobError` so
|
||||
/// `cheap_state_advance_to_obtain_committees` can be re-used in gossip blob validation.
|
||||
pub trait CheapStateAdvanceError: From<BeaconStateError> + From<BeaconChainError> + Debug {
|
||||
fn not_later_than_parent_error(block_slot: Slot, state_slot: Slot) -> Self;
|
||||
}
|
||||
|
||||
impl<E: EthSpec> CheapStateAdvanceError for BlockError<E> {
|
||||
fn not_later_than_parent_error(block_slot: Slot, parent_slot: Slot) -> Self {
|
||||
BlockError::BlockIsNotLaterThanParent {
|
||||
block_slot,
|
||||
parent_slot,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: EthSpec> CheapStateAdvanceError for GossipBlobError<E> {
|
||||
fn not_later_than_parent_error(blob_slot: Slot, parent_slot: Slot) -> Self {
|
||||
GossipBlobError::BlobIsNotLaterThanParent {
|
||||
blob_slot,
|
||||
parent_slot,
|
||||
}
|
||||
}
|
||||
}
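The two impls above are the whole trick: a shared helper can stay generic over the caller's error type instead of hard-coding `BlockError`. A minimal, self-contained sketch of the same shape, with simplified stand-in error enums rather than the real Lighthouse types, might look like this:

// Stand-in sketch of the `CheapStateAdvanceError` pattern: two caller-specific error
// enums implement one trait so a shared check can report "not later than parent" in
// whichever error type the caller expects. Types are simplified, not Lighthouse's.
#[derive(Debug)]
enum BlockErr {
    NotLaterThanParent { block_slot: u64, parent_slot: u64 },
}

#[derive(Debug)]
enum BlobErr {
    NotLaterThanParent { blob_slot: u64, parent_slot: u64 },
}

trait NotLaterThanParentError: std::fmt::Debug {
    fn not_later_than_parent(slot: u64, parent_slot: u64) -> Self;
}

impl NotLaterThanParentError for BlockErr {
    fn not_later_than_parent(block_slot: u64, parent_slot: u64) -> Self {
        BlockErr::NotLaterThanParent { block_slot, parent_slot }
    }
}

impl NotLaterThanParentError for BlobErr {
    fn not_later_than_parent(blob_slot: u64, parent_slot: u64) -> Self {
        BlobErr::NotLaterThanParent { blob_slot, parent_slot }
    }
}

// Mirrors the generic signature of `cheap_state_advance_to_obtain_committees` above:
// the same body serves block verification and gossip blob verification.
fn check_not_later_than_parent<Err: NotLaterThanParentError>(
    slot: u64,
    state_slot: u64,
) -> Result<(), Err> {
    if state_slot > slot {
        Err(Err::not_later_than_parent(slot, state_slot))
    } else {
        Ok(())
    }
}

fn main() {
    let as_block_err: Result<(), BlockErr> = check_not_later_than_parent(5, 9);
    let as_blob_err: Result<(), BlobErr> = check_not_later_than_parent(9, 5);
    println!("{:?} {:?}", as_block_err, as_blob_err);
}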
|
||||
|
||||
/// Performs a cheap (time-efficient) state advancement so the committees and proposer shuffling for
|
||||
/// `slot` can be obtained from `state`.
|
||||
///
|
||||
@ -1805,12 +1942,12 @@ fn load_parent<T: BeaconChainTypes>(
|
||||
/// and `Cow::Borrowed(state)` will be returned. Otherwise, the state will be cloned, cheaply
|
||||
/// advanced and then returned as a `Cow::Owned`. The end result is that the given `state` is never
|
||||
/// mutated to be invalid (in fact, it is never changed beyond a simple committee cache build).
|
||||
fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(
|
||||
pub fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec, Err: CheapStateAdvanceError>(
|
||||
state: &'a mut BeaconState<E>,
|
||||
state_root_opt: Option<Hash256>,
|
||||
block_slot: Slot,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<Cow<'a, BeaconState<E>>, BlockError<E>> {
|
||||
) -> Result<Cow<'a, BeaconState<E>>, Err> {
|
||||
let block_epoch = block_slot.epoch(E::slots_per_epoch());
|
||||
|
||||
if state.current_epoch() == block_epoch {
|
||||
@ -1821,10 +1958,7 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(
|
||||
|
||||
Ok(Cow::Borrowed(state))
|
||||
} else if state.slot() > block_slot {
|
||||
Err(BlockError::BlockIsNotLaterThanParent {
|
||||
block_slot,
|
||||
parent_slot: state.slot(),
|
||||
})
|
||||
Err(Err::not_later_than_parent_error(block_slot, state.slot()))
|
||||
} else {
|
||||
let mut state = state.clone_with(CloneConfig::committee_caches_only());
|
||||
let target_slot = block_epoch.start_slot(E::slots_per_epoch());
|
||||
@ -1832,7 +1966,7 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(
|
||||
// Advance the state into the same epoch as the block. Use the "partial" method since state
|
||||
// roots are not important for proposer/attester shuffling.
|
||||
partial_state_advance(&mut state, state_root_opt, target_slot, spec)
|
||||
.map_err(|e| BlockError::BeaconChainError(BeaconChainError::from(e)))?;
|
||||
.map_err(BeaconChainError::from)?;
|
||||
|
||||
state.build_committee_cache(RelativeEpoch::Previous, spec)?;
|
||||
state.build_committee_cache(RelativeEpoch::Current, spec)?;
|
||||
|
522
beacon_node/beacon_chain/src/block_verification_types.rs
Normal file
@ -0,0 +1,522 @@
|
||||
use crate::blob_verification::{GossipBlobError, GossipVerifiedBlobList};
|
||||
use crate::block_verification::BlockError;
|
||||
use crate::data_availability_checker::AvailabilityCheckError;
|
||||
pub use crate::data_availability_checker::{AvailableBlock, MaybeAvailableBlock};
|
||||
use crate::eth1_finalization_cache::Eth1FinalizationData;
|
||||
use crate::{get_block_root, GossipVerifiedBlock, PayloadVerificationOutcome};
|
||||
use derivative::Derivative;
|
||||
use ssz_types::VariableList;
|
||||
use state_processing::ConsensusContext;
|
||||
use std::sync::Arc;
|
||||
use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList};
|
||||
use types::{
|
||||
BeaconBlockRef, BeaconState, BlindedPayload, BlobSidecarList, Epoch, EthSpec, Hash256,
|
||||
SignedBeaconBlock, SignedBeaconBlockHeader, Slot,
|
||||
};
|
||||
|
||||
/// A block that has been received over RPC. It has 2 internal variants:
|
||||
///
|
||||
/// 1. `BlockAndBlobs`: A fully available post-deneb block with all the blobs available. This variant
|
||||
/// is only constructed after making consistency checks between blocks and blobs.
|
||||
/// Hence, it is fully self contained w.r.t verification. i.e. this block has all the required
|
||||
/// data to get verified and imported into fork choice.
|
||||
///
|
||||
/// 2. `Block`: This can be a fully available pre-deneb block **or** a post-deneb block that may or may
|
||||
/// not require blobs to be considered fully available.
|
||||
///
|
||||
/// Note: We make a distinction over blocks received over gossip because
|
||||
/// in a post-deneb world, the blobs corresponding to a given block that are received
|
||||
/// over rpc do not contain the proposer signature for dos resistance.
|
||||
#[derive(Debug, Clone, Derivative)]
|
||||
#[derivative(Hash(bound = "E: EthSpec"))]
|
||||
pub struct RpcBlock<E: EthSpec> {
|
||||
block_root: Hash256,
|
||||
block: RpcBlockInner<E>,
|
||||
}
|
||||
|
||||
impl<E: EthSpec> RpcBlock<E> {
|
||||
pub fn block_root(&self) -> Hash256 {
|
||||
self.block_root
|
||||
}
|
||||
}
|
||||
|
||||
/// Note: This variant is intentionally private because we want to safely construct the
|
||||
/// internal variants after applying consistency checks to ensure that the block and blobs
|
||||
/// are consistent with respect to each other.
|
||||
#[derive(Debug, Clone, Derivative)]
|
||||
#[derivative(Hash(bound = "E: EthSpec"))]
|
||||
enum RpcBlockInner<E: EthSpec> {
|
||||
/// Single block lookup response. This should potentially hit the data availability cache.
|
||||
Block(Arc<SignedBeaconBlock<E>>),
|
||||
/// This variant is used with parent lookups and by-range responses. It should have all blobs
|
||||
/// ordered, all block roots matching, and the correct number of blobs for this block.
|
||||
BlockAndBlobs(Arc<SignedBeaconBlock<E>>, BlobSidecarList<E>),
|
||||
}
|
||||
|
||||
impl<E: EthSpec> RpcBlock<E> {
|
||||
/// Constructs a `Block` variant.
|
||||
pub fn new_without_blobs(
|
||||
block_root: Option<Hash256>,
|
||||
block: Arc<SignedBeaconBlock<E>>,
|
||||
) -> Self {
|
||||
let block_root = block_root.unwrap_or_else(|| get_block_root(&block));
|
||||
|
||||
Self {
|
||||
block_root,
|
||||
block: RpcBlockInner::Block(block),
|
||||
}
|
||||
}
|
||||
|
||||
/// Constructs a new `BlockAndBlobs` variant after making consistency
|
||||
/// checks between the provided blocks and blobs.
|
||||
pub fn new(
|
||||
block_root: Option<Hash256>,
|
||||
block: Arc<SignedBeaconBlock<E>>,
|
||||
blobs: Option<BlobSidecarList<E>>,
|
||||
) -> Result<Self, AvailabilityCheckError> {
|
||||
let block_root = block_root.unwrap_or_else(|| get_block_root(&block));
|
||||
|
||||
if let (Some(blobs), Ok(block_commitments)) = (
|
||||
blobs.as_ref(),
|
||||
block.message().body().blob_kzg_commitments(),
|
||||
) {
|
||||
if blobs.len() != block_commitments.len() {
|
||||
return Err(AvailabilityCheckError::MissingBlobs);
|
||||
}
|
||||
for (blob, &block_commitment) in blobs.iter().zip(block_commitments.iter()) {
|
||||
let blob_block_root = blob.block_root;
|
||||
if blob_block_root != block_root {
|
||||
return Err(AvailabilityCheckError::InconsistentBlobBlockRoots {
|
||||
block_root,
|
||||
blob_block_root,
|
||||
});
|
||||
}
|
||||
let blob_commitment = blob.kzg_commitment;
|
||||
if blob_commitment != block_commitment {
|
||||
return Err(AvailabilityCheckError::KzgCommitmentMismatch {
|
||||
block_commitment,
|
||||
blob_commitment,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
let inner = match blobs {
|
||||
Some(blobs) => RpcBlockInner::BlockAndBlobs(block, blobs),
|
||||
None => RpcBlockInner::Block(block),
|
||||
};
|
||||
Ok(Self {
|
||||
block_root,
|
||||
block: inner,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn new_from_fixed(
|
||||
block_root: Hash256,
|
||||
block: Arc<SignedBeaconBlock<E>>,
|
||||
blobs: FixedBlobSidecarList<E>,
|
||||
) -> Result<Self, AvailabilityCheckError> {
|
||||
let filtered = blobs
|
||||
.into_iter()
|
||||
.filter_map(|b| b.clone())
|
||||
.collect::<Vec<_>>();
|
||||
let blobs = if filtered.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(VariableList::from(filtered))
|
||||
};
|
||||
Self::new(Some(block_root), block, blobs)
|
||||
}
|
||||
|
||||
pub fn deconstruct(
|
||||
self,
|
||||
) -> (
|
||||
Hash256,
|
||||
Arc<SignedBeaconBlock<E>>,
|
||||
Option<BlobSidecarList<E>>,
|
||||
) {
|
||||
let block_root = self.block_root();
|
||||
match self.block {
|
||||
RpcBlockInner::Block(block) => (block_root, block, None),
|
||||
RpcBlockInner::BlockAndBlobs(block, blobs) => (block_root, block, Some(blobs)),
|
||||
}
|
||||
}
|
||||
pub fn n_blobs(&self) -> usize {
|
||||
match &self.block {
|
||||
RpcBlockInner::Block(_) => 0,
|
||||
RpcBlockInner::BlockAndBlobs(_, blobs) => blobs.len(),
|
||||
}
|
||||
}
|
||||
}
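The part of `RpcBlock::new` worth internalising is the consistency check: the number of blobs must match the block's KZG commitments, and every blob must reference the same block root and the matching commitment. Below is a minimal, self-contained sketch of that check with toy stand-in types (not the real `SignedBeaconBlock` or `BlobSidecar`):

// Toy sketch of the consistency checks performed by the constructor above. All types
// here are simplified stand-ins; the real code compares `BlobSidecar`s against the
// block's `blob_kzg_commitments` and its block root.
#[derive(Debug, PartialEq, Clone, Copy)]
struct Commitment(u64);

struct ToyBlob {
    block_root: u64,
    commitment: Commitment,
}

#[derive(Debug)]
enum ToyError {
    MissingBlobs,
    InconsistentBlockRoots,
    CommitmentMismatch,
}

fn check_consistency(
    block_root: u64,
    block_commitments: &[Commitment],
    blobs: &[ToyBlob],
) -> Result<(), ToyError> {
    // Blob count must equal the number of commitments in the block.
    if blobs.len() != block_commitments.len() {
        return Err(ToyError::MissingBlobs);
    }
    for (blob, commitment) in blobs.iter().zip(block_commitments) {
        // Every blob must point at this block...
        if blob.block_root != block_root {
            return Err(ToyError::InconsistentBlockRoots);
        }
        // ...and carry the commitment at its index.
        if blob.commitment != *commitment {
            return Err(ToyError::CommitmentMismatch);
        }
    }
    Ok(())
}

fn main() {
    let blobs = vec![ToyBlob { block_root: 1, commitment: Commitment(7) }];
    // One commitment, one matching blob: accepted.
    assert!(check_consistency(1, &[Commitment(7)], &blobs).is_ok());
    // Two commitments but only one blob: rejected as MissingBlobs.
    assert!(check_consistency(1, &[Commitment(7), Commitment(8)], &blobs).is_err());
}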
|
||||
|
||||
/// A block that has gone through all pre-deneb block processing checks including block processing
|
||||
/// and execution by an EL client. This block hasn't necessarily completed data availability checks.
|
||||
///
|
||||
///
|
||||
/// It contains 2 variants:
|
||||
/// 1. `Available`: This block has been executed and also contains all data to consider it a
|
||||
/// fully available block. i.e. for post-deneb, this implies that this contains all the
|
||||
/// required blobs.
|
||||
/// 2. `AvailabilityPending`: This block hasn't received all required blobs to consider it a
|
||||
/// fully available block.
|
||||
pub enum ExecutedBlock<E: EthSpec> {
|
||||
Available(AvailableExecutedBlock<E>),
|
||||
AvailabilityPending(AvailabilityPendingExecutedBlock<E>),
|
||||
}
|
||||
|
||||
impl<E: EthSpec> ExecutedBlock<E> {
|
||||
pub fn new(
|
||||
block: MaybeAvailableBlock<E>,
|
||||
import_data: BlockImportData<E>,
|
||||
payload_verification_outcome: PayloadVerificationOutcome,
|
||||
) -> Self {
|
||||
match block {
|
||||
MaybeAvailableBlock::Available(available_block) => {
|
||||
Self::Available(AvailableExecutedBlock::new(
|
||||
available_block,
|
||||
import_data,
|
||||
payload_verification_outcome,
|
||||
))
|
||||
}
|
||||
MaybeAvailableBlock::AvailabilityPending {
|
||||
block_root: _,
|
||||
block: pending_block,
|
||||
} => Self::AvailabilityPending(AvailabilityPendingExecutedBlock::new(
|
||||
pending_block,
|
||||
import_data,
|
||||
payload_verification_outcome,
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn as_block(&self) -> &SignedBeaconBlock<E> {
|
||||
match self {
|
||||
Self::Available(available) => available.block.block(),
|
||||
Self::AvailabilityPending(pending) => &pending.block,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn block_root(&self) -> Hash256 {
|
||||
match self {
|
||||
ExecutedBlock::AvailabilityPending(pending) => pending.import_data.block_root,
|
||||
ExecutedBlock::Available(available) => available.import_data.block_root,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A block that has completed all pre-deneb block processing checks including verification
|
||||
/// by an EL client **and** has all requisite blob data to be imported into fork choice.
|
||||
#[derive(PartialEq)]
|
||||
pub struct AvailableExecutedBlock<E: EthSpec> {
|
||||
pub block: AvailableBlock<E>,
|
||||
pub import_data: BlockImportData<E>,
|
||||
pub payload_verification_outcome: PayloadVerificationOutcome,
|
||||
}
|
||||
|
||||
impl<E: EthSpec> AvailableExecutedBlock<E> {
|
||||
pub fn new(
|
||||
block: AvailableBlock<E>,
|
||||
import_data: BlockImportData<E>,
|
||||
payload_verification_outcome: PayloadVerificationOutcome,
|
||||
) -> Self {
|
||||
Self {
|
||||
block,
|
||||
import_data,
|
||||
payload_verification_outcome,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_all_blob_ids(&self) -> Vec<BlobIdentifier> {
|
||||
let num_blobs_expected = self
|
||||
.block
|
||||
.message()
|
||||
.body()
|
||||
.blob_kzg_commitments()
|
||||
.map_or(0, |commitments| commitments.len());
|
||||
let mut blob_ids = Vec::with_capacity(num_blobs_expected);
|
||||
for i in 0..num_blobs_expected {
|
||||
blob_ids.push(BlobIdentifier {
|
||||
block_root: self.import_data.block_root,
|
||||
index: i as u64,
|
||||
});
|
||||
}
|
||||
blob_ids
|
||||
}
|
||||
}
|
||||
|
||||
/// A block that has completed all pre-deneb block processing checks, verification
|
||||
/// by an EL client but does not have all requisite blob data to get imported into
|
||||
/// fork choice.
|
||||
pub struct AvailabilityPendingExecutedBlock<E: EthSpec> {
|
||||
pub block: Arc<SignedBeaconBlock<E>>,
|
||||
pub import_data: BlockImportData<E>,
|
||||
pub payload_verification_outcome: PayloadVerificationOutcome,
|
||||
}
|
||||
|
||||
impl<E: EthSpec> AvailabilityPendingExecutedBlock<E> {
|
||||
pub fn new(
|
||||
block: Arc<SignedBeaconBlock<E>>,
|
||||
import_data: BlockImportData<E>,
|
||||
payload_verification_outcome: PayloadVerificationOutcome,
|
||||
) -> Self {
|
||||
Self {
|
||||
block,
|
||||
import_data,
|
||||
payload_verification_outcome,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn as_block(&self) -> &SignedBeaconBlock<E> {
|
||||
&self.block
|
||||
}
|
||||
|
||||
pub fn num_blobs_expected(&self) -> usize {
|
||||
self.block
|
||||
.message()
|
||||
.body()
|
||||
.blob_kzg_commitments()
|
||||
.map_or(0, |commitments| commitments.len())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub struct BlockImportData<E: EthSpec> {
|
||||
pub block_root: Hash256,
|
||||
pub state: BeaconState<E>,
|
||||
pub parent_block: SignedBeaconBlock<E, BlindedPayload<E>>,
|
||||
pub parent_eth1_finalization_data: Eth1FinalizationData,
|
||||
pub confirmed_state_roots: Vec<Hash256>,
|
||||
pub consensus_context: ConsensusContext<E>,
|
||||
}
|
||||
|
||||
pub type GossipVerifiedBlockContents<T> =
|
||||
(GossipVerifiedBlock<T>, Option<GossipVerifiedBlobList<T>>);
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum BlockContentsError<T: EthSpec> {
|
||||
BlockError(BlockError<T>),
|
||||
BlobError(GossipBlobError<T>),
|
||||
}
|
||||
|
||||
impl<T: EthSpec> From<BlockError<T>> for BlockContentsError<T> {
|
||||
fn from(value: BlockError<T>) -> Self {
|
||||
Self::BlockError(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: EthSpec> From<GossipBlobError<T>> for BlockContentsError<T> {
|
||||
fn from(value: GossipBlobError<T>) -> Self {
|
||||
Self::BlobError(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: EthSpec> std::fmt::Display for BlockContentsError<T> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
BlockContentsError::BlockError(err) => {
|
||||
write!(f, "BlockError({})", err)
|
||||
}
|
||||
BlockContentsError::BlobError(err) => {
|
||||
write!(f, "BlobError({})", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Trait for common block operations.
|
||||
pub trait AsBlock<E: EthSpec> {
|
||||
fn slot(&self) -> Slot;
|
||||
fn epoch(&self) -> Epoch;
|
||||
fn parent_root(&self) -> Hash256;
|
||||
fn state_root(&self) -> Hash256;
|
||||
fn signed_block_header(&self) -> SignedBeaconBlockHeader;
|
||||
fn message(&self) -> BeaconBlockRef<E>;
|
||||
fn as_block(&self) -> &SignedBeaconBlock<E>;
|
||||
fn block_cloned(&self) -> Arc<SignedBeaconBlock<E>>;
|
||||
fn canonical_root(&self) -> Hash256;
|
||||
fn into_rpc_block(self) -> RpcBlock<E>;
|
||||
}
|
||||
|
||||
impl<E: EthSpec> AsBlock<E> for Arc<SignedBeaconBlock<E>> {
|
||||
fn slot(&self) -> Slot {
|
||||
SignedBeaconBlock::slot(self)
|
||||
}
|
||||
|
||||
fn epoch(&self) -> Epoch {
|
||||
SignedBeaconBlock::epoch(self)
|
||||
}
|
||||
|
||||
fn parent_root(&self) -> Hash256 {
|
||||
SignedBeaconBlock::parent_root(self)
|
||||
}
|
||||
|
||||
fn state_root(&self) -> Hash256 {
|
||||
SignedBeaconBlock::state_root(self)
|
||||
}
|
||||
|
||||
fn signed_block_header(&self) -> SignedBeaconBlockHeader {
|
||||
SignedBeaconBlock::signed_block_header(self)
|
||||
}
|
||||
|
||||
fn message(&self) -> BeaconBlockRef<E> {
|
||||
SignedBeaconBlock::message(self)
|
||||
}
|
||||
|
||||
fn as_block(&self) -> &SignedBeaconBlock<E> {
|
||||
self
|
||||
}
|
||||
|
||||
fn block_cloned(&self) -> Arc<SignedBeaconBlock<E>> {
|
||||
Arc::<SignedBeaconBlock<E>>::clone(self)
|
||||
}
|
||||
|
||||
fn canonical_root(&self) -> Hash256 {
|
||||
SignedBeaconBlock::canonical_root(self)
|
||||
}
|
||||
|
||||
fn into_rpc_block(self) -> RpcBlock<E> {
|
||||
RpcBlock::new_without_blobs(None, self)
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: EthSpec> AsBlock<E> for MaybeAvailableBlock<E> {
|
||||
fn slot(&self) -> Slot {
|
||||
self.as_block().slot()
|
||||
}
|
||||
fn epoch(&self) -> Epoch {
|
||||
self.as_block().epoch()
|
||||
}
|
||||
fn parent_root(&self) -> Hash256 {
|
||||
self.as_block().parent_root()
|
||||
}
|
||||
fn state_root(&self) -> Hash256 {
|
||||
self.as_block().state_root()
|
||||
}
|
||||
fn signed_block_header(&self) -> SignedBeaconBlockHeader {
|
||||
self.as_block().signed_block_header()
|
||||
}
|
||||
fn message(&self) -> BeaconBlockRef<E> {
|
||||
self.as_block().message()
|
||||
}
|
||||
fn as_block(&self) -> &SignedBeaconBlock<E> {
|
||||
match &self {
|
||||
MaybeAvailableBlock::Available(block) => block.as_block(),
|
||||
MaybeAvailableBlock::AvailabilityPending {
|
||||
block_root: _,
|
||||
block,
|
||||
} => block,
|
||||
}
|
||||
}
|
||||
fn block_cloned(&self) -> Arc<SignedBeaconBlock<E>> {
|
||||
match &self {
|
||||
MaybeAvailableBlock::Available(block) => block.block_cloned(),
|
||||
MaybeAvailableBlock::AvailabilityPending {
|
||||
block_root: _,
|
||||
block,
|
||||
} => block.clone(),
|
||||
}
|
||||
}
|
||||
fn canonical_root(&self) -> Hash256 {
|
||||
self.as_block().canonical_root()
|
||||
}
|
||||
|
||||
fn into_rpc_block(self) -> RpcBlock<E> {
|
||||
match self {
|
||||
MaybeAvailableBlock::Available(available_block) => available_block.into_rpc_block(),
|
||||
MaybeAvailableBlock::AvailabilityPending { block_root, block } => {
|
||||
RpcBlock::new_without_blobs(Some(block_root), block)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: EthSpec> AsBlock<E> for AvailableBlock<E> {
|
||||
fn slot(&self) -> Slot {
|
||||
self.block().slot()
|
||||
}
|
||||
|
||||
fn epoch(&self) -> Epoch {
|
||||
self.block().epoch()
|
||||
}
|
||||
|
||||
fn parent_root(&self) -> Hash256 {
|
||||
self.block().parent_root()
|
||||
}
|
||||
|
||||
fn state_root(&self) -> Hash256 {
|
||||
self.block().state_root()
|
||||
}
|
||||
|
||||
fn signed_block_header(&self) -> SignedBeaconBlockHeader {
|
||||
self.block().signed_block_header()
|
||||
}
|
||||
|
||||
fn message(&self) -> BeaconBlockRef<E> {
|
||||
self.block().message()
|
||||
}
|
||||
|
||||
fn as_block(&self) -> &SignedBeaconBlock<E> {
|
||||
self.block()
|
||||
}
|
||||
|
||||
fn block_cloned(&self) -> Arc<SignedBeaconBlock<E>> {
|
||||
AvailableBlock::block_cloned(self)
|
||||
}
|
||||
|
||||
fn canonical_root(&self) -> Hash256 {
|
||||
self.block().canonical_root()
|
||||
}
|
||||
|
||||
fn into_rpc_block(self) -> RpcBlock<E> {
|
||||
let (block_root, block, blobs_opt) = self.deconstruct();
|
||||
// Circumvent the constructor here, because an Available block will have already had
|
||||
// consistency checks performed.
|
||||
let inner = match blobs_opt {
|
||||
None => RpcBlockInner::Block(block),
|
||||
Some(blobs) => RpcBlockInner::BlockAndBlobs(block, blobs),
|
||||
};
|
||||
RpcBlock {
|
||||
block_root,
|
||||
block: inner,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: EthSpec> AsBlock<E> for RpcBlock<E> {
|
||||
fn slot(&self) -> Slot {
|
||||
self.as_block().slot()
|
||||
}
|
||||
fn epoch(&self) -> Epoch {
|
||||
self.as_block().epoch()
|
||||
}
|
||||
fn parent_root(&self) -> Hash256 {
|
||||
self.as_block().parent_root()
|
||||
}
|
||||
fn state_root(&self) -> Hash256 {
|
||||
self.as_block().state_root()
|
||||
}
|
||||
fn signed_block_header(&self) -> SignedBeaconBlockHeader {
|
||||
self.as_block().signed_block_header()
|
||||
}
|
||||
fn message(&self) -> BeaconBlockRef<E> {
|
||||
self.as_block().message()
|
||||
}
|
||||
fn as_block(&self) -> &SignedBeaconBlock<E> {
|
||||
match &self.block {
|
||||
RpcBlockInner::Block(block) => block,
|
||||
RpcBlockInner::BlockAndBlobs(block, _) => block,
|
||||
}
|
||||
}
|
||||
fn block_cloned(&self) -> Arc<SignedBeaconBlock<E>> {
|
||||
match &self.block {
|
||||
RpcBlockInner::Block(block) => block.clone(),
|
||||
RpcBlockInner::BlockAndBlobs(block, _) => block.clone(),
|
||||
}
|
||||
}
|
||||
fn canonical_root(&self) -> Hash256 {
|
||||
self.as_block().canonical_root()
|
||||
}
|
||||
|
||||
fn into_rpc_block(self) -> RpcBlock<E> {
|
||||
self
|
||||
}
|
||||
}
|
@ -1,4 +1,5 @@
|
||||
use crate::beacon_chain::{CanonicalHead, BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY};
|
||||
use crate::data_availability_checker::DataAvailabilityChecker;
|
||||
use crate::eth1_chain::{CachingEth1Backend, SszEth1};
|
||||
use crate::eth1_finalization_cache::Eth1FinalizationCache;
|
||||
use crate::fork_choice_signal::ForkChoiceSignalTx;
|
||||
@ -20,6 +21,7 @@ use eth1::Config as Eth1Config;
|
||||
use execution_layer::ExecutionLayer;
|
||||
use fork_choice::{ForkChoice, ResetPayloadStatuses};
|
||||
use futures::channel::mpsc::Sender;
|
||||
use kzg::{Kzg, TrustedSetup};
|
||||
use operation_pool::{OperationPool, PersistedOperationPool};
|
||||
use parking_lot::RwLock;
|
||||
use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold};
|
||||
@ -95,6 +97,7 @@ pub struct BeaconChainBuilder<T: BeaconChainTypes> {
|
||||
// Pending I/O batch that is constructed during building and should be executed atomically
|
||||
// alongside `PersistedBeaconChain` storage when `BeaconChainBuilder::build` is called.
|
||||
pending_io_batch: Vec<KeyValueStoreOp>,
|
||||
trusted_setup: Option<TrustedSetup>,
|
||||
task_executor: Option<TaskExecutor>,
|
||||
}
|
||||
|
||||
@ -134,6 +137,7 @@ where
|
||||
slasher: None,
|
||||
validator_monitor: None,
|
||||
pending_io_batch: vec![],
|
||||
trusted_setup: None,
|
||||
task_executor: None,
|
||||
}
|
||||
}
|
||||
@ -392,6 +396,11 @@ where
|
||||
.init_anchor_info(genesis.beacon_block.message(), retain_historic_states)
|
||||
.map_err(|e| format!("Failed to initialize genesis anchor: {:?}", e))?,
|
||||
);
|
||||
self.pending_io_batch.push(
|
||||
store
|
||||
.init_blob_info(genesis.beacon_block.slot())
|
||||
.map_err(|e| format!("Failed to initialize genesis blob info: {:?}", e))?,
|
||||
);
|
||||
|
||||
let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis)
|
||||
.map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?;
|
||||
@ -515,6 +524,11 @@ where
|
||||
.init_anchor_info(weak_subj_block.message(), retain_historic_states)
|
||||
.map_err(|e| format!("Failed to initialize anchor info: {:?}", e))?,
|
||||
);
|
||||
self.pending_io_batch.push(
|
||||
store
|
||||
.init_blob_info(weak_subj_block.slot())
|
||||
.map_err(|e| format!("Failed to initialize blob info: {:?}", e))?,
|
||||
);
|
||||
|
||||
// Store pruning checkpoint to prevent attempting to prune before the anchor state.
|
||||
self.pending_io_batch
|
||||
@ -625,6 +639,11 @@ where
|
||||
self
|
||||
}
|
||||
|
||||
pub fn trusted_setup(mut self, trusted_setup: TrustedSetup) -> Self {
|
||||
self.trusted_setup = Some(trusted_setup);
|
||||
self
|
||||
}
|
||||
|
||||
/// Consumes `self`, returning a `BeaconChain` if all required parameters have been supplied.
|
||||
///
|
||||
/// An error will be returned at runtime if all required parameters have not been configured.
|
||||
@ -666,6 +685,15 @@ where
|
||||
slot_clock.now().ok_or("Unable to read slot")?
|
||||
};
|
||||
|
||||
let kzg = if let Some(trusted_setup) = self.trusted_setup {
|
||||
let kzg = Kzg::new_from_trusted_setup(trusted_setup)
|
||||
.map_err(|e| format!("Failed to load trusted setup: {:?}", e))?;
|
||||
let kzg_arc = Arc::new(kzg);
|
||||
Some(kzg_arc)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let initial_head_block_root = fork_choice
|
||||
.get_head(current_slot, &self.spec)
|
||||
.map_err(|e| format!("Unable to get fork choice head: {:?}", e))?;
|
||||
@ -826,14 +854,14 @@ where
|
||||
};
|
||||
|
||||
let beacon_chain = BeaconChain {
|
||||
spec: self.spec,
|
||||
spec: self.spec.clone(),
|
||||
config: self.chain_config,
|
||||
store,
|
||||
store: store.clone(),
|
||||
task_executor: self
|
||||
.task_executor
|
||||
.ok_or("Cannot build without task executor")?,
|
||||
store_migrator,
|
||||
slot_clock,
|
||||
slot_clock: slot_clock.clone(),
|
||||
op_pool: self.op_pool.ok_or("Cannot build without op pool")?,
|
||||
// TODO: allow for persisting and loading the pool from disk.
|
||||
naive_aggregation_pool: <_>::default(),
|
||||
@ -855,6 +883,7 @@ where
|
||||
observed_sync_aggregators: <_>::default(),
|
||||
// TODO: allow for persisting and loading the pool from disk.
|
||||
observed_block_producers: <_>::default(),
|
||||
observed_blob_sidecars: <_>::default(),
|
||||
observed_voluntary_exits: <_>::default(),
|
||||
observed_proposer_slashings: <_>::default(),
|
||||
observed_attester_slashings: <_>::default(),
|
||||
@ -896,6 +925,11 @@ where
|
||||
slasher: self.slasher.clone(),
|
||||
validator_monitor: RwLock::new(validator_monitor),
|
||||
genesis_backfill_slot,
|
||||
data_availability_checker: Arc::new(
|
||||
DataAvailabilityChecker::new(slot_clock, kzg.clone(), store, &log, self.spec)
|
||||
.map_err(|e| format!("Error initializing DataAvailabiltyChecker: {:?}", e))?,
|
||||
),
|
||||
kzg,
|
||||
};
|
||||
|
||||
let head = beacon_chain.head_snapshot();
|
||||
@ -958,6 +992,13 @@ where
|
||||
);
|
||||
}
|
||||
|
||||
// Prune blobs older than the blob data availability boundary in the background.
|
||||
if let Some(data_availability_boundary) = beacon_chain.data_availability_boundary() {
|
||||
beacon_chain
|
||||
.store_migrator
|
||||
.process_prune_blobs(data_availability_boundary);
|
||||
}
|
||||
|
||||
Ok(beacon_chain)
|
||||
}
|
||||
}
|
||||
@ -1055,6 +1096,7 @@ fn descriptive_db_error(item: &str, error: &StoreError) -> String {
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::test_utils::EphemeralHarnessType;
|
||||
use crate::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD;
|
||||
use ethereum_hashing::hash;
|
||||
use genesis::{
|
||||
@ -1069,6 +1111,7 @@ mod test {
|
||||
use types::{EthSpec, MinimalEthSpec, Slot};
|
||||
|
||||
type TestEthSpec = MinimalEthSpec;
|
||||
type Builder = BeaconChainBuilder<EphemeralHarnessType<TestEthSpec>>;
|
||||
|
||||
fn get_logger() -> Logger {
|
||||
let builder = NullLoggerBuilder;
|
||||
@ -1101,7 +1144,7 @@ mod test {
|
||||
let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
|
||||
let runtime = TestRuntime::default();
|
||||
|
||||
let chain = BeaconChainBuilder::new(MinimalEthSpec)
|
||||
let chain = Builder::new(MinimalEthSpec)
|
||||
.logger(log.clone())
|
||||
.store(Arc::new(store))
|
||||
.task_executor(runtime.task_executor.clone())
|
||||
|
@ -984,6 +984,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
.start_slot(T::EthSpec::slots_per_epoch()),
|
||||
);
|
||||
|
||||
self.observed_blob_sidecars.write().prune(
|
||||
new_view
|
||||
.finalized_checkpoint
|
||||
.epoch
|
||||
.start_slot(T::EthSpec::slots_per_epoch()),
|
||||
);
|
||||
|
||||
self.snapshot_cache
|
||||
.try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
|
||||
.map(|mut snapshot_cache| {
|
||||
@ -1051,6 +1058,12 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
self.head_tracker.clone(),
|
||||
)?;
|
||||
|
||||
// Prune blobs in the background.
|
||||
if let Some(data_availability_boundary) = self.data_availability_boundary() {
|
||||
self.store_migrator
|
||||
.process_prune_blobs(data_availability_boundary);
|
||||
}
|
||||
|
||||
// Take a write-lock on the canonical head and signal for it to prune.
|
||||
self.canonical_head.fork_choice_write_lock().prune()?;
|
||||
|
||||
|
610
beacon_node/beacon_chain/src/data_availability_checker.rs
Normal file
@ -0,0 +1,610 @@
|
||||
use crate::blob_verification::{verify_kzg_for_blob, verify_kzg_for_blob_list, GossipVerifiedBlob};
|
||||
use crate::block_verification_types::{
|
||||
AvailabilityPendingExecutedBlock, AvailableExecutedBlock, RpcBlock,
|
||||
};
|
||||
pub use crate::data_availability_checker::availability_view::{
|
||||
AvailabilityView, GetCommitment, GetCommitments,
|
||||
};
|
||||
pub use crate::data_availability_checker::child_components::ChildComponents;
|
||||
use crate::data_availability_checker::overflow_lru_cache::OverflowLRUCache;
|
||||
use crate::data_availability_checker::processing_cache::ProcessingCache;
|
||||
use crate::{BeaconChain, BeaconChainTypes, BeaconStore};
|
||||
use kzg::Kzg;
|
||||
use parking_lot::RwLock;
|
||||
pub use processing_cache::ProcessingComponents;
|
||||
use slasher::test_utils::E;
|
||||
use slog::{debug, error, Logger};
|
||||
use slot_clock::SlotClock;
|
||||
use std::fmt;
|
||||
use std::fmt::Debug;
|
||||
use std::sync::Arc;
|
||||
use task_executor::TaskExecutor;
|
||||
use types::beacon_block_body::{KzgCommitmentOpts, KzgCommitments};
|
||||
use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList};
|
||||
use types::consts::deneb::MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS;
|
||||
use types::{BlobSidecarList, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot};
|
||||
|
||||
mod availability_view;
|
||||
mod child_components;
|
||||
mod error;
|
||||
mod overflow_lru_cache;
|
||||
mod processing_cache;
|
||||
mod state_lru_cache;
|
||||
|
||||
pub use error::{Error as AvailabilityCheckError, ErrorCategory as AvailabilityCheckErrorCategory};
|
||||
|
||||
/// The LRU Cache stores `PendingComponents` which can store up to
|
||||
/// `MAX_BLOBS_PER_BLOCK = 6` blobs each. A `BlobSidecar` is 0.131256 MB. So
|
||||
/// the maximum size of a `PendingComponents` is ~ 0.787536 MB. Setting this
|
||||
/// to 1024 means the maximum size of the cache is ~ 0.8 GB. But the cache
|
||||
/// will target a size of less than 75% of capacity.
|
||||
pub const OVERFLOW_LRU_CAPACITY: usize = 1024;
|
||||
/// Until tree-states is implemented, we can't store very many states in memory :(
|
||||
pub const STATE_LRU_CAPACITY: usize = 2;
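As a quick sanity check on the sizing comment above, the quoted figures multiply out as stated; the snippet below just reproduces that arithmetic (the per-blob size is the approximate value from the comment, not an exact serialized size):

// Back-of-envelope check of the cache sizing quoted above.
fn main() {
    let blob_sidecar_mb = 0.131256_f64;
    let max_blobs_per_block = 6.0_f64;
    let pending_components_mb = blob_sidecar_mb * max_blobs_per_block; // ~0.787536 MB
    let cache_mb = pending_components_mb * 1024.0; // ~806 MB, i.e. roughly 0.8 GB
    println!("{pending_components_mb:.6} MB per entry, {cache_mb:.1} MB at capacity");
}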
|
||||
|
||||
/// This includes a cache for any blocks or blobs that have been received over gossip or RPC
|
||||
/// and are awaiting more components before they can be imported. Additionally the
|
||||
/// `DataAvailabilityChecker` is responsible for KZG verification of block components as well as
|
||||
/// checking whether an "availability check" is required at all.
|
||||
pub struct DataAvailabilityChecker<T: BeaconChainTypes> {
|
||||
processing_cache: RwLock<ProcessingCache<T::EthSpec>>,
|
||||
availability_cache: Arc<OverflowLRUCache<T>>,
|
||||
slot_clock: T::SlotClock,
|
||||
kzg: Option<Arc<Kzg<<T::EthSpec as EthSpec>::Kzg>>>,
|
||||
log: Logger,
|
||||
spec: ChainSpec,
|
||||
}
|
||||
|
||||
/// This type is returned after adding a block / blob to the `DataAvailabilityChecker`.
|
||||
///
|
||||
/// Indicates if the block is fully `Available` or if we need blobs or blocks
|
||||
/// to "complete" the requirements for an `AvailableBlock`.
|
||||
#[derive(PartialEq)]
|
||||
pub enum Availability<T: EthSpec> {
|
||||
MissingComponents(Hash256),
|
||||
Available(Box<AvailableExecutedBlock<T>>),
|
||||
}
|
||||
|
||||
impl<T: EthSpec> Debug for Availability<T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match self {
|
||||
Self::MissingComponents(block_root) => {
|
||||
write!(f, "MissingComponents({})", block_root)
|
||||
}
|
||||
Self::Available(block) => write!(f, "Available({:?})", block.import_data.block_root),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
|
||||
pub fn new(
|
||||
slot_clock: T::SlotClock,
|
||||
kzg: Option<Arc<Kzg<<T::EthSpec as EthSpec>::Kzg>>>,
|
||||
store: BeaconStore<T>,
|
||||
log: &Logger,
|
||||
spec: ChainSpec,
|
||||
) -> Result<Self, AvailabilityCheckError> {
|
||||
let overflow_cache = OverflowLRUCache::new(OVERFLOW_LRU_CAPACITY, store, spec.clone())?;
|
||||
Ok(Self {
|
||||
processing_cache: <_>::default(),
|
||||
availability_cache: Arc::new(overflow_cache),
|
||||
slot_clock,
|
||||
log: log.clone(),
|
||||
kzg,
|
||||
spec,
|
||||
})
|
||||
}
|
||||
|
||||
/// Checks if the given block root is cached.
|
||||
pub fn has_block(&self, block_root: &Hash256) -> bool {
|
||||
self.processing_cache.read().has_block(block_root)
|
||||
}
|
||||
|
||||
/// Get the processing info for a block.
|
||||
pub fn get_processing_components(
|
||||
&self,
|
||||
block_root: Hash256,
|
||||
) -> Option<ProcessingComponents<T::EthSpec>> {
|
||||
self.processing_cache.read().get(&block_root).cloned()
|
||||
}
|
||||
|
||||
/// A `MissingBlobs::BlobsNotRequired` return value indicates blobs are not required.
|
||||
///
|
||||
/// If there's no block, all possible ids that don't exist in the given blobs will be returned.
|
||||
/// If there are no blobs, all possible ids will be returned.
|
||||
pub fn get_missing_blob_ids<V: AvailabilityView<T::EthSpec>>(
|
||||
&self,
|
||||
block_root: Hash256,
|
||||
availability_view: &V,
|
||||
) -> MissingBlobs {
|
||||
let Some(current_slot) = self.slot_clock.now_or_genesis() else {
|
||||
error!(
|
||||
self.log,
|
||||
"Failed to read slot clock when checking for missing blob ids"
|
||||
);
|
||||
return MissingBlobs::BlobsNotRequired;
|
||||
};
|
||||
|
||||
let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
|
||||
|
||||
if self.da_check_required_for_epoch(current_epoch) {
|
||||
match availability_view.get_cached_block() {
|
||||
Some(cached_block) => {
|
||||
let block_commitments = cached_block.get_commitments();
|
||||
let blob_commitments = availability_view.get_cached_blobs();
|
||||
|
||||
let num_blobs_expected = block_commitments.len();
|
||||
let mut blob_ids = Vec::with_capacity(num_blobs_expected);
|
||||
|
||||
// Zip here will always limit the number of iterations to the size of
|
||||
// `block_commitments` because `blob_commitments` will always be populated
|
||||
// with `Option` values up to `MAX_BLOBS_PER_BLOCK`.
|
||||
for (index, (block_commitment, blob_commitment_opt)) in block_commitments
|
||||
.into_iter()
|
||||
.zip(blob_commitments.iter())
|
||||
.enumerate()
|
||||
{
|
||||
// Always add a missing blob.
|
||||
let Some(blob_commitment) = blob_commitment_opt else {
|
||||
blob_ids.push(BlobIdentifier {
|
||||
block_root,
|
||||
index: index as u64,
|
||||
});
|
||||
continue;
|
||||
};
|
||||
|
||||
let blob_commitment = *blob_commitment.get_commitment();
|
||||
|
||||
// Check for consistency, but this shouldn't happen, an availability view
|
||||
// should guarantee consistency.
|
||||
if blob_commitment != block_commitment {
|
||||
error!(self.log,
|
||||
"Inconsistent availability view";
|
||||
"block_root" => ?block_root,
|
||||
"block_commitment" => ?block_commitment,
|
||||
"blob_commitment" => ?blob_commitment,
|
||||
"index" => index
|
||||
);
|
||||
blob_ids.push(BlobIdentifier {
|
||||
block_root,
|
||||
index: index as u64,
|
||||
});
|
||||
}
|
||||
}
|
||||
MissingBlobs::KnownMissing(blob_ids)
|
||||
}
|
||||
None => {
|
||||
MissingBlobs::PossibleMissing(BlobIdentifier::get_all_blob_ids::<E>(block_root))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
MissingBlobs::BlobsNotRequired
|
||||
}
|
||||
}
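To make the lookup above concrete: if the cached block carries three commitments and blobs are cached at indices 0 and 2, the only missing id is index 1; with no cached block, every index is possibly missing. A simplified, self-contained sketch, with plain indices standing in for `BlobIdentifier`s and commitments:

// Stand-in sketch of the "which blob ids are missing?" computation described above.
fn missing_indices(num_commitments: Option<usize>, cached_blobs: &[Option<u64>]) -> Vec<usize> {
    match num_commitments {
        // Block known: a blob is missing wherever a commitment has no cached blob.
        Some(expected) => (0..expected)
            .filter(|i| cached_blobs.get(*i).map_or(true, |blob| blob.is_none()))
            .collect(),
        // No block yet: every index up to the maximum is possibly missing.
        None => (0..cached_blobs.len()).collect(),
    }
}

fn main() {
    let cached = [Some(10), None, Some(12), None, None, None];
    assert_eq!(missing_indices(Some(3), &cached), vec![1]);
    assert_eq!(missing_indices(None, &cached), vec![0, 1, 2, 3, 4, 5]);
}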
|
||||
|
||||
/// Get a blob from the availability cache.
|
||||
pub fn get_blob(
|
||||
&self,
|
||||
blob_id: &BlobIdentifier,
|
||||
) -> Result<Option<Arc<BlobSidecar<T::EthSpec>>>, AvailabilityCheckError> {
|
||||
self.availability_cache.peek_blob(blob_id)
|
||||
}
|
||||
|
||||
/// Put a list of blobs received via RPC into the availability cache. This performs KZG
|
||||
/// verification on the blobs in the list.
|
||||
pub fn put_rpc_blobs(
|
||||
&self,
|
||||
block_root: Hash256,
|
||||
blobs: FixedBlobSidecarList<T::EthSpec>,
|
||||
) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> {
|
||||
let mut verified_blobs = vec![];
|
||||
if let Some(kzg) = self.kzg.as_ref() {
|
||||
for blob in blobs.iter().flatten() {
|
||||
verified_blobs.push(verify_kzg_for_blob(blob.clone(), kzg)?)
|
||||
}
|
||||
} else {
|
||||
return Err(AvailabilityCheckError::KzgNotInitialized);
|
||||
};
|
||||
self.availability_cache
|
||||
.put_kzg_verified_blobs(block_root, verified_blobs)
|
||||
}
|
||||
|
||||
/// This first validates the KZG commitments included in the blob sidecar.
|
||||
/// Check if we've cached other blobs for this block. If it completes a set and we also
|
||||
/// have a block cached, return the `Availability` variant triggering block import.
|
||||
/// Otherwise cache the blob sidecar.
|
||||
///
|
||||
/// This should only accept gossip verified blobs, so we should not have to worry about dupes.
|
||||
pub fn put_gossip_blob(
|
||||
&self,
|
||||
gossip_blob: GossipVerifiedBlob<T>,
|
||||
) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> {
|
||||
// Verify the KZG commitments.
|
||||
let kzg_verified_blob = if let Some(kzg) = self.kzg.as_ref() {
|
||||
verify_kzg_for_blob(gossip_blob.to_blob(), kzg)?
|
||||
} else {
|
||||
return Err(AvailabilityCheckError::KzgNotInitialized);
|
||||
};
|
||||
|
||||
self.availability_cache
|
||||
.put_kzg_verified_blobs(kzg_verified_blob.block_root(), vec![kzg_verified_blob])
|
||||
}
|
||||
|
||||
/// Check if we have all the blobs for a block. Returns `Availability` which has information
|
||||
/// about whether all components have been received or more are required.
|
||||
pub fn put_pending_executed_block(
|
||||
&self,
|
||||
executed_block: AvailabilityPendingExecutedBlock<T::EthSpec>,
|
||||
) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> {
|
||||
self.availability_cache
|
||||
.put_pending_executed_block(executed_block)
|
||||
}
|
||||
|
||||
/// Checks if a block is available, returns a `MaybeAvailableBlock` that may include the fully
|
||||
/// available block.
|
||||
pub fn check_rpc_block_availability(
|
||||
&self,
|
||||
block: RpcBlock<T::EthSpec>,
|
||||
) -> Result<MaybeAvailableBlock<T::EthSpec>, AvailabilityCheckError> {
|
||||
let (block_root, block, blobs) = block.deconstruct();
|
||||
match blobs {
|
||||
None => {
|
||||
if self.blobs_required_for_block(&block) {
|
||||
Ok(MaybeAvailableBlock::AvailabilityPending { block_root, block })
|
||||
} else {
|
||||
Ok(MaybeAvailableBlock::Available(AvailableBlock {
|
||||
block_root,
|
||||
block,
|
||||
blobs: None,
|
||||
}))
|
||||
}
|
||||
}
|
||||
Some(blob_list) => {
|
||||
let verified_blobs = if self.blobs_required_for_block(&block) {
|
||||
let kzg = self
|
||||
.kzg
|
||||
.as_ref()
|
||||
.ok_or(AvailabilityCheckError::KzgNotInitialized)?;
|
||||
verify_kzg_for_blob_list(&blob_list, kzg)?;
|
||||
Some(blob_list)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
Ok(MaybeAvailableBlock::Available(AvailableBlock {
|
||||
block_root,
|
||||
block,
|
||||
blobs: verified_blobs,
|
||||
}))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Determines the blob requirements for a block. If the block is pre-deneb, no blobs are required.
|
||||
/// If the block's epoch is from prior to the data availability boundary, no blobs are required.
|
||||
fn blobs_required_for_block(&self, block: &SignedBeaconBlock<T::EthSpec>) -> bool {
|
||||
block.num_expected_blobs() > 0 && self.da_check_required_for_epoch(block.epoch())
|
||||
}
|
||||
|
||||
/// Adds block commitments to the processing cache. These commitments are unverified but caching
|
||||
/// them here is useful to avoid duplicate downloads of blocks, as well as understanding
|
||||
/// our blob download requirements.
|
||||
pub fn notify_block_commitments(
|
||||
&self,
|
||||
slot: Slot,
|
||||
block_root: Hash256,
|
||||
commitments: KzgCommitments<T::EthSpec>,
|
||||
) {
|
||||
self.processing_cache
|
||||
.write()
|
||||
.entry(block_root)
|
||||
.or_insert_with(|| ProcessingComponents::new(slot))
|
||||
.merge_block(commitments);
|
||||
}
|
||||
|
||||
/// Add a single blob commitment to the processing cache. This commitment is unverified but caching
|
||||
/// it here is useful to avoid duplicate downloads of blobs, as well as understanding
|
||||
/// our block and blob download requirements.
|
||||
pub fn notify_gossip_blob(
|
||||
&self,
|
||||
slot: Slot,
|
||||
block_root: Hash256,
|
||||
blob: &GossipVerifiedBlob<T>,
|
||||
) {
|
||||
let index = blob.as_blob().index;
|
||||
let commitment = blob.as_blob().kzg_commitment;
|
||||
self.processing_cache
|
||||
.write()
|
||||
.entry(block_root)
|
||||
.or_insert_with(|| ProcessingComponents::new(slot))
|
||||
.merge_single_blob(index as usize, commitment);
|
||||
}
|
||||
|
||||
/// Adds blob commitments to the processing cache. These commitments are unverified but caching
|
||||
/// them here is useful to avoid duplicate downloads of blobs, as well as understanding
|
||||
/// our block and blob download requirements.
|
||||
pub fn notify_rpc_blobs(
|
||||
&self,
|
||||
slot: Slot,
|
||||
block_root: Hash256,
|
||||
blobs: &FixedBlobSidecarList<T::EthSpec>,
|
||||
) {
|
||||
let mut commitments = KzgCommitmentOpts::<T::EthSpec>::default();
|
||||
for blob in blobs.iter().flatten() {
|
||||
if let Some(commitment) = commitments.get_mut(blob.index as usize) {
|
||||
*commitment = Some(blob.kzg_commitment);
|
||||
}
|
||||
}
|
||||
self.processing_cache
|
||||
.write()
|
||||
.entry(block_root)
|
||||
.or_insert_with(|| ProcessingComponents::new(slot))
|
||||
.merge_blobs(commitments);
|
||||
}
|
||||
|
||||
/// Clears the block and all blobs from the processing cache for a given root if they exist.
|
||||
pub fn remove_notified(&self, block_root: &Hash256) {
|
||||
self.processing_cache.write().remove(block_root)
|
||||
}
|
||||
|
||||
/// Gather all block roots for which we are not currently processing all components for the
|
||||
/// given slot.
|
||||
pub fn incomplete_processing_components(&self, slot: Slot) -> Vec<Hash256> {
|
||||
self.processing_cache
|
||||
.read()
|
||||
.incomplete_processing_components(slot)
|
||||
}
|
||||
|
||||
/// Determines whether we are at least the `single_lookup_delay` duration into the given slot.
|
||||
/// If we are not currently in the Deneb fork, this delay is not considered.
|
||||
///
|
||||
/// The `single_lookup_delay` is the duration we wait for blocks or blobs to arrive over
|
||||
/// gossip before making single block or blob requests. This is to minimize the number of
|
||||
/// single lookup requests we end up making.
|
||||
pub fn should_delay_lookup(&self, slot: Slot) -> bool {
|
||||
if !self.is_deneb() {
|
||||
return false;
|
||||
}
|
||||
|
||||
let current_or_future_slot = self
|
||||
.slot_clock
|
||||
.now()
|
||||
.map_or(false, |current_slot| current_slot <= slot);
|
||||
|
||||
let delay_threshold_unmet = self
|
||||
.slot_clock
|
||||
.millis_from_current_slot_start()
|
||||
.map_or(false, |millis_into_slot| {
|
||||
millis_into_slot < self.slot_clock.single_lookup_delay()
|
||||
});
|
||||
current_or_future_slot && delay_threshold_unmet
|
||||
}
|
||||
|
||||
/// The epoch at which we require a data availability check in block processing.
|
||||
/// `None` if the `Deneb` fork is disabled.
|
||||
pub fn data_availability_boundary(&self) -> Option<Epoch> {
|
||||
self.spec.deneb_fork_epoch.and_then(|fork_epoch| {
|
||||
self.slot_clock
|
||||
.now()
|
||||
.map(|slot| slot.epoch(T::EthSpec::slots_per_epoch()))
|
||||
.map(|current_epoch| {
|
||||
std::cmp::max(
|
||||
fork_epoch,
|
||||
current_epoch.saturating_sub(MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS),
|
||||
)
|
||||
})
|
||||
})
|
||||
}
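The boundary is simply the later of the Deneb fork epoch and the current epoch minus `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS`. A hedged numeric example, assuming the spec constant of 4096 epochs and invented epoch numbers:

// Worked example of the data availability boundary formula above. The 4096 figure is
// the assumed value of MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS; the epochs are invented.
fn main() {
    let min_epochs_for_blob_sidecars_requests: u64 = 4096;
    let deneb_fork_epoch: u64 = 100;

    // Shortly after the fork, the boundary is pinned at the fork epoch.
    let boundary = std::cmp::max(
        deneb_fork_epoch,
        2_000u64.saturating_sub(min_epochs_for_blob_sidecars_requests),
    );
    assert_eq!(boundary, 100);

    // Once enough epochs have passed, it trails the current epoch by exactly 4096.
    let boundary = std::cmp::max(
        deneb_fork_epoch,
        10_000u64.saturating_sub(min_epochs_for_blob_sidecars_requests),
    );
    assert_eq!(boundary, 5_904);
}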
|
||||
|
||||
/// Returns true if the given epoch lies within the da boundary and false otherwise.
|
||||
pub fn da_check_required_for_epoch(&self, block_epoch: Epoch) -> bool {
|
||||
self.data_availability_boundary()
|
||||
.map_or(false, |da_epoch| block_epoch >= da_epoch)
|
||||
}
|
||||
|
||||
/// Returns `true` if the current epoch is greater than or equal to the `Deneb` epoch.
|
||||
pub fn is_deneb(&self) -> bool {
|
||||
self.slot_clock.now().map_or(false, |slot| {
|
||||
self.spec.deneb_fork_epoch.map_or(false, |deneb_epoch| {
|
||||
let now_epoch = slot.epoch(T::EthSpec::slots_per_epoch());
|
||||
now_epoch >= deneb_epoch
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
/// Persist all in memory components to disk
|
||||
pub fn persist_all(&self) -> Result<(), AvailabilityCheckError> {
|
||||
self.availability_cache.write_all_to_disk()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn start_availability_cache_maintenance_service<T: BeaconChainTypes>(
|
||||
executor: TaskExecutor,
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
) {
|
||||
// this cache only needs to be maintained if deneb is configured
|
||||
if chain.spec.deneb_fork_epoch.is_some() {
|
||||
let overflow_cache = chain.data_availability_checker.availability_cache.clone();
|
||||
executor.spawn(
|
||||
async move { availability_cache_maintenance_service(chain, overflow_cache).await },
|
||||
"availability_cache_service",
|
||||
);
|
||||
} else {
|
||||
debug!(
|
||||
chain.log,
|
||||
"Deneb fork not configured, not starting availability cache maintenance service"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
async fn availability_cache_maintenance_service<T: BeaconChainTypes>(
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
overflow_cache: Arc<OverflowLRUCache<T>>,
|
||||
) {
|
||||
let epoch_duration = chain.slot_clock.slot_duration() * T::EthSpec::slots_per_epoch() as u32;
|
||||
loop {
|
||||
match chain
|
||||
.slot_clock
|
||||
.duration_to_next_epoch(T::EthSpec::slots_per_epoch())
|
||||
{
|
||||
Some(duration) => {
|
||||
// this service should run 3/4 of the way through the epoch
|
||||
let additional_delay = (epoch_duration * 3) / 4;
|
||||
tokio::time::sleep(duration + additional_delay).await;
|
||||
|
||||
let deneb_fork_epoch = match chain.spec.deneb_fork_epoch {
|
||||
Some(epoch) => epoch,
|
||||
None => break, // shutdown service if deneb fork epoch not set
|
||||
};
|
||||
|
||||
debug!(
|
||||
chain.log,
|
||||
"Availability cache maintenance service firing";
|
||||
);
|
||||
|
||||
let current_epoch = match chain
|
||||
.slot_clock
|
||||
.now()
|
||||
.map(|slot| slot.epoch(T::EthSpec::slots_per_epoch()))
|
||||
{
|
||||
Some(epoch) => epoch,
|
||||
None => continue, // we'll have to try again next time I suppose..
|
||||
};
|
||||
|
||||
if current_epoch < deneb_fork_epoch {
|
||||
// we are not in deneb yet
|
||||
continue;
|
||||
}
|
||||
|
||||
let finalized_epoch = chain
|
||||
.canonical_head
|
||||
.fork_choice_read_lock()
|
||||
.finalized_checkpoint()
|
||||
.epoch;
|
||||
// any data belonging to an epoch before this should be pruned
|
||||
let cutoff_epoch = std::cmp::max(
|
||||
finalized_epoch + 1,
|
||||
std::cmp::max(
|
||||
current_epoch.saturating_sub(MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS),
|
||||
deneb_fork_epoch,
|
||||
),
|
||||
);
|
||||
|
||||
if let Err(e) = overflow_cache.do_maintenance(cutoff_epoch) {
|
||||
error!(chain.log, "Failed to maintain availability cache"; "error" => ?e);
|
||||
}
|
||||
}
|
||||
None => {
|
||||
error!(chain.log, "Failed to read slot clock");
|
||||
// If we can't read the slot clock, just wait another slot.
|
||||
tokio::time::sleep(chain.slot_clock.slot_duration()).await;
|
||||
}
|
||||
};
|
||||
}
|
||||
}
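The pruning cutoff computed inside the loop above is the larger of `finalized_epoch + 1` and the start of the data availability window (clamped to the Deneb fork epoch); anything from an earlier epoch is eligible for pruning. A worked example with invented epochs, again assuming 4096 for `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS`:

// Worked example of the cutoff_epoch calculation used by the maintenance service above.
fn main() {
    let min_epochs: u64 = 4096;
    let deneb_fork_epoch: u64 = 100;
    let finalized_epoch: u64 = 9_000;
    let current_epoch: u64 = 10_000;

    let cutoff_epoch = std::cmp::max(
        finalized_epoch + 1,
        std::cmp::max(current_epoch.saturating_sub(min_epochs), deneb_fork_epoch),
    );
    // max(9_001, max(5_904, 100)) = 9_001: data from epochs before 9_001 may be pruned.
    assert_eq!(cutoff_epoch, 9_001);
}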
|
||||
|
||||
/// A fully available block that is ready to be imported into fork choice.
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub struct AvailableBlock<E: EthSpec> {
|
||||
block_root: Hash256,
|
||||
block: Arc<SignedBeaconBlock<E>>,
|
||||
blobs: Option<BlobSidecarList<E>>,
|
||||
}
|
||||
|
||||
impl<E: EthSpec> AvailableBlock<E> {
|
||||
pub fn block(&self) -> &SignedBeaconBlock<E> {
|
||||
&self.block
|
||||
}
|
||||
pub fn block_cloned(&self) -> Arc<SignedBeaconBlock<E>> {
|
||||
self.block.clone()
|
||||
}
|
||||
|
||||
pub fn blobs(&self) -> Option<&BlobSidecarList<E>> {
|
||||
self.blobs.as_ref()
|
||||
}
|
||||
|
||||
pub fn deconstruct(
|
||||
self,
|
||||
) -> (
|
||||
Hash256,
|
||||
Arc<SignedBeaconBlock<E>>,
|
||||
Option<BlobSidecarList<E>>,
|
||||
) {
|
||||
let AvailableBlock {
|
||||
block_root,
|
||||
block,
|
||||
blobs,
|
||||
} = self;
|
||||
(block_root, block, blobs)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum MaybeAvailableBlock<E: EthSpec> {
|
||||
/// This variant is fully available.
|
||||
/// i.e. for pre-deneb blocks it contains a `SignedBeaconBlock` with no blobs, and for
|
||||
/// post-deneb blocks it contains a `SignedBeaconBlock` plus any blobs required for availability.
|
||||
Available(AvailableBlock<E>),
|
||||
/// This variant is not fully available and requires blobs to become fully available.
|
||||
AvailabilityPending {
|
||||
block_root: Hash256,
|
||||
block: Arc<SignedBeaconBlock<E>>,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum MissingBlobs {
|
||||
/// We know for certain these blobs are missing.
|
||||
KnownMissing(Vec<BlobIdentifier>),
|
||||
/// We think these blobs might be missing.
|
||||
PossibleMissing(Vec<BlobIdentifier>),
|
||||
/// Blobs are not required.
|
||||
BlobsNotRequired,
|
||||
}
|
||||
|
||||
impl MissingBlobs {
|
||||
pub fn new_without_block(block_root: Hash256, is_deneb: bool) -> Self {
|
||||
if is_deneb {
|
||||
MissingBlobs::PossibleMissing(BlobIdentifier::get_all_blob_ids::<E>(block_root))
|
||||
} else {
|
||||
MissingBlobs::BlobsNotRequired
|
||||
}
|
||||
}
|
||||
pub fn is_empty(&self) -> bool {
|
||||
match self {
|
||||
MissingBlobs::KnownMissing(v) => v.is_empty(),
|
||||
MissingBlobs::PossibleMissing(v) => v.is_empty(),
|
||||
MissingBlobs::BlobsNotRequired => true,
|
||||
}
|
||||
}
|
||||
pub fn contains(&self, blob_id: &BlobIdentifier) -> bool {
|
||||
match self {
|
||||
MissingBlobs::KnownMissing(v) => v.contains(blob_id),
|
||||
MissingBlobs::PossibleMissing(v) => v.contains(blob_id),
|
||||
MissingBlobs::BlobsNotRequired => false,
|
||||
}
|
||||
}
|
||||
pub fn remove(&mut self, blob_id: &BlobIdentifier) {
|
||||
match self {
|
||||
MissingBlobs::KnownMissing(v) => v.retain(|id| id != blob_id),
|
||||
MissingBlobs::PossibleMissing(v) => v.retain(|id| id != blob_id),
|
||||
MissingBlobs::BlobsNotRequired => {}
|
||||
}
|
||||
}
|
||||
pub fn indices(&self) -> Vec<u64> {
|
||||
match self {
|
||||
MissingBlobs::KnownMissing(v) => v.iter().map(|id| id.index).collect(),
|
||||
MissingBlobs::PossibleMissing(v) => v.iter().map(|id| id.index).collect(),
|
||||
MissingBlobs::BlobsNotRequired => vec![],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<Vec<BlobIdentifier>> for MissingBlobs {
|
||||
fn into(self) -> Vec<BlobIdentifier> {
|
||||
match self {
|
||||
MissingBlobs::KnownMissing(v) => v,
|
||||
MissingBlobs::PossibleMissing(v) => v,
|
||||
MissingBlobs::BlobsNotRequired => vec![],
|
||||
}
|
||||
}
|
||||
}
|
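// A minimal usage sketch (the surrounding lookup logic is assumed and not part of
// this diff): mark a received blob identifier as no longer missing and report
// whether the request is now complete, using only the methods defined above.
fn example_on_blob_received(missing: &mut MissingBlobs, received: &BlobIdentifier) -> bool {
    // No-op for `BlobsNotRequired`; otherwise drops the matching identifier.
    missing.remove(received);
    missing.is_empty()
}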
@ -0,0 +1,568 @@
|
||||
use super::child_components::ChildComponents;
|
||||
use super::state_lru_cache::DietAvailabilityPendingExecutedBlock;
|
||||
use crate::blob_verification::KzgVerifiedBlob;
|
||||
use crate::block_verification_types::AsBlock;
|
||||
use crate::data_availability_checker::overflow_lru_cache::PendingComponents;
|
||||
use crate::data_availability_checker::ProcessingComponents;
|
||||
use kzg::KzgCommitment;
|
||||
use ssz_types::FixedVector;
|
||||
use std::sync::Arc;
|
||||
use types::beacon_block_body::KzgCommitments;
|
||||
use types::{BlobSidecar, EthSpec, SignedBeaconBlock};
|
||||
|
||||
/// Defines an interface for managing data availability with two key invariants:
|
||||
///
|
||||
/// 1. If we haven't seen a block yet, we will insert the first blob for a given (block_root, index)
|
||||
/// but we won't insert subsequent blobs for the same (block_root, index) if they have a different
|
||||
/// commitment.
|
||||
/// 2. On block insertion, any non-matching blob commitments are evicted.
|
||||
///
|
||||
/// Types implementing this trait can be used for validating and managing availability
|
||||
/// of blocks and blobs in a cache-like data structure.
|
||||
pub trait AvailabilityView<E: EthSpec> {
|
||||
/// The type representing a block in the implementation.
|
||||
type BlockType: GetCommitments<E>;
|
||||
|
||||
/// The type representing a blob in the implementation. Must implement `Clone`.
|
||||
type BlobType: Clone + GetCommitment<E>;
|
||||
|
||||
/// Returns an immutable reference to the cached block.
|
||||
fn get_cached_block(&self) -> &Option<Self::BlockType>;
|
||||
|
||||
/// Returns an immutable reference to the fixed vector of cached blobs.
|
||||
fn get_cached_blobs(&self) -> &FixedVector<Option<Self::BlobType>, E::MaxBlobsPerBlock>;
|
||||
|
||||
/// Returns a mutable reference to the cached block.
|
||||
fn get_cached_block_mut(&mut self) -> &mut Option<Self::BlockType>;
|
||||
|
||||
/// Returns a mutable reference to the fixed vector of cached blobs.
|
||||
fn get_cached_blobs_mut(
|
||||
&mut self,
|
||||
) -> &mut FixedVector<Option<Self::BlobType>, E::MaxBlobsPerBlock>;
|
||||
|
||||
/// Checks if a block exists in the cache.
|
||||
///
|
||||
/// Returns:
|
||||
/// - `true` if a block exists.
|
||||
/// - `false` otherwise.
|
||||
fn block_exists(&self) -> bool {
|
||||
self.get_cached_block().is_some()
|
||||
}
|
||||
|
||||
/// Checks if a blob exists at the given index in the cache.
|
||||
///
|
||||
/// Returns:
|
||||
/// - `true` if a blob exists at the given index.
|
||||
/// - `false` otherwise.
|
||||
fn blob_exists(&self, blob_index: usize) -> bool {
|
||||
self.get_cached_blobs()
|
||||
.get(blob_index)
|
||||
.map(|b| b.is_some())
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Returns the number of blobs that are expected to be present. Returns `None` if we don't have a
|
||||
/// block.
|
||||
///
|
||||
/// This corresponds to the number of commitments that are present in a block.
|
||||
fn num_expected_blobs(&self) -> Option<usize> {
|
||||
self.get_cached_block()
|
||||
.as_ref()
|
||||
.map(|b| b.get_commitments().len())
|
||||
}
|
||||
|
||||
/// Returns the number of blobs that have been received and are stored in the cache.
|
||||
fn num_received_blobs(&self) -> usize {
|
||||
self.get_cached_blobs().iter().flatten().count()
|
||||
}
|
||||
|
||||
/// Inserts a block into the cache.
|
||||
fn insert_block(&mut self, block: Self::BlockType) {
|
||||
*self.get_cached_block_mut() = Some(block)
|
||||
}
|
||||
|
||||
/// Inserts a blob at a specific index in the cache.
|
||||
///
|
||||
/// An existing blob at the index will be replaced.
|
||||
fn insert_blob_at_index(&mut self, blob_index: usize, blob: Self::BlobType) {
|
||||
if let Some(b) = self.get_cached_blobs_mut().get_mut(blob_index) {
|
||||
*b = Some(blob);
|
||||
}
|
||||
}
|
||||
|
||||
/// Merges a given set of blobs into the cache.
|
||||
///
|
||||
/// Blobs are only inserted if:
|
||||
/// 1. The blob entry at the index is empty and no block exists.
|
||||
/// 2. The block exists and its commitment matches the blob's commitment.
|
||||
fn merge_blobs(&mut self, blobs: FixedVector<Option<Self::BlobType>, E::MaxBlobsPerBlock>) {
|
||||
for (index, blob) in blobs.iter().cloned().enumerate() {
|
||||
let Some(blob) = blob else { continue };
|
||||
self.merge_single_blob(index, blob);
|
||||
}
|
||||
}
|
||||
|
||||
/// Merges a single blob into the cache.
|
||||
///
|
||||
/// Blobs are only inserted if:
|
||||
/// 1. The blob entry at the index is empty and no block exists, or
|
||||
/// 2. The block exists and its commitment matches the blob's commitment.
|
||||
fn merge_single_blob(&mut self, index: usize, blob: Self::BlobType) {
|
||||
let commitment = *blob.get_commitment();
|
||||
if let Some(cached_block) = self.get_cached_block() {
|
||||
let block_commitment_opt = cached_block.get_commitments().get(index).copied();
|
||||
if let Some(block_commitment) = block_commitment_opt {
|
||||
if block_commitment == commitment {
|
||||
self.insert_blob_at_index(index, blob)
|
||||
}
|
||||
}
|
||||
} else if !self.blob_exists(index) {
|
||||
self.insert_blob_at_index(index, blob)
|
||||
}
|
||||
}
|
||||
|
||||
/// Inserts a new block and revalidates the existing blobs against it.
|
||||
///
|
||||
/// Blobs that don't match the new block's commitments are evicted.
|
||||
fn merge_block(&mut self, block: Self::BlockType) {
|
||||
self.insert_block(block);
|
||||
let reinsert = std::mem::take(self.get_cached_blobs_mut());
|
||||
self.merge_blobs(reinsert);
|
||||
}
|
||||
|
||||
/// Checks if the block and all of its expected blobs are available in the cache.
|
||||
///
|
||||
/// Returns `true` if both the block exists and the number of received blobs matches the number
|
||||
/// of expected blobs.
|
||||
fn is_available(&self) -> bool {
|
||||
if let Some(num_expected_blobs) = self.num_expected_blobs() {
|
||||
num_expected_blobs == self.num_received_blobs()
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Implements the `AvailabilityView` trait for a given struct.
|
||||
///
|
||||
/// - `$struct_name`: The name of the struct for which to implement `AvailabilityView`.
|
||||
/// - `$block_type`: The type to use for `BlockType` in the `AvailabilityView` trait.
|
||||
/// - `$blob_type`: The type to use for `BlobType` in the `AvailabilityView` trait.
|
||||
/// - `$block_field`: The field name in the struct that holds the cached block.
|
||||
/// - `$blob_field`: The field name in the struct that holds the cached blobs.
|
||||
#[macro_export]
|
||||
macro_rules! impl_availability_view {
|
||||
($struct_name:ident, $block_type:ty, $blob_type:ty, $block_field:ident, $blob_field:ident) => {
|
||||
impl<E: EthSpec> AvailabilityView<E> for $struct_name<E> {
|
||||
type BlockType = $block_type;
|
||||
type BlobType = $blob_type;
|
||||
|
||||
fn get_cached_block(&self) -> &Option<Self::BlockType> {
|
||||
&self.$block_field
|
||||
}
|
||||
|
||||
fn get_cached_blobs(
|
||||
&self,
|
||||
) -> &FixedVector<Option<Self::BlobType>, E::MaxBlobsPerBlock> {
|
||||
&self.$blob_field
|
||||
}
|
||||
|
||||
fn get_cached_block_mut(&mut self) -> &mut Option<Self::BlockType> {
|
||||
&mut self.$block_field
|
||||
}
|
||||
|
||||
fn get_cached_blobs_mut(
|
||||
&mut self,
|
||||
) -> &mut FixedVector<Option<Self::BlobType>, E::MaxBlobsPerBlock> {
|
||||
&mut self.$blob_field
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
impl_availability_view!(
|
||||
ProcessingComponents,
|
||||
KzgCommitments<E>,
|
||||
KzgCommitment,
|
||||
block_commitments,
|
||||
blob_commitments
|
||||
);
|
||||
|
||||
impl_availability_view!(
|
||||
PendingComponents,
|
||||
DietAvailabilityPendingExecutedBlock<E>,
|
||||
KzgVerifiedBlob<E>,
|
||||
executed_block,
|
||||
verified_blobs
|
||||
);
|
||||
|
||||
impl_availability_view!(
|
||||
ChildComponents,
|
||||
Arc<SignedBeaconBlock<E>>,
|
||||
Arc<BlobSidecar<E>>,
|
||||
downloaded_block,
|
||||
downloaded_blobs
|
||||
);
|
||||
|
||||
pub trait GetCommitments<E: EthSpec> {
|
||||
fn get_commitments(&self) -> KzgCommitments<E>;
|
||||
}
|
||||
|
||||
pub trait GetCommitment<E: EthSpec> {
|
||||
fn get_commitment(&self) -> &KzgCommitment;
|
||||
}
|
||||
|
||||
// These implementations are required to implement `AvailabilityView` for `ProcessingComponents`.
|
||||
impl<E: EthSpec> GetCommitments<E> for KzgCommitments<E> {
|
||||
fn get_commitments(&self) -> KzgCommitments<E> {
|
||||
self.clone()
|
||||
}
|
||||
}
|
||||
impl<E: EthSpec> GetCommitment<E> for KzgCommitment {
|
||||
fn get_commitment(&self) -> &KzgCommitment {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
// These implementations are required to implement `AvailabilityView` for `PendingComponents`.
|
||||
impl<E: EthSpec> GetCommitments<E> for DietAvailabilityPendingExecutedBlock<E> {
|
||||
fn get_commitments(&self) -> KzgCommitments<E> {
|
||||
self.as_block()
|
||||
.message()
|
||||
.body()
|
||||
.blob_kzg_commitments()
|
||||
.cloned()
|
||||
.unwrap_or_default()
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: EthSpec> GetCommitment<E> for KzgVerifiedBlob<E> {
|
||||
fn get_commitment(&self) -> &KzgCommitment {
|
||||
&self.as_blob().kzg_commitment
|
||||
}
|
||||
}
|
||||
|
||||
// These implementations are required to implement `AvailabilityView` for `ChildComponents`.
|
||||
impl<E: EthSpec> GetCommitments<E> for Arc<SignedBeaconBlock<E>> {
|
||||
fn get_commitments(&self) -> KzgCommitments<E> {
|
||||
self.message()
|
||||
.body()
|
||||
.blob_kzg_commitments()
|
||||
.ok()
|
||||
.cloned()
|
||||
.unwrap_or_default()
|
||||
}
|
||||
}
|
||||
impl<E: EthSpec> GetCommitment<E> for Arc<BlobSidecar<E>> {
|
||||
fn get_commitment(&self) -> &KzgCommitment {
|
||||
&self.kzg_commitment
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use super::*;
|
||||
use crate::block_verification_types::BlockImportData;
|
||||
use crate::eth1_finalization_cache::Eth1FinalizationData;
|
||||
use crate::test_utils::{generate_rand_block_and_blobs, NumBlobs};
|
||||
use crate::AvailabilityPendingExecutedBlock;
|
||||
use crate::PayloadVerificationOutcome;
|
||||
use eth2_network_config::get_trusted_setup;
|
||||
use fork_choice::PayloadVerificationStatus;
|
||||
use kzg::{Kzg, TrustedSetup};
|
||||
use rand::rngs::StdRng;
|
||||
use rand::SeedableRng;
|
||||
use state_processing::ConsensusContext;
|
||||
use types::test_utils::TestRandom;
|
||||
use types::{BeaconState, ChainSpec, ForkName, MainnetEthSpec, Slot};
|
||||
|
||||
type E = MainnetEthSpec;
|
||||
|
||||
type Setup<E> = (
|
||||
SignedBeaconBlock<E>,
|
||||
FixedVector<Option<BlobSidecar<E>>, <E as EthSpec>::MaxBlobsPerBlock>,
|
||||
FixedVector<Option<BlobSidecar<E>>, <E as EthSpec>::MaxBlobsPerBlock>,
|
||||
);
|
||||
|
||||
pub fn pre_setup() -> Setup<E> {
|
||||
let trusted_setup: TrustedSetup =
|
||||
serde_json::from_reader(get_trusted_setup::<<E as EthSpec>::Kzg>()).unwrap();
|
||||
let kzg = Kzg::new_from_trusted_setup(trusted_setup).unwrap();
|
||||
|
||||
let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64);
|
||||
let (block, blobs_vec) =
|
||||
generate_rand_block_and_blobs::<E>(ForkName::Deneb, NumBlobs::Random, &kzg, &mut rng);
|
||||
let mut blobs: FixedVector<_, <E as EthSpec>::MaxBlobsPerBlock> = FixedVector::default();
|
||||
|
||||
for blob in blobs_vec {
|
||||
if let Some(b) = blobs.get_mut(blob.index as usize) {
|
||||
*b = Some(blob);
|
||||
}
|
||||
}
|
||||
|
||||
let mut invalid_blobs: FixedVector<
|
||||
Option<BlobSidecar<E>>,
|
||||
<E as EthSpec>::MaxBlobsPerBlock,
|
||||
> = FixedVector::default();
|
||||
for (index, blob) in blobs.iter().enumerate() {
|
||||
let mut invalid_blob_opt = blob.clone();
|
||||
if let Some(invalid_blob) = invalid_blob_opt.as_mut() {
|
||||
invalid_blob.kzg_commitment = KzgCommitment::random_for_test(&mut rng);
|
||||
}
|
||||
*invalid_blobs.get_mut(index).unwrap() = invalid_blob_opt;
|
||||
}
|
||||
|
||||
(block, blobs, invalid_blobs)
|
||||
}
|
||||
|
||||
type ProcessingViewSetup<E> = (
|
||||
KzgCommitments<E>,
|
||||
FixedVector<Option<KzgCommitment>, <E as EthSpec>::MaxBlobsPerBlock>,
|
||||
FixedVector<Option<KzgCommitment>, <E as EthSpec>::MaxBlobsPerBlock>,
|
||||
);
|
||||
|
||||
pub fn setup_processing_components(
|
||||
block: SignedBeaconBlock<E>,
|
||||
valid_blobs: FixedVector<Option<BlobSidecar<E>>, <E as EthSpec>::MaxBlobsPerBlock>,
|
||||
invalid_blobs: FixedVector<Option<BlobSidecar<E>>, <E as EthSpec>::MaxBlobsPerBlock>,
|
||||
) -> ProcessingViewSetup<E> {
|
||||
let commitments = block
|
||||
.message()
|
||||
.body()
|
||||
.blob_kzg_commitments()
|
||||
.unwrap()
|
||||
.clone();
|
||||
let blobs = FixedVector::from(
|
||||
valid_blobs
|
||||
.iter()
|
||||
.map(|blob_opt| blob_opt.as_ref().map(|blob| blob.kzg_commitment))
|
||||
.collect::<Vec<_>>(),
|
||||
);
|
||||
let invalid_blobs = FixedVector::from(
|
||||
invalid_blobs
|
||||
.iter()
|
||||
.map(|blob_opt| blob_opt.as_ref().map(|blob| blob.kzg_commitment))
|
||||
.collect::<Vec<_>>(),
|
||||
);
|
||||
(commitments, blobs, invalid_blobs)
|
||||
}
|
||||
|
||||
type PendingComponentsSetup<E> = (
|
||||
DietAvailabilityPendingExecutedBlock<E>,
|
||||
FixedVector<Option<KzgVerifiedBlob<E>>, <E as EthSpec>::MaxBlobsPerBlock>,
|
||||
FixedVector<Option<KzgVerifiedBlob<E>>, <E as EthSpec>::MaxBlobsPerBlock>,
|
||||
);
|
||||
|
||||
pub fn setup_pending_components(
|
||||
block: SignedBeaconBlock<E>,
|
||||
valid_blobs: FixedVector<Option<BlobSidecar<E>>, <E as EthSpec>::MaxBlobsPerBlock>,
|
||||
invalid_blobs: FixedVector<Option<BlobSidecar<E>>, <E as EthSpec>::MaxBlobsPerBlock>,
|
||||
) -> PendingComponentsSetup<E> {
|
||||
let blobs = FixedVector::from(
|
||||
valid_blobs
|
||||
.iter()
|
||||
.map(|blob_opt| {
|
||||
blob_opt
|
||||
.as_ref()
|
||||
.map(|blob| KzgVerifiedBlob::new(blob.clone()))
|
||||
})
|
||||
.collect::<Vec<_>>(),
|
||||
);
|
||||
let invalid_blobs = FixedVector::from(
|
||||
invalid_blobs
|
||||
.iter()
|
||||
.map(|blob_opt| {
|
||||
blob_opt
|
||||
.as_ref()
|
||||
.map(|blob| KzgVerifiedBlob::new(blob.clone()))
|
||||
})
|
||||
.collect::<Vec<_>>(),
|
||||
);
|
||||
let dummy_parent = block.clone_as_blinded();
|
||||
let block = AvailabilityPendingExecutedBlock {
|
||||
block: Arc::new(block),
|
||||
import_data: BlockImportData {
|
||||
block_root: Default::default(),
|
||||
state: BeaconState::new(0, Default::default(), &ChainSpec::minimal()),
|
||||
parent_block: dummy_parent,
|
||||
parent_eth1_finalization_data: Eth1FinalizationData {
|
||||
eth1_data: Default::default(),
|
||||
eth1_deposit_index: 0,
|
||||
},
|
||||
confirmed_state_roots: vec![],
|
||||
consensus_context: ConsensusContext::new(Slot::new(0)),
|
||||
},
|
||||
payload_verification_outcome: PayloadVerificationOutcome {
|
||||
payload_verification_status: PayloadVerificationStatus::Verified,
|
||||
is_valid_merge_transition_block: false,
|
||||
},
|
||||
};
|
||||
(block.into(), blobs, invalid_blobs)
|
||||
}
|
||||
|
||||
type ChildComponentsSetup<E> = (
|
||||
Arc<SignedBeaconBlock<E>>,
|
||||
FixedVector<Option<Arc<BlobSidecar<E>>>, <E as EthSpec>::MaxBlobsPerBlock>,
|
||||
FixedVector<Option<Arc<BlobSidecar<E>>>, <E as EthSpec>::MaxBlobsPerBlock>,
|
||||
);
|
||||
|
||||
pub fn setup_child_components(
|
||||
block: SignedBeaconBlock<E>,
|
||||
valid_blobs: FixedVector<Option<BlobSidecar<E>>, <E as EthSpec>::MaxBlobsPerBlock>,
|
||||
invalid_blobs: FixedVector<Option<BlobSidecar<E>>, <E as EthSpec>::MaxBlobsPerBlock>,
|
||||
) -> ChildComponentsSetup<E> {
|
||||
let blobs = FixedVector::from(
|
||||
valid_blobs
|
||||
.into_iter()
|
||||
.map(|blob_opt| blob_opt.clone().map(Arc::new))
|
||||
.collect::<Vec<_>>(),
|
||||
);
|
||||
let invalid_blobs = FixedVector::from(
|
||||
invalid_blobs
|
||||
.into_iter()
|
||||
.map(|blob_opt| blob_opt.clone().map(Arc::new))
|
||||
.collect::<Vec<_>>(),
|
||||
);
|
||||
(Arc::new(block), blobs, invalid_blobs)
|
||||
}
|
||||
|
||||
pub fn assert_cache_consistent<V: AvailabilityView<E>>(cache: V) {
|
||||
if let Some(cached_block) = cache.get_cached_block() {
|
||||
let cached_block_commitments = cached_block.get_commitments();
|
||||
for index in 0..E::max_blobs_per_block() {
|
||||
let block_commitment = cached_block_commitments.get(index).copied();
|
||||
let blob_commitment_opt = cache.get_cached_blobs().get(index).unwrap();
|
||||
let blob_commitment = blob_commitment_opt.as_ref().map(|b| *b.get_commitment());
|
||||
assert_eq!(block_commitment, blob_commitment);
|
||||
}
|
||||
} else {
|
||||
panic!("No cached block")
|
||||
}
|
||||
}
|
||||
|
||||
pub fn assert_empty_blob_cache<V: AvailabilityView<E>>(cache: V) {
|
||||
for blob in cache.get_cached_blobs().iter() {
|
||||
assert!(blob.is_none());
|
||||
}
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! generate_tests {
|
||||
($module_name:ident, $type_name:ty, $block_field:ident, $blob_field:ident, $setup_fn:ident) => {
|
||||
mod $module_name {
|
||||
use super::*;
|
||||
use types::Hash256;
|
||||
|
||||
#[test]
|
||||
fn valid_block_invalid_blobs_valid_blobs() {
|
||||
let (block_commitments, blobs, random_blobs) = pre_setup();
|
||||
let (block_commitments, blobs, random_blobs) =
|
||||
$setup_fn(block_commitments, blobs, random_blobs);
|
||||
let block_root = Hash256::zero();
|
||||
let mut cache = <$type_name>::empty(block_root);
|
||||
cache.merge_block(block_commitments);
|
||||
cache.merge_blobs(random_blobs);
|
||||
cache.merge_blobs(blobs);
|
||||
|
||||
assert_cache_consistent(cache);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_blobs_block_valid_blobs() {
|
||||
let (block_commitments, blobs, random_blobs) = pre_setup();
|
||||
let (block_commitments, blobs, random_blobs) =
|
||||
$setup_fn(block_commitments, blobs, random_blobs);
|
||||
let block_root = Hash256::zero();
|
||||
let mut cache = <$type_name>::empty(block_root);
|
||||
cache.merge_blobs(random_blobs);
|
||||
cache.merge_block(block_commitments);
|
||||
cache.merge_blobs(blobs);
|
||||
|
||||
assert_cache_consistent(cache);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_blobs_valid_blobs_block() {
|
||||
let (block_commitments, blobs, random_blobs) = pre_setup();
|
||||
let (block_commitments, blobs, random_blobs) =
|
||||
$setup_fn(block_commitments, blobs, random_blobs);
|
||||
|
||||
let block_root = Hash256::zero();
|
||||
let mut cache = <$type_name>::empty(block_root);
|
||||
cache.merge_blobs(random_blobs);
|
||||
cache.merge_blobs(blobs);
|
||||
cache.merge_block(block_commitments);
|
||||
|
||||
assert_empty_blob_cache(cache);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn block_valid_blobs_invalid_blobs() {
|
||||
let (block_commitments, blobs, random_blobs) = pre_setup();
|
||||
let (block_commitments, blobs, random_blobs) =
|
||||
$setup_fn(block_commitments, blobs, random_blobs);
|
||||
|
||||
let block_root = Hash256::zero();
|
||||
let mut cache = <$type_name>::empty(block_root);
|
||||
cache.merge_block(block_commitments);
|
||||
cache.merge_blobs(blobs);
|
||||
cache.merge_blobs(random_blobs);
|
||||
|
||||
assert_cache_consistent(cache);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn valid_blobs_block_invalid_blobs() {
|
||||
let (block_commitments, blobs, random_blobs) = pre_setup();
|
||||
let (block_commitments, blobs, random_blobs) =
|
||||
$setup_fn(block_commitments, blobs, random_blobs);
|
||||
|
||||
let block_root = Hash256::zero();
|
||||
let mut cache = <$type_name>::empty(block_root);
|
||||
cache.merge_blobs(blobs);
|
||||
cache.merge_block(block_commitments);
|
||||
cache.merge_blobs(random_blobs);
|
||||
|
||||
assert_cache_consistent(cache);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn valid_blobs_invalid_blobs_block() {
|
||||
let (block_commitments, blobs, random_blobs) = pre_setup();
|
||||
let (block_commitments, blobs, random_blobs) =
|
||||
$setup_fn(block_commitments, blobs, random_blobs);
|
||||
|
||||
let block_root = Hash256::zero();
|
||||
let mut cache = <$type_name>::empty(block_root);
|
||||
cache.merge_blobs(blobs);
|
||||
cache.merge_blobs(random_blobs);
|
||||
cache.merge_block(block_commitments);
|
||||
|
||||
assert_cache_consistent(cache);
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
generate_tests!(
|
||||
processing_components_tests,
|
||||
ProcessingComponents::<E>,
|
||||
kzg_commitments,
|
||||
processing_blobs,
|
||||
setup_processing_components
|
||||
);
|
||||
generate_tests!(
|
||||
pending_components_tests,
|
||||
PendingComponents<E>,
|
||||
executed_block,
|
||||
verified_blobs,
|
||||
setup_pending_components
|
||||
);
|
||||
generate_tests!(
|
||||
child_component_tests,
|
||||
ChildComponents::<E>,
|
||||
downloaded_block,
|
||||
downloaded_blobs,
|
||||
setup_child_components
|
||||
);
|
||||
}
|
@ -0,0 +1,54 @@
|
||||
use crate::block_verification_types::RpcBlock;
|
||||
use crate::data_availability_checker::AvailabilityView;
|
||||
use bls::Hash256;
|
||||
use std::sync::Arc;
|
||||
use types::blob_sidecar::FixedBlobSidecarList;
|
||||
use types::{EthSpec, SignedBeaconBlock};
|
||||
|
||||
/// For requests triggered by an `UnknownBlockParent` or `UnknownBlobParent`, this struct
|
||||
/// is used to cache components as they are sent to the network service. We can't use the
|
||||
/// data availability cache currently because any blocks or blobs without parents
|
||||
/// won't pass validation and therefore won't make it into the cache.
|
||||
pub struct ChildComponents<E: EthSpec> {
|
||||
pub block_root: Hash256,
|
||||
pub downloaded_block: Option<Arc<SignedBeaconBlock<E>>>,
|
||||
pub downloaded_blobs: FixedBlobSidecarList<E>,
|
||||
}
|
||||
|
||||
impl<E: EthSpec> From<RpcBlock<E>> for ChildComponents<E> {
|
||||
fn from(value: RpcBlock<E>) -> Self {
|
||||
let (block_root, block, blobs) = value.deconstruct();
|
||||
let fixed_blobs = blobs.map(|blobs| {
|
||||
FixedBlobSidecarList::from(blobs.into_iter().map(Some).collect::<Vec<_>>())
|
||||
});
|
||||
Self::new(block_root, Some(block), fixed_blobs)
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: EthSpec> ChildComponents<E> {
|
||||
pub fn empty(block_root: Hash256) -> Self {
|
||||
Self {
|
||||
block_root,
|
||||
downloaded_block: None,
|
||||
downloaded_blobs: <_>::default(),
|
||||
}
|
||||
}
|
||||
pub fn new(
|
||||
block_root: Hash256,
|
||||
block: Option<Arc<SignedBeaconBlock<E>>>,
|
||||
blobs: Option<FixedBlobSidecarList<E>>,
|
||||
) -> Self {
|
||||
let mut cache = Self::empty(block_root);
|
||||
if let Some(block) = block {
|
||||
cache.merge_block(block);
|
||||
}
|
||||
if let Some(blobs) = blobs {
|
||||
cache.merge_blobs(blobs);
|
||||
}
|
||||
cache
|
||||
}
|
||||
|
||||
pub fn clear_blobs(&mut self) {
|
||||
self.downloaded_blobs = FixedBlobSidecarList::default();
|
||||
}
|
||||
}
|
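// A sketch of the intended call pattern under the `AvailabilityView` rules; the
// block and blobs are assumed to come from parent-lookup responses elsewhere.
fn example_cache_child_components<E: EthSpec>(
    block_root: Hash256,
    block: Arc<SignedBeaconBlock<E>>,
    blobs: FixedBlobSidecarList<E>,
) -> bool {
    let mut cache = ChildComponents::empty(block_root);
    // Blobs that arrive before the block are held provisionally.
    cache.merge_blobs(blobs);
    // When the block arrives, cached blobs whose commitments don't match it are evicted.
    cache.merge_block(block);
    // True once the block and all of its expected blobs are present.
    cache.is_available()
}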
@ -0,0 +1,79 @@
|
||||
use kzg::{Error as KzgError, KzgCommitment};
|
||||
use strum::IntoStaticStr;
|
||||
use types::{BeaconStateError, Hash256};
|
||||
|
||||
#[derive(Debug, IntoStaticStr)]
|
||||
pub enum Error {
|
||||
Kzg(KzgError),
|
||||
KzgNotInitialized,
|
||||
KzgVerificationFailed,
|
||||
KzgCommitmentMismatch {
|
||||
blob_commitment: KzgCommitment,
|
||||
block_commitment: KzgCommitment,
|
||||
},
|
||||
Unexpected,
|
||||
SszTypes(ssz_types::Error),
|
||||
MissingBlobs,
|
||||
BlobIndexInvalid(u64),
|
||||
StoreError(store::Error),
|
||||
DecodeError(ssz::DecodeError),
|
||||
InconsistentBlobBlockRoots {
|
||||
block_root: Hash256,
|
||||
blob_block_root: Hash256,
|
||||
},
|
||||
ParentStateMissing(Hash256),
|
||||
BlockReplayError(state_processing::BlockReplayError),
|
||||
RebuildingStateCaches(BeaconStateError),
|
||||
}
|
||||
|
||||
pub enum ErrorCategory {
|
||||
/// Internal Errors (not caused by peers)
|
||||
Internal,
|
||||
/// Errors caused by faulty / malicious peers
|
||||
Malicious,
|
||||
}
|
||||
|
||||
impl Error {
|
||||
pub fn category(&self) -> ErrorCategory {
|
||||
match self {
|
||||
Error::KzgNotInitialized
|
||||
| Error::SszTypes(_)
|
||||
| Error::MissingBlobs
|
||||
| Error::StoreError(_)
|
||||
| Error::DecodeError(_)
|
||||
| Error::Unexpected
|
||||
| Error::ParentStateMissing(_)
|
||||
| Error::BlockReplayError(_)
|
||||
| Error::RebuildingStateCaches(_) => ErrorCategory::Internal,
|
||||
Error::Kzg(_)
|
||||
| Error::BlobIndexInvalid(_)
|
||||
| Error::KzgCommitmentMismatch { .. }
|
||||
| Error::KzgVerificationFailed
|
||||
| Error::InconsistentBlobBlockRoots { .. } => ErrorCategory::Malicious,
|
||||
}
|
||||
}
|
||||
}
|
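// A hedged sketch of how callers can branch on the error category; the actual
// peer-scoring logic lives elsewhere and is not shown here.
pub fn example_is_peer_fault(error: &Error) -> bool {
    // Only errors attributable to faulty or malicious peers should affect peer scores.
    matches!(error.category(), ErrorCategory::Malicious)
}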
||||
|
||||
impl From<ssz_types::Error> for Error {
|
||||
fn from(value: ssz_types::Error) -> Self {
|
||||
Self::SszTypes(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<store::Error> for Error {
|
||||
fn from(value: store::Error) -> Self {
|
||||
Self::StoreError(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ssz::DecodeError> for Error {
|
||||
fn from(value: ssz::DecodeError) -> Self {
|
||||
Self::DecodeError(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<state_processing::BlockReplayError> for Error {
|
||||
fn from(value: state_processing::BlockReplayError) -> Self {
|
||||
Self::BlockReplayError(value)
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
@ -0,0 +1,74 @@
|
||||
use crate::data_availability_checker::AvailabilityView;
|
||||
use std::collections::hash_map::Entry;
|
||||
use std::collections::HashMap;
|
||||
use types::beacon_block_body::{KzgCommitmentOpts, KzgCommitments};
|
||||
use types::{EthSpec, Hash256, Slot};
|
||||
|
||||
/// This cache is used only for gossip blocks/blobs and single block/blob lookups, to give req/resp
|
||||
/// a view of what we have and what we require. This cache serves a slightly different purpose than
|
||||
/// gossip caches because it allows us to process duplicate blobs that are valid in gossip.
|
||||
/// See `AvailabilityView`'s trait definition.
|
||||
#[derive(Default)]
|
||||
pub struct ProcessingCache<E: EthSpec> {
|
||||
processing_cache: HashMap<Hash256, ProcessingComponents<E>>,
|
||||
}
|
||||
|
||||
impl<E: EthSpec> ProcessingCache<E> {
|
||||
pub fn get(&self, block_root: &Hash256) -> Option<&ProcessingComponents<E>> {
|
||||
self.processing_cache.get(block_root)
|
||||
}
|
||||
pub fn entry(&mut self, block_root: Hash256) -> Entry<'_, Hash256, ProcessingComponents<E>> {
|
||||
self.processing_cache.entry(block_root)
|
||||
}
|
||||
pub fn remove(&mut self, block_root: &Hash256) {
|
||||
self.processing_cache.remove(block_root);
|
||||
}
|
||||
pub fn has_block(&self, block_root: &Hash256) -> bool {
|
||||
self.processing_cache
|
||||
.get(block_root)
|
||||
.map_or(false, |b| b.block_exists())
|
||||
}
|
||||
pub fn incomplete_processing_components(&self, slot: Slot) -> Vec<Hash256> {
|
||||
let mut roots_missing_components = vec![];
|
||||
for (&block_root, info) in self.processing_cache.iter() {
|
||||
if info.slot == slot && !info.is_available() {
|
||||
roots_missing_components.push(block_root);
|
||||
}
|
||||
}
|
||||
roots_missing_components
|
||||
}
|
||||
}
|
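// A sketch of the assumed call pattern when a gossip block is first seen: record its
// commitments so that later blobs can be matched against them.
fn example_register_seen_block<E: EthSpec>(
    cache: &mut ProcessingCache<E>,
    block_root: Hash256,
    slot: Slot,
    commitments: KzgCommitments<E>,
) {
    cache
        .entry(block_root)
        .or_insert_with(|| ProcessingComponents::new(slot))
        // `merge_block` comes from `AvailabilityView`, implemented for
        // `ProcessingComponents` via the macro in `availability_view.rs`.
        .merge_block(commitments);
}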
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ProcessingComponents<E: EthSpec> {
|
||||
slot: Slot,
|
||||
/// Blobs required for a block can only be known if we have seen the block. So `Some` here
|
||||
/// means we've seen it, a `None` means we haven't. The `block_commitments` value helps us figure
|
||||
/// out whether incoming blobs actually match the block.
|
||||
pub block_commitments: Option<KzgCommitments<E>>,
|
||||
/// `KzgCommitments` for blobs are always known, even if we haven't seen the block. See
|
||||
/// `AvailabilityView`'s trait definition for more details.
|
||||
pub blob_commitments: KzgCommitmentOpts<E>,
|
||||
}
|
||||
|
||||
impl<E: EthSpec> ProcessingComponents<E> {
|
||||
pub fn new(slot: Slot) -> Self {
|
||||
Self {
|
||||
slot,
|
||||
block_commitments: None,
|
||||
blob_commitments: KzgCommitmentOpts::<E>::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Not safe for use outside of tests, as it hardcodes the slot to 0; the real constructor always requires a slot.
|
||||
#[cfg(test)]
|
||||
impl<E: EthSpec> ProcessingComponents<E> {
|
||||
pub fn empty(_block_root: Hash256) -> Self {
|
||||
Self {
|
||||
slot: Slot::new(0),
|
||||
block_commitments: None,
|
||||
blob_commitments: KzgCommitmentOpts::<E>::default(),
|
||||
}
|
||||
}
|
||||
}
|
@ -0,0 +1,230 @@
|
||||
use crate::block_verification_types::AsBlock;
|
||||
use crate::{
|
||||
block_verification_types::BlockImportData,
|
||||
data_availability_checker::{AvailabilityCheckError, STATE_LRU_CAPACITY},
|
||||
eth1_finalization_cache::Eth1FinalizationData,
|
||||
AvailabilityPendingExecutedBlock, BeaconChainTypes, BeaconStore, PayloadVerificationOutcome,
|
||||
};
|
||||
use lru::LruCache;
|
||||
use parking_lot::RwLock;
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use state_processing::{BlockReplayer, ConsensusContext, StateProcessingStrategy};
|
||||
use std::sync::Arc;
|
||||
use types::{ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc};
|
||||
use types::{BeaconState, BlindedPayload, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock};
|
||||
|
||||
/// This mirrors everything in the `AvailabilityPendingExecutedBlock`, except
|
||||
/// that it is much smaller because it contains only a state root instead of
|
||||
/// a full `BeaconState`.
|
||||
#[derive(Encode, Decode, Clone)]
|
||||
pub struct DietAvailabilityPendingExecutedBlock<E: EthSpec> {
|
||||
#[ssz(with = "ssz_tagged_signed_beacon_block_arc")]
|
||||
block: Arc<SignedBeaconBlock<E>>,
|
||||
state_root: Hash256,
|
||||
#[ssz(with = "ssz_tagged_signed_beacon_block")]
|
||||
parent_block: SignedBeaconBlock<E, BlindedPayload<E>>,
|
||||
parent_eth1_finalization_data: Eth1FinalizationData,
|
||||
confirmed_state_roots: Vec<Hash256>,
|
||||
consensus_context: ConsensusContext<E>,
|
||||
payload_verification_outcome: PayloadVerificationOutcome,
|
||||
}
|
||||
|
||||
/// Implements the same methods as `AvailabilityPendingExecutedBlock`.
|
||||
impl<E: EthSpec> DietAvailabilityPendingExecutedBlock<E> {
|
||||
pub fn as_block(&self) -> &SignedBeaconBlock<E> {
|
||||
&self.block
|
||||
}
|
||||
|
||||
pub fn num_blobs_expected(&self) -> usize {
|
||||
self.block
|
||||
.message()
|
||||
.body()
|
||||
.blob_kzg_commitments()
|
||||
.map_or(0, |commitments| commitments.len())
|
||||
}
|
||||
}
|
||||
|
||||
/// This LRU cache holds BeaconStates used for block import. If the cache overflows,
|
||||
/// the least recently used state will be dropped. If the dropped state is needed
|
||||
/// later on, it will be recovered by loading the parent state and replaying the block.
|
||||
///
|
||||
/// WARNING: This cache assumes the parent block of any `AvailabilityPendingExecutedBlock`
|
||||
/// has already been imported into ForkChoice. If this is not the case, the cache
|
||||
/// will fail to recover the state when the cache overflows because it can't load
|
||||
/// the parent state!
|
||||
pub struct StateLRUCache<T: BeaconChainTypes> {
|
||||
states: RwLock<LruCache<Hash256, BeaconState<T::EthSpec>>>,
|
||||
store: BeaconStore<T>,
|
||||
spec: ChainSpec,
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> StateLRUCache<T> {
|
||||
pub fn new(store: BeaconStore<T>, spec: ChainSpec) -> Self {
|
||||
Self {
|
||||
states: RwLock::new(LruCache::new(STATE_LRU_CAPACITY)),
|
||||
store,
|
||||
spec,
|
||||
}
|
||||
}
|
||||
|
||||
/// This will store the state in the LRU cache and return a
|
||||
/// `DietAvailabilityPendingExecutedBlock` which is much cheaper to
|
||||
/// keep around in memory.
|
||||
pub fn register_pending_executed_block(
|
||||
&self,
|
||||
executed_block: AvailabilityPendingExecutedBlock<T::EthSpec>,
|
||||
) -> DietAvailabilityPendingExecutedBlock<T::EthSpec> {
|
||||
let state = executed_block.import_data.state;
|
||||
let state_root = executed_block.block.state_root();
|
||||
self.states.write().put(state_root, state);
|
||||
|
||||
DietAvailabilityPendingExecutedBlock {
|
||||
block: executed_block.block,
|
||||
state_root,
|
||||
parent_block: executed_block.import_data.parent_block,
|
||||
parent_eth1_finalization_data: executed_block.import_data.parent_eth1_finalization_data,
|
||||
confirmed_state_roots: executed_block.import_data.confirmed_state_roots,
|
||||
consensus_context: executed_block.import_data.consensus_context,
|
||||
payload_verification_outcome: executed_block.payload_verification_outcome,
|
||||
}
|
||||
}
|
||||
|
||||
/// Recover the `AvailabilityPendingExecutedBlock` from the diet version.
|
||||
/// This method will first check the cache and if the state is not found
|
||||
/// it will reconstruct the state by loading the parent state from disk and
|
||||
/// replaying the block.
|
||||
pub fn recover_pending_executed_block(
|
||||
&self,
|
||||
diet_executed_block: DietAvailabilityPendingExecutedBlock<T::EthSpec>,
|
||||
) -> Result<AvailabilityPendingExecutedBlock<T::EthSpec>, AvailabilityCheckError> {
|
||||
let maybe_state = self.states.write().pop(&diet_executed_block.state_root);
|
||||
if let Some(state) = maybe_state {
|
||||
let block_root = diet_executed_block.block.canonical_root();
|
||||
Ok(AvailabilityPendingExecutedBlock {
|
||||
block: diet_executed_block.block,
|
||||
import_data: BlockImportData {
|
||||
block_root,
|
||||
state,
|
||||
parent_block: diet_executed_block.parent_block,
|
||||
parent_eth1_finalization_data: diet_executed_block
|
||||
.parent_eth1_finalization_data,
|
||||
confirmed_state_roots: diet_executed_block.confirmed_state_roots,
|
||||
consensus_context: diet_executed_block.consensus_context,
|
||||
},
|
||||
payload_verification_outcome: diet_executed_block.payload_verification_outcome,
|
||||
})
|
||||
} else {
|
||||
self.reconstruct_pending_executed_block(diet_executed_block)
|
||||
}
|
||||
}
|
||||
|
||||
/// Reconstruct the `AvailabilityPendingExecutedBlock` by loading the parent
|
||||
/// state from disk and replaying the block. This function does NOT check the
|
||||
/// LRU cache.
|
||||
pub fn reconstruct_pending_executed_block(
|
||||
&self,
|
||||
diet_executed_block: DietAvailabilityPendingExecutedBlock<T::EthSpec>,
|
||||
) -> Result<AvailabilityPendingExecutedBlock<T::EthSpec>, AvailabilityCheckError> {
|
||||
let block_root = diet_executed_block.block.canonical_root();
|
||||
let state = self.reconstruct_state(&diet_executed_block)?;
|
||||
Ok(AvailabilityPendingExecutedBlock {
|
||||
block: diet_executed_block.block,
|
||||
import_data: BlockImportData {
|
||||
block_root,
|
||||
state,
|
||||
parent_block: diet_executed_block.parent_block,
|
||||
parent_eth1_finalization_data: diet_executed_block.parent_eth1_finalization_data,
|
||||
confirmed_state_roots: diet_executed_block.confirmed_state_roots,
|
||||
consensus_context: diet_executed_block.consensus_context,
|
||||
},
|
||||
payload_verification_outcome: diet_executed_block.payload_verification_outcome,
|
||||
})
|
||||
}
|
||||
|
||||
/// Reconstruct the state by loading the parent state from disk and replaying
|
||||
/// the block.
|
||||
fn reconstruct_state(
|
||||
&self,
|
||||
diet_executed_block: &DietAvailabilityPendingExecutedBlock<T::EthSpec>,
|
||||
) -> Result<BeaconState<T::EthSpec>, AvailabilityCheckError> {
|
||||
let parent_block_root = diet_executed_block.parent_block.canonical_root();
|
||||
let parent_block_state_root = diet_executed_block.parent_block.state_root();
|
||||
let (parent_state_root, parent_state) = self
|
||||
.store
|
||||
.get_advanced_hot_state(
|
||||
parent_block_root,
|
||||
diet_executed_block.parent_block.slot(),
|
||||
parent_block_state_root,
|
||||
)
|
||||
.map_err(AvailabilityCheckError::StoreError)?
|
||||
.ok_or(AvailabilityCheckError::ParentStateMissing(
|
||||
parent_block_state_root,
|
||||
))?;
|
||||
|
||||
let state_roots = vec![
|
||||
Ok((parent_state_root, diet_executed_block.parent_block.slot())),
|
||||
Ok((
|
||||
diet_executed_block.state_root,
|
||||
diet_executed_block.block.slot(),
|
||||
)),
|
||||
];
|
||||
|
||||
let block_replayer: BlockReplayer<'_, T::EthSpec, AvailabilityCheckError, _> =
|
||||
BlockReplayer::new(parent_state, &self.spec)
|
||||
.no_signature_verification()
|
||||
.state_processing_strategy(StateProcessingStrategy::Accurate)
|
||||
.state_root_iter(state_roots.into_iter())
|
||||
.minimal_block_root_verification();
|
||||
|
||||
block_replayer
|
||||
.apply_blocks(vec![diet_executed_block.block.clone_as_blinded()], None)
|
||||
.map(|block_replayer| block_replayer.into_state())
|
||||
.and_then(|mut state| {
|
||||
state
|
||||
.build_exit_cache(&self.spec)
|
||||
.map_err(AvailabilityCheckError::RebuildingStateCaches)?;
|
||||
state
|
||||
.update_tree_hash_cache()
|
||||
.map_err(AvailabilityCheckError::RebuildingStateCaches)?;
|
||||
Ok(state)
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns the state cache for inspection in tests.
|
||||
#[cfg(test)]
|
||||
pub fn lru_cache(&self) -> &RwLock<LruCache<Hash256, BeaconState<T::EthSpec>>> {
|
||||
&self.states
|
||||
}
|
||||
|
||||
/// Removes any states from the cache that are from before the given epoch.
|
||||
pub fn do_maintenance(&self, cutoff_epoch: Epoch) {
|
||||
let mut write_lock = self.states.write();
|
||||
while let Some((_, state)) = write_lock.peek_lru() {
|
||||
if state.slot().epoch(T::EthSpec::slots_per_epoch()) < cutoff_epoch {
|
||||
write_lock.pop_lru();
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
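// A sketch of the intended round trip; the executed block is assumed to come from
// block verification.
fn example_state_cache_round_trip<T: BeaconChainTypes>(
    cache: &StateLRUCache<T>,
    executed: AvailabilityPendingExecutedBlock<T::EthSpec>,
) -> Result<AvailabilityPendingExecutedBlock<T::EthSpec>, AvailabilityCheckError> {
    // Store the full `BeaconState` in the LRU and keep only the cheap diet version.
    let diet = cache.register_pending_executed_block(executed);
    // Later, recover the full block; if the state was evicted it is rebuilt by
    // loading the parent state and replaying the block.
    cache.recover_pending_executed_block(diet)
}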
||||
|
||||
/// This can only be used during testing. The intended way to
|
||||
/// obtain a `DietAvailabilityPendingExecutedBlock` is to call
|
||||
/// `register_pending_executed_block` on the `StateLRUCache`.
|
||||
#[cfg(test)]
|
||||
impl<E: EthSpec> From<AvailabilityPendingExecutedBlock<E>>
|
||||
for DietAvailabilityPendingExecutedBlock<E>
|
||||
{
|
||||
fn from(value: AvailabilityPendingExecutedBlock<E>) -> Self {
|
||||
Self {
|
||||
block: value.block,
|
||||
state_root: value.import_data.state.canonical_root(),
|
||||
parent_block: value.import_data.parent_block,
|
||||
parent_eth1_finalization_data: value.import_data.parent_eth1_finalization_data,
|
||||
confirmed_state_roots: value.import_data.confirmed_state_roots,
|
||||
consensus_context: value.import_data.consensus_context,
|
||||
payload_verification_outcome: value.payload_verification_outcome,
|
||||
}
|
||||
}
|
||||
}
|
@ -1,3 +1,4 @@
|
||||
use crate::data_availability_checker::AvailableBlock;
|
||||
use crate::{
|
||||
attester_cache::{CommitteeLengths, Error},
|
||||
metrics,
|
||||
@ -5,6 +6,7 @@ use crate::{
|
||||
use parking_lot::RwLock;
|
||||
use proto_array::Block as ProtoBlock;
|
||||
use std::sync::Arc;
|
||||
use types::blob_sidecar::BlobSidecarList;
|
||||
use types::*;
|
||||
|
||||
pub struct CacheItem<E: EthSpec> {
|
||||
@ -20,6 +22,7 @@ pub struct CacheItem<E: EthSpec> {
|
||||
* Values used to make the block available.
|
||||
*/
|
||||
block: Arc<SignedBeaconBlock<E>>,
|
||||
blobs: Option<BlobSidecarList<E>>,
|
||||
proto_block: ProtoBlock,
|
||||
}
|
||||
|
||||
@ -49,7 +52,7 @@ impl<E: EthSpec> EarlyAttesterCache<E> {
|
||||
pub fn add_head_block(
|
||||
&self,
|
||||
beacon_block_root: Hash256,
|
||||
block: Arc<SignedBeaconBlock<E>>,
|
||||
block: AvailableBlock<E>,
|
||||
proto_block: ProtoBlock,
|
||||
state: &BeaconState<E>,
|
||||
spec: &ChainSpec,
|
||||
@ -67,6 +70,7 @@ impl<E: EthSpec> EarlyAttesterCache<E> {
|
||||
},
|
||||
};
|
||||
|
||||
let (_, block, blobs) = block.deconstruct();
|
||||
let item = CacheItem {
|
||||
epoch,
|
||||
committee_lengths,
|
||||
@ -74,6 +78,7 @@ impl<E: EthSpec> EarlyAttesterCache<E> {
|
||||
source,
|
||||
target,
|
||||
block,
|
||||
blobs,
|
||||
proto_block,
|
||||
};
|
||||
|
||||
@ -155,6 +160,15 @@ impl<E: EthSpec> EarlyAttesterCache<E> {
|
||||
.map(|item| item.block.clone())
|
||||
}
|
||||
|
||||
/// Returns the blobs, if `block_root` matches the cached item.
|
||||
pub fn get_blobs(&self, block_root: Hash256) -> Option<BlobSidecarList<E>> {
|
||||
self.item
|
||||
.read()
|
||||
.as_ref()
|
||||
.filter(|item| item.beacon_block_root == block_root)
|
||||
.and_then(|item| item.blobs.clone())
|
||||
}
|
||||
|
||||
/// Returns the proto-array block, if `block_root` matches the cached item.
|
||||
pub fn get_proto_block(&self, block_root: Hash256) -> Option<ProtoBlock> {
|
||||
self.item
|
||||
|
@ -2,12 +2,14 @@ use crate::attester_cache::Error as AttesterCacheError;
|
||||
use crate::beacon_block_streamer::Error as BlockStreamerError;
|
||||
use crate::beacon_chain::ForkChoiceError;
|
||||
use crate::beacon_fork_choice_store::Error as ForkChoiceStoreError;
|
||||
use crate::data_availability_checker::AvailabilityCheckError;
|
||||
use crate::eth1_chain::Error as Eth1ChainError;
|
||||
use crate::historical_blocks::HistoricalBlockError;
|
||||
use crate::migrate::PruningError;
|
||||
use crate::naive_aggregation_pool::Error as NaiveAggregationError;
|
||||
use crate::observed_aggregates::Error as ObservedAttestationsError;
|
||||
use crate::observed_attesters::Error as ObservedAttestersError;
|
||||
use crate::observed_blob_sidecars::Error as ObservedBlobSidecarsError;
|
||||
use crate::observed_block_producers::Error as ObservedBlockProducersError;
|
||||
use execution_layer::PayloadStatus;
|
||||
use fork_choice::ExecutionStatus;
|
||||
@ -102,6 +104,7 @@ pub enum BeaconChainError {
|
||||
ObservedAttestationsError(ObservedAttestationsError),
|
||||
ObservedAttestersError(ObservedAttestersError),
|
||||
ObservedBlockProducersError(ObservedBlockProducersError),
|
||||
ObservedBlobSidecarsError(ObservedBlobSidecarsError),
|
||||
AttesterCacheError(AttesterCacheError),
|
||||
PruningError(PruningError),
|
||||
ArithError(ArithError),
|
||||
@ -217,6 +220,7 @@ pub enum BeaconChainError {
|
||||
InconsistentFork(InconsistentFork),
|
||||
ProposerHeadForkChoiceError(fork_choice::Error<proto_array::Error>),
|
||||
UnableToPublish,
|
||||
AvailabilityCheckError(AvailabilityCheckError),
|
||||
}
|
||||
|
||||
easy_from_to!(SlotProcessingError, BeaconChainError);
|
||||
@ -233,6 +237,7 @@ easy_from_to!(NaiveAggregationError, BeaconChainError);
|
||||
easy_from_to!(ObservedAttestationsError, BeaconChainError);
|
||||
easy_from_to!(ObservedAttestersError, BeaconChainError);
|
||||
easy_from_to!(ObservedBlockProducersError, BeaconChainError);
|
||||
easy_from_to!(ObservedBlobSidecarsError, BeaconChainError);
|
||||
easy_from_to!(AttesterCacheError, BeaconChainError);
|
||||
easy_from_to!(BlockSignatureVerifierError, BeaconChainError);
|
||||
easy_from_to!(PruningError, BeaconChainError);
|
||||
@ -242,6 +247,7 @@ easy_from_to!(HistoricalBlockError, BeaconChainError);
|
||||
easy_from_to!(StateAdvanceError, BeaconChainError);
|
||||
easy_from_to!(BlockReplayError, BeaconChainError);
|
||||
easy_from_to!(InconsistentFork, BeaconChainError);
|
||||
easy_from_to!(AvailabilityCheckError, BeaconChainError);
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum BlockProductionError {
|
||||
@ -270,11 +276,17 @@ pub enum BlockProductionError {
|
||||
MissingFinalizedBlock(Hash256),
|
||||
BlockTooLarge(usize),
|
||||
ShuttingDown,
|
||||
MissingBlobs,
|
||||
MissingSyncAggregate,
|
||||
MissingExecutionPayload,
|
||||
TokioJoin(tokio::task::JoinError),
|
||||
MissingKzgCommitment(String),
|
||||
TokioJoin(JoinError),
|
||||
BeaconChain(BeaconChainError),
|
||||
InvalidPayloadFork,
|
||||
TrustedSetupNotInitialized,
|
||||
InvalidBlockVariant(String),
|
||||
KzgError(kzg::Error),
|
||||
FailedToBuildBlobSidecars(String),
|
||||
}
|
||||
|
||||
easy_from_to!(BlockProcessingError, BlockProductionError);
|
||||
|
@ -1,4 +1,5 @@
|
||||
use slog::{debug, Logger};
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use std::cmp;
|
||||
use std::collections::BTreeMap;
|
||||
use types::{Checkpoint, Epoch, Eth1Data, Hash256 as Root};
|
||||
@ -10,7 +11,7 @@ pub const DEFAULT_ETH1_CACHE_SIZE: usize = 5;
|
||||
|
||||
/// These fields are named the same as the corresponding fields in the `BeaconState`
|
||||
/// as this structure stores these values from the `BeaconState` at a `Checkpoint`
|
||||
#[derive(Clone)]
|
||||
#[derive(Clone, Debug, PartialEq, Encode, Decode)]
|
||||
pub struct Eth1FinalizationData {
|
||||
pub eth1_data: Eth1Data,
|
||||
pub eth1_deposit_index: u64,
|
||||
|
@ -9,6 +9,7 @@ const DEFAULT_CHANNEL_CAPACITY: usize = 16;
|
||||
pub struct ServerSentEventHandler<T: EthSpec> {
|
||||
attestation_tx: Sender<EventKind<T>>,
|
||||
block_tx: Sender<EventKind<T>>,
|
||||
blob_sidecar_tx: Sender<EventKind<T>>,
|
||||
finalized_tx: Sender<EventKind<T>>,
|
||||
head_tx: Sender<EventKind<T>>,
|
||||
exit_tx: Sender<EventKind<T>>,
|
||||
@ -31,6 +32,7 @@ impl<T: EthSpec> ServerSentEventHandler<T> {
|
||||
pub fn new_with_capacity(log: Logger, capacity: usize) -> Self {
|
||||
let (attestation_tx, _) = broadcast::channel(capacity);
|
||||
let (block_tx, _) = broadcast::channel(capacity);
|
||||
let (blob_sidecar_tx, _) = broadcast::channel(capacity);
|
||||
let (finalized_tx, _) = broadcast::channel(capacity);
|
||||
let (head_tx, _) = broadcast::channel(capacity);
|
||||
let (exit_tx, _) = broadcast::channel(capacity);
|
||||
@ -43,6 +45,7 @@ impl<T: EthSpec> ServerSentEventHandler<T> {
|
||||
Self {
|
||||
attestation_tx,
|
||||
block_tx,
|
||||
blob_sidecar_tx,
|
||||
finalized_tx,
|
||||
head_tx,
|
||||
exit_tx,
|
||||
@ -73,6 +76,10 @@ impl<T: EthSpec> ServerSentEventHandler<T> {
|
||||
.block_tx
|
||||
.send(kind)
|
||||
.map(|count| log_count("block", count)),
|
||||
EventKind::BlobSidecar(_) => self
|
||||
.blob_sidecar_tx
|
||||
.send(kind)
|
||||
.map(|count| log_count("blob sidecar", count)),
|
||||
EventKind::FinalizedCheckpoint(_) => self
|
||||
.finalized_tx
|
||||
.send(kind)
|
||||
@ -119,6 +126,10 @@ impl<T: EthSpec> ServerSentEventHandler<T> {
|
||||
self.block_tx.subscribe()
|
||||
}
|
||||
|
||||
pub fn subscribe_blob_sidecar(&self) -> Receiver<EventKind<T>> {
|
||||
self.blob_sidecar_tx.subscribe()
|
||||
}
|
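// A sketch of consuming the new blob sidecar stream; the handler instance is
// assumed to be obtained from the beacon chain's event handler.
pub async fn example_listen_for_blob_sidecars<T: EthSpec>(handler: &ServerSentEventHandler<T>) {
    let mut rx = handler.subscribe_blob_sidecar();
    // Each received item is an `EventKind::BlobSidecar(_)` published above.
    while let Ok(_event) = rx.recv().await {
        // Forward to SSE clients, metrics, etc. (omitted).
    }
}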
||||
|
||||
pub fn subscribe_finalized(&self) -> Receiver<EventKind<T>> {
|
||||
self.finalized_tx.subscribe()
|
||||
}
|
||||
@ -159,6 +170,10 @@ impl<T: EthSpec> ServerSentEventHandler<T> {
|
||||
self.block_tx.receiver_count() > 0
|
||||
}
|
||||
|
||||
pub fn has_blob_sidecar_subscribers(&self) -> bool {
|
||||
self.blob_sidecar_tx.receiver_count() > 0
|
||||
}
|
||||
|
||||
pub fn has_finalized_subscribers(&self) -> bool {
|
||||
self.finalized_tx.receiver_count() > 0
|
||||
}
|
||||
|
@ -12,7 +12,9 @@ use crate::{
|
||||
BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProductionError,
|
||||
ExecutionPayloadError,
|
||||
};
|
||||
use execution_layer::{BlockProposalContents, BuilderParams, PayloadAttributes, PayloadStatus};
|
||||
use execution_layer::{
|
||||
BlockProposalContents, BuilderParams, NewPayloadRequest, PayloadAttributes, PayloadStatus,
|
||||
};
|
||||
use fork_choice::{InvalidationOperation, PayloadVerificationStatus};
|
||||
use proto_array::{Block as ProtoBlock, ExecutionStatus};
|
||||
use slog::{debug, warn};
|
||||
@ -68,11 +70,10 @@ impl<T: BeaconChainTypes> PayloadNotifier<T> {
|
||||
// the block as optimistically imported. This is particularly relevant in the case
|
||||
// where we do not send the block to the EL at all.
|
||||
let block_message = block.message();
|
||||
let payload = block_message.execution_payload()?;
|
||||
partially_verify_execution_payload::<_, FullPayload<_>>(
|
||||
state,
|
||||
block.slot(),
|
||||
payload,
|
||||
block_message.body(),
|
||||
&chain.spec,
|
||||
)
|
||||
.map_err(BlockError::PerBlockProcessingError)?;
|
||||
@ -86,13 +87,11 @@ impl<T: BeaconChainTypes> PayloadNotifier<T> {
|
||||
.as_ref()
|
||||
.ok_or(ExecutionPayloadError::NoExecutionConnection)?;
|
||||
|
||||
if let Err(e) =
|
||||
execution_layer.verify_payload_block_hash(payload.execution_payload_ref())
|
||||
{
|
||||
if let Err(e) = execution_layer.verify_payload_block_hash(block_message) {
|
||||
warn!(
|
||||
chain.log,
|
||||
"Falling back to slow block hash verification";
|
||||
"block_number" => payload.block_number(),
|
||||
"block_number" => ?block_message.execution_payload().map(|payload| payload.block_number()),
|
||||
"info" => "you can silence this warning with --disable-optimistic-finalized-sync",
|
||||
"error" => ?e,
|
||||
);
|
||||
@ -138,15 +137,15 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>(
|
||||
chain: &Arc<BeaconChain<T>>,
|
||||
block: BeaconBlockRef<'a, T::EthSpec>,
|
||||
) -> Result<PayloadVerificationStatus, BlockError<T::EthSpec>> {
|
||||
let execution_payload = block.execution_payload()?;
|
||||
|
||||
let execution_layer = chain
|
||||
.execution_layer
|
||||
.as_ref()
|
||||
.ok_or(ExecutionPayloadError::NoExecutionConnection)?;
|
||||
|
||||
let new_payload_request: NewPayloadRequest<T::EthSpec> = block.try_into()?;
|
||||
let execution_block_hash = new_payload_request.block_hash();
|
||||
let new_payload_response = execution_layer
|
||||
.notify_new_payload(&execution_payload.into())
|
||||
.notify_new_payload(new_payload_request)
|
||||
.await;
|
||||
|
||||
match new_payload_response {
|
||||
@ -164,7 +163,7 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>(
|
||||
"Invalid execution payload";
|
||||
"validation_error" => ?validation_error,
|
||||
"latest_valid_hash" => ?latest_valid_hash,
|
||||
"execution_block_hash" => ?execution_payload.block_hash(),
|
||||
"execution_block_hash" => ?execution_block_hash,
|
||||
"root" => ?block.tree_hash_root(),
|
||||
"graffiti" => block.body().graffiti().as_utf8_lossy(),
|
||||
"proposer_index" => block.proposer_index(),
|
||||
@ -210,7 +209,7 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>(
|
||||
chain.log,
|
||||
"Invalid execution payload block hash";
|
||||
"validation_error" => ?validation_error,
|
||||
"execution_block_hash" => ?execution_payload.block_hash(),
|
||||
"execution_block_hash" => ?execution_block_hash,
|
||||
"root" => ?block.tree_hash_root(),
|
||||
"graffiti" => block.body().graffiti().as_utf8_lossy(),
|
||||
"proposer_index" => block.proposer_index(),
|
||||
@ -405,6 +404,7 @@ pub fn get_execution_payload<
|
||||
>(
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
state: &BeaconState<T::EthSpec>,
|
||||
parent_block_root: Hash256,
|
||||
proposer_index: u64,
|
||||
builder_params: BuilderParams,
|
||||
) -> Result<PreparePayloadHandle<T::EthSpec, Payload>, BlockProductionError> {
|
||||
@ -419,11 +419,19 @@ pub fn get_execution_payload<
|
||||
let latest_execution_payload_header_block_hash =
|
||||
state.latest_execution_payload_header()?.block_hash();
|
||||
let withdrawals = match state {
|
||||
&BeaconState::Capella(_) => Some(get_expected_withdrawals(state, spec)?.into()),
|
||||
&BeaconState::Capella(_) | &BeaconState::Deneb(_) => {
|
||||
Some(get_expected_withdrawals(state, spec)?.into())
|
||||
}
|
||||
&BeaconState::Merge(_) => None,
|
||||
// These shouldn't happen but they're here to make the pattern irrefutable
|
||||
&BeaconState::Base(_) | &BeaconState::Altair(_) => None,
|
||||
};
|
||||
let parent_beacon_block_root = match state {
|
||||
BeaconState::Deneb(_) => Some(parent_block_root),
|
||||
BeaconState::Merge(_) | BeaconState::Capella(_) => None,
|
||||
// These shouldn't happen but they're here to make the pattern irrefutable
|
||||
BeaconState::Base(_) | BeaconState::Altair(_) => None,
|
||||
};
|
||||
|
||||
// Spawn a task to obtain the execution payload from the EL via a series of async calls. The
|
||||
// `join_handle` can be used to await the result of the function.
|
||||
@ -441,6 +449,7 @@ pub fn get_execution_payload<
|
||||
latest_execution_payload_header_block_hash,
|
||||
builder_params,
|
||||
withdrawals,
|
||||
parent_beacon_block_root,
|
||||
)
|
||||
.await
|
||||
},
|
||||
@ -475,6 +484,7 @@ pub async fn prepare_execution_payload<T, Payload>(
|
||||
latest_execution_payload_header_block_hash: ExecutionBlockHash,
|
||||
builder_params: BuilderParams,
|
||||
withdrawals: Option<Vec<Withdrawal>>,
|
||||
parent_beacon_block_root: Option<Hash256>,
|
||||
) -> Result<BlockProposalContents<T::EthSpec, Payload>, BlockProductionError>
|
||||
where
|
||||
T: BeaconChainTypes,
|
||||
@ -536,8 +546,13 @@ where
|
||||
let suggested_fee_recipient = execution_layer
|
||||
.get_suggested_fee_recipient(proposer_index)
|
||||
.await;
|
||||
let payload_attributes =
|
||||
PayloadAttributes::new(timestamp, random, suggested_fee_recipient, withdrawals);
|
||||
let payload_attributes = PayloadAttributes::new(
|
||||
timestamp,
|
||||
random,
|
||||
suggested_fee_recipient,
|
||||
withdrawals,
|
||||
parent_beacon_block_root,
|
||||
);
|
||||
|
||||
// Note: the suggested_fee_recipient is stored in the `execution_layer`, it will add this parameter.
|
||||
//
|
||||
|
@ -1,3 +1,4 @@
|
||||
use crate::data_availability_checker::AvailableBlock;
|
||||
use crate::{errors::BeaconChainError as Error, metrics, BeaconChain, BeaconChainTypes};
|
||||
use itertools::Itertools;
|
||||
use slog::debug;
|
||||
@ -7,10 +8,9 @@ use state_processing::{
|
||||
};
|
||||
use std::borrow::Cow;
|
||||
use std::iter;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use store::{chunked_vector::BlockRoots, AnchorInfo, ChunkWriter, KeyValueStore};
|
||||
use types::{Hash256, SignedBlindedBeaconBlock, Slot};
|
||||
use store::{chunked_vector::BlockRoots, AnchorInfo, BlobInfo, ChunkWriter, KeyValueStore};
|
||||
use types::{Hash256, Slot};
|
||||
|
||||
/// Use a longer timeout on the pubkey cache.
|
||||
///
|
||||
@ -59,27 +59,30 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
/// Return the number of blocks successfully imported.
|
||||
pub fn import_historical_block_batch(
|
||||
&self,
|
||||
blocks: Vec<Arc<SignedBlindedBeaconBlock<T::EthSpec>>>,
|
||||
mut blocks: Vec<AvailableBlock<T::EthSpec>>,
|
||||
) -> Result<usize, Error> {
|
||||
let anchor_info = self
|
||||
.store
|
||||
.get_anchor_info()
|
||||
.ok_or(HistoricalBlockError::NoAnchorInfo)?;
|
||||
let blob_info = self.store.get_blob_info();
|
||||
|
||||
// Take all blocks with slots less than the oldest block slot.
|
||||
let num_relevant =
|
||||
blocks.partition_point(|block| block.slot() < anchor_info.oldest_block_slot);
|
||||
let blocks_to_import = &blocks
|
||||
.get(..num_relevant)
|
||||
.ok_or(HistoricalBlockError::IndexOutOfBounds)?;
|
||||
let num_relevant = blocks.partition_point(|available_block| {
|
||||
available_block.block().slot() < anchor_info.oldest_block_slot
|
||||
});
|
||||
|
||||
if blocks_to_import.len() != blocks.len() {
|
||||
let total_blocks = blocks.len();
|
||||
blocks.truncate(num_relevant);
|
||||
let blocks_to_import = blocks;
|
||||
|
||||
if blocks_to_import.len() != total_blocks {
|
||||
debug!(
|
||||
self.log,
|
||||
"Ignoring some historic blocks";
|
||||
"oldest_block_slot" => anchor_info.oldest_block_slot,
|
||||
"total_blocks" => blocks.len(),
|
||||
"ignored" => blocks.len().saturating_sub(blocks_to_import.len()),
|
||||
"total_blocks" => total_blocks,
|
||||
"ignored" => total_blocks.saturating_sub(blocks_to_import.len()),
|
||||
);
|
||||
}
|
||||
|
||||
@ -87,17 +90,23 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
return Ok(0);
|
||||
}
|
||||
|
||||
let n_blobs_lists_to_import = blocks_to_import
|
||||
.iter()
|
||||
.filter(|available_block| available_block.blobs().is_some())
|
||||
.count();
|
||||
|
||||
let mut expected_block_root = anchor_info.oldest_block_parent;
|
||||
let mut prev_block_slot = anchor_info.oldest_block_slot;
|
||||
let mut chunk_writer =
|
||||
ChunkWriter::<BlockRoots, _, _>::new(&self.store.cold_db, prev_block_slot.as_usize())?;
|
||||
let mut new_oldest_blob_slot = blob_info.oldest_blob_slot;
|
||||
|
||||
let mut cold_batch = Vec::with_capacity(blocks.len());
|
||||
let mut hot_batch = Vec::with_capacity(blocks.len());
|
||||
let mut cold_batch = Vec::with_capacity(blocks_to_import.len());
|
||||
let mut hot_batch = Vec::with_capacity(blocks_to_import.len() + n_blobs_lists_to_import);
|
||||
let mut signed_blocks = Vec::with_capacity(blocks_to_import.len());
|
||||
|
||||
for block in blocks_to_import.iter().rev() {
|
||||
// Check chain integrity.
|
||||
let block_root = block.canonical_root();
|
||||
for available_block in blocks_to_import.into_iter().rev() {
|
||||
let (block_root, block, maybe_blobs) = available_block.deconstruct();
|
||||
|
||||
if block_root != expected_block_root {
|
||||
return Err(HistoricalBlockError::MismatchedBlockRoot {
|
||||
@ -107,9 +116,16 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
.into());
|
||||
}
|
||||
|
||||
let blinded_block = block.clone_as_blinded();
|
||||
// Store block in the hot database without payload.
|
||||
self.store
|
||||
.blinded_block_as_kv_store_ops(&block_root, block, &mut hot_batch);
|
||||
.blinded_block_as_kv_store_ops(&block_root, &blinded_block, &mut hot_batch);
|
||||
// Store the blobs too
|
||||
if let Some(blobs) = maybe_blobs {
|
||||
new_oldest_blob_slot = Some(block.slot());
|
||||
self.store
|
||||
.blobs_as_kv_store_ops(&block_root, blobs, &mut hot_batch);
|
||||
}
|
||||
|
||||
// Store block roots, including at all skip slots in the freezer DB.
|
||||
for slot in (block.slot().as_usize()..prev_block_slot.as_usize()).rev() {
|
||||
@ -132,8 +148,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
expected_block_root = Hash256::zero();
|
||||
break;
|
||||
}
|
||||
signed_blocks.push(block);
|
||||
}
|
||||
chunk_writer.write(&mut cold_batch)?;
|
||||
// these were pushed in reverse order so we reverse again
|
||||
signed_blocks.reverse();
|
||||
|
||||
// Verify signatures in one batch, holding the pubkey cache lock for the shortest duration
|
||||
// possible. For each block fetch the parent root from its successor. Slicing from index 1
|
||||
@ -144,15 +163,16 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
.validator_pubkey_cache
|
||||
.try_read_for(PUBKEY_CACHE_LOCK_TIMEOUT)
|
||||
.ok_or(HistoricalBlockError::ValidatorPubkeyCacheTimeout)?;
|
||||
let block_roots = blocks_to_import
|
||||
let block_roots = signed_blocks
|
||||
.get(1..)
|
||||
.ok_or(HistoricalBlockError::IndexOutOfBounds)?
|
||||
.iter()
|
||||
.map(|block| block.parent_root())
|
||||
.chain(iter::once(anchor_info.oldest_block_parent));
|
||||
let signature_set = blocks_to_import
|
||||
let signature_set = signed_blocks
|
||||
.iter()
|
||||
.zip_eq(block_roots)
|
||||
.filter(|&(_block, block_root)| (block_root != self.genesis_block_root))
|
||||
.map(|(block, block_root)| {
|
||||
block_proposal_signature_set_from_parts(
|
||||
block,
|
||||
@ -183,6 +203,22 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
self.store.hot_db.do_atomically(hot_batch)?;
|
||||
self.store.cold_db.do_atomically(cold_batch)?;
|
||||
|
||||
let mut anchor_and_blob_batch = Vec::with_capacity(2);
|
||||
|
||||
// Update the blob info.
|
||||
if new_oldest_blob_slot != blob_info.oldest_blob_slot {
|
||||
if let Some(oldest_blob_slot) = new_oldest_blob_slot {
|
||||
let new_blob_info = BlobInfo {
|
||||
oldest_blob_slot: Some(oldest_blob_slot),
|
||||
..blob_info.clone()
|
||||
};
|
||||
anchor_and_blob_batch.push(
|
||||
self.store
|
||||
.compare_and_set_blob_info(blob_info, new_blob_info)?,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Update the anchor.
|
||||
let new_anchor = AnchorInfo {
|
||||
oldest_block_slot: prev_block_slot,
|
||||
@ -190,8 +226,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
..anchor_info
|
||||
};
|
||||
let backfill_complete = new_anchor.block_backfill_complete(self.genesis_backfill_slot);
|
||||
self.store
|
||||
.compare_and_set_anchor_info_with_write(Some(anchor_info), Some(new_anchor))?;
|
||||
anchor_and_blob_batch.push(
|
||||
self.store
|
||||
.compare_and_set_anchor_info(Some(anchor_info), Some(new_anchor))?,
|
||||
);
|
||||
self.store.hot_db.do_atomically(anchor_and_blob_batch)?;
|
||||
|
||||
// If backfill has completed and the chain is configured to reconstruct historic states,
|
||||
// send a message to the background migrator instructing it to begin reconstruction.
|
||||
@ -203,6 +242,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
self.store_migrator.process_reconstruction();
|
||||
}
|
||||
|
||||
Ok(blocks_to_import.len())
|
||||
Ok(num_relevant)
|
||||
}
|
||||
}
|
||||
|
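The reworked `import_historical_block_batch` above keeps only the prefix of the batch whose slots precede the anchor's `oldest_block_slot`, via `partition_point` followed by `truncate`. A minimal standalone sketch of that selection step, using plain slot numbers instead of `AvailableBlock`s (the helper name below is illustrative, not part of the change):

/// Keep only the blocks that are strictly older than the anchor's oldest block slot.
/// `slots` must be sorted in ascending order, mirroring the backfill batch above.
fn relevant_prefix(mut slots: Vec<u64>, oldest_block_slot: u64) -> Vec<u64> {
    // `partition_point` returns the index of the first element that fails the predicate.
    let num_relevant = slots.partition_point(|slot| *slot < oldest_block_slot);
    slots.truncate(num_relevant);
    slots
}

fn main() {
    // Blocks at slots 4 and 5 are already imported, so only 1..=3 remain relevant.
    assert_eq!(relevant_prefix(vec![1, 2, 3, 4, 5], 4), vec![1, 2, 3]);
}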
79
beacon_node/beacon_chain/src/kzg_utils.rs
Normal file
@ -0,0 +1,79 @@
use kzg::{Error as KzgError, Kzg, KzgPreset};
use types::{Blob, EthSpec, Hash256, KzgCommitment, KzgProof};

/// Converts a blob ssz List object to an array to be used with the kzg
/// crypto library.
fn ssz_blob_to_crypto_blob<T: EthSpec>(
    blob: &Blob<T>,
) -> Result<<<T as EthSpec>::Kzg as KzgPreset>::Blob, KzgError> {
    T::blob_from_bytes(blob.as_ref())
}

/// Validate a single blob-commitment-proof triplet from a `BlobSidecar`.
pub fn validate_blob<T: EthSpec>(
    kzg: &Kzg<T::Kzg>,
    blob: Blob<T>,
    kzg_commitment: KzgCommitment,
    kzg_proof: KzgProof,
) -> Result<bool, KzgError> {
    kzg.verify_blob_kzg_proof(
        &ssz_blob_to_crypto_blob::<T>(&blob)?,
        kzg_commitment,
        kzg_proof,
    )
}

/// Validate a batch of blob-commitment-proof triplets from multiple `BlobSidecars`.
pub fn validate_blobs<T: EthSpec>(
    kzg: &Kzg<T::Kzg>,
    expected_kzg_commitments: &[KzgCommitment],
    blobs: &[Blob<T>],
    kzg_proofs: &[KzgProof],
) -> Result<bool, KzgError> {
    let blobs = blobs
        .iter()
        .map(|blob| ssz_blob_to_crypto_blob::<T>(blob)) // Avoid this clone
        .collect::<Result<Vec<_>, KzgError>>()?;

    kzg.verify_blob_kzg_proof_batch(&blobs, expected_kzg_commitments, kzg_proofs)
}

/// Compute the kzg proof given an ssz blob and its kzg commitment.
pub fn compute_blob_kzg_proof<T: EthSpec>(
    kzg: &Kzg<T::Kzg>,
    blob: &Blob<T>,
    kzg_commitment: KzgCommitment,
) -> Result<KzgProof, KzgError> {
    // Avoid this blob clone
    kzg.compute_blob_kzg_proof(&ssz_blob_to_crypto_blob::<T>(blob)?, kzg_commitment)
}

/// Compute the kzg commitment for a given blob.
pub fn blob_to_kzg_commitment<T: EthSpec>(
    kzg: &Kzg<T::Kzg>,
    blob: &Blob<T>,
) -> Result<KzgCommitment, KzgError> {
    kzg.blob_to_kzg_commitment(&ssz_blob_to_crypto_blob::<T>(blob)?)
}

/// Compute the kzg proof for a given blob and an evaluation point z.
pub fn compute_kzg_proof<T: EthSpec>(
    kzg: &Kzg<T::Kzg>,
    blob: &Blob<T>,
    z: Hash256,
) -> Result<(KzgProof, Hash256), KzgError> {
    let z = z.0.into();
    kzg.compute_kzg_proof(&ssz_blob_to_crypto_blob::<T>(blob)?, &z)
        .map(|(proof, z)| (proof, Hash256::from_slice(&z.to_vec())))
}

/// Verify a `kzg_proof` for a `kzg_commitment` that evaluating a polynomial at `z` results in `y`
pub fn verify_kzg_proof<T: EthSpec>(
    kzg: &Kzg<T::Kzg>,
    kzg_commitment: KzgCommitment,
    kzg_proof: KzgProof,
    z: Hash256,
    y: Hash256,
) -> Result<bool, KzgError> {
    kzg.verify_kzg_proof(kzg_commitment, &z.0.into(), &y.0.into(), kzg_proof)
}
|
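A hypothetical call site for the helpers in `kzg_utils.rs` above (not part of this diff): batch-verifying every blob in a block against its commitments. It assumes `BlobSidecar` exposes `blob`, `kzg_commitment` and `kzg_proof` fields, as it does elsewhere in this PR, and that the caller already holds a `Kzg` handle loaded from the trusted setup.

// Hypothetical helper, sketched against the functions added above.
use kzg::{Error as KzgError, Kzg};
use types::{BlobSidecar, EthSpec};

pub fn verify_sidecars<T: EthSpec>(
    kzg: &Kzg<T::Kzg>,
    sidecars: &[BlobSidecar<T>],
) -> Result<bool, KzgError> {
    // Commitments and proofs are small fixed-size values; blobs are cloned to match
    // the `&[Blob<T>]` slice expected by `validate_blobs`.
    let commitments: Vec<_> = sidecars.iter().map(|s| s.kzg_commitment).collect();
    let proofs: Vec<_> = sidecars.iter().map(|s| s.kzg_proof).collect();
    let blobs: Vec<_> = sidecars.iter().map(|s| s.blob.clone()).collect();

    crate::kzg_utils::validate_blobs::<T>(kzg, &commitments, &blobs, &proofs)
}

Batch verification amortises the pairing checks across all blobs in a block, which is why `validate_blobs` exists alongside the single-blob `validate_blob`.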
@ -7,13 +7,16 @@ mod beacon_chain;
|
||||
mod beacon_fork_choice_store;
|
||||
pub mod beacon_proposer_cache;
|
||||
mod beacon_snapshot;
|
||||
pub mod blob_verification;
|
||||
pub mod block_reward;
|
||||
mod block_times_cache;
|
||||
mod block_verification;
|
||||
pub mod block_verification_types;
|
||||
pub mod builder;
|
||||
pub mod canonical_head;
|
||||
pub mod capella_readiness;
|
||||
pub mod chain_config;
|
||||
pub mod data_availability_checker;
|
||||
mod early_attester_cache;
|
||||
mod errors;
|
||||
pub mod eth1_chain;
|
||||
@ -24,6 +27,7 @@ pub mod fork_choice_signal;
|
||||
pub mod fork_revert;
|
||||
mod head_tracker;
|
||||
pub mod historical_blocks;
|
||||
pub mod kzg_utils;
|
||||
pub mod light_client_finality_update_verification;
|
||||
pub mod light_client_optimistic_update_verification;
|
||||
pub mod merge_readiness;
|
||||
@ -32,6 +36,7 @@ pub mod migrate;
|
||||
mod naive_aggregation_pool;
|
||||
mod observed_aggregates;
|
||||
mod observed_attesters;
|
||||
mod observed_blob_sidecars;
|
||||
pub mod observed_block_producers;
|
||||
pub mod observed_operations;
|
||||
pub mod otb_verification_service;
|
||||
@ -51,9 +56,10 @@ pub mod validator_monitor;
|
||||
pub mod validator_pubkey_cache;
|
||||
|
||||
pub use self::beacon_chain::{
|
||||
AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult,
|
||||
ForkChoiceError, OverrideForkchoiceUpdate, ProduceBlockVerification, StateSkipConfig,
|
||||
WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON,
|
||||
AttestationProcessingOutcome, AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes,
|
||||
BeaconStore, ChainSegmentResult, ForkChoiceError, OverrideForkchoiceUpdate,
|
||||
ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped,
|
||||
INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON,
|
||||
INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON,
|
||||
};
|
||||
pub use self::beacon_snapshot::BeaconSnapshot;
|
||||
@ -63,15 +69,19 @@ pub use self::historical_blocks::HistoricalBlockError;
|
||||
pub use attestation_verification::Error as AttestationError;
|
||||
pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError};
|
||||
pub use block_verification::{
|
||||
get_block_root, BlockError, ExecutionPayloadError, GossipVerifiedBlock,
|
||||
IntoExecutionPendingBlock, IntoGossipVerifiedBlock,
|
||||
get_block_root, BlockError, ExecutionPayloadError, ExecutionPendingBlock, GossipVerifiedBlock,
|
||||
IntoExecutionPendingBlock, IntoGossipVerifiedBlockContents, PayloadVerificationOutcome,
|
||||
PayloadVerificationStatus,
|
||||
};
|
||||
pub use block_verification_types::AvailabilityPendingExecutedBlock;
|
||||
pub use block_verification_types::ExecutedBlock;
|
||||
pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock};
|
||||
pub use eth1_chain::{Eth1Chain, Eth1ChainBackend};
|
||||
pub use events::ServerSentEventHandler;
|
||||
pub use execution_layer::EngineState;
|
||||
pub use execution_payload::NotifyExecutionLayer;
|
||||
pub use fork_choice::{ExecutionStatus, ForkchoiceUpdateParameters};
|
||||
pub use kzg::TrustedSetup;
|
||||
pub use metrics::scrape_for_metrics;
|
||||
pub use migrate::MigratorConfig;
|
||||
pub use parking_lot;
|
||||
|
@ -40,6 +40,10 @@ lazy_static! {
|
||||
"beacon_block_processing_block_root_seconds",
|
||||
"Time spent calculating the block root when processing a block."
|
||||
);
|
||||
pub static ref BLOCK_PROCESSING_BLOB_ROOT: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_processing_blob_root_seconds",
|
||||
"Time spent calculating the blob root when processing a block."
|
||||
);
|
||||
pub static ref BLOCK_PROCESSING_DB_READ: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_processing_db_read_seconds",
|
||||
"Time spent loading block and state from DB for block processing"
|
||||
@ -282,6 +286,11 @@ lazy_static! {
|
||||
"Count of times the early attester cache returns an attestation"
|
||||
);
|
||||
|
||||
}
|
||||
|
||||
// Second lazy-static block is used to account for macro recursion limit.
|
||||
lazy_static! {
|
||||
|
||||
/*
|
||||
* Attestation Production
|
||||
*/
|
||||
@ -301,10 +310,7 @@ lazy_static! {
|
||||
"attestation_production_cache_prime_seconds",
|
||||
"Time spent loading a new state from the disk due to a cache miss"
|
||||
);
|
||||
}
|
||||
|
||||
// Second lazy-static block is used to account for macro recursion limit.
|
||||
lazy_static! {
|
||||
/*
|
||||
* Fork Choice
|
||||
*/
|
||||
@ -380,6 +386,8 @@ lazy_static! {
|
||||
try_create_histogram("beacon_persist_eth1_cache", "Time taken to persist the eth1 caches");
|
||||
pub static ref PERSIST_FORK_CHOICE: Result<Histogram> =
|
||||
try_create_histogram("beacon_persist_fork_choice", "Time taken to persist the fork choice struct");
|
||||
pub static ref PERSIST_DATA_AVAILABILITY_CHECKER: Result<Histogram> =
|
||||
try_create_histogram("beacon_persist_data_availability_checker", "Time taken to persist the data availability checker");
|
||||
|
||||
/*
|
||||
* Eth1
|
||||
@ -980,6 +988,22 @@ lazy_static! {
|
||||
"beacon_pre_finalization_block_lookup_count",
|
||||
"Number of block roots subject to single block lookups"
|
||||
);
|
||||
|
||||
/*
|
||||
* Blob sidecar Verification
|
||||
*/
|
||||
pub static ref BLOBS_SIDECAR_PROCESSING_REQUESTS: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_blobs_sidecar_processing_requests_total",
|
||||
"Count of all blob sidecars submitted for processing"
|
||||
);
|
||||
pub static ref BLOBS_SIDECAR_PROCESSING_SUCCESSES: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_blobs_sidecar_processing_successes_total",
|
||||
"Number of blob sidecars verified for gossip"
|
||||
);
|
||||
pub static ref BLOBS_SIDECAR_GOSSIP_VERIFICATION_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_blobs_sidecar_gossip_verification_seconds",
|
||||
"Full runtime of blob sidecars gossip verification"
|
||||
);
|
||||
}
|
||||
|
||||
// Fifth lazy-static block is used to account for macro recursion limit.
|
||||
@ -1009,6 +1033,28 @@ lazy_static! {
|
||||
"beacon_aggregated_attestation_subsets_total",
|
||||
"Count of new aggregated attestations that are subsets of already known aggregates"
|
||||
);
|
||||
|
||||
/*
|
||||
* Kzg related metrics
|
||||
*/
|
||||
pub static ref KZG_VERIFICATION_SINGLE_TIMES: Result<Histogram> =
|
||||
try_create_histogram("kzg_verification_single_seconds", "Runtime of single kzg verification");
|
||||
pub static ref KZG_VERIFICATION_BATCH_TIMES: Result<Histogram> =
|
||||
try_create_histogram("kzg_verification_batch_seconds", "Runtime of batched kzg verification");
|
||||
|
||||
pub static ref BLOCK_PRODUCTION_BLOBS_VERIFICATION_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_production_blobs_verification_seconds",
|
||||
"Time taken to verify blobs against commitments and creating BlobSidecar objects in block production"
|
||||
);
|
||||
/*
|
||||
* Availability related metrics
|
||||
*/
|
||||
pub static ref BLOCK_AVAILABILITY_DELAY: Result<Histogram> = try_create_histogram_with_buckets(
|
||||
"block_availability_delay",
|
||||
"Duration between start of the slot and the time at which all components of the block are available.",
|
||||
// Create a custom bucket list for greater granularity in block delay
|
||||
Ok(vec![0.1, 0.2, 0.3,0.4,0.5,0.75,1.0,1.25,1.5,1.75,2.0,2.5,3.0,3.5,4.0,5.0,6.0,7.0,8.0,9.0,10.0,15.0,20.0])
|
||||
);
|
||||
}
|
||||
|
||||
/// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot,
|
||||
|
@ -117,6 +117,7 @@ pub enum PruningError {
|
||||
pub enum Notification {
|
||||
Finalization(FinalizationNotification),
|
||||
Reconstruction,
|
||||
PruneBlobs(Epoch),
|
||||
}
|
||||
|
||||
pub struct FinalizationNotification {
|
||||
@ -191,6 +192,14 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
|
||||
}
|
||||
}
|
||||
|
||||
pub fn process_prune_blobs(&self, data_availability_boundary: Epoch) {
|
||||
if let Some(Notification::PruneBlobs(data_availability_boundary)) =
|
||||
self.send_background_notification(Notification::PruneBlobs(data_availability_boundary))
|
||||
{
|
||||
Self::run_prune_blobs(self.db.clone(), data_availability_boundary, &self.log);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn run_reconstruction(db: Arc<HotColdDB<E, Hot, Cold>>, log: &Logger) {
|
||||
if let Err(e) = db.reconstruct_historic_states() {
|
||||
error!(
|
||||
@ -201,6 +210,20 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
|
||||
}
|
||||
}
|
||||
|
||||
pub fn run_prune_blobs(
|
||||
db: Arc<HotColdDB<E, Hot, Cold>>,
|
||||
data_availability_boundary: Epoch,
|
||||
log: &Logger,
|
||||
) {
|
||||
if let Err(e) = db.try_prune_blobs(false, data_availability_boundary) {
|
||||
error!(
|
||||
log,
|
||||
"Blob pruning failed";
|
||||
"error" => ?e,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// If configured to run in the background, send `notif` to the background thread.
|
||||
///
|
||||
/// Return `None` if the message was sent to the background thread, `Some(notif)` otherwise.
|
||||
@ -367,29 +390,44 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
|
||||
let (tx, rx) = mpsc::channel();
|
||||
let thread = thread::spawn(move || {
|
||||
while let Ok(notif) = rx.recv() {
|
||||
// Read the rest of the messages in the channel, preferring any reconstruction
|
||||
// notification, or the finalization notification with the greatest finalized epoch.
|
||||
let notif =
|
||||
rx.try_iter()
|
||||
.fold(notif, |best, other: Notification| match (&best, &other) {
|
||||
(Notification::Reconstruction, _)
|
||||
| (_, Notification::Reconstruction) => Notification::Reconstruction,
|
||||
(
|
||||
Notification::Finalization(fin1),
|
||||
Notification::Finalization(fin2),
|
||||
) => {
|
||||
if fin2.finalized_checkpoint.epoch > fin1.finalized_checkpoint.epoch
|
||||
{
|
||||
other
|
||||
} else {
|
||||
best
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
let mut reconstruction_notif = None;
|
||||
let mut finalization_notif = None;
|
||||
let mut prune_blobs_notif = None;
|
||||
match notif {
|
||||
Notification::Reconstruction => Self::run_reconstruction(db.clone(), &log),
|
||||
Notification::Finalization(fin) => Self::run_migration(db.clone(), fin, &log),
|
||||
Notification::Reconstruction => reconstruction_notif = Some(notif),
|
||||
Notification::Finalization(fin) => finalization_notif = Some(fin),
|
||||
Notification::PruneBlobs(dab) => prune_blobs_notif = Some(dab),
|
||||
}
|
||||
// Read the rest of the messages in the channel, taking the best of each type.
|
||||
for notif in rx.try_iter() {
|
||||
match notif {
|
||||
Notification::Reconstruction => reconstruction_notif = Some(notif),
|
||||
Notification::Finalization(fin) => {
|
||||
if let Some(current) = finalization_notif.as_mut() {
|
||||
if fin.finalized_checkpoint.epoch
|
||||
> current.finalized_checkpoint.epoch
|
||||
{
|
||||
*current = fin;
|
||||
}
|
||||
} else {
|
||||
finalization_notif = Some(fin);
|
||||
}
|
||||
}
|
||||
Notification::PruneBlobs(dab) => {
|
||||
prune_blobs_notif = std::cmp::max(prune_blobs_notif, Some(dab));
|
||||
}
|
||||
}
|
||||
}
|
||||
// If reconstruction is on-going, ignore finalization migration and blob pruning.
|
||||
if reconstruction_notif.is_some() {
|
||||
Self::run_reconstruction(db.clone(), &log);
|
||||
} else {
|
||||
if let Some(fin) = finalization_notif {
|
||||
Self::run_migration(db.clone(), fin, &log);
|
||||
}
|
||||
if let Some(dab) = prune_blobs_notif {
|
||||
Self::run_prune_blobs(db.clone(), dab, &log);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
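The background thread above now blocks on the first notification and then drains the channel, keeping at most one unit of work per notification type: any reconstruction request wins outright, and otherwise the finalization notification with the highest epoch and the highest blob-pruning epoch are kept. A reduced sketch of that drain-and-coalesce pattern over a plain `mpsc` channel (names are illustrative):

use std::sync::mpsc;

/// Block on the first epoch notification, then drain whatever else is queued and
/// keep only the greatest epoch, mirroring the `prune_blobs_notif` handling above.
fn next_epoch_to_process(rx: &mpsc::Receiver<u64>) -> Option<u64> {
    let first = rx.recv().ok()?;
    Some(rx.try_iter().fold(first, std::cmp::max))
}

fn main() {
    let (tx, rx) = mpsc::channel();
    for epoch in [3u64, 7, 5] {
        tx.send(epoch).unwrap();
    }
    // Three queued notifications collapse into a single unit of work.
    assert_eq!(next_epoch_to_process(&rx), Some(7));
}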
|
||||
@ -630,13 +668,14 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
|
||||
head_tracker_lock.remove(&head_hash);
|
||||
}
|
||||
|
||||
let batch: Vec<StoreOp<E>> = abandoned_blocks
|
||||
let mut batch: Vec<StoreOp<E>> = abandoned_blocks
|
||||
.into_iter()
|
||||
.map(Into::into)
|
||||
.flat_map(|block_root: Hash256| {
|
||||
[
|
||||
StoreOp::DeleteBlock(block_root),
|
||||
StoreOp::DeleteExecutionPayload(block_root),
|
||||
StoreOp::DeleteBlobs(block_root),
|
||||
]
|
||||
})
|
||||
.chain(
|
||||
@ -646,8 +685,6 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
|
||||
)
|
||||
.collect();
|
||||
|
||||
let mut kv_batch = store.convert_to_kv_batch(batch)?;
|
||||
|
||||
// Persist the head in case the process is killed or crashes here. This prevents
|
||||
// the head tracker reverting after our mutation above.
|
||||
let persisted_head = PersistedBeaconChain {
|
||||
@ -656,12 +693,16 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
|
||||
ssz_head_tracker: SszHeadTracker::from_map(&head_tracker_lock),
|
||||
};
|
||||
drop(head_tracker_lock);
|
||||
kv_batch.push(persisted_head.as_kv_store_op(BEACON_CHAIN_DB_KEY));
|
||||
batch.push(StoreOp::KeyValueOp(
|
||||
persisted_head.as_kv_store_op(BEACON_CHAIN_DB_KEY),
|
||||
));
|
||||
|
||||
// Persist the new finalized checkpoint as the pruning checkpoint.
|
||||
kv_batch.push(store.pruning_checkpoint_store_op(new_finalized_checkpoint));
|
||||
batch.push(StoreOp::KeyValueOp(
|
||||
store.pruning_checkpoint_store_op(new_finalized_checkpoint),
|
||||
));
|
||||
|
||||
store.hot_db.do_atomically(kv_batch)?;
|
||||
store.do_atomically_with_block_and_blobs_cache(batch)?;
|
||||
debug!(log, "Database pruning complete");
|
||||
|
||||
Ok(PruningOutcome::Successful {
|
||||
|
389
beacon_node/beacon_chain/src/observed_blob_sidecars.rs
Normal file
@ -0,0 +1,389 @@
//! Provides the `ObservedBlobSidecars` struct which allows for rejecting `BlobSidecar`s
//! that we have already seen over the gossip network.
//! Only `BlobSidecar`s that have completed proposer signature verification can be added
//! to this cache to reduce DoS risks.

use std::collections::{HashMap, HashSet};
use std::marker::PhantomData;
use std::sync::Arc;
use types::{BlobSidecar, EthSpec, Hash256, Slot};

#[derive(Debug, PartialEq)]
pub enum Error {
    /// The slot of the provided `BlobSidecar` is prior to finalization and should not have been provided
    /// to this function. This is an internal error.
    FinalizedBlob { slot: Slot, finalized_slot: Slot },
    /// The blob sidecar contains an invalid blob index, the blob sidecar is invalid.
    /// Note: The invalid blob should have been caught and flagged as an error well before reaching
    /// here.
    InvalidBlobIndex(u64),
}

/// Maintains a cache of seen `BlobSidecar`s that are received over gossip
/// and have been gossip verified.
///
/// The cache supports pruning based upon the finalized epoch. It does not prune automatically; you
/// must call `Self::prune` manually.
///
/// Note: To prevent DoS attacks, this cache must include only items that have received some DoS resistance,
/// like checking the proposer signature.
pub struct ObservedBlobSidecars<T: EthSpec> {
    finalized_slot: Slot,
    /// Stores all received blob indices for a given `(Root, Slot)` tuple.
    items: HashMap<(Hash256, Slot), HashSet<u64>>,
    _phantom: PhantomData<T>,
}

impl<E: EthSpec> Default for ObservedBlobSidecars<E> {
    /// Instantiates `Self` with `finalized_slot == 0`.
    fn default() -> Self {
        Self {
            finalized_slot: Slot::new(0),
            items: HashMap::new(),
            _phantom: PhantomData,
        }
    }
}

impl<T: EthSpec> ObservedBlobSidecars<T> {
    /// Observe the `blob_sidecar` at (`blob_sidecar.block_root, blob_sidecar.slot`).
    /// This will update `self` so future calls to it indicate that this `blob_sidecar` is known.
    ///
    /// The supplied `blob_sidecar` **MUST** have completed proposer signature verification.
    pub fn observe_sidecar(&mut self, blob_sidecar: &Arc<BlobSidecar<T>>) -> Result<bool, Error> {
        self.sanitize_blob_sidecar(blob_sidecar)?;

        let did_not_exist = self
            .items
            .entry((blob_sidecar.block_root, blob_sidecar.slot))
            .or_insert_with(|| HashSet::with_capacity(T::max_blobs_per_block()))
            .insert(blob_sidecar.index);

        Ok(!did_not_exist)
    }

    /// Returns `true` if the `blob_sidecar` has already been observed in the cache within the prune window.
    pub fn is_known(&self, blob_sidecar: &Arc<BlobSidecar<T>>) -> Result<bool, Error> {
        self.sanitize_blob_sidecar(blob_sidecar)?;
        let is_known = self
            .items
            .get(&(blob_sidecar.block_root, blob_sidecar.slot))
            .map_or(false, |set| set.contains(&blob_sidecar.index));
        Ok(is_known)
    }

    fn sanitize_blob_sidecar(&self, blob_sidecar: &Arc<BlobSidecar<T>>) -> Result<(), Error> {
        if blob_sidecar.index >= T::max_blobs_per_block() as u64 {
            return Err(Error::InvalidBlobIndex(blob_sidecar.index));
        }
        let finalized_slot = self.finalized_slot;
        if finalized_slot > 0 && blob_sidecar.slot <= finalized_slot {
            return Err(Error::FinalizedBlob {
                slot: blob_sidecar.slot,
                finalized_slot,
            });
        }

        Ok(())
    }

    /// Prune all values earlier than the given slot.
    pub fn prune(&mut self, finalized_slot: Slot) {
        if finalized_slot == 0 {
            return;
        }

        self.finalized_slot = finalized_slot;
        self.items.retain(|k, _| k.1 > finalized_slot);
    }
}
||||
|
||||
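For reference, a compact in-module sketch of how the cache above is meant to be driven from gossip handling; the surrounding verification wiring is assumed rather than taken from this diff, and the unit tests that follow exercise the same API in more detail:

use std::sync::Arc;
use types::{BlobSidecar, MainnetEthSpec, Slot};

fn track_sidecar(
    cache: &mut ObservedBlobSidecars<MainnetEthSpec>,
    sidecar: &Arc<BlobSidecar<MainnetEthSpec>>,
) -> Result<(), Error> {
    // Reject duplicates before doing any further (more expensive) processing.
    if cache.is_known(sidecar)? {
        return Ok(());
    }
    // Only record the sidecar once its proposer signature has been verified.
    let seen_before = cache.observe_sidecar(sidecar)?;
    debug_assert!(!seen_before);
    Ok(())
}

// On finalization, drop everything at or before the finalized slot.
fn on_finalized(cache: &mut ObservedBlobSidecars<MainnetEthSpec>, finalized_slot: Slot) {
    cache.prune(finalized_slot);
}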
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use types::{BlobSidecar, Hash256, MainnetEthSpec};
|
||||
|
||||
type E = MainnetEthSpec;
|
||||
|
||||
fn get_blob_sidecar(slot: u64, block_root: Hash256, index: u64) -> Arc<BlobSidecar<E>> {
|
||||
let mut blob_sidecar = BlobSidecar::empty();
|
||||
blob_sidecar.block_root = block_root;
|
||||
blob_sidecar.slot = slot.into();
|
||||
blob_sidecar.index = index;
|
||||
Arc::new(blob_sidecar)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn pruning() {
|
||||
let mut cache = ObservedBlobSidecars::default();
|
||||
|
||||
assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
|
||||
assert_eq!(cache.items.len(), 0, "no slots should be present");
|
||||
|
||||
// Slot 0, index 0
|
||||
let block_root_a = Hash256::random();
|
||||
let sidecar_a = get_blob_sidecar(0, block_root_a, 0);
|
||||
|
||||
assert_eq!(
|
||||
cache.observe_sidecar(&sidecar_a),
|
||||
Ok(false),
|
||||
"can observe proposer, indicates proposer unobserved"
|
||||
);
|
||||
|
||||
/*
|
||||
* Preconditions.
|
||||
*/
|
||||
|
||||
assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
|
||||
assert_eq!(
|
||||
cache.items.len(),
|
||||
1,
|
||||
"only one (slot, root) tuple should be present"
|
||||
);
|
||||
assert_eq!(
|
||||
cache
|
||||
.items
|
||||
.get(&(block_root_a, Slot::new(0)))
|
||||
.expect("slot zero should be present")
|
||||
.len(),
|
||||
1,
|
||||
"only one item should be present"
|
||||
);
|
||||
|
||||
/*
|
||||
* Check that a prune at the genesis slot does nothing.
|
||||
*/
|
||||
|
||||
cache.prune(Slot::new(0));
|
||||
|
||||
assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
|
||||
assert_eq!(cache.items.len(), 1, "only one slot should be present");
|
||||
assert_eq!(
|
||||
cache
|
||||
.items
|
||||
.get(&(block_root_a, Slot::new(0)))
|
||||
.expect("slot zero should be present")
|
||||
.len(),
|
||||
1,
|
||||
"only one item should be present"
|
||||
);
|
||||
|
||||
/*
|
||||
* Check that a prune empties the cache
|
||||
*/
|
||||
|
||||
cache.prune(E::slots_per_epoch().into());
|
||||
assert_eq!(
|
||||
cache.finalized_slot,
|
||||
Slot::from(E::slots_per_epoch()),
|
||||
"finalized slot is updated"
|
||||
);
|
||||
assert_eq!(cache.items.len(), 0, "no items left");
|
||||
|
||||
/*
|
||||
* Check that we can't insert a finalized sidecar
|
||||
*/
|
||||
|
||||
// First slot of finalized epoch
|
||||
let block_b = get_blob_sidecar(E::slots_per_epoch(), Hash256::random(), 0);
|
||||
|
||||
assert_eq!(
|
||||
cache.observe_sidecar(&block_b),
|
||||
Err(Error::FinalizedBlob {
|
||||
slot: E::slots_per_epoch().into(),
|
||||
finalized_slot: E::slots_per_epoch().into(),
|
||||
}),
|
||||
"cant insert finalized sidecar"
|
||||
);
|
||||
|
||||
assert_eq!(cache.items.len(), 0, "sidecar was not added");
|
||||
|
||||
/*
|
||||
* Check that we _can_ insert a non-finalized block
|
||||
*/
|
||||
|
||||
let three_epochs = E::slots_per_epoch() * 3;
|
||||
|
||||
// First slot of finalized epoch
|
||||
let block_root_b = Hash256::random();
|
||||
let block_b = get_blob_sidecar(three_epochs, block_root_b, 0);
|
||||
|
||||
assert_eq!(
|
||||
cache.observe_sidecar(&block_b),
|
||||
Ok(false),
|
||||
"can insert non-finalized block"
|
||||
);
|
||||
|
||||
assert_eq!(cache.items.len(), 1, "only one slot should be present");
|
||||
assert_eq!(
|
||||
cache
|
||||
.items
|
||||
.get(&(block_root_b, Slot::new(three_epochs)))
|
||||
.expect("the three epochs slot should be present")
|
||||
.len(),
|
||||
1,
|
||||
"only one proposer should be present"
|
||||
);
|
||||
|
||||
/*
|
||||
* Check that a prune doesn't wipe later blocks
|
||||
*/
|
||||
|
||||
let two_epochs = E::slots_per_epoch() * 2;
|
||||
cache.prune(two_epochs.into());
|
||||
|
||||
assert_eq!(
|
||||
cache.finalized_slot,
|
||||
Slot::from(two_epochs),
|
||||
"finalized slot is updated"
|
||||
);
|
||||
|
||||
assert_eq!(cache.items.len(), 1, "only one slot should be present");
|
||||
assert_eq!(
|
||||
cache
|
||||
.items
|
||||
.get(&(block_root_b, Slot::new(three_epochs)))
|
||||
.expect("the three epochs slot should be present")
|
||||
.len(),
|
||||
1,
|
||||
"only one proposer should be present"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn simple_observations() {
|
||||
let mut cache = ObservedBlobSidecars::default();
|
||||
|
||||
// Slot 0, index 0
|
||||
let block_root_a = Hash256::random();
|
||||
let sidecar_a = get_blob_sidecar(0, block_root_a, 0);
|
||||
|
||||
assert_eq!(
|
||||
cache.is_known(&sidecar_a),
|
||||
Ok(false),
|
||||
"no observation in empty cache"
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
cache.observe_sidecar(&sidecar_a),
|
||||
Ok(false),
|
||||
"can observe proposer, indicates proposer unobserved"
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
cache.is_known(&sidecar_a),
|
||||
Ok(true),
|
||||
"observed block is indicated as true"
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
cache.observe_sidecar(&sidecar_a),
|
||||
Ok(true),
|
||||
"observing again indicates true"
|
||||
);
|
||||
|
||||
assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
|
||||
assert_eq!(cache.items.len(), 1, "only one slot should be present");
|
||||
assert_eq!(
|
||||
cache
|
||||
.items
|
||||
.get(&(block_root_a, Slot::new(0)))
|
||||
.expect("slot zero should be present")
|
||||
.len(),
|
||||
1,
|
||||
"only one proposer should be present"
|
||||
);
|
||||
|
||||
// Slot 1, proposer 0
|
||||
|
||||
let block_root_b = Hash256::random();
|
||||
let sidecar_b = get_blob_sidecar(1, block_root_b, 0);
|
||||
|
||||
assert_eq!(
|
||||
cache.is_known(&sidecar_b),
|
||||
Ok(false),
|
||||
"no observation for new slot"
|
||||
);
|
||||
assert_eq!(
|
||||
cache.observe_sidecar(&sidecar_b),
|
||||
Ok(false),
|
||||
"can observe proposer for new slot, indicates proposer unobserved"
|
||||
);
|
||||
assert_eq!(
|
||||
cache.is_known(&sidecar_b),
|
||||
Ok(true),
|
||||
"observed block in slot 1 is indicated as true"
|
||||
);
|
||||
assert_eq!(
|
||||
cache.observe_sidecar(&sidecar_b),
|
||||
Ok(true),
|
||||
"observing slot 1 again indicates true"
|
||||
);
|
||||
|
||||
assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
|
||||
assert_eq!(cache.items.len(), 2, "two slots should be present");
|
||||
assert_eq!(
|
||||
cache
|
||||
.items
|
||||
.get(&(block_root_a, Slot::new(0)))
|
||||
.expect("slot zero should be present")
|
||||
.len(),
|
||||
1,
|
||||
"only one proposer should be present in slot 0"
|
||||
);
|
||||
assert_eq!(
|
||||
cache
|
||||
.items
|
||||
.get(&(block_root_b, Slot::new(1)))
|
||||
.expect("slot zero should be present")
|
||||
.len(),
|
||||
1,
|
||||
"only one proposer should be present in slot 1"
|
||||
);
|
||||
|
||||
// Slot 0, index 1
|
||||
let sidecar_c = get_blob_sidecar(0, block_root_a, 1);
|
||||
|
||||
assert_eq!(
|
||||
cache.is_known(&sidecar_c),
|
||||
Ok(false),
|
||||
"no observation for new index"
|
||||
);
|
||||
assert_eq!(
|
||||
cache.observe_sidecar(&sidecar_c),
|
||||
Ok(false),
|
||||
"can observe new index, indicates sidecar unobserved for new index"
|
||||
);
|
||||
assert_eq!(
|
||||
cache.is_known(&sidecar_c),
|
||||
Ok(true),
|
||||
"observed new sidecar is indicated as true"
|
||||
);
|
||||
assert_eq!(
|
||||
cache.observe_sidecar(&sidecar_c),
|
||||
Ok(true),
|
||||
"observing new sidecar again indicates true"
|
||||
);
|
||||
|
||||
assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
|
||||
assert_eq!(cache.items.len(), 2, "two slots should be present");
|
||||
assert_eq!(
|
||||
cache
|
||||
.items
|
||||
.get(&(block_root_a, Slot::new(0)))
|
||||
.expect("slot zero should be present")
|
||||
.len(),
|
||||
2,
|
||||
"two blob indices should be present in slot 0"
|
||||
);
|
||||
|
||||
// Try adding an out of bounds index
|
||||
let invalid_index = E::max_blobs_per_block() as u64;
|
||||
let sidecar_d = get_blob_sidecar(0, block_root_a, invalid_index);
|
||||
assert_eq!(
|
||||
cache.observe_sidecar(&sidecar_d),
|
||||
Err(Error::InvalidBlobIndex(invalid_index)),
|
||||
"cannot add an index > MaxBlobsPerBlock"
|
||||
);
|
||||
}
|
||||
}
|
@ -5,6 +5,7 @@ mod migration_schema_v14;
|
||||
mod migration_schema_v15;
|
||||
mod migration_schema_v16;
|
||||
mod migration_schema_v17;
|
||||
mod migration_schema_v18;
|
||||
|
||||
use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY};
|
||||
use crate::eth1_chain::SszEth1;
|
||||
@ -150,6 +151,14 @@ pub fn migrate_schema<T: BeaconChainTypes>(
|
||||
let ops = migration_schema_v17::downgrade_from_v17::<T>(db.clone(), log)?;
|
||||
db.store_schema_version_atomically(to, ops)
|
||||
}
|
||||
(SchemaVersion(17), SchemaVersion(18)) => {
|
||||
let ops = migration_schema_v18::upgrade_to_v18::<T>(db.clone(), log)?;
|
||||
db.store_schema_version_atomically(to, ops)
|
||||
}
|
||||
(SchemaVersion(18), SchemaVersion(17)) => {
|
||||
let ops = migration_schema_v18::downgrade_from_v18::<T>(db.clone(), log)?;
|
||||
db.store_schema_version_atomically(to, ops)
|
||||
}
|
||||
// Anything else is an error.
|
||||
(_, _) => Err(HotColdDBError::UnsupportedSchemaVersion {
|
||||
target_version: to,
|
||||
|
@ -0,0 +1,120 @@
|
||||
use crate::beacon_chain::BeaconChainTypes;
|
||||
use slog::{error, info, warn, Logger};
|
||||
use slot_clock::SlotClock;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use store::{
|
||||
get_key_for_col, metadata::BLOB_INFO_KEY, DBColumn, Error, HotColdDB, KeyValueStoreOp,
|
||||
};
|
||||
use types::{Epoch, EthSpec, Hash256, Slot};
|
||||
|
||||
/// The slot clock isn't usually available before the database is initialized, so we construct a
|
||||
/// temporary slot clock by reading the genesis state. It should always exist if the database is
|
||||
/// initialized at a prior schema version, however we still handle the lack of genesis state
|
||||
/// gracefully.
|
||||
fn get_slot_clock<T: BeaconChainTypes>(
|
||||
db: &HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>,
|
||||
log: &Logger,
|
||||
) -> Result<Option<T::SlotClock>, Error> {
|
||||
let spec = db.get_chain_spec();
|
||||
let genesis_block = if let Some(block) = db.get_blinded_block(&Hash256::zero())? {
|
||||
block
|
||||
} else {
|
||||
error!(log, "Missing genesis block");
|
||||
return Ok(None);
|
||||
};
|
||||
let genesis_state =
|
||||
if let Some(state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? {
|
||||
state
|
||||
} else {
|
||||
error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root());
|
||||
return Ok(None);
|
||||
};
|
||||
Ok(Some(T::SlotClock::new(
|
||||
spec.genesis_slot,
|
||||
Duration::from_secs(genesis_state.genesis_time()),
|
||||
Duration::from_secs(spec.seconds_per_slot),
|
||||
)))
|
||||
}
|
||||
|
||||
fn get_current_epoch<T: BeaconChainTypes>(
|
||||
db: &Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
|
||||
log: &Logger,
|
||||
) -> Result<Epoch, Error> {
|
||||
get_slot_clock::<T>(db, log)?
|
||||
.and_then(|clock| clock.now())
|
||||
.map(|slot| slot.epoch(T::EthSpec::slots_per_epoch()))
|
||||
.ok_or(Error::SlotClockUnavailableForMigration)
|
||||
}
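`get_current_epoch` just converts the wall-clock slot into an epoch, which is plain integer division by `slots_per_epoch` (32 on mainnet); a minimal check:

/// Epoch containing a given slot: floor division by the epoch length.
fn epoch_of(slot: u64, slots_per_epoch: u64) -> u64 {
    slot / slots_per_epoch
}

fn main() {
    // Slot 100 falls in epoch 3 on mainnet (epochs 0..=2 cover slots 0..=95).
    assert_eq!(epoch_of(100, 32), 3);
}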
|
||||
|
||||
pub fn upgrade_to_v18<T: BeaconChainTypes>(
|
||||
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
|
||||
log: Logger,
|
||||
) -> Result<Vec<KeyValueStoreOp>, Error> {
|
||||
// No-op, even if Deneb has already occurred. The database is probably borked in this case, but
|
||||
// *maybe* the fork recovery will revert the minority fork and succeed.
|
||||
if let Some(deneb_fork_epoch) = db.get_chain_spec().deneb_fork_epoch {
|
||||
let current_epoch = get_current_epoch::<T>(&db, &log)?;
|
||||
if current_epoch >= deneb_fork_epoch {
|
||||
warn!(
|
||||
log,
|
||||
"Attempting upgrade to v18 schema";
|
||||
"info" => "this may not work as Deneb has already been activated"
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
log,
|
||||
"Upgrading to v18 schema";
|
||||
"info" => "ready for Deneb",
|
||||
"epochs_until_deneb" => deneb_fork_epoch - current_epoch
|
||||
);
|
||||
}
|
||||
} else {
|
||||
info!(
|
||||
log,
|
||||
"Upgrading to v18 schema";
|
||||
"info" => "ready for Deneb once it is scheduled"
|
||||
);
|
||||
}
|
||||
Ok(vec![])
|
||||
}
|
||||
|
||||
pub fn downgrade_from_v18<T: BeaconChainTypes>(
|
||||
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
|
||||
log: Logger,
|
||||
) -> Result<Vec<KeyValueStoreOp>, Error> {
|
||||
// We cannot downgrade from V18 once the Deneb fork has been activated, because there will
|
||||
// be blobs and blob metadata in the database that aren't understood by the V17 schema.
|
||||
if let Some(deneb_fork_epoch) = db.get_chain_spec().deneb_fork_epoch {
|
||||
let current_epoch = get_current_epoch::<T>(&db, &log)?;
|
||||
if current_epoch >= deneb_fork_epoch {
|
||||
error!(
|
||||
log,
|
||||
"Deneb already active: v18+ is mandatory";
|
||||
"current_epoch" => current_epoch,
|
||||
"deneb_fork_epoch" => deneb_fork_epoch,
|
||||
);
|
||||
return Err(Error::UnableToDowngrade);
|
||||
} else {
|
||||
info!(
|
||||
log,
|
||||
"Downgrading to v17 schema";
|
||||
"info" => "you will need to upgrade before Deneb",
|
||||
"epochs_until_deneb" => deneb_fork_epoch - current_epoch
|
||||
);
|
||||
}
|
||||
} else {
|
||||
info!(
|
||||
log,
|
||||
"Downgrading to v17 schema";
|
||||
"info" => "you need to upgrade before Deneb",
|
||||
);
|
||||
}
|
||||
|
||||
let ops = vec![KeyValueStoreOp::DeleteKey(get_key_for_col(
|
||||
DBColumn::BeaconMeta.into(),
|
||||
BLOB_INFO_KEY.as_bytes(),
|
||||
))];
|
||||
|
||||
Ok(ops)
|
||||
}
|
@ -1,3 +1,4 @@
|
||||
use crate::block_verification_types::{AsBlock, RpcBlock};
|
||||
use crate::observed_operations::ObservationOutcome;
|
||||
pub use crate::persisted_beacon_chain::PersistedBeaconChain;
|
||||
pub use crate::{
|
||||
@ -14,6 +15,8 @@ use crate::{
|
||||
StateSkipConfig,
|
||||
};
|
||||
use bls::get_withdrawal_credentials;
|
||||
use eth2::types::SignedBlockContentsTuple;
|
||||
use execution_layer::test_utils::generate_genesis_header;
|
||||
use execution_layer::{
|
||||
auth::JwtKey,
|
||||
test_utils::{
|
||||
@ -25,16 +28,19 @@ use execution_layer::{
|
||||
use futures::channel::mpsc::Receiver;
|
||||
pub use genesis::{interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH};
|
||||
use int_to_bytes::int_to_bytes32;
|
||||
use kzg::{Kzg, TrustedSetup};
|
||||
use merkle_proof::MerkleTree;
|
||||
use operation_pool::ReceivedPreCapella;
|
||||
use parking_lot::Mutex;
|
||||
use parking_lot::RwLockWriteGuard;
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
use rand::rngs::StdRng;
|
||||
use rand::Rng;
|
||||
use rand::SeedableRng;
|
||||
use rayon::prelude::*;
|
||||
use sensitive_url::SensitiveUrl;
|
||||
use slog::Logger;
|
||||
use slog::{o, Drain, Logger};
|
||||
use slog_async::Async;
|
||||
use slog_term::{FullFormat, TermDecorator};
|
||||
use slot_clock::{SlotClock, TestingSlotClock};
|
||||
use state_processing::per_block_processing::compute_timestamp_at_slot;
|
||||
use state_processing::{
|
||||
@ -44,20 +50,23 @@ use state_processing::{
|
||||
use std::borrow::Cow;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::fmt;
|
||||
use std::marker::PhantomData;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use store::{config::StoreConfig, HotColdDB, ItemStore, LevelDB, MemoryStore};
|
||||
use task_executor::TaskExecutor;
|
||||
use task_executor::{test_utils::TestRuntime, ShutdownReason};
|
||||
use tree_hash::TreeHash;
|
||||
use types::sync_selection_proof::SyncSelectionProof;
|
||||
pub use types::test_utils::generate_deterministic_keypairs;
|
||||
use types::test_utils::TestRandom;
|
||||
use types::{typenum::U4294967296, *};
|
||||
|
||||
// 4th September 2019
|
||||
pub const HARNESS_GENESIS_TIME: u64 = 1_567_552_690;
|
||||
// Environment variable to read if `fork_from_env` feature is enabled.
|
||||
const FORK_NAME_ENV_VAR: &str = "FORK_NAME";
|
||||
pub const FORK_NAME_ENV_VAR: &str = "FORK_NAME";
|
||||
|
||||
// Default target aggregators to set during testing, this ensures an aggregator at each slot.
|
||||
//
|
||||
@ -189,11 +198,12 @@ impl<E: EthSpec> Builder<EphemeralHarnessType<E>> {
|
||||
.unwrap(),
|
||||
);
|
||||
let mutator = move |builder: BeaconChainBuilder<_>| {
|
||||
let header = generate_genesis_header::<E>(builder.get_spec(), false);
|
||||
let genesis_state = interop_genesis_state_with_eth1::<E>(
|
||||
&validator_keypairs,
|
||||
HARNESS_GENESIS_TIME,
|
||||
Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH),
|
||||
None,
|
||||
header,
|
||||
builder.get_spec(),
|
||||
)
|
||||
.expect("should generate interop state");
|
||||
@ -250,11 +260,12 @@ impl<E: EthSpec> Builder<DiskHarnessType<E>> {
|
||||
.expect("cannot build without validator keypairs");
|
||||
|
||||
let mutator = move |builder: BeaconChainBuilder<_>| {
|
||||
let header = generate_genesis_header::<E>(builder.get_spec(), false);
|
||||
let genesis_state = interop_genesis_state_with_eth1::<E>(
|
||||
&validator_keypairs,
|
||||
HARNESS_GENESIS_TIME,
|
||||
Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH),
|
||||
None,
|
||||
header,
|
||||
builder.get_spec(),
|
||||
)
|
||||
.expect("should generate interop state");
|
||||
@ -317,6 +328,11 @@ where
|
||||
self
|
||||
}
|
||||
|
||||
pub fn withdrawal_keypairs(mut self, withdrawal_keypairs: Vec<Option<Keypair>>) -> Self {
|
||||
self.withdrawal_keypairs = withdrawal_keypairs;
|
||||
self
|
||||
}
|
||||
|
||||
/// Initializes the BLS withdrawal keypairs for `num_keypairs` validators to
|
||||
/// the "determistic" values, regardless of wether or not the validator has
|
||||
/// a BLS or execution address in the genesis deposits.
|
||||
@ -332,11 +348,6 @@ where
|
||||
)
|
||||
}
|
||||
|
||||
pub fn withdrawal_keypairs(mut self, withdrawal_keypairs: Vec<Option<Keypair>>) -> Self {
|
||||
self.withdrawal_keypairs = withdrawal_keypairs;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn default_spec(self) -> Self {
|
||||
self.spec_or_default(None)
|
||||
}
|
||||
@ -385,7 +396,7 @@ where
|
||||
self
|
||||
}
|
||||
|
||||
pub fn execution_layer(mut self, urls: &[&str]) -> Self {
|
||||
pub fn execution_layer_from_urls(mut self, urls: &[&str]) -> Self {
|
||||
assert!(
|
||||
self.execution_layer.is_none(),
|
||||
"execution layer already defined"
|
||||
@ -414,6 +425,11 @@ where
|
||||
self
|
||||
}
|
||||
|
||||
pub fn execution_layer(mut self, el: Option<ExecutionLayer<E>>) -> Self {
|
||||
self.execution_layer = el;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn recalculate_fork_times_with_genesis(mut self, genesis_time: u64) -> Self {
|
||||
let mock = self
|
||||
.mock_execution_layer
|
||||
@ -427,6 +443,9 @@ where
|
||||
spec.capella_fork_epoch.map(|epoch| {
|
||||
genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
|
||||
});
|
||||
mock.server.execution_block_generator().cancun_time = spec.deneb_fork_epoch.map(|epoch| {
|
||||
genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
|
||||
});
|
||||
|
||||
self
|
||||
}
|
||||
@ -436,17 +455,10 @@ where
|
||||
}
|
||||
|
||||
pub fn mock_execution_layer_with_config(mut self, builder_threshold: Option<u128>) -> Self {
|
||||
let spec = self.spec.clone().expect("cannot build without spec");
|
||||
let shanghai_time = spec.capella_fork_epoch.map(|epoch| {
|
||||
HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
|
||||
});
|
||||
let mock = MockExecutionLayer::new(
|
||||
let mock = mock_execution_layer_from_parts::<E>(
|
||||
self.spec.as_ref().expect("cannot build without spec"),
|
||||
self.runtime.task_executor.clone(),
|
||||
DEFAULT_TERMINAL_BLOCK,
|
||||
shanghai_time,
|
||||
builder_threshold,
|
||||
Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()),
|
||||
spec,
|
||||
);
|
||||
self.execution_layer = Some(mock.el.clone());
|
||||
self.mock_execution_layer = Some(mock);
|
||||
@ -479,6 +491,10 @@ where
|
||||
.validator_keypairs
|
||||
.expect("cannot build without validator keypairs");
|
||||
let chain_config = self.chain_config.unwrap_or_default();
|
||||
let trusted_setup: TrustedSetup =
|
||||
serde_json::from_reader(eth2_network_config::get_trusted_setup::<E::Kzg>())
|
||||
.map_err(|e| format!("Unable to read trusted setup file: {}", e))
|
||||
.unwrap();
|
||||
|
||||
let mut builder = BeaconChainBuilder::new(self.eth_spec_instance)
|
||||
.logger(log.clone())
|
||||
@ -499,7 +515,8 @@ where
|
||||
log.clone(),
|
||||
5,
|
||||
)))
|
||||
.monitor_validators(true, vec![], DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, log);
|
||||
.monitor_validators(true, vec![], DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, log)
|
||||
.trusted_setup(trusted_setup);
|
||||
|
||||
builder = if let Some(mutator) = self.initial_mutator {
|
||||
mutator(builder)
|
||||
@ -535,11 +552,42 @@ where
|
||||
runtime: self.runtime,
|
||||
mock_execution_layer: self.mock_execution_layer,
|
||||
mock_builder: None,
|
||||
blob_signature_cache: <_>::default(),
|
||||
rng: make_rng(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn mock_execution_layer_from_parts<T: EthSpec>(
|
||||
spec: &ChainSpec,
|
||||
task_executor: TaskExecutor,
|
||||
builder_threshold: Option<u128>,
|
||||
) -> MockExecutionLayer<T> {
|
||||
let shanghai_time = spec.capella_fork_epoch.map(|epoch| {
|
||||
HARNESS_GENESIS_TIME + spec.seconds_per_slot * T::slots_per_epoch() * epoch.as_u64()
|
||||
});
|
||||
let cancun_time = spec.deneb_fork_epoch.map(|epoch| {
|
||||
HARNESS_GENESIS_TIME + spec.seconds_per_slot * T::slots_per_epoch() * epoch.as_u64()
|
||||
});
|
||||
|
||||
let trusted_setup: TrustedSetup =
|
||||
serde_json::from_reader(eth2_network_config::get_trusted_setup::<T::Kzg>())
|
||||
.map_err(|e| format!("Unable to read trusted setup file: {}", e))
|
||||
.expect("should have trusted setup");
|
||||
let kzg = Kzg::new_from_trusted_setup(trusted_setup).expect("should create kzg");
|
||||
|
||||
MockExecutionLayer::new(
|
||||
task_executor,
|
||||
DEFAULT_TERMINAL_BLOCK,
|
||||
shanghai_time,
|
||||
cancun_time,
|
||||
builder_threshold,
|
||||
Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()),
|
||||
spec.clone(),
|
||||
Some(kzg),
|
||||
)
|
||||
}
|
||||
|
||||
/// A testing harness which can instantiate a `BeaconChain` and populate it with blocks and
|
||||
/// attestations.
|
||||
///
|
||||
@ -561,9 +609,29 @@ pub struct BeaconChainHarness<T: BeaconChainTypes> {
|
||||
pub mock_execution_layer: Option<MockExecutionLayer<T::EthSpec>>,
|
||||
pub mock_builder: Option<Arc<MockBuilder<T::EthSpec>>>,
|
||||
|
||||
/// Cache for blob signatures because we don't need them for import, but we do need them
|
||||
/// to test gossip validation. We always make them during block production but drop them
|
||||
/// before storing them in the db.
|
||||
pub blob_signature_cache: Arc<RwLock<HashMap<BlobSignatureKey, Signature>>>,
|
||||
|
||||
pub rng: Mutex<StdRng>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Hash, Eq, PartialEq)]
|
||||
pub struct BlobSignatureKey {
|
||||
block_root: Hash256,
|
||||
blob_index: u64,
|
||||
}
|
||||
|
||||
impl BlobSignatureKey {
|
||||
pub fn new(block_root: Hash256, blob_index: u64) -> Self {
|
||||
Self {
|
||||
block_root,
|
||||
blob_index,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub type CommitteeAttestations<E> = Vec<(Attestation<E>, SubnetId)>;
|
||||
pub type HarnessAttestations<E> =
|
||||
Vec<(CommitteeAttestations<E>, Option<SignedAggregateAndProof<E>>)>;
|
||||
@ -640,6 +708,20 @@ where
|
||||
mock_builder_server
|
||||
}
|
||||
|
||||
pub fn get_head_block(&self) -> RpcBlock<E> {
|
||||
let block = self.chain.head_beacon_block();
|
||||
let block_root = block.canonical_root();
|
||||
let blobs = self.chain.get_blobs(&block_root).unwrap();
|
||||
RpcBlock::new(Some(block_root), block, Some(blobs)).unwrap()
|
||||
}
|
||||
|
||||
pub fn get_full_block(&self, block_root: &Hash256) -> RpcBlock<E> {
|
||||
let block = self.chain.get_blinded_block(block_root).unwrap().unwrap();
|
||||
let full_block = self.chain.store.make_full_block(block_root, block).unwrap();
|
||||
let blobs = self.chain.get_blobs(block_root).unwrap();
|
||||
RpcBlock::new(Some(*block_root), Arc::new(full_block), Some(blobs)).unwrap()
|
||||
}
|
||||
|
||||
pub fn get_all_validators(&self) -> Vec<usize> {
|
||||
(0..self.validator_keypairs.len()).collect()
|
||||
}
|
||||
@ -749,9 +831,28 @@ where
|
||||
&self,
|
||||
state: BeaconState<E>,
|
||||
slot: Slot,
|
||||
) -> (SignedBlindedBeaconBlock<E>, BeaconState<E>) {
|
||||
) -> (
|
||||
SignedBlockContentsTuple<E, BlindedPayload<E>>,
|
||||
BeaconState<E>,
|
||||
) {
|
||||
let (unblinded, new_state) = self.make_block(state, slot).await;
|
||||
(unblinded.into(), new_state)
|
||||
let maybe_blinded_blob_sidecars = unblinded.1.map(|blob_sidecar_list| {
|
||||
VariableList::new(
|
||||
blob_sidecar_list
|
||||
.into_iter()
|
||||
.map(|blob_sidecar| {
|
||||
let blinded_sidecar: BlindedBlobSidecar = blob_sidecar.message.into();
|
||||
SignedSidecar {
|
||||
message: Arc::new(blinded_sidecar),
|
||||
signature: blob_sidecar.signature,
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
})
|
||||
.collect(),
|
||||
)
|
||||
.unwrap()
|
||||
});
|
||||
((unblinded.0.into(), maybe_blinded_blob_sidecars), new_state)
|
||||
}
|
||||
|
||||
/// Returns a newly created block, signed by the proposer for the given slot.
|
||||
@ -759,7 +860,7 @@ where
|
||||
&self,
|
||||
mut state: BeaconState<E>,
|
||||
slot: Slot,
|
||||
) -> (SignedBeaconBlock<E>, BeaconState<E>) {
|
||||
) -> (SignedBlockContentsTuple<E, FullPayload<E>>, BeaconState<E>) {
|
||||
assert_ne!(slot, 0, "can't produce a block at slot 0");
|
||||
assert!(slot >= state.slot());
|
||||
|
||||
@ -777,7 +878,7 @@ where
|
||||
|
||||
let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot);
|
||||
|
||||
let (block, state) = self
|
||||
let (block, state, maybe_blob_sidecars) = self
|
||||
.chain
|
||||
.produce_block_on_state(
|
||||
state,
|
||||
@ -797,7 +898,18 @@ where
|
||||
&self.spec,
|
||||
);
|
||||
|
||||
(signed_block, state)
|
||||
let block_contents: SignedBlockContentsTuple<E, FullPayload<E>> = match &signed_block {
|
||||
SignedBeaconBlock::Base(_)
|
||||
| SignedBeaconBlock::Altair(_)
|
||||
| SignedBeaconBlock::Merge(_)
|
||||
| SignedBeaconBlock::Capella(_) => (signed_block, None),
|
||||
SignedBeaconBlock::Deneb(_) => (
|
||||
signed_block,
|
||||
maybe_blob_sidecars.map(|blobs| self.sign_blobs(blobs, &state, proposer_index)),
|
||||
),
|
||||
};
|
||||
|
||||
(block_contents, state)
|
||||
}
|
||||
|
||||
/// Useful for the `per_block_processing` tests. Creates a block, and returns the state after
|
||||
@ -806,7 +918,7 @@ where
|
||||
&self,
|
||||
mut state: BeaconState<E>,
|
||||
slot: Slot,
|
||||
) -> (SignedBeaconBlock<E>, BeaconState<E>) {
|
||||
) -> (SignedBlockContentsTuple<E, FullPayload<E>>, BeaconState<E>) {
|
||||
assert_ne!(slot, 0, "can't produce a block at slot 0");
|
||||
assert!(slot >= state.slot());
|
||||
|
||||
@ -826,7 +938,7 @@ where
|
||||
|
||||
let pre_state = state.clone();
|
||||
|
||||
let (block, state) = self
|
||||
let (block, state, maybe_blob_sidecars) = self
|
||||
.chain
|
||||
.produce_block_on_state(
|
||||
state,
|
||||
@ -846,7 +958,40 @@ where
|
||||
&self.spec,
|
||||
);
|
||||
|
||||
(signed_block, pre_state)
|
||||
let block_contents: SignedBlockContentsTuple<E, FullPayload<E>> = match &signed_block {
|
||||
SignedBeaconBlock::Base(_)
|
||||
| SignedBeaconBlock::Altair(_)
|
||||
| SignedBeaconBlock::Merge(_)
|
||||
| SignedBeaconBlock::Capella(_) => (signed_block, None),
|
||||
SignedBeaconBlock::Deneb(_) => {
|
||||
if let Some(blobs) = maybe_blob_sidecars {
|
||||
let signed_blobs: SignedSidecarList<E, BlobSidecar<E>> = Vec::from(blobs)
|
||||
.into_iter()
|
||||
.map(|blob| {
|
||||
blob.sign(
|
||||
&self.validator_keypairs[proposer_index].sk,
|
||||
&state.fork(),
|
||||
state.genesis_validators_root(),
|
||||
&self.spec,
|
||||
)
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
.into();
|
||||
let mut guard = self.blob_signature_cache.write();
|
||||
for blob in &signed_blobs {
|
||||
guard.insert(
|
||||
BlobSignatureKey::new(blob.message.block_root, blob.message.index),
|
||||
blob.signature.clone(),
|
||||
);
|
||||
}
|
||||
(signed_block, Some(signed_blobs))
|
||||
} else {
|
||||
(signed_block, None)
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
(block_contents, pre_state)
|
||||
}
|
||||
|
||||
/// Create a randao reveal for a block at `slot`.
|
||||
@ -883,6 +1028,35 @@ where
|
||||
)
|
||||
}
|
||||
|
||||
/// Sign blobs, and cache their signatures.
|
||||
pub fn sign_blobs(
|
||||
&self,
|
||||
blobs: BlobSidecarList<E>,
|
||||
state: &BeaconState<E>,
|
||||
proposer_index: usize,
|
||||
) -> SignedSidecarList<E, BlobSidecar<E>> {
|
||||
let signed_blobs: SignedSidecarList<E, BlobSidecar<E>> = Vec::from(blobs)
|
||||
.into_iter()
|
||||
.map(|blob| {
|
||||
blob.sign(
|
||||
&self.validator_keypairs[proposer_index].sk,
|
||||
&state.fork(),
|
||||
state.genesis_validators_root(),
|
||||
&self.spec,
|
||||
)
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
.into();
|
||||
let mut guard = self.blob_signature_cache.write();
|
||||
for blob in &signed_blobs {
|
||||
guard.insert(
|
||||
BlobSignatureKey::new(blob.message.block_root, blob.message.index),
|
||||
blob.signature.clone(),
|
||||
);
|
||||
}
|
||||
signed_blobs
|
||||
}
|
||||
|
||||
/// Produces an "unaggregated" attestation for the given `slot` and `index` that attests to
|
||||
/// `beacon_block_root`. The provided `state` should match the `block.state_root` for the
|
||||
/// `block` identified by `beacon_block_root`.
|
||||
@ -1521,14 +1695,13 @@ where
|
||||
|
||||
pub fn make_voluntary_exit(&self, validator_index: u64, epoch: Epoch) -> SignedVoluntaryExit {
|
||||
let sk = &self.validator_keypairs[validator_index as usize].sk;
|
||||
let fork = self.chain.canonical_head.cached_head().head_fork();
|
||||
let genesis_validators_root = self.chain.genesis_validators_root;
|
||||
|
||||
VoluntaryExit {
|
||||
epoch,
|
||||
validator_index,
|
||||
}
|
||||
.sign(sk, &fork, genesis_validators_root, &self.chain.spec)
|
||||
.sign(sk, genesis_validators_root, &self.chain.spec)
|
||||
}
|
||||
|
||||
pub fn add_proposer_slashing(&self, validator_index: u64) -> Result<(), String> {
|
||||
@ -1637,11 +1810,12 @@ where
|
||||
state: BeaconState<E>,
|
||||
slot: Slot,
|
||||
block_modifier: impl FnOnce(&mut BeaconBlock<E>),
|
||||
) -> (SignedBeaconBlock<E>, BeaconState<E>) {
|
||||
) -> (SignedBlockContentsTuple<E, FullPayload<E>>, BeaconState<E>) {
|
||||
assert_ne!(slot, 0, "can't produce a block at slot 0");
|
||||
assert!(slot >= state.slot());
|
||||
|
||||
let (block, state) = self.make_block_return_pre_state(state, slot).await;
|
||||
let ((block, blobs), state) = self.make_block_return_pre_state(state, slot).await;
|
||||
|
||||
let (mut block, _) = block.deconstruct();
|
||||
|
||||
block_modifier(&mut block);
|
||||
@ -1654,7 +1828,7 @@ where
|
||||
state.genesis_validators_root(),
|
||||
&self.spec,
|
||||
);
|
||||
(signed_block, state)
|
||||
((signed_block, blobs), state)
|
||||
}
|
||||
|
||||
pub fn make_deposits<'a>(
|
||||
@ -1734,37 +1908,60 @@ where
|
||||
&self,
|
||||
slot: Slot,
|
||||
block_root: Hash256,
|
||||
block: SignedBeaconBlock<E>,
|
||||
block_contents: SignedBlockContentsTuple<E, FullPayload<E>>,
|
||||
) -> Result<SignedBeaconBlockHash, BlockError<E>> {
|
||||
self.set_current_slot(slot);
|
||||
let (block, blobs) = block_contents;
|
||||
// Note: we are just dropping signatures here and skipping signature verification.
|
||||
let blobs_without_signatures = blobs.map(|blobs| {
|
||||
VariableList::from(
|
||||
blobs
|
||||
.into_iter()
|
||||
.map(|blob| blob.message)
|
||||
.collect::<Vec<_>>(),
|
||||
)
|
||||
});
|
||||
let block_hash: SignedBeaconBlockHash = self
|
||||
.chain
|
||||
.process_block(
|
||||
block_root,
|
||||
Arc::new(block),
|
||||
RpcBlock::new(Some(block_root), Arc::new(block), blobs_without_signatures).unwrap(),
|
||||
NotifyExecutionLayer::Yes,
|
||||
|| Ok(()),
|
||||
)
|
||||
.await?
|
||||
.into();
|
||||
.try_into()
|
||||
.unwrap();
|
||||
self.chain.recompute_head_at_current_slot().await;
|
||||
Ok(block_hash)
|
||||
}
|
||||
|
||||
pub async fn process_block_result(
|
||||
&self,
|
||||
block: SignedBeaconBlock<E>,
|
||||
block_contents: SignedBlockContentsTuple<E, FullPayload<E>>,
|
||||
) -> Result<SignedBeaconBlockHash, BlockError<E>> {
|
||||
let (block, blobs) = block_contents;
|
||||
// Note: we are just dropping signatures here and skipping signature verification.
|
||||
let blobs_without_signatures = blobs.map(|blobs| {
|
||||
VariableList::from(
|
||||
blobs
|
||||
.into_iter()
|
||||
.map(|blob| blob.message)
|
||||
.collect::<Vec<_>>(),
|
||||
)
|
||||
});
|
||||
let block_root = block.canonical_root();
|
||||
let block_hash: SignedBeaconBlockHash = self
|
||||
.chain
|
||||
.process_block(
|
||||
block.canonical_root(),
|
||||
Arc::new(block),
|
||||
block_root,
|
||||
RpcBlock::new(Some(block_root), Arc::new(block), blobs_without_signatures).unwrap(),
|
||||
NotifyExecutionLayer::Yes,
|
||||
|| Ok(()),
|
||||
)
|
||||
.await?
|
||||
.into();
|
||||
.try_into()
|
||||
.expect("block blobs are available");
|
||||
self.chain.recompute_head_at_current_slot().await;
|
||||
Ok(block_hash)
|
||||
}
|
||||
@ -1824,13 +2021,25 @@ where
|
||||
&self,
|
||||
slot: Slot,
|
||||
state: BeaconState<E>,
|
||||
) -> Result<(SignedBeaconBlockHash, SignedBeaconBlock<E>, BeaconState<E>), BlockError<E>> {
|
||||
) -> Result<
|
||||
(
|
||||
SignedBeaconBlockHash,
|
||||
SignedBlockContentsTuple<E, FullPayload<E>>,
|
||||
BeaconState<E>,
|
||||
),
|
||||
BlockError<E>,
|
||||
> {
|
||||
self.set_current_slot(slot);
|
||||
let (block, new_state) = self.make_block(state, slot).await;
|
||||
let (block_contents, new_state) = self.make_block(state, slot).await;
|
||||
|
||||
let block_hash = self
|
||||
.process_block(slot, block.canonical_root(), block.clone())
|
||||
.process_block(
|
||||
slot,
|
||||
block_contents.0.canonical_root(),
|
||||
block_contents.clone(),
|
||||
)
|
||||
.await?;
|
||||
Ok((block_hash, block, new_state))
|
||||
Ok((block_hash, block_contents, new_state))
|
||||
}
|
||||
|
||||
pub fn attest_block(
|
||||
@ -1884,7 +2093,7 @@ where
|
||||
sync_committee_strategy: SyncCommitteeStrategy,
|
||||
) -> Result<(SignedBeaconBlockHash, BeaconState<E>), BlockError<E>> {
|
||||
let (block_hash, block, state) = self.add_block_at_slot(slot, state).await?;
|
||||
self.attest_block(&state, state_root, block_hash, &block, validators);
|
||||
self.attest_block(&state, state_root, block_hash, &block.0, validators);
|
||||
|
||||
if sync_committee_strategy == SyncCommitteeStrategy::AllValidators
|
||||
&& state.current_sync_committee().is_ok()
|
||||
@ -2082,8 +2291,9 @@ where
|
||||
chain_dump
|
||||
.iter()
|
||||
.cloned()
|
||||
.map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint().root.into())
|
||||
.filter(|block_hash| *block_hash != Hash256::zero().into())
|
||||
.map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint().root)
|
||||
.filter(|block_hash| *block_hash != Hash256::zero())
|
||||
.map(|hash| hash.into())
|
||||
.collect()
|
||||
}
|
||||
|
||||
@ -2295,3 +2505,76 @@ pub struct MakeAttestationOptions {
/// Fork to use for signing attestations.
pub fork: Fork,
}

pub fn build_log(level: slog::Level, enabled: bool) -> Logger {
let decorator = TermDecorator::new().build();
let drain = FullFormat::new(decorator).build().fuse();
let drain = Async::new(drain).build().fuse();

if enabled {
Logger::root(drain.filter_level(level).fuse(), o!())
} else {
Logger::root(drain.filter(|_| false).fuse(), o!())
}
}

pub enum NumBlobs {
Random,
None,
}
|
||||
|
||||
pub fn generate_rand_block_and_blobs<E: EthSpec>(
|
||||
fork_name: ForkName,
|
||||
num_blobs: NumBlobs,
|
||||
kzg: &Kzg<E::Kzg>,
|
||||
rng: &mut impl Rng,
|
||||
) -> (SignedBeaconBlock<E, FullPayload<E>>, Vec<BlobSidecar<E>>) {
|
||||
let inner = map_fork_name!(fork_name, BeaconBlock, <_>::random_for_test(rng));
|
||||
let mut block = SignedBeaconBlock::from_block(inner, types::Signature::random_for_test(rng));
|
||||
let mut blob_sidecars = vec![];
|
||||
if let Ok(message) = block.message_deneb_mut() {
|
||||
// Get either zero blobs or a random number of blobs between 1 and Max Blobs.
|
||||
let payload: &mut FullPayloadDeneb<E> = &mut message.body.execution_payload;
|
||||
let num_blobs = match num_blobs {
|
||||
NumBlobs::Random => rng.gen_range(1..=E::max_blobs_per_block()),
|
||||
NumBlobs::None => 0,
|
||||
};
|
||||
let (bundle, transactions) =
|
||||
execution_layer::test_utils::generate_random_blobs::<E, _>(num_blobs, kzg, rng)
|
||||
.unwrap();
|
||||
|
||||
payload.execution_payload.transactions = <_>::default();
|
||||
for tx in Vec::from(transactions) {
|
||||
payload.execution_payload.transactions.push(tx).unwrap();
|
||||
}
|
||||
message.body.blob_kzg_commitments = bundle.commitments.clone();
|
||||
|
||||
let eth2::types::BlobsBundle {
|
||||
commitments,
|
||||
proofs,
|
||||
blobs,
|
||||
} = bundle;
|
||||
|
||||
let block_root = block.canonical_root();
|
||||
|
||||
for (index, ((blob, kzg_commitment), kzg_proof)) in blobs
|
||||
.into_iter()
|
||||
.zip(commitments.into_iter())
|
||||
.zip(proofs.into_iter())
|
||||
.enumerate()
|
||||
{
|
||||
blob_sidecars.push(BlobSidecar {
|
||||
block_root,
|
||||
index: index as u64,
|
||||
slot: block.slot(),
|
||||
block_parent_root: block.parent_root(),
|
||||
proposer_index: block.message().proposer_index(),
|
||||
blob: blob.clone(),
|
||||
kzg_commitment,
|
||||
kzg_proof,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
(block, blob_sidecars)
|
||||
}
|
||||
|
@ -38,7 +38,7 @@ impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
|
||||
};
|
||||
|
||||
let store_ops = cache.import_new_pubkeys(state)?;
|
||||
store.do_atomically(store_ops)?;
|
||||
store.do_atomically_with_block_and_blobs_cache(store_ops)?;
|
||||
|
||||
Ok(cache)
|
||||
}
|
||||
@ -299,7 +299,7 @@ mod test {
|
||||
let ops = cache
|
||||
.import_new_pubkeys(&state)
|
||||
.expect("should import pubkeys");
|
||||
store.do_atomically(ops).unwrap();
|
||||
store.do_atomically_with_block_and_blobs_cache(ops).unwrap();
|
||||
check_cache_get(&cache, &keypairs[..]);
|
||||
drop(cache);
|
||||
|
||||
|
@ -1,5 +1,6 @@
|
||||
#![cfg(not(debug_assertions))]
|
||||
|
||||
use beacon_chain::block_verification_types::RpcBlock;
|
||||
use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy};
|
||||
use beacon_chain::{StateSkipConfig, WhenSlotSkipped};
|
||||
use lazy_static::lazy_static;
|
||||
@ -67,6 +68,7 @@ async fn produces_attestations() {
|
||||
.store
|
||||
.make_full_block(&block_root, blinded_block)
|
||||
.unwrap();
|
||||
let blobs = chain.get_blobs(&block_root).unwrap();
|
||||
|
||||
let epoch_boundary_slot = state
|
||||
.current_epoch()
|
||||
@ -131,6 +133,19 @@ async fn produces_attestations() {
|
||||
assert_eq!(data.target.epoch, state.current_epoch(), "bad target epoch");
|
||||
assert_eq!(data.target.root, target_root, "bad target root");
|
||||
|
||||
let rpc_block =
|
||||
RpcBlock::<MainnetEthSpec>::new(None, Arc::new(block.clone()), Some(blobs.clone()))
|
||||
.unwrap();
|
||||
let beacon_chain::data_availability_checker::MaybeAvailableBlock::Available(
|
||||
available_block,
|
||||
) = chain
|
||||
.data_availability_checker
|
||||
.check_rpc_block_availability(rpc_block)
|
||||
.unwrap()
|
||||
else {
|
||||
panic!("block should be available")
|
||||
};
|
||||
|
||||
let early_attestation = {
|
||||
let proto_block = chain
|
||||
.canonical_head
|
||||
@ -141,7 +156,7 @@ async fn produces_attestations() {
|
||||
.early_attester_cache
|
||||
.add_head_block(
|
||||
block_root,
|
||||
Arc::new(block.clone()),
|
||||
available_block,
|
||||
proto_block,
|
||||
&state,
|
||||
&chain.spec,
|
||||
@ -192,12 +207,29 @@ async fn early_attester_cache_old_request() {
|
||||
.get_block(&head.beacon_block_root)
|
||||
.unwrap();
|
||||
|
||||
let head_blobs = harness
|
||||
.chain
|
||||
.get_blobs(&head.beacon_block_root)
|
||||
.expect("should get blobs");
|
||||
|
||||
let rpc_block =
|
||||
RpcBlock::<MainnetEthSpec>::new(None, head.beacon_block.clone(), Some(head_blobs)).unwrap();
|
||||
let beacon_chain::data_availability_checker::MaybeAvailableBlock::Available(available_block) =
|
||||
harness
|
||||
.chain
|
||||
.data_availability_checker
|
||||
.check_rpc_block_availability(rpc_block)
|
||||
.unwrap()
|
||||
else {
|
||||
panic!("block should be available")
|
||||
};
|
||||
|
||||
harness
|
||||
.chain
|
||||
.early_attester_cache
|
||||
.add_head_block(
|
||||
head.beacon_block_root,
|
||||
head.beacon_block.clone(),
|
||||
available_block,
|
||||
head_proto_block,
|
||||
&head.beacon_state,
|
||||
&harness.chain.spec,
|
||||
|
@ -334,10 +334,28 @@ impl GossipTester {
|
||||
self.harness.chain.epoch().unwrap()
|
||||
}
|
||||
|
||||
pub fn two_epochs_ago(&self) -> Slot {
pub fn earliest_valid_attestation_slot(&self) -> Slot {
let offset = match self.harness.spec.fork_name_at_epoch(self.epoch()) {
ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => {
// Subtract an additional slot since the harness will be exactly on the start of the
// slot and the propagation tolerance will allow an extra slot.
E::slots_per_epoch() + 1
}
// EIP-7045
ForkName::Deneb => {
let epoch_slot_offset = (self.slot() % E::slots_per_epoch()).as_u64();
if epoch_slot_offset != 0 {
E::slots_per_epoch() + epoch_slot_offset
} else {
// Here the propagation tolerance will cause the cutoff to be an entire epoch earlier
2 * E::slots_per_epoch()
}
}
};

self.slot()
.as_u64()
.checked_sub(E::slots_per_epoch() + 2)
.checked_sub(offset)
.expect("chain is not sufficiently deep for test")
.into()
}
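// Illustrative sketch (not part of this diff): the cutoff arithmetic above, restated with
// plain integers. Pre-Deneb, gossip attestations are only accepted for roughly one epoch
// after their slot, while EIP-7045 (Deneb) accepts attestations from the current and the
// previous epoch. The extra slot accounts for the clock-disparity tolerance when the clock
// sits exactly on a slot boundary. The function name and parameters below are hypothetical,
// not Lighthouse APIs.
fn earliest_valid_attestation_slot_sketch(
    current_slot: u64,
    slots_per_epoch: u64,
    deneb_enabled: bool,
) -> u64 {
    let offset = if !deneb_enabled {
        // One full epoch, plus one slot of propagation tolerance.
        slots_per_epoch + 1
    } else {
        let epoch_slot_offset = current_slot % slots_per_epoch;
        if epoch_slot_offset != 0 {
            // Back to the first slot of the previous epoch.
            slots_per_epoch + epoch_slot_offset
        } else {
            // On an epoch boundary, the tolerance pushes the cutoff a whole epoch earlier.
            2 * slots_per_epoch
        }
    };
    current_slot.saturating_sub(offset)
}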
|
||||
@ -484,18 +502,21 @@ async fn aggregated_gossip_verification() {
|
||||
)
|
||||
.inspect_aggregate_err(
|
||||
"aggregate from past slot",
|
||||
|tester, a| a.message.aggregate.data.slot = tester.two_epochs_ago(),
|
||||
|tester, a| {
|
||||
let too_early_slot = tester.earliest_valid_attestation_slot() - 1;
|
||||
a.message.aggregate.data.slot = too_early_slot;
|
||||
a.message.aggregate.data.target.epoch = too_early_slot.epoch(E::slots_per_epoch());
|
||||
},
|
||||
|tester, err| {
|
||||
let valid_early_slot = tester.earliest_valid_attestation_slot();
|
||||
assert!(matches!(
|
||||
err,
|
||||
AttnError::PastSlot {
|
||||
attestation_slot,
|
||||
// Subtract an additional slot since the harness will be exactly on the start of the
|
||||
// slot and the propagation tolerance will allow an extra slot.
|
||||
earliest_permissible_slot
|
||||
}
|
||||
if attestation_slot == tester.two_epochs_ago()
|
||||
&& earliest_permissible_slot == tester.slot() - E::slots_per_epoch() - 1
|
||||
if attestation_slot == valid_early_slot - 1
|
||||
&& earliest_permissible_slot == valid_early_slot
|
||||
))
|
||||
},
|
||||
)
|
||||
@ -800,22 +821,20 @@ async fn unaggregated_gossip_verification() {
|
||||
.inspect_unaggregate_err(
|
||||
"attestation from past slot",
|
||||
|tester, a, _| {
|
||||
let early_slot = tester.two_epochs_ago();
|
||||
a.data.slot = early_slot;
|
||||
a.data.target.epoch = early_slot.epoch(E::slots_per_epoch());
|
||||
let too_early_slot = tester.earliest_valid_attestation_slot() - 1;
|
||||
a.data.slot = too_early_slot;
|
||||
a.data.target.epoch = too_early_slot.epoch(E::slots_per_epoch());
|
||||
},
|
||||
|tester, err| {
|
||||
dbg!(&err);
|
||||
let valid_early_slot = tester.earliest_valid_attestation_slot();
|
||||
assert!(matches!(
|
||||
err,
|
||||
AttnError::PastSlot {
|
||||
attestation_slot,
|
||||
// Subtract an additional slot since the harness will be exactly on the start of the
|
||||
// slot and the propagation tolerance will allow an extra slot.
|
||||
earliest_permissible_slot,
|
||||
}
|
||||
if attestation_slot == tester.two_epochs_ago()
|
||||
&& earliest_permissible_slot == tester.slot() - E::slots_per_epoch() - 1
|
||||
if attestation_slot == valid_early_slot - 1
|
||||
&& earliest_permissible_slot == valid_early_slot
|
||||
))
|
||||
},
|
||||
)
|
||||
|
@ -1,7 +1,10 @@
|
||||
#![cfg(not(debug_assertions))]
|
||||
|
||||
use beacon_chain::test_utils::{
|
||||
AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
|
||||
use beacon_chain::block_verification_types::{AsBlock, ExecutedBlock, RpcBlock};
|
||||
use beacon_chain::test_utils::BlobSignatureKey;
|
||||
use beacon_chain::{
|
||||
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType},
|
||||
AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, ExecutionPendingBlock,
|
||||
};
|
||||
use beacon_chain::{
|
||||
BeaconSnapshot, BlockError, ChainConfig, ChainSegmentResult, IntoExecutionPendingBlock,
|
||||
@ -33,7 +36,7 @@ lazy_static! {
|
||||
static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
|
||||
}
|
||||
|
||||
async fn get_chain_segment() -> Vec<BeaconSnapshot<E>> {
|
||||
async fn get_chain_segment() -> (Vec<BeaconSnapshot<E>>, Vec<Option<BlobSidecarList<E>>>) {
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
|
||||
harness
|
||||
@ -45,6 +48,7 @@ async fn get_chain_segment() -> Vec<BeaconSnapshot<E>> {
|
||||
.await;
|
||||
|
||||
let mut segment = Vec::with_capacity(CHAIN_SEGMENT_LENGTH);
|
||||
let mut segment_blobs = Vec::with_capacity(CHAIN_SEGMENT_LENGTH);
|
||||
for snapshot in harness
|
||||
.chain
|
||||
.chain_dump()
|
||||
@ -63,8 +67,73 @@ async fn get_chain_segment() -> Vec<BeaconSnapshot<E>> {
|
||||
beacon_block: Arc::new(full_block),
|
||||
beacon_state: snapshot.beacon_state,
|
||||
});
|
||||
segment_blobs.push(Some(
|
||||
harness
|
||||
.chain
|
||||
.get_blobs(&snapshot.beacon_block_root)
|
||||
.unwrap(),
|
||||
))
|
||||
}
|
||||
segment
|
||||
(segment, segment_blobs)
|
||||
}
|
||||
|
||||
async fn get_chain_segment_with_signed_blobs() -> (
|
||||
Vec<BeaconSnapshot<E>>,
|
||||
Vec<Option<VariableList<SignedBlobSidecar<E>, <E as EthSpec>::MaxBlobsPerBlock>>>,
|
||||
) {
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
|
||||
harness
|
||||
.extend_chain(
|
||||
CHAIN_SEGMENT_LENGTH,
|
||||
BlockStrategy::OnCanonicalHead,
|
||||
AttestationStrategy::AllValidators,
|
||||
)
|
||||
.await;
|
||||
|
||||
let mut segment = Vec::with_capacity(CHAIN_SEGMENT_LENGTH);
|
||||
let mut segment_blobs = Vec::with_capacity(CHAIN_SEGMENT_LENGTH);
|
||||
for snapshot in harness
|
||||
.chain
|
||||
.chain_dump()
|
||||
.expect("should dump chain")
|
||||
.into_iter()
|
||||
.skip(1)
|
||||
{
|
||||
let full_block = harness
|
||||
.chain
|
||||
.get_block(&snapshot.beacon_block_root)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
segment.push(BeaconSnapshot {
|
||||
beacon_block_root: snapshot.beacon_block_root,
|
||||
beacon_block: Arc::new(full_block),
|
||||
beacon_state: snapshot.beacon_state,
|
||||
});
|
||||
let signed_blobs = harness
|
||||
.chain
|
||||
.get_blobs(&snapshot.beacon_block_root)
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
.map(|blob| {
|
||||
let block_root = blob.block_root;
|
||||
let blob_index = blob.index;
|
||||
SignedBlobSidecar {
|
||||
message: blob,
|
||||
signature: harness
|
||||
.blob_signature_cache
|
||||
.read()
|
||||
.get(&BlobSignatureKey::new(block_root, blob_index))
|
||||
.unwrap()
|
||||
.clone(),
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
segment_blobs.push(Some(VariableList::from(signed_blobs)))
|
||||
}
|
||||
(segment, segment_blobs)
|
||||
}
|
||||
|
||||
fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessType<E>> {
|
||||
@ -84,10 +153,16 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessTyp
|
||||
harness
|
||||
}
|
||||
|
||||
fn chain_segment_blocks(chain_segment: &[BeaconSnapshot<E>]) -> Vec<Arc<SignedBeaconBlock<E>>> {
fn chain_segment_blocks(
chain_segment: &[BeaconSnapshot<E>],
blobs: &[Option<BlobSidecarList<E>>],
) -> Vec<RpcBlock<E>> {
chain_segment
.iter()
.map(|snapshot| snapshot.beacon_block.clone())
.zip(blobs.into_iter())
.map(|(snapshot, blobs)| {
RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap()
})
.collect()
}
|
||||
|
||||
@ -129,22 +204,57 @@ fn update_proposal_signatures(
|
||||
}
|
||||
}
|
||||
|
||||
fn update_parent_roots(snapshots: &mut [BeaconSnapshot<E>]) {
|
||||
fn update_parent_roots(
|
||||
snapshots: &mut [BeaconSnapshot<E>],
|
||||
blobs: &mut [Option<BlobSidecarList<E>>],
|
||||
) {
|
||||
for i in 0..snapshots.len() {
|
||||
let root = snapshots[i].beacon_block.canonical_root();
|
||||
if let Some(child) = snapshots.get_mut(i + 1) {
|
||||
if let (Some(child), Some(child_blobs)) = (snapshots.get_mut(i + 1), blobs.get_mut(i + 1)) {
|
||||
let (mut block, signature) = child.beacon_block.as_ref().clone().deconstruct();
|
||||
*block.parent_root_mut() = root;
|
||||
child.beacon_block = Arc::new(SignedBeaconBlock::from_block(block, signature))
|
||||
let new_child = Arc::new(SignedBeaconBlock::from_block(block, signature));
|
||||
let new_child_root = new_child.canonical_root();
|
||||
child.beacon_block = new_child;
|
||||
if let Some(blobs) = child_blobs {
|
||||
update_blob_roots(new_child_root, blobs);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn update_blob_roots<E: EthSpec>(block_root: Hash256, blobs: &mut BlobSidecarList<E>) {
for old_blob_sidecar in blobs.iter_mut() {
let index = old_blob_sidecar.index;
let slot = old_blob_sidecar.slot;
let block_parent_root = old_blob_sidecar.block_parent_root;
let proposer_index = old_blob_sidecar.proposer_index;
let blob = old_blob_sidecar.blob.clone();
let kzg_commitment = old_blob_sidecar.kzg_commitment;
let kzg_proof = old_blob_sidecar.kzg_proof;

let new_blob = Arc::new(BlobSidecar::<E> {
block_root,
index,
slot,
block_parent_root,
proposer_index,
blob,
kzg_commitment,
kzg_proof,
});
*old_blob_sidecar = new_blob;
}
}
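// Illustrative sketch (not part of this diff): why blob sidecars need re-rooting here. A
// block's canonical root is a hash over its contents, so mutating the parent root (as
// update_parent_roots does) yields a new root, and any sidecar still carrying the old root no
// longer references its block. The toy types and toy_root hash below are hypothetical
// stand-ins for SignedBeaconBlock/BlobSidecar and a real hash_tree_root.
struct ToyBlock {
    parent_root: u64,
    slot: u64,
}

struct ToySidecar {
    block_root: u64,
    index: u64,
}

fn toy_root(block: &ToyBlock) -> u64 {
    // Stand-in for hash_tree_root: any field change changes the root.
    block.parent_root.wrapping_mul(31).wrapping_add(block.slot)
}

fn re_root_sidecars(new_block_root: u64, sidecars: &mut [ToySidecar]) {
    for sidecar in sidecars.iter_mut() {
        sidecar.block_root = new_block_root;
    }
}

fn main() {
    let mut block = ToyBlock { parent_root: 1, slot: 5 };
    let mut sidecars = vec![ToySidecar { block_root: toy_root(&block), index: 0 }];
    block.parent_root = 2; // the kind of mutation update_parent_roots performs
    re_root_sidecars(toy_root(&block), &mut sidecars);
    assert_eq!(sidecars[0].block_root, toy_root(&block));
    assert_eq!(sidecars[0].index, 0);
}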
|
||||
|
||||
#[tokio::test]
|
||||
async fn chain_segment_full_segment() {
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
let chain_segment = get_chain_segment().await;
|
||||
let blocks = chain_segment_blocks(&chain_segment);
|
||||
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
|
||||
let blocks: Vec<RpcBlock<E>> = chain_segment_blocks(&chain_segment, &chain_segment_blobs)
|
||||
.into_iter()
|
||||
.map(|block| block.into())
|
||||
.collect();
|
||||
|
||||
harness
|
||||
.chain
|
||||
@ -179,8 +289,11 @@ async fn chain_segment_full_segment() {
|
||||
async fn chain_segment_varying_chunk_size() {
|
||||
for chunk_size in &[1, 2, 3, 5, 31, 32, 33, 42] {
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
let chain_segment = get_chain_segment().await;
|
||||
let blocks = chain_segment_blocks(&chain_segment);
|
||||
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
|
||||
let blocks: Vec<RpcBlock<E>> = chain_segment_blocks(&chain_segment, &chain_segment_blobs)
|
||||
.into_iter()
|
||||
.map(|block| block.into())
|
||||
.collect();
|
||||
|
||||
harness
|
||||
.chain
|
||||
@ -209,7 +322,7 @@ async fn chain_segment_varying_chunk_size() {
|
||||
#[tokio::test]
|
||||
async fn chain_segment_non_linear_parent_roots() {
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
let chain_segment = get_chain_segment().await;
|
||||
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
|
||||
|
||||
harness
|
||||
.chain
|
||||
@ -219,7 +332,10 @@ async fn chain_segment_non_linear_parent_roots() {
|
||||
/*
|
||||
* Test with a block removed.
|
||||
*/
|
||||
let mut blocks = chain_segment_blocks(&chain_segment);
|
||||
let mut blocks: Vec<RpcBlock<E>> = chain_segment_blocks(&chain_segment, &chain_segment_blobs)
|
||||
.into_iter()
|
||||
.map(|block| block.into())
|
||||
.collect();
|
||||
blocks.remove(2);
|
||||
|
||||
assert!(
|
||||
@ -237,10 +353,17 @@ async fn chain_segment_non_linear_parent_roots() {
|
||||
/*
|
||||
* Test with a modified parent root.
|
||||
*/
|
||||
let mut blocks = chain_segment_blocks(&chain_segment);
|
||||
let (mut block, signature) = blocks[3].as_ref().clone().deconstruct();
|
||||
let mut blocks: Vec<RpcBlock<E>> = chain_segment_blocks(&chain_segment, &chain_segment_blobs)
|
||||
.into_iter()
|
||||
.map(|block| block.into())
|
||||
.collect();
|
||||
|
||||
let (mut block, signature) = blocks[3].as_block().clone().deconstruct();
|
||||
*block.parent_root_mut() = Hash256::zero();
|
||||
blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature));
|
||||
blocks[3] = RpcBlock::new_without_blobs(
|
||||
None,
|
||||
Arc::new(SignedBeaconBlock::from_block(block, signature)),
|
||||
);
|
||||
|
||||
assert!(
|
||||
matches!(
|
||||
@ -258,7 +381,7 @@ async fn chain_segment_non_linear_parent_roots() {
|
||||
#[tokio::test]
|
||||
async fn chain_segment_non_linear_slots() {
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
let chain_segment = get_chain_segment().await;
|
||||
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
|
||||
harness
|
||||
.chain
|
||||
.slot_clock
|
||||
@ -268,10 +391,16 @@ async fn chain_segment_non_linear_slots() {
|
||||
* Test where a child is lower than the parent.
|
||||
*/
|
||||
|
||||
let mut blocks = chain_segment_blocks(&chain_segment);
|
||||
let (mut block, signature) = blocks[3].as_ref().clone().deconstruct();
|
||||
let mut blocks: Vec<RpcBlock<E>> = chain_segment_blocks(&chain_segment, &chain_segment_blobs)
|
||||
.into_iter()
|
||||
.map(|block| block.into())
|
||||
.collect();
|
||||
let (mut block, signature) = blocks[3].as_block().clone().deconstruct();
|
||||
*block.slot_mut() = Slot::new(0);
|
||||
blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature));
|
||||
blocks[3] = RpcBlock::new_without_blobs(
|
||||
None,
|
||||
Arc::new(SignedBeaconBlock::from_block(block, signature)),
|
||||
);
|
||||
|
||||
assert!(
|
||||
matches!(
|
||||
@ -289,10 +418,16 @@ async fn chain_segment_non_linear_slots() {
|
||||
* Test where a child is equal to the parent.
|
||||
*/
|
||||
|
||||
let mut blocks = chain_segment_blocks(&chain_segment);
|
||||
let (mut block, signature) = blocks[3].as_ref().clone().deconstruct();
|
||||
let mut blocks: Vec<RpcBlock<E>> = chain_segment_blocks(&chain_segment, &chain_segment_blobs)
|
||||
.into_iter()
|
||||
.map(|block| block.into())
|
||||
.collect();
|
||||
let (mut block, signature) = blocks[3].as_block().clone().deconstruct();
|
||||
*block.slot_mut() = blocks[2].slot();
|
||||
blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature));
|
||||
blocks[3] = RpcBlock::new_without_blobs(
|
||||
None,
|
||||
Arc::new(SignedBeaconBlock::from_block(block, signature)),
|
||||
);
|
||||
|
||||
assert!(
|
||||
matches!(
|
||||
@ -309,14 +444,18 @@ async fn chain_segment_non_linear_slots() {
|
||||
|
||||
async fn assert_invalid_signature(
|
||||
chain_segment: &[BeaconSnapshot<E>],
|
||||
chain_segment_blobs: &[Option<BlobSidecarList<E>>],
|
||||
harness: &BeaconChainHarness<EphemeralHarnessType<E>>,
|
||||
block_index: usize,
|
||||
snapshots: &[BeaconSnapshot<E>],
|
||||
item: &str,
|
||||
) {
|
||||
let blocks = snapshots
|
||||
let blocks: Vec<RpcBlock<E>> = snapshots
|
||||
.iter()
|
||||
.map(|snapshot| snapshot.beacon_block.clone())
|
||||
.zip(chain_segment_blobs.iter())
|
||||
.map(|(snapshot, blobs)| {
|
||||
RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap()
|
||||
})
|
||||
.collect();
|
||||
|
||||
// Ensure the block will be rejected if imported in a chain segment.
|
||||
@ -340,7 +479,10 @@ async fn assert_invalid_signature(
|
||||
let ancestor_blocks = chain_segment
|
||||
.iter()
|
||||
.take(block_index)
|
||||
.map(|snapshot| snapshot.beacon_block.clone())
|
||||
.zip(chain_segment_blobs.iter())
|
||||
.map(|(snapshot, blobs)| {
|
||||
RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap()
|
||||
})
|
||||
.collect();
|
||||
// We don't care if this fails; we just call it to ensure that all prior blocks have been
// imported prior to this test.
|
||||
@ -354,7 +496,12 @@ async fn assert_invalid_signature(
|
||||
.chain
|
||||
.process_block(
|
||||
snapshots[block_index].beacon_block.canonical_root(),
|
||||
snapshots[block_index].beacon_block.clone(),
|
||||
RpcBlock::new(
|
||||
None,
|
||||
snapshots[block_index].beacon_block.clone(),
|
||||
chain_segment_blobs[block_index].clone(),
|
||||
)
|
||||
.unwrap(),
|
||||
NotifyExecutionLayer::Yes,
|
||||
|| Ok(()),
|
||||
)
|
||||
@ -386,7 +533,7 @@ async fn get_invalid_sigs_harness(
|
||||
}
|
||||
#[tokio::test]
|
||||
async fn invalid_signature_gossip_block() {
|
||||
let chain_segment = get_chain_segment().await;
|
||||
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
|
||||
for &block_index in BLOCK_INDICES {
|
||||
// Ensure the block will be rejected if imported on its own (without gossip checking).
|
||||
let harness = get_invalid_sigs_harness(&chain_segment).await;
|
||||
@ -404,7 +551,10 @@ async fn invalid_signature_gossip_block() {
|
||||
let ancestor_blocks = chain_segment
|
||||
.iter()
|
||||
.take(block_index)
|
||||
.map(|snapshot| snapshot.beacon_block.clone())
|
||||
.zip(chain_segment_blobs.iter())
|
||||
.map(|(snapshot, blobs)| {
|
||||
RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap()
|
||||
})
|
||||
.collect();
|
||||
harness
|
||||
.chain
|
||||
@ -433,7 +583,7 @@ async fn invalid_signature_gossip_block() {
|
||||
|
||||
#[tokio::test]
|
||||
async fn invalid_signature_block_proposal() {
|
||||
let chain_segment = get_chain_segment().await;
|
||||
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
|
||||
for &block_index in BLOCK_INDICES {
|
||||
let harness = get_invalid_sigs_harness(&chain_segment).await;
|
||||
let mut snapshots = chain_segment.clone();
|
||||
@ -446,9 +596,12 @@ async fn invalid_signature_block_proposal() {
|
||||
block.clone(),
|
||||
junk_signature(),
|
||||
));
|
||||
let blocks = snapshots
|
||||
let blocks: Vec<RpcBlock<E>> = snapshots
|
||||
.iter()
|
||||
.map(|snapshot| snapshot.beacon_block.clone())
|
||||
.zip(chain_segment_blobs.iter())
|
||||
.map(|(snapshot, blobs)| {
|
||||
RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap()
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
// Ensure the block will be rejected if imported in a chain segment.
|
||||
assert!(
|
||||
@ -467,7 +620,7 @@ async fn invalid_signature_block_proposal() {
|
||||
|
||||
#[tokio::test]
|
||||
async fn invalid_signature_randao_reveal() {
|
||||
let chain_segment = get_chain_segment().await;
|
||||
let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await;
|
||||
for &block_index in BLOCK_INDICES {
|
||||
let harness = get_invalid_sigs_harness(&chain_segment).await;
|
||||
let mut snapshots = chain_segment.clone();
|
||||
@ -479,15 +632,23 @@ async fn invalid_signature_randao_reveal() {
|
||||
*block.body_mut().randao_reveal_mut() = junk_signature();
|
||||
snapshots[block_index].beacon_block =
|
||||
Arc::new(SignedBeaconBlock::from_block(block, signature));
|
||||
update_parent_roots(&mut snapshots);
|
||||
update_parent_roots(&mut snapshots, &mut chain_segment_blobs);
|
||||
update_proposal_signatures(&mut snapshots, &harness);
|
||||
assert_invalid_signature(&chain_segment, &harness, block_index, &snapshots, "randao").await;
|
||||
assert_invalid_signature(
|
||||
&chain_segment,
|
||||
&chain_segment_blobs,
|
||||
&harness,
|
||||
block_index,
|
||||
&snapshots,
|
||||
"randao",
|
||||
)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn invalid_signature_proposer_slashing() {
|
||||
let chain_segment = get_chain_segment().await;
|
||||
let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await;
|
||||
for &block_index in BLOCK_INDICES {
|
||||
let harness = get_invalid_sigs_harness(&chain_segment).await;
|
||||
let mut snapshots = chain_segment.clone();
|
||||
@ -513,10 +674,11 @@ async fn invalid_signature_proposer_slashing() {
|
||||
.expect("should update proposer slashing");
|
||||
snapshots[block_index].beacon_block =
|
||||
Arc::new(SignedBeaconBlock::from_block(block, signature));
|
||||
update_parent_roots(&mut snapshots);
|
||||
update_parent_roots(&mut snapshots, &mut chain_segment_blobs);
|
||||
update_proposal_signatures(&mut snapshots, &harness);
|
||||
assert_invalid_signature(
|
||||
&chain_segment,
|
||||
&chain_segment_blobs,
|
||||
&harness,
|
||||
block_index,
|
||||
&snapshots,
|
||||
@ -528,7 +690,7 @@ async fn invalid_signature_proposer_slashing() {
|
||||
|
||||
#[tokio::test]
|
||||
async fn invalid_signature_attester_slashing() {
|
||||
let chain_segment = get_chain_segment().await;
|
||||
let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await;
|
||||
for &block_index in BLOCK_INDICES {
|
||||
let harness = get_invalid_sigs_harness(&chain_segment).await;
|
||||
let mut snapshots = chain_segment.clone();
|
||||
@ -565,10 +727,11 @@ async fn invalid_signature_attester_slashing() {
|
||||
.expect("should update attester slashing");
|
||||
snapshots[block_index].beacon_block =
|
||||
Arc::new(SignedBeaconBlock::from_block(block, signature));
|
||||
update_parent_roots(&mut snapshots);
|
||||
update_parent_roots(&mut snapshots, &mut chain_segment_blobs);
|
||||
update_proposal_signatures(&mut snapshots, &harness);
|
||||
assert_invalid_signature(
|
||||
&chain_segment,
|
||||
&chain_segment_blobs,
|
||||
&harness,
|
||||
block_index,
|
||||
&snapshots,
|
||||
@ -580,7 +743,7 @@ async fn invalid_signature_attester_slashing() {
|
||||
|
||||
#[tokio::test]
|
||||
async fn invalid_signature_attestation() {
|
||||
let chain_segment = get_chain_segment().await;
|
||||
let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await;
|
||||
let mut checked_attestation = false;
|
||||
|
||||
for &block_index in BLOCK_INDICES {
|
||||
@ -595,10 +758,11 @@ async fn invalid_signature_attestation() {
|
||||
attestation.signature = junk_aggregate_signature();
|
||||
snapshots[block_index].beacon_block =
|
||||
Arc::new(SignedBeaconBlock::from_block(block, signature));
|
||||
update_parent_roots(&mut snapshots);
|
||||
update_parent_roots(&mut snapshots, &mut chain_segment_blobs);
|
||||
update_proposal_signatures(&mut snapshots, &harness);
|
||||
assert_invalid_signature(
|
||||
&chain_segment,
|
||||
&chain_segment_blobs,
|
||||
&harness,
|
||||
block_index,
|
||||
&snapshots,
|
||||
@ -617,7 +781,7 @@ async fn invalid_signature_attestation() {
|
||||
|
||||
#[tokio::test]
|
||||
async fn invalid_signature_deposit() {
|
||||
let chain_segment = get_chain_segment().await;
|
||||
let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await;
|
||||
for &block_index in BLOCK_INDICES {
|
||||
// Note: an invalid deposit signature is permitted!
|
||||
let harness = get_invalid_sigs_harness(&chain_segment).await;
|
||||
@ -643,11 +807,14 @@ async fn invalid_signature_deposit() {
|
||||
.expect("should update deposit");
|
||||
snapshots[block_index].beacon_block =
|
||||
Arc::new(SignedBeaconBlock::from_block(block, signature));
|
||||
update_parent_roots(&mut snapshots);
|
||||
update_parent_roots(&mut snapshots, &mut chain_segment_blobs);
|
||||
update_proposal_signatures(&mut snapshots, &harness);
|
||||
let blocks = snapshots
|
||||
let blocks: Vec<RpcBlock<E>> = snapshots
|
||||
.iter()
|
||||
.map(|snapshot| snapshot.beacon_block.clone())
|
||||
.zip(chain_segment_blobs.iter())
|
||||
.map(|(snapshot, blobs)| {
|
||||
RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap()
|
||||
})
|
||||
.collect();
|
||||
assert!(
|
||||
!matches!(
|
||||
@ -665,7 +832,7 @@ async fn invalid_signature_deposit() {
|
||||
|
||||
#[tokio::test]
|
||||
async fn invalid_signature_exit() {
|
||||
let chain_segment = get_chain_segment().await;
|
||||
let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await;
|
||||
for &block_index in BLOCK_INDICES {
|
||||
let harness = get_invalid_sigs_harness(&chain_segment).await;
|
||||
let mut snapshots = chain_segment.clone();
|
||||
@ -688,10 +855,11 @@ async fn invalid_signature_exit() {
|
||||
.expect("should update deposit");
|
||||
snapshots[block_index].beacon_block =
|
||||
Arc::new(SignedBeaconBlock::from_block(block, signature));
|
||||
update_parent_roots(&mut snapshots);
|
||||
update_parent_roots(&mut snapshots, &mut chain_segment_blobs);
|
||||
update_proposal_signatures(&mut snapshots, &harness);
|
||||
assert_invalid_signature(
|
||||
&chain_segment,
|
||||
&chain_segment_blobs,
|
||||
&harness,
|
||||
block_index,
|
||||
&snapshots,
|
||||
@ -711,7 +879,7 @@ fn unwrap_err<T, E>(result: Result<T, E>) -> E {
|
||||
#[tokio::test]
|
||||
async fn block_gossip_verification() {
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
let chain_segment = get_chain_segment().await;
|
||||
let (chain_segment, chain_segment_blobs) = get_chain_segment_with_signed_blobs().await;
|
||||
|
||||
let block_index = CHAIN_SEGMENT_LENGTH - 2;
|
||||
|
||||
@ -721,7 +889,10 @@ async fn block_gossip_verification() {
|
||||
.set_slot(chain_segment[block_index].beacon_block.slot().as_u64());
|
||||
|
||||
// Import the ancestors prior to the block we're testing.
|
||||
for snapshot in &chain_segment[0..block_index] {
|
||||
for (snapshot, blobs_opt) in chain_segment[0..block_index]
|
||||
.iter()
|
||||
.zip(chain_segment_blobs.iter())
|
||||
{
|
||||
let gossip_verified = harness
|
||||
.chain
|
||||
.verify_block_for_gossip(snapshot.beacon_block.clone())
|
||||
@ -738,6 +909,21 @@ async fn block_gossip_verification() {
|
||||
)
|
||||
.await
|
||||
.expect("should import valid gossip verified block");
|
||||
if let Some(blobs) = blobs_opt {
|
||||
for blob in blobs {
|
||||
let blob_index = blob.message.index;
|
||||
let gossip_verified = harness
|
||||
.chain
|
||||
.verify_blob_sidecar_for_gossip(blob.clone(), blob_index)
|
||||
.expect("should obtain gossip verified blob");
|
||||
|
||||
harness
|
||||
.chain
|
||||
.process_gossip_blob(gossip_verified)
|
||||
.await
|
||||
.expect("should import valid gossip verified blob");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Recompute the head to ensure we cache the latest view of fork choice.
|
||||
@ -762,7 +948,7 @@ async fn block_gossip_verification() {
|
||||
*block.slot_mut() = expected_block_slot;
|
||||
assert!(
|
||||
matches!(
|
||||
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await),
|
||||
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await),
|
||||
BlockError::FutureSlot {
|
||||
present_slot,
|
||||
block_slot,
|
||||
@ -796,7 +982,7 @@ async fn block_gossip_verification() {
|
||||
*block.slot_mut() = expected_finalized_slot;
|
||||
assert!(
|
||||
matches!(
|
||||
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await),
|
||||
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await),
|
||||
BlockError::WouldRevertFinalizedSlot {
|
||||
block_slot,
|
||||
finalized_slot,
|
||||
@ -826,10 +1012,9 @@ async fn block_gossip_verification() {
|
||||
unwrap_err(
|
||||
harness
|
||||
.chain
|
||||
.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(
|
||||
block,
|
||||
junk_signature()
|
||||
)))
|
||||
.verify_block_for_gossip(
|
||||
Arc::new(SignedBeaconBlock::from_block(block, junk_signature())).into()
|
||||
)
|
||||
.await
|
||||
),
|
||||
BlockError::ProposalSignatureInvalid
|
||||
@ -854,7 +1039,7 @@ async fn block_gossip_verification() {
|
||||
*block.parent_root_mut() = parent_root;
|
||||
assert!(
|
||||
matches!(
|
||||
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await),
|
||||
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await),
|
||||
BlockError::ParentUnknown(block)
|
||||
if block.parent_root() == parent_root
|
||||
),
|
||||
@ -880,7 +1065,7 @@ async fn block_gossip_verification() {
|
||||
*block.parent_root_mut() = parent_root;
|
||||
assert!(
|
||||
matches!(
|
||||
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await),
|
||||
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await),
|
||||
BlockError::NotFinalizedDescendant { block_parent_root }
|
||||
if block_parent_root == parent_root
|
||||
),
|
||||
@ -918,7 +1103,7 @@ async fn block_gossip_verification() {
|
||||
);
|
||||
assert!(
|
||||
matches!(
|
||||
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await),
|
||||
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone()).into()).await),
|
||||
BlockError::IncorrectBlockProposer {
|
||||
block,
|
||||
local_shuffling,
|
||||
@ -930,7 +1115,7 @@ async fn block_gossip_verification() {
|
||||
// Check to ensure that we registered this as a valid block from this proposer.
|
||||
assert!(
|
||||
matches!(
|
||||
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await),
|
||||
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone()).into()).await),
|
||||
BlockError::BlockIsAlreadyKnown,
|
||||
),
|
||||
"should register any valid signature against the proposer, even if the block failed later verification"
|
||||
@ -956,7 +1141,7 @@ async fn block_gossip_verification() {
|
||||
matches!(
|
||||
harness
|
||||
.chain
|
||||
.verify_block_for_gossip(block.clone())
|
||||
.verify_block_for_gossip(block.clone().into())
|
||||
.await
|
||||
.err()
|
||||
.expect("should error when processing known block"),
|
||||
@ -984,14 +1169,29 @@ async fn verify_block_for_gossip_slashing_detection() {
|
||||
harness.advance_slot();
|
||||
|
||||
let state = harness.get_current_state();
|
||||
let (block1, _) = harness.make_block(state.clone(), Slot::new(1)).await;
|
||||
let (block2, _) = harness.make_block(state, Slot::new(1)).await;
|
||||
let ((block1, blobs1), _) = harness.make_block(state.clone(), Slot::new(1)).await;
|
||||
let ((block2, _blobs2), _) = harness.make_block(state, Slot::new(1)).await;
|
||||
|
||||
let verified_block = harness
|
||||
.chain
|
||||
.verify_block_for_gossip(Arc::new(block1))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
if let Some(blobs) = blobs1 {
|
||||
for blob in blobs {
|
||||
let blob_index = blob.message.index;
|
||||
let verified_blob = harness
|
||||
.chain
|
||||
.verify_blob_sidecar_for_gossip(blob, blob_index)
|
||||
.unwrap();
|
||||
harness
|
||||
.chain
|
||||
.process_gossip_blob(verified_blob)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
harness
|
||||
.chain
|
||||
.process_block(
|
||||
@ -1024,7 +1224,7 @@ async fn verify_block_for_gossip_doppelganger_detection() {
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
|
||||
let state = harness.get_current_state();
|
||||
let (block, _) = harness.make_block(state.clone(), Slot::new(1)).await;
|
||||
let ((block, _), _) = harness.make_block(state.clone(), Slot::new(1)).await;
|
||||
|
||||
let verified_block = harness
|
||||
.chain
|
||||
@ -1111,7 +1311,7 @@ async fn add_base_block_to_altair_chain() {
|
||||
// Produce an Altair block.
|
||||
let state = harness.get_current_state();
|
||||
let slot = harness.get_current_slot();
|
||||
let (altair_signed_block, _) = harness.make_block(state.clone(), slot).await;
|
||||
let ((altair_signed_block, _), _) = harness.make_block(state.clone(), slot).await;
|
||||
let altair_block = &altair_signed_block
|
||||
.as_altair()
|
||||
.expect("test expects an altair block")
|
||||
@ -1168,7 +1368,7 @@ async fn add_base_block_to_altair_chain() {
|
||||
assert!(matches!(
|
||||
harness
|
||||
.chain
|
||||
.verify_block_for_gossip(Arc::new(base_block.clone()))
|
||||
.verify_block_for_gossip(Arc::new(base_block.clone()).into())
|
||||
.await
|
||||
.err()
|
||||
.expect("should error when processing base block"),
|
||||
@ -1201,7 +1401,10 @@ async fn add_base_block_to_altair_chain() {
|
||||
assert!(matches!(
|
||||
harness
|
||||
.chain
|
||||
.process_chain_segment(vec![Arc::new(base_block)], NotifyExecutionLayer::Yes,)
|
||||
.process_chain_segment(
|
||||
vec![RpcBlock::new_without_blobs(None, Arc::new(base_block))],
|
||||
NotifyExecutionLayer::Yes,
|
||||
)
|
||||
.await,
|
||||
ChainSegmentResult::Failed {
|
||||
imported_blocks: 0,
|
||||
@ -1245,7 +1448,7 @@ async fn add_altair_block_to_base_chain() {
|
||||
// Produce an altair block.
|
||||
let state = harness.get_current_state();
|
||||
let slot = harness.get_current_slot();
|
||||
let (base_signed_block, _) = harness.make_block(state.clone(), slot).await;
|
||||
let ((base_signed_block, _), _) = harness.make_block(state.clone(), slot).await;
|
||||
let base_block = &base_signed_block
|
||||
.as_base()
|
||||
.expect("test expects a base block")
|
||||
@ -1303,7 +1506,7 @@ async fn add_altair_block_to_base_chain() {
|
||||
assert!(matches!(
|
||||
harness
|
||||
.chain
|
||||
.verify_block_for_gossip(Arc::new(altair_block.clone()))
|
||||
.verify_block_for_gossip(Arc::new(altair_block.clone()).into())
|
||||
.await
|
||||
.err()
|
||||
.expect("should error when processing altair block"),
|
||||
@ -1336,7 +1539,10 @@ async fn add_altair_block_to_base_chain() {
|
||||
assert!(matches!(
|
||||
harness
|
||||
.chain
|
||||
.process_chain_segment(vec![Arc::new(altair_block)], NotifyExecutionLayer::Yes)
|
||||
.process_chain_segment(
|
||||
vec![RpcBlock::new_without_blobs(None, Arc::new(altair_block))],
|
||||
NotifyExecutionLayer::Yes
|
||||
)
|
||||
.await,
|
||||
ChainSegmentResult::Failed {
|
||||
imported_blocks: 0,
|
||||
@ -1386,7 +1592,8 @@ async fn import_duplicate_block_unrealized_justification() {
|
||||
// Produce a block to justify epoch 2.
|
||||
let state = harness.get_current_state();
|
||||
let slot = harness.get_current_slot();
|
||||
let (block, _) = harness.make_block(state.clone(), slot).await;
|
||||
let (block_contents, _) = harness.make_block(state.clone(), slot).await;
|
||||
let (block, _) = block_contents;
|
||||
let block = Arc::new(block);
|
||||
let block_root = block.canonical_root();
|
||||
|
||||
@ -1402,9 +1609,7 @@ async fn import_duplicate_block_unrealized_justification() {
|
||||
.unwrap();
|
||||
|
||||
// Import the first block, simulating a block processed via a finalized chain segment.
|
||||
chain
|
||||
.clone()
|
||||
.import_execution_pending_block(verified_block1)
|
||||
import_execution_pending_block(chain.clone(), verified_block1)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
@ -1423,9 +1628,7 @@ async fn import_duplicate_block_unrealized_justification() {
|
||||
drop(fc);
|
||||
|
||||
// Import the second verified block, simulating a block processed via RPC.
|
||||
chain
|
||||
.clone()
|
||||
.import_execution_pending_block(verified_block2)
|
||||
import_execution_pending_block(chain.clone(), verified_block2)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
@ -1444,3 +1647,23 @@ async fn import_duplicate_block_unrealized_justification() {
|
||||
Some(unrealized_justification)
|
||||
);
|
||||
}
|
||||
|
||||
async fn import_execution_pending_block<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>,
execution_pending_block: ExecutionPendingBlock<T>,
) -> Result<AvailabilityProcessingStatus, String> {
match chain
.clone()
.into_executed_block(execution_pending_block)
.await
.unwrap()
{
ExecutedBlock::Available(block) => chain
.import_available_block(Box::from(block))
.await
.map_err(|e| format!("{e:?}")),
ExecutedBlock::AvailabilityPending(_) => {
Err("AvailabilityPending not expected in this test. Block not imported.".to_string())
}
}
}
|
||||
|
99
beacon_node/beacon_chain/tests/events.rs
Normal file
99
beacon_node/beacon_chain/tests/events.rs
Normal file
@ -0,0 +1,99 @@
|
||||
use beacon_chain::blob_verification::GossipVerifiedBlob;
|
||||
use beacon_chain::test_utils::BeaconChainHarness;
|
||||
use bls::Signature;
|
||||
use eth2::types::{EventKind, SseBlobSidecar};
|
||||
use rand::rngs::StdRng;
|
||||
use rand::SeedableRng;
|
||||
use std::marker::PhantomData;
|
||||
use std::sync::Arc;
|
||||
use types::blob_sidecar::FixedBlobSidecarList;
|
||||
use types::{BlobSidecar, EthSpec, ForkName, MinimalEthSpec, SignedBlobSidecar};
|
||||
|
||||
type E = MinimalEthSpec;
|
||||
|
||||
/// Verifies that a blob event is emitted when a gossip verified blob is received via gossip or the publish block API.
|
||||
#[tokio::test]
|
||||
async fn blob_sidecar_event_on_process_gossip_blob() {
|
||||
let spec = ForkName::Deneb.make_genesis_spec(E::default_spec());
|
||||
let harness = BeaconChainHarness::builder(E::default())
|
||||
.spec(spec)
|
||||
.deterministic_keypairs(8)
|
||||
.fresh_ephemeral_store()
|
||||
.mock_execution_layer()
|
||||
.build();
|
||||
|
||||
// subscribe to blob sidecar events
|
||||
let event_handler = harness.chain.event_handler.as_ref().unwrap();
|
||||
let mut blob_event_receiver = event_handler.subscribe_blob_sidecar();
|
||||
|
||||
// build and process a gossip verified blob
|
||||
let kzg = harness.chain.kzg.as_ref().unwrap();
|
||||
let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64);
|
||||
let signed_sidecar = SignedBlobSidecar {
|
||||
message: BlobSidecar::random_valid(&mut rng, kzg)
|
||||
.map(Arc::new)
|
||||
.unwrap(),
|
||||
signature: Signature::empty(),
|
||||
_phantom: PhantomData,
|
||||
};
|
||||
let gossip_verified_blob = GossipVerifiedBlob::__assumed_valid(signed_sidecar);
|
||||
let expected_sse_blobs = SseBlobSidecar::from_blob_sidecar(gossip_verified_blob.as_blob());
|
||||
|
||||
let _ = harness
|
||||
.chain
|
||||
.process_gossip_blob(gossip_verified_blob)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let sidecar_event = blob_event_receiver.try_recv().unwrap();
|
||||
assert_eq!(sidecar_event, EventKind::BlobSidecar(expected_sse_blobs));
|
||||
}
|
||||
|
||||
/// Verifies that a blob event is emitted when blobs are received via RPC.
|
||||
#[tokio::test]
|
||||
async fn blob_sidecar_event_on_process_rpc_blobs() {
|
||||
let spec = ForkName::Deneb.make_genesis_spec(E::default_spec());
|
||||
let harness = BeaconChainHarness::builder(E::default())
|
||||
.spec(spec)
|
||||
.deterministic_keypairs(8)
|
||||
.fresh_ephemeral_store()
|
||||
.mock_execution_layer()
|
||||
.build();
|
||||
|
||||
// subscribe to blob sidecar events
|
||||
let event_handler = harness.chain.event_handler.as_ref().unwrap();
|
||||
let mut blob_event_receiver = event_handler.subscribe_blob_sidecar();
|
||||
|
||||
// build and process multiple rpc blobs
|
||||
let kzg = harness.chain.kzg.as_ref().unwrap();
|
||||
let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64);
|
||||
|
||||
let blob_1 = BlobSidecar::random_valid(&mut rng, kzg)
|
||||
.map(Arc::new)
|
||||
.unwrap();
|
||||
let blob_2 = Arc::new(BlobSidecar {
|
||||
index: 1,
|
||||
..BlobSidecar::random_valid(&mut rng, kzg).unwrap()
|
||||
});
|
||||
let blobs = FixedBlobSidecarList::from(vec![Some(blob_1.clone()), Some(blob_2.clone())]);
|
||||
let expected_sse_blobs = vec![
|
||||
SseBlobSidecar::from_blob_sidecar(blob_1.as_ref()),
|
||||
SseBlobSidecar::from_blob_sidecar(blob_2.as_ref()),
|
||||
];
|
||||
|
||||
let _ = harness
|
||||
.chain
|
||||
.process_rpc_blobs(blob_1.slot, blob_1.block_root, blobs)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let mut sse_blobs: Vec<SseBlobSidecar> = vec![];
|
||||
while let Ok(sidecar_event) = blob_event_receiver.try_recv() {
|
||||
if let EventKind::BlobSidecar(sse_blob_sidecar) = sidecar_event {
|
||||
sse_blobs.push(sse_blob_sidecar);
|
||||
} else {
|
||||
panic!("`BlobSidecar` event kind expected.");
|
||||
}
|
||||
}
|
||||
assert_eq!(sse_blobs, expected_sse_blobs);
|
||||
}
|
@ -2,6 +2,7 @@ mod attestation_production;
|
||||
mod attestation_verification;
|
||||
mod block_verification;
|
||||
mod capella;
|
||||
mod events;
|
||||
mod merge;
|
||||
mod op_verification;
|
||||
mod payload_invalidation;
|
||||
|
@ -31,8 +31,16 @@ fn get_store(db_path: &TempDir) -> Arc<HotColdDB> {
|
||||
let cold_path = db_path.path().join("cold_db");
|
||||
let config = StoreConfig::default();
|
||||
let log = NullLoggerBuilder.build().expect("logger should build");
|
||||
HotColdDB::open(&hot_path, &cold_path, |_, _, _| Ok(()), config, spec, log)
|
||||
.expect("disk store should initialize")
|
||||
HotColdDB::open(
|
||||
&hot_path,
|
||||
&cold_path,
|
||||
None,
|
||||
|_, _, _| Ok(()),
|
||||
config,
|
||||
spec,
|
||||
log,
|
||||
)
|
||||
.expect("disk store should initialize")
|
||||
}
|
||||
|
||||
fn get_harness(store: Arc<HotColdDB>, validator_count: usize) -> TestHarness {
|
||||
|
@ -171,7 +171,7 @@ impl InvalidPayloadRig {
|
||||
async fn build_blocks(&mut self, num_blocks: u64, is_valid: Payload) -> Vec<Hash256> {
|
||||
let mut roots = Vec::with_capacity(num_blocks as usize);
|
||||
for _ in 0..num_blocks {
|
||||
roots.push(self.import_block(is_valid.clone()).await);
|
||||
roots.push(self.import_block(is_valid).await);
|
||||
}
|
||||
roots
|
||||
}
|
||||
@ -225,7 +225,7 @@ impl InvalidPayloadRig {
|
||||
let head = self.harness.chain.head_snapshot();
|
||||
let state = head.beacon_state.clone_with_only_committee_caches();
|
||||
let slot = slot_override.unwrap_or(state.slot() + 1);
|
||||
let (block, post_state) = self.harness.make_block(state, slot).await;
|
||||
let ((block, blobs), post_state) = self.harness.make_block(state, slot).await;
|
||||
let block_root = block.canonical_root();
|
||||
|
||||
let set_new_payload = |payload: Payload| match payload {
|
||||
@ -289,7 +289,7 @@ impl InvalidPayloadRig {
|
||||
}
|
||||
let root = self
|
||||
.harness
|
||||
.process_block(slot, block.canonical_root(), block.clone())
|
||||
.process_block(slot, block.canonical_root(), (block.clone(), blobs.clone()))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
@ -330,7 +330,7 @@ impl InvalidPayloadRig {
|
||||
|
||||
match self
|
||||
.harness
|
||||
.process_block(slot, block.canonical_root(), block)
|
||||
.process_block(slot, block.canonical_root(), (block, blobs))
|
||||
.await
|
||||
{
|
||||
Err(error) if evaluate_error(&error) => (),
|
||||
@ -693,7 +693,8 @@ async fn invalidates_all_descendants() {
|
||||
.state_at_slot(fork_parent_slot, StateSkipConfig::WithStateRoots)
|
||||
.unwrap();
|
||||
assert_eq!(fork_parent_state.slot(), fork_parent_slot);
|
||||
let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await;
|
||||
let ((fork_block, _), _fork_post_state) =
|
||||
rig.harness.make_block(fork_parent_state, fork_slot).await;
|
||||
let fork_block_root = rig
|
||||
.harness
|
||||
.chain
|
||||
@ -704,6 +705,8 @@ async fn invalidates_all_descendants() {
|
||||
|| Ok(()),
|
||||
)
|
||||
.await
|
||||
.unwrap()
|
||||
.try_into()
|
||||
.unwrap();
|
||||
rig.recompute_head().await;
|
||||
|
||||
@ -789,7 +792,8 @@ async fn switches_heads() {
|
||||
.state_at_slot(fork_parent_slot, StateSkipConfig::WithStateRoots)
|
||||
.unwrap();
|
||||
assert_eq!(fork_parent_state.slot(), fork_parent_slot);
|
||||
let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await;
|
||||
let ((fork_block, _), _fork_post_state) =
|
||||
rig.harness.make_block(fork_parent_state, fork_slot).await;
|
||||
let fork_parent_root = fork_block.parent_root();
|
||||
let fork_block_root = rig
|
||||
.harness
|
||||
@ -801,6 +805,8 @@ async fn switches_heads() {
|
||||
|| Ok(()),
|
||||
)
|
||||
.await
|
||||
.unwrap()
|
||||
.try_into()
|
||||
.unwrap();
|
||||
rig.recompute_head().await;
|
||||
|
||||
@ -815,13 +821,16 @@ async fn switches_heads() {
|
||||
})
|
||||
.await;
|
||||
|
||||
// The fork block should become the head.
|
||||
assert_eq!(rig.harness.head_block_root(), fork_block_root);
|
||||
// NOTE: The `import_block` method above will cause the `ExecutionStatus` of the
|
||||
// `fork_block_root`'s payload to switch from `Optimistic` to `Invalid`. This means it *won't*
|
||||
// be set as head, its parent block will instead. This is an issue with the mock EL and/or
|
||||
// the payload invalidation rig.
|
||||
assert_eq!(rig.harness.head_block_root(), fork_parent_root);
|
||||
|
||||
// The fork block has not yet been validated.
|
||||
assert!(rig
|
||||
.execution_status(fork_block_root)
|
||||
.is_strictly_optimistic());
|
||||
.is_optimistic_or_invalid());
|
||||
|
||||
for root in blocks {
|
||||
let slot = rig
|
||||
@ -1012,6 +1021,7 @@ async fn payload_preparation() {
|
||||
.unwrap(),
|
||||
fee_recipient,
|
||||
None,
|
||||
None,
|
||||
);
|
||||
assert_eq!(rig.previous_payload_attributes(), payload_attributes);
|
||||
}
|
||||
@ -1034,8 +1044,8 @@ async fn invalid_parent() {
|
||||
// Produce another block atop the parent, but don't import yet.
|
||||
let slot = parent_block.slot() + 1;
|
||||
rig.harness.set_current_slot(slot);
|
||||
let (block, state) = rig.harness.make_block(parent_state, slot).await;
|
||||
let block = Arc::new(block);
|
||||
let (block_tuple, state) = rig.harness.make_block(parent_state, slot).await;
|
||||
let block = Arc::new(block_tuple.0);
|
||||
let block_root = block.canonical_root();
|
||||
assert_eq!(block.parent_root(), parent_root);
|
||||
|
||||
@ -1045,7 +1055,7 @@ async fn invalid_parent() {
|
||||
|
||||
// Ensure the block built atop an invalid payload is invalid for gossip.
|
||||
assert!(matches!(
|
||||
rig.harness.chain.clone().verify_block_for_gossip(block.clone()).await,
|
||||
rig.harness.chain.clone().verify_block_for_gossip(block.clone().into()).await,
|
||||
Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root })
|
||||
if invalid_root == parent_root
|
||||
));
|
||||
@ -1428,13 +1438,13 @@ async fn build_optimistic_chain(
|
||||
.server
|
||||
.all_get_block_by_hash_requests_return_natural_value();
|
||||
|
||||
return rig;
|
||||
rig
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn optimistic_transition_block_valid_unfinalized() {
|
||||
let ttd = 42;
|
||||
let num_blocks = 16 as usize;
|
||||
let num_blocks = 16_usize;
|
||||
let rig = build_optimistic_chain(ttd, ttd, num_blocks).await;
|
||||
|
||||
let post_transition_block_root = rig
|
||||
@ -1488,7 +1498,7 @@ async fn optimistic_transition_block_valid_unfinalized() {
|
||||
#[tokio::test]
|
||||
async fn optimistic_transition_block_valid_finalized() {
|
||||
let ttd = 42;
|
||||
let num_blocks = 130 as usize;
|
||||
let num_blocks = 130_usize;
|
||||
let rig = build_optimistic_chain(ttd, ttd, num_blocks).await;
|
||||
|
||||
let post_transition_block_root = rig
|
||||
@ -1543,7 +1553,7 @@ async fn optimistic_transition_block_valid_finalized() {
|
||||
async fn optimistic_transition_block_invalid_unfinalized() {
|
||||
let block_ttd = 42;
|
||||
let rig_ttd = 1337;
|
||||
let num_blocks = 22 as usize;
|
||||
let num_blocks = 22_usize;
|
||||
let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await;
|
||||
|
||||
let post_transition_block_root = rig
|
||||
@ -1619,7 +1629,7 @@ async fn optimistic_transition_block_invalid_unfinalized() {
|
||||
async fn optimistic_transition_block_invalid_unfinalized_syncing_ee() {
|
||||
let block_ttd = 42;
|
||||
let rig_ttd = 1337;
|
||||
let num_blocks = 22 as usize;
|
||||
let num_blocks = 22_usize;
|
||||
let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await;
|
||||
|
||||
let post_transition_block_root = rig
|
||||
@ -1732,7 +1742,7 @@ async fn optimistic_transition_block_invalid_unfinalized_syncing_ee() {
|
||||
async fn optimistic_transition_block_invalid_finalized() {
|
||||
let block_ttd = 42;
|
||||
let rig_ttd = 1337;
|
||||
let num_blocks = 130 as usize;
|
||||
let num_blocks = 130_usize;
|
||||
let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await;
|
||||
|
||||
let post_transition_block_root = rig
|
||||
@ -1854,8 +1864,8 @@ impl InvalidHeadSetup {
|
||||
.chain
|
||||
.state_at_slot(slot - 1, StateSkipConfig::WithStateRoots)
|
||||
.unwrap();
|
||||
let (fork_block, _) = rig.harness.make_block(parent_state, slot).await;
|
||||
opt_fork_block = Some(Arc::new(fork_block));
|
||||
let (fork_block_tuple, _) = rig.harness.make_block(parent_state, slot).await;
|
||||
opt_fork_block = Some(Arc::new(fork_block_tuple.0));
|
||||
} else {
|
||||
// Skipped slot.
|
||||
};
|
||||
|
@ -1,17 +1,21 @@
|
||||
#![cfg(not(debug_assertions))]
|
||||
|
||||
use beacon_chain::attestation_verification::Error as AttnError;
|
||||
use beacon_chain::block_verification_types::RpcBlock;
|
||||
use beacon_chain::builder::BeaconChainBuilder;
|
||||
use beacon_chain::schema_change::migrate_schema;
|
||||
use beacon_chain::test_utils::{
|
||||
test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType,
|
||||
mock_execution_layer_from_parts, test_spec, AttestationStrategy, BeaconChainHarness,
|
||||
BlockStrategy, DiskHarnessType,
|
||||
};
|
||||
use beacon_chain::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD;
|
||||
use beacon_chain::{
|
||||
historical_blocks::HistoricalBlockError, migrate::MigratorConfig, BeaconChain,
|
||||
BeaconChainError, BeaconChainTypes, BeaconSnapshot, BlockError, ChainConfig,
|
||||
NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped,
|
||||
data_availability_checker::MaybeAvailableBlock, historical_blocks::HistoricalBlockError,
|
||||
migrate::MigratorConfig, BeaconChain, BeaconChainError, BeaconChainTypes, BeaconSnapshot,
|
||||
BlockError, ChainConfig, NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped,
|
||||
};
|
||||
use eth2_network_config::get_trusted_setup;
|
||||
use kzg::TrustedSetup;
|
||||
use lazy_static::lazy_static;
|
||||
use logging::test_logger;
|
||||
use maplit::hashset;
|
||||
@ -47,20 +51,28 @@ type E = MinimalEthSpec;
|
||||
type TestHarness = BeaconChainHarness<DiskHarnessType<E>>;
|
||||
|
||||
fn get_store(db_path: &TempDir) -> Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>> {
|
||||
get_store_with_spec(db_path, test_spec::<E>())
|
||||
get_store_generic(db_path, StoreConfig::default(), test_spec::<E>())
|
||||
}
|
||||
|
||||
fn get_store_with_spec(
|
||||
fn get_store_generic(
|
||||
db_path: &TempDir,
|
||||
config: StoreConfig,
|
||||
spec: ChainSpec,
|
||||
) -> Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>> {
|
||||
let hot_path = db_path.path().join("hot_db");
|
||||
let cold_path = db_path.path().join("cold_db");
|
||||
let config = StoreConfig::default();
|
||||
let log = test_logger();
|
||||
|
||||
HotColdDB::open(&hot_path, &cold_path, |_, _, _| Ok(()), config, spec, log)
|
||||
.expect("disk store should initialize")
|
||||
HotColdDB::open(
|
||||
&hot_path,
|
||||
&cold_path,
|
||||
None,
|
||||
|_, _, _| Ok(()),
|
||||
config,
|
||||
spec,
|
||||
log,
|
||||
)
|
||||
.expect("disk store should initialize")
|
||||
}
|
||||
|
||||
fn get_harness(
|
||||
@ -80,8 +92,8 @@ fn get_harness_generic(
|
||||
validator_count: usize,
|
||||
chain_config: ChainConfig,
|
||||
) -> TestHarness {
|
||||
let harness = BeaconChainHarness::builder(MinimalEthSpec)
|
||||
.default_spec()
|
||||
let harness = TestHarness::builder(MinimalEthSpec)
|
||||
.spec(store.get_chain_spec().clone())
|
||||
.keypairs(KEYPAIRS[0..validator_count].to_vec())
|
||||
.logger(store.logger().clone())
|
||||
.fresh_disk_store(store)
|
||||
@ -707,7 +719,7 @@ async fn multi_epoch_fork_valid_blocks_test(
|
||||
let store = get_store(&db_path);
|
||||
let validators_keypairs =
|
||||
types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT);
|
||||
let harness = BeaconChainHarness::builder(MinimalEthSpec)
|
||||
let harness = TestHarness::builder(MinimalEthSpec)
|
||||
.default_spec()
|
||||
.keypairs(validators_keypairs)
|
||||
.fresh_disk_store(store)
|
||||
@ -1079,7 +1091,7 @@ async fn prunes_abandoned_fork_between_two_finalized_checkpoints() {
|
||||
);
|
||||
}
|
||||
|
||||
assert_eq!(rig.get_finalized_checkpoints(), hashset! {},);
|
||||
assert_eq!(rig.get_finalized_checkpoints(), hashset! {});
|
||||
|
||||
assert!(rig.chain.knows_head(&stray_head));
|
||||
|
||||
@ -1106,8 +1118,11 @@ async fn prunes_abandoned_fork_between_two_finalized_checkpoints() {
|
||||
for &block_hash in stray_blocks.values() {
|
||||
assert!(
|
||||
!rig.block_exists(block_hash),
|
||||
"abandoned block {} should have been pruned",
|
||||
block_hash
|
||||
"abandoned block {block_hash:?} should have been pruned",
|
||||
);
|
||||
assert!(
|
||||
!rig.chain.store.blobs_exist(&block_hash.into()).unwrap(),
|
||||
"blobs for abandoned block {block_hash:?} should have been pruned"
|
||||
);
|
||||
}
|
||||
|
||||
@ -1796,6 +1811,10 @@ fn check_no_blocks_exist<'a>(
|
||||
"did not expect block {:?} to be in the DB",
|
||||
block_hash
|
||||
);
|
||||
assert!(
|
||||
!harness.chain.store.blobs_exist(&block_hash.into()).unwrap(),
|
||||
"blobs for abandoned block {block_hash:?} should have been pruned"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1997,7 +2016,7 @@ async fn garbage_collect_temp_states_from_failed_block() {
|
||||
|
||||
let genesis_state = harness.get_current_state();
|
||||
let block_slot = Slot::new(2 * slots_per_epoch);
|
||||
let (signed_block, state) = harness.make_block(genesis_state, block_slot).await;
|
||||
let ((signed_block, _), state) = harness.make_block(genesis_state, block_slot).await;
|
||||
|
||||
let (mut block, _) = signed_block.deconstruct();
|
||||
|
||||
@ -2013,7 +2032,10 @@ async fn garbage_collect_temp_states_from_failed_block() {
|
||||
|
||||
// The block should be rejected, but should store a bunch of temporary states.
|
||||
harness.set_current_slot(block_slot);
|
||||
harness.process_block_result(block).await.unwrap_err();
|
||||
harness
|
||||
.process_block_result((block, None))
|
||||
.await
|
||||
.unwrap_err();
|
||||
|
||||
assert_eq!(
|
||||
store.iter_temporary_state_roots().count(),
|
||||
@ -2132,6 +2154,13 @@ async fn weak_subjectivity_sync_test(slots: Vec<Slot>, checkpoint_slot: Slot) {
|
||||
let store = get_store(&temp2);
|
||||
let spec = test_spec::<E>();
|
||||
let seconds_per_slot = spec.seconds_per_slot;
|
||||
let trusted_setup: TrustedSetup =
|
||||
serde_json::from_reader(get_trusted_setup::<<E as EthSpec>::Kzg>())
|
||||
.map_err(|e| println!("Unable to read trusted setup file: {}", e))
|
||||
.unwrap();
|
||||
|
||||
let mock =
|
||||
mock_execution_layer_from_parts(&harness.spec, harness.runtime.task_executor.clone(), None);
|
||||
|
||||
// Initialise a new beacon chain from the finalized checkpoint.
|
||||
// The slot clock must be set to a time ahead of the checkpoint state.
|
||||
@ -2141,28 +2170,30 @@ async fn weak_subjectivity_sync_test(slots: Vec<Slot>, checkpoint_slot: Slot) {
|
||||
Duration::from_secs(seconds_per_slot),
|
||||
);
|
||||
slot_clock.set_slot(harness.get_current_slot().as_u64());
|
||||
let beacon_chain = Arc::new(
|
||||
BeaconChainBuilder::new(MinimalEthSpec)
|
||||
.store(store.clone())
|
||||
.custom_spec(test_spec::<E>())
|
||||
.task_executor(harness.chain.task_executor.clone())
|
||||
.logger(log.clone())
|
||||
.weak_subjectivity_state(wss_state, wss_block.clone(), genesis_state)
|
||||
.unwrap()
|
||||
.store_migrator_config(MigratorConfig::default().blocking())
|
||||
.dummy_eth1_backend()
|
||||
.expect("should build dummy backend")
|
||||
.slot_clock(slot_clock)
|
||||
.shutdown_sender(shutdown_tx)
|
||||
.chain_config(ChainConfig::default())
|
||||
.event_handler(Some(ServerSentEventHandler::new_with_capacity(
|
||||
log.clone(),
|
||||
1,
|
||||
)))
|
||||
.monitor_validators(true, vec![], DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, log)
|
||||
.build()
|
||||
.expect("should build"),
|
||||
);
|
||||
let beacon_chain = BeaconChainBuilder::<DiskHarnessType<E>>::new(MinimalEthSpec)
|
||||
.store(store.clone())
|
||||
.custom_spec(test_spec::<E>())
|
||||
.task_executor(harness.chain.task_executor.clone())
|
||||
.logger(log.clone())
|
||||
.weak_subjectivity_state(wss_state, wss_block.clone(), genesis_state)
|
||||
.unwrap()
|
||||
.store_migrator_config(MigratorConfig::default().blocking())
|
||||
.dummy_eth1_backend()
|
||||
.expect("should build dummy backend")
|
||||
.slot_clock(slot_clock)
|
||||
.shutdown_sender(shutdown_tx)
|
||||
.chain_config(ChainConfig::default())
|
||||
.event_handler(Some(ServerSentEventHandler::new_with_capacity(
|
||||
log.clone(),
|
||||
1,
|
||||
)))
|
||||
.execution_layer(Some(mock.el))
|
||||
.monitor_validators(true, vec![], DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, log)
|
||||
.trusted_setup(trusted_setup)
|
||||
.build()
|
||||
.expect("should build");
|
||||
|
||||
let beacon_chain = Arc::new(beacon_chain);
|
||||
|
||||
// Apply blocks forward to reach head.
|
||||
let chain_dump = harness.chain.chain_dump().unwrap();
|
||||
@ -2171,12 +2202,14 @@ async fn weak_subjectivity_sync_test(slots: Vec<Slot>, checkpoint_slot: Slot) {
|
||||
.filter(|snapshot| snapshot.beacon_block.slot() > checkpoint_slot);
|
||||
|
||||
for snapshot in new_blocks {
|
||||
let block_root = snapshot.beacon_block_root;
|
||||
let full_block = harness
|
||||
.chain
|
||||
.get_block(&snapshot.beacon_block_root)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let blobs = harness.chain.get_blobs(&block_root).expect("blobs");
|
||||
let slot = full_block.slot();
|
||||
let state_root = full_block.state_root();
|
||||
|
||||
@ -2184,7 +2217,7 @@ async fn weak_subjectivity_sync_test(slots: Vec<Slot>, checkpoint_slot: Slot) {
|
||||
beacon_chain
|
||||
.process_block(
|
||||
full_block.canonical_root(),
|
||||
Arc::new(full_block),
|
||||
RpcBlock::new(Some(block_root), Arc::new(full_block), Some(blobs)).unwrap(),
|
||||
NotifyExecutionLayer::Yes,
|
||||
|| Ok(()),
|
||||
)
|
||||
@ -2229,14 +2262,38 @@ async fn weak_subjectivity_sync_test(slots: Vec<Slot>, checkpoint_slot: Slot) {
|
||||
.filter(|s| s.beacon_block.slot() != 0)
|
||||
.map(|s| s.beacon_block.clone())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
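// Reconstruct each blinded historical block with its blobs and run it through the data
// availability checker, so that only available blocks are handed to
// `import_historical_block_batch` below.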
let mut available_blocks = vec![];
|
||||
for blinded in historical_blocks {
|
||||
let block_root = blinded.canonical_root();
|
||||
let full_block = harness
|
||||
.chain
|
||||
.get_block(&block_root)
|
||||
.await
|
||||
.expect("should get block")
|
||||
.expect("should get block");
|
||||
let blobs = harness.chain.get_blobs(&block_root).expect("blobs");
|
||||
|
||||
if let MaybeAvailableBlock::Available(block) = harness
|
||||
.chain
|
||||
.data_availability_checker
|
||||
.check_rpc_block_availability(
|
||||
RpcBlock::new(Some(block_root), Arc::new(full_block), Some(blobs)).unwrap(),
|
||||
)
|
||||
.expect("should check availability")
|
||||
{
|
||||
available_blocks.push(block);
|
||||
}
|
||||
}
|
||||
|
||||
beacon_chain
|
||||
.import_historical_block_batch(historical_blocks.clone())
|
||||
.import_historical_block_batch(available_blocks.clone())
|
||||
.unwrap();
|
||||
assert_eq!(beacon_chain.store.get_oldest_block_slot(), 0);
|
||||
|
||||
// Resupplying the blocks should not fail, they can be safely ignored.
|
||||
beacon_chain
|
||||
.import_historical_block_batch(historical_blocks)
|
||||
.import_historical_block_batch(available_blocks)
|
||||
.unwrap();
|
||||
|
||||
// The forwards iterator should now match the original chain
|
||||
@ -2328,10 +2385,10 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() {
|
||||
let (unadvanced_split_state, unadvanced_split_state_root) =
|
||||
harness.get_current_state_and_root();
|
||||
|
||||
let (invalid_fork_block, _) = harness
|
||||
let ((invalid_fork_block, _), _) = harness
|
||||
.make_block(unadvanced_split_state.clone(), split_slot)
|
||||
.await;
|
||||
let (valid_fork_block, _) = harness
|
||||
let ((valid_fork_block, _), _) = harness
|
||||
.make_block(unadvanced_split_state.clone(), split_slot + 1)
|
||||
.await;
|
||||
|
||||
@ -2468,12 +2525,12 @@ async fn finalizes_after_resuming_from_db() {
|
||||
|
||||
let original_chain = harness.chain;
|
||||
|
||||
let resumed_harness = BeaconChainHarness::builder(MinimalEthSpec)
|
||||
let resumed_harness = BeaconChainHarness::<DiskHarnessType<E>>::builder(MinimalEthSpec)
|
||||
.default_spec()
|
||||
.keypairs(KEYPAIRS[0..validator_count].to_vec())
|
||||
.resumed_disk_store(store)
|
||||
.testing_slot_clock(original_chain.slot_clock.clone())
|
||||
.mock_execution_layer()
|
||||
.execution_layer(original_chain.execution_layer.clone())
|
||||
.build();
|
||||
|
||||
assert_chains_pretty_much_the_same(&original_chain, &resumed_harness.chain);
|
||||
@ -2538,7 +2595,7 @@ async fn revert_minority_fork_on_resume() {
|
||||
|
||||
// Chain with no fork epoch configured.
|
||||
let db_path1 = tempdir().unwrap();
|
||||
let store1 = get_store_with_spec(&db_path1, spec1.clone());
|
||||
let store1 = get_store_generic(&db_path1, StoreConfig::default(), spec1.clone());
|
||||
let harness1 = BeaconChainHarness::builder(MinimalEthSpec)
|
||||
.spec(spec1)
|
||||
.keypairs(KEYPAIRS[0..validator_count].to_vec())
|
||||
@ -2548,7 +2605,7 @@ async fn revert_minority_fork_on_resume() {
|
||||
|
||||
// Chain with fork epoch configured.
|
||||
let db_path2 = tempdir().unwrap();
|
||||
let store2 = get_store_with_spec(&db_path2, spec2.clone());
|
||||
let store2 = get_store_generic(&db_path2, StoreConfig::default(), spec2.clone());
|
||||
let harness2 = BeaconChainHarness::builder(MinimalEthSpec)
|
||||
.spec(spec2.clone())
|
||||
.keypairs(KEYPAIRS[0..validator_count].to_vec())
|
||||
@ -2574,14 +2631,14 @@ async fn revert_minority_fork_on_resume() {
|
||||
harness1.process_attestations(attestations.clone());
|
||||
harness2.process_attestations(attestations);
|
||||
|
||||
let (block, new_state) = harness1.make_block(state, slot).await;
|
||||
let ((block, blobs), new_state) = harness1.make_block(state, slot).await;
|
||||
|
||||
harness1
|
||||
.process_block(slot, block.canonical_root(), block.clone())
|
||||
.process_block(slot, block.canonical_root(), (block.clone(), blobs.clone()))
|
||||
.await
|
||||
.unwrap();
|
||||
harness2
|
||||
.process_block(slot, block.canonical_root(), block.clone())
|
||||
.process_block(slot, block.canonical_root(), (block.clone(), blobs.clone()))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
@ -2615,17 +2672,17 @@ async fn revert_minority_fork_on_resume() {
|
||||
harness2.process_attestations(attestations);
|
||||
|
||||
// Minority chain block (no attesters).
|
||||
let (block1, new_state1) = harness1.make_block(state1, slot).await;
|
||||
let ((block1, blobs1), new_state1) = harness1.make_block(state1, slot).await;
|
||||
harness1
|
||||
.process_block(slot, block1.canonical_root(), block1)
|
||||
.process_block(slot, block1.canonical_root(), (block1, blobs1))
|
||||
.await
|
||||
.unwrap();
|
||||
state1 = new_state1;
|
||||
|
||||
// Majority chain block (all attesters).
|
||||
let (block2, new_state2) = harness2.make_block(state2, slot).await;
|
||||
let ((block2, blobs2), new_state2) = harness2.make_block(state2, slot).await;
|
||||
harness2
|
||||
.process_block(slot, block2.canonical_root(), block2.clone())
|
||||
.process_block(slot, block2.canonical_root(), (block2.clone(), blobs2))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
@ -2643,9 +2700,9 @@ async fn revert_minority_fork_on_resume() {
|
||||
// We have to do some hackery with the `slot_clock` so that the correct slot is set when
|
||||
// the beacon chain builder loads the head block.
|
||||
drop(harness1);
|
||||
let resume_store = get_store_with_spec(&db_path1, spec2.clone());
|
||||
let resume_store = get_store_generic(&db_path1, StoreConfig::default(), spec2.clone());
|
||||
|
||||
let resumed_harness = BeaconChainHarness::builder(MinimalEthSpec)
|
||||
let resumed_harness = TestHarness::builder(MinimalEthSpec)
|
||||
.spec(spec2)
|
||||
.keypairs(KEYPAIRS[0..validator_count].to_vec())
|
||||
.resumed_disk_store(resume_store)
|
||||
@ -2678,7 +2735,7 @@ async fn revert_minority_fork_on_resume() {
|
||||
let initial_split_slot = resumed_harness.chain.store.get_split_slot();
|
||||
for block in &majority_blocks {
|
||||
resumed_harness
|
||||
.process_block_result(block.clone())
|
||||
.process_block_result((block.clone(), None))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
@ -2718,9 +2775,11 @@ async fn schema_downgrade_to_min_version() {
|
||||
)
|
||||
.await;
|
||||
|
||||
let min_version = if harness.spec.capella_fork_epoch.is_some() {
|
||||
// Can't downgrade beyond V14 once Capella is reached, for simplicity don't test that
|
||||
// at all if Capella is enabled.
|
||||
let min_version = if harness.spec.deneb_fork_epoch.is_some() {
|
||||
// Can't downgrade beyond V18 once Deneb is reached, for simplicity don't test that
|
||||
// at all if Deneb is enabled.
|
||||
SchemaVersion(18)
|
||||
} else if harness.spec.capella_fork_epoch.is_some() {
|
||||
SchemaVersion(14)
|
||||
} else {
|
||||
SchemaVersion(11)
|
||||
@ -2760,15 +2819,6 @@ async fn schema_downgrade_to_min_version() {
|
||||
.expect("schema upgrade from minimum version should work");
|
||||
|
||||
// Recreate the harness.
|
||||
/*
|
||||
let slot_clock = TestingSlotClock::new(
|
||||
Slot::new(0),
|
||||
Duration::from_secs(harness.chain.genesis_time),
|
||||
Duration::from_secs(spec.seconds_per_slot),
|
||||
);
|
||||
slot_clock.set_slot(harness.get_current_slot().as_u64());
|
||||
*/
|
||||
|
||||
let harness = BeaconChainHarness::builder(MinimalEthSpec)
|
||||
.default_spec()
|
||||
.keypairs(KEYPAIRS[0..LOW_VALIDATOR_COUNT].to_vec())
|
||||
@ -2796,6 +2846,278 @@ async fn schema_downgrade_to_min_version() {
|
||||
.expect_err("should not downgrade below minimum version");
|
||||
}
|
||||
|
||||
/// Check that blob pruning prunes blobs older than the data availability boundary.
|
||||
#[tokio::test]
|
||||
async fn deneb_prune_blobs_happy_case() {
|
||||
let db_path = tempdir().unwrap();
|
||||
let store = get_store(&db_path);
|
||||
|
||||
let Some(deneb_fork_epoch) = store.get_chain_spec().deneb_fork_epoch else {
|
||||
// No-op prior to Deneb.
|
||||
return;
|
||||
};
|
||||
let deneb_fork_slot = deneb_fork_epoch.start_slot(E::slots_per_epoch());
|
||||
|
||||
let num_blocks_produced = E::slots_per_epoch() * 8;
|
||||
let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);
|
||||
|
||||
harness
|
||||
.extend_chain(
|
||||
num_blocks_produced as usize,
|
||||
BlockStrategy::OnCanonicalHead,
|
||||
AttestationStrategy::AllValidators,
|
||||
)
|
||||
.await;
|
||||
|
||||
// Prior to manual pruning with an artificially low data availability boundary all blobs should
|
||||
// be stored.
|
||||
assert_eq!(
|
||||
store.get_blob_info().oldest_blob_slot,
|
||||
Some(deneb_fork_slot)
|
||||
);
|
||||
check_blob_existence(&harness, Slot::new(1), harness.head_slot(), true);
|
||||
|
||||
// Trigger blob pruning of blobs older than epoch 2.
|
||||
let data_availability_boundary = Epoch::new(2);
|
||||
store
|
||||
.try_prune_blobs(true, data_availability_boundary)
|
||||
.unwrap();
|
||||
|
||||
// Check oldest blob slot is updated accordingly and prior blobs have been deleted.
|
||||
let oldest_blob_slot = store.get_blob_info().oldest_blob_slot.unwrap();
|
||||
assert_eq!(
|
||||
oldest_blob_slot,
|
||||
data_availability_boundary.start_slot(E::slots_per_epoch())
|
||||
);
|
||||
check_blob_existence(&harness, Slot::new(0), oldest_blob_slot - 1, false);
|
||||
check_blob_existence(&harness, oldest_blob_slot, harness.head_slot(), true);
|
||||
}
|
||||
|
||||
/// Check that blob pruning does not prune without finalization.
|
||||
#[tokio::test]
|
||||
async fn deneb_prune_blobs_no_finalization() {
|
||||
let db_path = tempdir().unwrap();
|
||||
let store = get_store(&db_path);
|
||||
|
||||
let Some(deneb_fork_epoch) = store.get_chain_spec().deneb_fork_epoch else {
|
||||
// No-op prior to Deneb.
|
||||
return;
|
||||
};
|
||||
let deneb_fork_slot = deneb_fork_epoch.start_slot(E::slots_per_epoch());
|
||||
|
||||
let initial_num_blocks = E::slots_per_epoch() * 5;
|
||||
let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);
|
||||
|
||||
// Finalize to epoch 3.
|
||||
harness
|
||||
.extend_chain(
|
||||
initial_num_blocks as usize,
|
||||
BlockStrategy::OnCanonicalHead,
|
||||
AttestationStrategy::AllValidators,
|
||||
)
|
||||
.await;
|
||||
|
||||
// Extend the chain for another few epochs without attestations.
|
||||
let unfinalized_num_blocks = E::slots_per_epoch() * 3;
|
||||
harness.advance_slot();
|
||||
harness
|
||||
.extend_chain(
|
||||
unfinalized_num_blocks as usize,
|
||||
BlockStrategy::OnCanonicalHead,
|
||||
AttestationStrategy::SomeValidators(vec![]),
|
||||
)
|
||||
.await;
|
||||
|
||||
// Finalization should be at epoch 3.
|
||||
let finalized_slot = Slot::new(E::slots_per_epoch() * 3);
|
||||
assert_eq!(harness.get_current_state().finalized_checkpoint().epoch, 3);
|
||||
assert_eq!(store.get_split_slot(), finalized_slot);
|
||||
|
||||
// All blobs should still be available.
|
||||
assert_eq!(
|
||||
store.get_blob_info().oldest_blob_slot,
|
||||
Some(deneb_fork_slot)
|
||||
);
|
||||
check_blob_existence(&harness, Slot::new(0), harness.head_slot(), true);
|
||||
|
||||
// Attempt blob pruning of blobs older than epoch 4, which is newer than finalization.
|
||||
let data_availability_boundary = Epoch::new(4);
|
||||
store
|
||||
.try_prune_blobs(true, data_availability_boundary)
|
||||
.unwrap();
|
||||
|
||||
// Check oldest blob slot is only updated to finalization, and NOT to the DAB.
|
||||
let oldest_blob_slot = store.get_blob_info().oldest_blob_slot.unwrap();
|
||||
assert_eq!(oldest_blob_slot, finalized_slot);
|
||||
check_blob_existence(&harness, Slot::new(0), finalized_slot - 1, false);
|
||||
check_blob_existence(&harness, finalized_slot, harness.head_slot(), true);
|
||||
}
|
||||
|
||||
/// Check that blob pruning does not fail trying to prune across the fork boundary.
|
||||
#[tokio::test]
|
||||
async fn deneb_prune_blobs_fork_boundary() {
|
||||
let deneb_fork_epoch = Epoch::new(4);
|
||||
let mut spec = ForkName::Capella.make_genesis_spec(E::default_spec());
|
||||
spec.deneb_fork_epoch = Some(deneb_fork_epoch);
|
||||
let deneb_fork_slot = deneb_fork_epoch.start_slot(E::slots_per_epoch());
|
||||
|
||||
let db_path = tempdir().unwrap();
|
||||
let store = get_store_generic(&db_path, StoreConfig::default(), spec);
|
||||
|
||||
let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);
|
||||
|
||||
let num_blocks = E::slots_per_epoch() * 7;
|
||||
|
||||
// Finalize to epoch 5.
|
||||
harness
|
||||
.extend_chain(
|
||||
num_blocks as usize,
|
||||
BlockStrategy::OnCanonicalHead,
|
||||
AttestationStrategy::AllValidators,
|
||||
)
|
||||
.await;
|
||||
|
||||
// Finalization should be at epoch 5.
|
||||
let finalized_epoch = Epoch::new(5);
|
||||
let finalized_slot = finalized_epoch.start_slot(E::slots_per_epoch());
|
||||
assert_eq!(
|
||||
harness.get_current_state().finalized_checkpoint().epoch,
|
||||
finalized_epoch
|
||||
);
|
||||
assert_eq!(store.get_split_slot(), finalized_slot);
|
||||
|
||||
// All blobs should still be available.
|
||||
assert_eq!(
|
||||
store.get_blob_info().oldest_blob_slot,
|
||||
Some(deneb_fork_slot)
|
||||
);
|
||||
check_blob_existence(&harness, Slot::new(0), harness.head_slot(), true);
|
||||
|
||||
// Attempt pruning with data availability epochs that precede the fork epoch.
|
||||
// No pruning should occur.
|
||||
assert!(deneb_fork_epoch < finalized_epoch);
|
||||
for data_availability_boundary in [Epoch::new(0), Epoch::new(3), deneb_fork_epoch] {
|
||||
store
|
||||
.try_prune_blobs(true, data_availability_boundary)
|
||||
.unwrap();
|
||||
|
||||
// Check oldest blob slot is not updated.
|
||||
assert_eq!(
|
||||
store.get_blob_info().oldest_blob_slot,
|
||||
Some(deneb_fork_slot)
|
||||
);
|
||||
}
|
||||
// All blobs should still be available.
|
||||
check_blob_existence(&harness, Slot::new(0), harness.head_slot(), true);
|
||||
|
||||
// Prune one epoch past the fork.
|
||||
let pruned_slot = (deneb_fork_epoch + 1).start_slot(E::slots_per_epoch());
|
||||
store.try_prune_blobs(true, deneb_fork_epoch + 1).unwrap();
|
||||
assert_eq!(store.get_blob_info().oldest_blob_slot, Some(pruned_slot));
|
||||
check_blob_existence(&harness, Slot::new(0), pruned_slot - 1, false);
|
||||
check_blob_existence(&harness, pruned_slot, harness.head_slot(), true);
|
||||
}
|
||||
|
||||
/// Check that blob pruning prunes blobs older than the data availability boundary with margin
|
||||
/// applied.
|
||||
#[tokio::test]
|
||||
async fn deneb_prune_blobs_margin1() {
|
||||
deneb_prune_blobs_margin_test(1).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn deneb_prune_blobs_margin3() {
|
||||
deneb_prune_blobs_margin_test(3).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn deneb_prune_blobs_margin4() {
|
||||
deneb_prune_blobs_margin_test(4).await;
|
||||
}
|
||||
|
||||
async fn deneb_prune_blobs_margin_test(margin: u64) {
|
||||
let config = StoreConfig {
|
||||
blob_prune_margin_epochs: margin,
|
||||
..StoreConfig::default()
|
||||
};
|
||||
let db_path = tempdir().unwrap();
|
||||
let store = get_store_generic(&db_path, config, test_spec::<E>());
|
||||
|
||||
let Some(deneb_fork_epoch) = store.get_chain_spec().deneb_fork_epoch else {
|
||||
// No-op prior to Deneb.
|
||||
return;
|
||||
};
|
||||
let deneb_fork_slot = deneb_fork_epoch.start_slot(E::slots_per_epoch());
|
||||
|
||||
let num_blocks_produced = E::slots_per_epoch() * 8;
|
||||
let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);
|
||||
|
||||
harness
|
||||
.extend_chain(
|
||||
num_blocks_produced as usize,
|
||||
BlockStrategy::OnCanonicalHead,
|
||||
AttestationStrategy::AllValidators,
|
||||
)
|
||||
.await;
|
||||
|
||||
// Prior to manual pruning with an artificially low data availability boundary all blobs should
|
||||
// be stored.
|
||||
assert_eq!(
|
||||
store.get_blob_info().oldest_blob_slot,
|
||||
Some(deneb_fork_slot)
|
||||
);
|
||||
check_blob_existence(&harness, Slot::new(1), harness.head_slot(), true);
|
||||
|
||||
// Trigger blob pruning of blobs older than epoch 6 - margin (6 is the minimum, due to
|
||||
// finalization).
|
||||
let data_availability_boundary = Epoch::new(6);
|
||||
let effective_data_availability_boundary =
|
||||
data_availability_boundary - store.get_config().blob_prune_margin_epochs;
|
||||
assert!(
|
||||
effective_data_availability_boundary > 0,
|
||||
"must be > 0 because epoch 0 won't get pruned alone"
|
||||
);
|
||||
store
|
||||
.try_prune_blobs(true, data_availability_boundary)
|
||||
.unwrap();
|
||||
|
||||
// Check oldest blob slot is updated accordingly and prior blobs have been deleted.
|
||||
let oldest_blob_slot = store.get_blob_info().oldest_blob_slot.unwrap();
|
||||
assert_eq!(
|
||||
oldest_blob_slot,
|
||||
effective_data_availability_boundary.start_slot(E::slots_per_epoch())
|
||||
);
|
||||
check_blob_existence(&harness, Slot::new(0), oldest_blob_slot - 1, false);
|
||||
check_blob_existence(&harness, oldest_blob_slot, harness.head_slot(), true);
|
||||
}
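// Illustrative sketch (not from the change above): the margin tests rely on the effective
// pruning boundary being `data_availability_boundary - blob_prune_margin_epochs`, so a
// boundary of epoch 6 with a 3-epoch margin prunes blobs only up to the start of epoch 3.
// A minimal expression of that arithmetic, assuming epochs as plain u64 values:
fn effective_blob_prune_boundary(data_availability_boundary: u64, margin_epochs: u64) -> u64 {
    // Saturate at zero; epoch 0 is never pruned on its own, as asserted above.
    data_availability_boundary.saturating_sub(margin_epochs)
}
// e.g. effective_blob_prune_boundary(6, 3) == 3, matching deneb_prune_blobs_margin3.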
|
||||
|
||||
/// Check that there are blob sidecars (or not) at every slot in the range.
|
||||
fn check_blob_existence(
|
||||
harness: &TestHarness,
|
||||
start_slot: Slot,
|
||||
end_slot: Slot,
|
||||
should_exist: bool,
|
||||
) {
|
||||
let mut blobs_seen = 0;
|
||||
for (block_root, slot) in harness
|
||||
.chain
|
||||
.forwards_iter_block_roots_until(start_slot, end_slot)
|
||||
.unwrap()
|
||||
.map(Result::unwrap)
|
||||
{
|
||||
if let Some(blobs) = harness.chain.store.get_blobs(&block_root).unwrap() {
|
||||
assert!(should_exist, "blobs at slot {slot} exist but should not");
|
||||
blobs_seen += blobs.len();
|
||||
} else {
|
||||
// We don't actually store empty blobs, so unfortunately we can't assert anything
|
||||
// meaningful here (like asserting that the blob should not exist).
|
||||
}
|
||||
}
|
||||
if should_exist {
|
||||
assert_ne!(blobs_seen, 0, "expected non-zero number of blobs");
|
||||
}
|
||||
}
|
||||
|
||||
/// Checks that two chains are the same, for the purpose of these tests.
|
||||
///
|
||||
/// Several fields that are hard/impossible to check are ignored (e.g., the store).
|
||||
|
@ -684,19 +684,20 @@ async fn run_skip_slot_test(skip_slots: u64) {
|
||||
Slot::new(0)
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
harness_b
|
||||
.chain
|
||||
.process_block(
|
||||
harness_a.chain.head_snapshot().beacon_block_root,
|
||||
harness_a.chain.head_snapshot().beacon_block.clone(),
|
||||
NotifyExecutionLayer::Yes,
|
||||
|| Ok(())
|
||||
)
|
||||
.await
|
||||
.unwrap(),
|
||||
harness_a.chain.head_snapshot().beacon_block_root
|
||||
);
|
||||
let status = harness_b
|
||||
.chain
|
||||
.process_block(
|
||||
harness_a.chain.head_snapshot().beacon_block_root,
|
||||
harness_a.get_head_block(),
|
||||
NotifyExecutionLayer::Yes,
|
||||
|| Ok(()),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let root: Hash256 = status.try_into().unwrap();
|
||||
|
||||
assert_eq!(root, harness_a.chain.head_snapshot().beacon_block_root);
|
||||
|
||||
harness_b.chain.recompute_head_at_current_slot().await;
|
||||
|
||||
|
@ -39,13 +39,11 @@
|
||||
//! task.
|
||||
|
||||
use crate::work_reprocessing_queue::{
|
||||
spawn_reprocess_scheduler, QueuedAggregate, QueuedBackfillBatch, QueuedGossipBlock,
|
||||
QueuedLightClientUpdate, QueuedRpcBlock, QueuedUnaggregate, ReadyWork, ReprocessQueueMessage,
|
||||
QueuedBackfillBatch, QueuedGossipBlock, ReprocessQueueMessage,
|
||||
};
|
||||
use futures::stream::{Stream, StreamExt};
|
||||
use futures::task::Poll;
|
||||
use lighthouse_network::NetworkGlobals;
|
||||
use lighthouse_network::{MessageId, PeerId};
|
||||
use lighthouse_network::{MessageId, NetworkGlobals, PeerId};
|
||||
use logging::TimeLatch;
|
||||
use parking_lot::Mutex;
|
||||
use serde::{Deserialize, Serialize};
|
||||
@ -62,8 +60,14 @@ use std::time::Duration;
|
||||
use task_executor::TaskExecutor;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::sync::mpsc::error::TrySendError;
|
||||
use types::{Attestation, EthSpec, Hash256, SignedAggregateAndProof, Slot, SubnetId};
|
||||
use types::consts::deneb::MAX_BLOBS_PER_BLOCK;
|
||||
use types::{Attestation, Hash256, SignedAggregateAndProof, SubnetId};
|
||||
use types::{EthSpec, Slot};
|
||||
use work_reprocessing_queue::IgnoredRpcBlock;
|
||||
use work_reprocessing_queue::{
|
||||
spawn_reprocess_scheduler, QueuedAggregate, QueuedLightClientUpdate, QueuedRpcBlock,
|
||||
QueuedUnaggregate, ReadyWork,
|
||||
};
|
||||
|
||||
mod metrics;
|
||||
pub mod work_reprocessing_queue;
|
||||
@ -102,6 +106,10 @@ const MAX_AGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN: usize = 1_024;
|
||||
/// before we start dropping them.
|
||||
const MAX_GOSSIP_BLOCK_QUEUE_LEN: usize = 1_024;
|
||||
|
||||
/// The maximum number of queued `SignedBlobSidecar` objects received on gossip that
|
||||
/// will be stored before we start dropping them.
|
||||
const MAX_GOSSIP_BLOB_QUEUE_LEN: usize = 1_024;
|
||||
|
||||
/// The maximum number of queued `SignedBeaconBlock` objects received prior to their slot (but
|
||||
/// within acceptable clock disparity) that will be queued before we start dropping them.
|
||||
const MAX_DELAYED_BLOCK_QUEUE_LEN: usize = 1_024;
|
||||
@ -142,6 +150,10 @@ const MAX_SYNC_CONTRIBUTION_QUEUE_LEN: usize = 1024;
|
||||
/// will be stored before we start dropping them.
|
||||
const MAX_RPC_BLOCK_QUEUE_LEN: usize = 1_024;
|
||||
|
||||
/// The maximum number of queued `BlobSidecar` objects received from the network RPC that
|
||||
/// will be stored before we start dropping them.
|
||||
const MAX_RPC_BLOB_QUEUE_LEN: usize = 1_024;
|
||||
|
||||
/// The maximum number of queued `Vec<SignedBeaconBlock>` objects received during syncing that will
|
||||
/// be stored before we start dropping them.
|
||||
const MAX_CHAIN_SEGMENT_QUEUE_LEN: usize = 64;
|
||||
@ -154,10 +166,19 @@ const MAX_STATUS_QUEUE_LEN: usize = 1_024;
|
||||
/// will be stored before we start dropping them.
|
||||
const MAX_BLOCKS_BY_RANGE_QUEUE_LEN: usize = 1_024;
|
||||
|
||||
/// The maximum number of queued `BlobsByRangeRequest` objects received from the network RPC that
|
||||
/// will be stored before we start dropping them.
|
||||
const MAX_BLOBS_BY_RANGE_QUEUE_LEN: usize =
|
||||
MAX_BLOCKS_BY_RANGE_QUEUE_LEN * MAX_BLOBS_PER_BLOCK as usize;
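// Sizing note (illustrative, assuming the mainnet Deneb preset where MAX_BLOBS_PER_BLOCK
// is 6): 1_024 blocks-by-range slots * 6 blobs per block = 6_144 queued requests.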
|
||||
|
||||
/// The maximum number of queued `BlocksByRootRequest` objects received from the network RPC that
|
||||
/// will be stored before we start dropping them.
|
||||
const MAX_BLOCKS_BY_ROOTS_QUEUE_LEN: usize = 1_024;
|
||||
|
||||
/// The maximum number of queued `BlobsByRootRequest` objects received from the network RPC that
|
||||
/// will be stored before we start dropping them.
|
||||
const MAX_BLOBS_BY_ROOTS_QUEUE_LEN: usize = 1_024;
|
||||
|
||||
/// Maximum number of `SignedBlsToExecutionChange` messages to queue before dropping them.
|
||||
///
|
||||
/// This value is set high to accommodate the large spike that is expected immediately after Capella
|
||||
@ -204,6 +225,7 @@ pub const GOSSIP_ATTESTATION_BATCH: &str = "gossip_attestation_batch";
|
||||
pub const GOSSIP_AGGREGATE: &str = "gossip_aggregate";
|
||||
pub const GOSSIP_AGGREGATE_BATCH: &str = "gossip_aggregate_batch";
|
||||
pub const GOSSIP_BLOCK: &str = "gossip_block";
|
||||
pub const GOSSIP_BLOBS_SIDECAR: &str = "gossip_blobs_sidecar";
|
||||
pub const DELAYED_IMPORT_BLOCK: &str = "delayed_import_block";
|
||||
pub const GOSSIP_VOLUNTARY_EXIT: &str = "gossip_voluntary_exit";
|
||||
pub const GOSSIP_PROPOSER_SLASHING: &str = "gossip_proposer_slashing";
|
||||
@ -214,11 +236,14 @@ pub const GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_upd
|
||||
pub const GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update";
|
||||
pub const RPC_BLOCK: &str = "rpc_block";
|
||||
pub const IGNORED_RPC_BLOCK: &str = "ignored_rpc_block";
|
||||
pub const RPC_BLOBS: &str = "rpc_blob";
|
||||
pub const CHAIN_SEGMENT: &str = "chain_segment";
|
||||
pub const CHAIN_SEGMENT_BACKFILL: &str = "chain_segment_backfill";
|
||||
pub const STATUS_PROCESSING: &str = "status_processing";
|
||||
pub const BLOCKS_BY_RANGE_REQUEST: &str = "blocks_by_range_request";
|
||||
pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request";
|
||||
pub const BLOBS_BY_RANGE_REQUEST: &str = "blobs_by_range_request";
|
||||
pub const BLOBS_BY_ROOTS_REQUEST: &str = "blobs_by_roots_request";
|
||||
pub const LIGHT_CLIENT_BOOTSTRAP_REQUEST: &str = "light_client_bootstrap";
|
||||
pub const UNKNOWN_BLOCK_ATTESTATION: &str = "unknown_block_attestation";
|
||||
pub const UNKNOWN_BLOCK_AGGREGATE: &str = "unknown_block_aggregate";
|
||||
@ -566,6 +591,7 @@ pub enum Work<E: EthSpec> {
|
||||
process_batch: Box<dyn FnOnce(Vec<GossipAggregatePackage<E>>) + Send + Sync>,
|
||||
},
|
||||
GossipBlock(AsyncFn),
|
||||
GossipSignedBlobSidecar(AsyncFn),
|
||||
DelayedImportBlock {
|
||||
beacon_block_slot: Slot,
|
||||
beacon_block_root: Hash256,
|
||||
@ -581,6 +607,9 @@ pub enum Work<E: EthSpec> {
|
||||
RpcBlock {
|
||||
process_fn: AsyncFn,
|
||||
},
|
||||
RpcBlobs {
|
||||
process_fn: AsyncFn,
|
||||
},
|
||||
IgnoredRpcBlock {
|
||||
process_fn: BlockingFn,
|
||||
},
|
||||
@ -589,6 +618,8 @@ pub enum Work<E: EthSpec> {
|
||||
Status(BlockingFn),
|
||||
BlocksByRangeRequest(BlockingFnWithManualSendOnIdle),
|
||||
BlocksByRootsRequest(BlockingFnWithManualSendOnIdle),
|
||||
BlobsByRangeRequest(BlockingFn),
|
||||
BlobsByRootsRequest(BlockingFn),
|
||||
GossipBlsToExecutionChange(BlockingFn),
|
||||
LightClientBootstrapRequest(BlockingFn),
|
||||
ApiRequestP0(BlockingOrAsync),
|
||||
@ -610,6 +641,7 @@ impl<E: EthSpec> Work<E> {
|
||||
Work::GossipAggregate { .. } => GOSSIP_AGGREGATE,
|
||||
Work::GossipAggregateBatch { .. } => GOSSIP_AGGREGATE_BATCH,
|
||||
Work::GossipBlock(_) => GOSSIP_BLOCK,
|
||||
Work::GossipSignedBlobSidecar(_) => GOSSIP_BLOBS_SIDECAR,
|
||||
Work::DelayedImportBlock { .. } => DELAYED_IMPORT_BLOCK,
|
||||
Work::GossipVoluntaryExit(_) => GOSSIP_VOLUNTARY_EXIT,
|
||||
Work::GossipProposerSlashing(_) => GOSSIP_PROPOSER_SLASHING,
|
||||
@ -619,12 +651,15 @@ impl<E: EthSpec> Work<E> {
|
||||
Work::GossipLightClientFinalityUpdate(_) => GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE,
|
||||
Work::GossipLightClientOptimisticUpdate(_) => GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE,
|
||||
Work::RpcBlock { .. } => RPC_BLOCK,
|
||||
Work::RpcBlobs { .. } => RPC_BLOBS,
|
||||
Work::IgnoredRpcBlock { .. } => IGNORED_RPC_BLOCK,
|
||||
Work::ChainSegment { .. } => CHAIN_SEGMENT,
|
||||
Work::ChainSegmentBackfill(_) => CHAIN_SEGMENT_BACKFILL,
|
||||
Work::Status(_) => STATUS_PROCESSING,
|
||||
Work::BlocksByRangeRequest(_) => BLOCKS_BY_RANGE_REQUEST,
|
||||
Work::BlocksByRootsRequest(_) => BLOCKS_BY_ROOTS_REQUEST,
|
||||
Work::BlobsByRangeRequest(_) => BLOBS_BY_RANGE_REQUEST,
|
||||
Work::BlobsByRootsRequest(_) => BLOBS_BY_ROOTS_REQUEST,
|
||||
Work::LightClientBootstrapRequest(_) => LIGHT_CLIENT_BOOTSTRAP_REQUEST,
|
||||
Work::UnknownBlockAttestation { .. } => UNKNOWN_BLOCK_ATTESTATION,
|
||||
Work::UnknownBlockAggregate { .. } => UNKNOWN_BLOCK_AGGREGATE,
|
||||
@ -771,14 +806,18 @@ impl<E: EthSpec> BeaconProcessor<E> {
|
||||
|
||||
// Using a FIFO queue since blocks need to be imported sequentially.
|
||||
let mut rpc_block_queue = FifoQueue::new(MAX_RPC_BLOCK_QUEUE_LEN);
|
||||
let mut rpc_blob_queue = FifoQueue::new(MAX_RPC_BLOB_QUEUE_LEN);
|
||||
let mut chain_segment_queue = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN);
|
||||
let mut backfill_chain_segment = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN);
|
||||
let mut gossip_block_queue = FifoQueue::new(MAX_GOSSIP_BLOCK_QUEUE_LEN);
|
||||
let mut gossip_blob_queue = FifoQueue::new(MAX_GOSSIP_BLOB_QUEUE_LEN);
|
||||
let mut delayed_block_queue = FifoQueue::new(MAX_DELAYED_BLOCK_QUEUE_LEN);
|
||||
|
||||
let mut status_queue = FifoQueue::new(MAX_STATUS_QUEUE_LEN);
|
||||
let mut bbrange_queue = FifoQueue::new(MAX_BLOCKS_BY_RANGE_QUEUE_LEN);
|
||||
let mut bbroots_queue = FifoQueue::new(MAX_BLOCKS_BY_ROOTS_QUEUE_LEN);
|
||||
let mut blbroots_queue = FifoQueue::new(MAX_BLOBS_BY_ROOTS_QUEUE_LEN);
|
||||
let mut blbrange_queue = FifoQueue::new(MAX_BLOBS_BY_RANGE_QUEUE_LEN);
|
||||
|
||||
let mut gossip_bls_to_execution_change_queue =
|
||||
FifoQueue::new(MAX_BLS_TO_EXECUTION_CHANGE_QUEUE_LEN);
|
||||
@ -915,6 +954,8 @@ impl<E: EthSpec> BeaconProcessor<E> {
|
||||
// requested these blocks.
|
||||
} else if let Some(item) = rpc_block_queue.pop() {
|
||||
self.spawn_worker(item, idle_tx);
|
||||
} else if let Some(item) = rpc_blob_queue.pop() {
|
||||
self.spawn_worker(item, idle_tx);
|
||||
// Check delayed blocks before gossip blocks, the gossip blocks might rely
|
||||
// on the delayed ones.
|
||||
} else if let Some(item) = delayed_block_queue.pop() {
|
||||
@ -923,7 +964,9 @@ impl<E: EthSpec> BeaconProcessor<E> {
|
||||
// required to verify some attestations.
|
||||
} else if let Some(item) = gossip_block_queue.pop() {
|
||||
self.spawn_worker(item, idle_tx);
|
||||
// Check the priority 0 API requests after blocks, but before attestations.
|
||||
} else if let Some(item) = gossip_blob_queue.pop() {
|
||||
self.spawn_worker(item, idle_tx);
|
||||
// Check the priority 0 API requests after blocks and blobs, but before attestations.
|
||||
} else if let Some(item) = api_request_p0_queue.pop() {
|
||||
self.spawn_worker(item, idle_tx);
|
||||
// Check the aggregates, *then* the unaggregates since we assume that
|
||||
@ -1068,6 +1111,10 @@ impl<E: EthSpec> BeaconProcessor<E> {
|
||||
self.spawn_worker(item, idle_tx);
|
||||
} else if let Some(item) = bbroots_queue.pop() {
|
||||
self.spawn_worker(item, idle_tx);
|
||||
} else if let Some(item) = blbrange_queue.pop() {
|
||||
self.spawn_worker(item, idle_tx);
|
||||
} else if let Some(item) = blbroots_queue.pop() {
|
||||
self.spawn_worker(item, idle_tx);
|
||||
// Check slashings after all other consensus messages so we prioritize
|
||||
// following head.
|
||||
//
|
||||
@ -1158,6 +1205,9 @@ impl<E: EthSpec> BeaconProcessor<E> {
|
||||
Work::GossipBlock { .. } => {
|
||||
gossip_block_queue.push(work, work_id, &self.log)
|
||||
}
|
||||
Work::GossipSignedBlobSidecar { .. } => {
|
||||
gossip_blob_queue.push(work, work_id, &self.log)
|
||||
}
|
||||
Work::DelayedImportBlock { .. } => {
|
||||
delayed_block_queue.push(work, work_id, &self.log)
|
||||
}
|
||||
@ -1183,6 +1233,7 @@ impl<E: EthSpec> BeaconProcessor<E> {
|
||||
Work::RpcBlock { .. } | Work::IgnoredRpcBlock { .. } => {
|
||||
rpc_block_queue.push(work, work_id, &self.log)
|
||||
}
|
||||
Work::RpcBlobs { .. } => rpc_blob_queue.push(work, work_id, &self.log),
|
||||
Work::ChainSegment { .. } => {
|
||||
chain_segment_queue.push(work, work_id, &self.log)
|
||||
}
|
||||
@ -1196,6 +1247,9 @@ impl<E: EthSpec> BeaconProcessor<E> {
|
||||
Work::BlocksByRootsRequest { .. } => {
|
||||
bbroots_queue.push(work, work_id, &self.log)
|
||||
}
|
||||
Work::BlobsByRangeRequest { .. } => {
|
||||
blbrange_queue.push(work, work_id, &self.log)
|
||||
}
|
||||
Work::LightClientBootstrapRequest { .. } => {
|
||||
lcbootstrap_queue.push(work, work_id, &self.log)
|
||||
}
|
||||
@ -1208,6 +1262,9 @@ impl<E: EthSpec> BeaconProcessor<E> {
|
||||
Work::GossipBlsToExecutionChange { .. } => {
|
||||
gossip_bls_to_execution_change_queue.push(work, work_id, &self.log)
|
||||
}
|
||||
Work::BlobsByRootsRequest { .. } => {
|
||||
blbroots_queue.push(work, work_id, &self.log)
|
||||
}
|
||||
Work::UnknownLightClientOptimisticUpdate { .. } => {
|
||||
unknown_light_client_update_queue.push(work, work_id, &self.log)
|
||||
}
|
||||
@ -1245,10 +1302,18 @@ impl<E: EthSpec> BeaconProcessor<E> {
|
||||
&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_QUEUE_TOTAL,
|
||||
gossip_block_queue.len() as i64,
|
||||
);
|
||||
metrics::set_gauge(
|
||||
&metrics::BEACON_PROCESSOR_GOSSIP_BLOB_QUEUE_TOTAL,
|
||||
gossip_blob_queue.len() as i64,
|
||||
);
|
||||
metrics::set_gauge(
|
||||
&metrics::BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL,
|
||||
rpc_block_queue.len() as i64,
|
||||
);
|
||||
metrics::set_gauge(
|
||||
&metrics::BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL,
|
||||
rpc_blob_queue.len() as i64,
|
||||
);
|
||||
metrics::set_gauge(
|
||||
&metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL,
|
||||
chain_segment_queue.len() as i64,
|
||||
@ -1388,11 +1453,17 @@ impl<E: EthSpec> BeaconProcessor<E> {
|
||||
beacon_block_root: _,
|
||||
process_fn,
|
||||
} => task_spawner.spawn_async(process_fn),
|
||||
Work::RpcBlock { process_fn } => task_spawner.spawn_async(process_fn),
|
||||
Work::RpcBlock { process_fn } | Work::RpcBlobs { process_fn } => {
|
||||
task_spawner.spawn_async(process_fn)
|
||||
}
|
||||
Work::IgnoredRpcBlock { process_fn } => task_spawner.spawn_blocking(process_fn),
|
||||
Work::GossipBlock(work) => task_spawner.spawn_async(async move {
|
||||
work.await;
|
||||
}),
|
||||
Work::GossipBlock(work) | Work::GossipSignedBlobSidecar(work) => task_spawner
|
||||
.spawn_async(async move {
|
||||
work.await;
|
||||
}),
|
||||
Work::BlobsByRangeRequest(process_fn) | Work::BlobsByRootsRequest(process_fn) => {
|
||||
task_spawner.spawn_blocking(process_fn)
|
||||
}
|
||||
Work::BlocksByRangeRequest(work) | Work::BlocksByRootsRequest(work) => {
|
||||
task_spawner.spawn_blocking_with_manual_send_idle(work)
|
||||
}
|
||||
|
@ -46,6 +46,11 @@ lazy_static::lazy_static! {
|
||||
"beacon_processor_gossip_block_queue_total",
|
||||
"Count of blocks from gossip waiting to be verified."
|
||||
);
|
||||
// Gossip blobs.
|
||||
pub static ref BEACON_PROCESSOR_GOSSIP_BLOB_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
|
||||
"beacon_processor_gossip_blob_queue_total",
|
||||
"Count of blocks from gossip waiting to be verified."
|
||||
);
|
||||
// Gossip Exits.
|
||||
pub static ref BEACON_PROCESSOR_EXIT_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
|
||||
"beacon_processor_exit_queue_total",
|
||||
@ -71,6 +76,11 @@ lazy_static::lazy_static! {
|
||||
"beacon_processor_rpc_block_queue_total",
|
||||
"Count of blocks from the rpc waiting to be verified."
|
||||
);
|
||||
// Rpc blobs.
|
||||
pub static ref BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
|
||||
"beacon_processor_rpc_blob_queue_total",
|
||||
"Count of blobs from the rpc waiting to be verified."
|
||||
);
|
||||
// Chain segments.
|
||||
pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
|
||||
"beacon_processor_chain_segment_queue_total",
|
||||
|
@ -1,8 +1,8 @@
|
||||
use eth2::types::builder_bid::SignedBuilderBid;
|
||||
use eth2::types::FullPayloadContents;
|
||||
use eth2::types::{
|
||||
AbstractExecPayload, BlindedPayload, EthSpec, ExecutionBlockHash, ExecutionPayload,
|
||||
ForkVersionedResponse, PublicKeyBytes, SignedBeaconBlock, SignedValidatorRegistrationData,
|
||||
Slot,
|
||||
BlindedPayload, EthSpec, ExecutionBlockHash, ForkVersionedResponse, PublicKeyBytes,
|
||||
SignedBlockContents, SignedValidatorRegistrationData, Slot,
|
||||
};
|
||||
pub use eth2::Error;
|
||||
use eth2::{ok_or_error, StatusCode};
|
||||
@ -140,8 +140,8 @@ impl BuilderHttpClient {
|
||||
/// `POST /eth/v1/builder/blinded_blocks`
|
||||
pub async fn post_builder_blinded_blocks<E: EthSpec>(
|
||||
&self,
|
||||
blinded_block: &SignedBeaconBlock<E, BlindedPayload<E>>,
|
||||
) -> Result<ForkVersionedResponse<ExecutionPayload<E>>, Error> {
|
||||
blinded_block: &SignedBlockContents<E, BlindedPayload<E>>,
|
||||
) -> Result<ForkVersionedResponse<FullPayloadContents<E>>, Error> {
|
||||
let mut path = self.server.full.clone();
|
||||
|
||||
path.path_segments_mut()
|
||||
@ -163,12 +163,12 @@ impl BuilderHttpClient {
|
||||
}
|
||||
|
||||
/// `GET /eth/v1/builder/header`
|
||||
pub async fn get_builder_header<E: EthSpec, Payload: AbstractExecPayload<E>>(
|
||||
pub async fn get_builder_header<E: EthSpec>(
|
||||
&self,
|
||||
slot: Slot,
|
||||
parent_hash: ExecutionBlockHash,
|
||||
pubkey: &PublicKeyBytes,
|
||||
) -> Result<Option<ForkVersionedResponse<SignedBuilderBid<E, Payload>>>, Error> {
|
||||
) -> Result<Option<ForkVersionedResponse<SignedBuilderBid<E>>>, Error> {
|
||||
let mut path = self.server.full.clone();
|
||||
|
||||
path.path_segments_mut()
|
||||
|
@ -22,7 +22,6 @@ types = { workspace = true }
|
||||
eth2_config = { workspace = true }
|
||||
slot_clock = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_derive = "1.0.116"
|
||||
error-chain = { workspace = true }
|
||||
slog = { workspace = true }
|
||||
tokio = { workspace = true }
|
||||
|
@ -2,6 +2,7 @@ use crate::address_change_broadcast::broadcast_address_changes_at_capella;
|
||||
use crate::config::{ClientGenesis, Config as ClientConfig};
|
||||
use crate::notifier::spawn_notifier;
|
||||
use crate::Client;
|
||||
use beacon_chain::data_availability_checker::start_availability_cache_maintenance_service;
|
||||
use beacon_chain::otb_verification_service::start_otb_verification_service;
|
||||
use beacon_chain::proposer_prep_service::start_proposer_prep_service;
|
||||
use beacon_chain::schema_change::migrate_schema;
|
||||
@ -508,6 +509,12 @@ where
|
||||
ClientGenesis::FromStore => builder.resume_from_db().map(|v| (v, None))?,
|
||||
};
|
||||
|
||||
let beacon_chain_builder = if let Some(trusted_setup) = config.trusted_setup {
|
||||
beacon_chain_builder.trusted_setup(trusted_setup)
|
||||
} else {
|
||||
beacon_chain_builder
|
||||
};
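// Illustrative sketch (hypothetical standalone helper, mirroring the test setup elsewhere
// in this change; imports for `TrustedSetup`, `EthSpec` and `get_trusted_setup` assumed):
// the `TrustedSetup` placed into `config.trusted_setup` is typically parsed from the
// bundled JSON provided by `eth2_network_config::get_trusted_setup`.
fn load_trusted_setup<E: EthSpec>() -> Result<TrustedSetup, String> {
    serde_json::from_reader(get_trusted_setup::<<E as EthSpec>::Kzg>())
        .map_err(|e| format!("Unable to read trusted setup file: {}", e))
}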
|
||||
|
||||
if config.sync_eth1_chain {
|
||||
self.eth1_service = eth1_service_option;
|
||||
}
|
||||
@ -838,6 +845,10 @@ where
|
||||
|
||||
start_proposer_prep_service(runtime_context.executor.clone(), beacon_chain.clone());
|
||||
start_otb_verification_service(runtime_context.executor.clone(), beacon_chain.clone());
|
||||
start_availability_cache_maintenance_service(
|
||||
runtime_context.executor.clone(),
|
||||
beacon_chain.clone(),
|
||||
);
|
||||
}
|
||||
|
||||
Ok(Client {
|
||||
@ -898,6 +909,7 @@ where
|
||||
mut self,
|
||||
hot_path: &Path,
|
||||
cold_path: &Path,
|
||||
blobs_path: Option<PathBuf>,
|
||||
config: StoreConfig,
|
||||
log: Logger,
|
||||
) -> Result<Self, String> {
|
||||
@ -935,6 +947,7 @@ where
|
||||
let store = HotColdDB::open(
|
||||
hot_path,
|
||||
cold_path,
|
||||
blobs_path,
|
||||
schema_upgrade,
|
||||
config,
|
||||
spec,
|
||||
|
@ -1,10 +1,11 @@
|
||||
use beacon_chain::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD;
|
||||
use beacon_chain::TrustedSetup;
|
||||
use beacon_processor::BeaconProcessorConfig;
|
||||
use directory::DEFAULT_ROOT_DIR;
|
||||
use environment::LoggerConfig;
|
||||
use network::NetworkConfig;
|
||||
use sensitive_url::SensitiveUrl;
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use std::time::Duration;
|
||||
@ -45,6 +46,8 @@ pub struct Config {
|
||||
pub db_name: String,
|
||||
/// Path where the freezer database will be located.
|
||||
pub freezer_db_path: Option<PathBuf>,
|
||||
/// Path where the blobs database will be located if blobs should be in a separate database.
|
||||
pub blobs_db_path: Option<PathBuf>,
|
||||
pub log_file: PathBuf,
|
||||
/// If true, the node will use co-ordinated junk for eth1 values.
|
||||
///
|
||||
@ -71,6 +74,7 @@ pub struct Config {
|
||||
pub chain: beacon_chain::ChainConfig,
|
||||
pub eth1: eth1::Config,
|
||||
pub execution_layer: Option<execution_layer::Config>,
|
||||
pub trusted_setup: Option<TrustedSetup>,
|
||||
pub http_api: http_api::Config,
|
||||
pub http_metrics: http_metrics::Config,
|
||||
pub monitoring_api: Option<monitoring_api::Config>,
|
||||
@ -87,6 +91,7 @@ impl Default for Config {
|
||||
data_dir: PathBuf::from(DEFAULT_ROOT_DIR),
|
||||
db_name: "chain_db".to_string(),
|
||||
freezer_db_path: None,
|
||||
blobs_db_path: None,
|
||||
log_file: PathBuf::from(""),
|
||||
genesis: <_>::default(),
|
||||
store: <_>::default(),
|
||||
@ -96,6 +101,7 @@ impl Default for Config {
|
||||
sync_eth1_chain: false,
|
||||
eth1: <_>::default(),
|
||||
execution_layer: None,
|
||||
trusted_setup: None,
|
||||
graffiti: Graffiti::default(),
|
||||
http_api: <_>::default(),
|
||||
http_metrics: <_>::default(),
|
||||
@ -150,11 +156,27 @@ impl Config {
|
||||
.unwrap_or_else(|| self.default_freezer_db_path())
|
||||
}
|
||||
|
||||
/// Returns the path to which the client may initialize the on-disk blobs database.
|
||||
///
|
||||
/// Will attempt to use the user-supplied path from e.g. the CLI, or will default
|
||||
/// to None.
|
||||
pub fn get_blobs_db_path(&self) -> Option<PathBuf> {
|
||||
self.blobs_db_path.clone()
|
||||
}
|
||||
|
||||
/// Get the freezer DB path, creating it if necessary.
|
||||
pub fn create_freezer_db_path(&self) -> Result<PathBuf, String> {
|
||||
ensure_dir_exists(self.get_freezer_db_path())
|
||||
}
|
||||
|
||||
/// Get the blobs DB path, creating it if necessary.
|
||||
pub fn create_blobs_db_path(&self) -> Result<Option<PathBuf>, String> {
|
||||
match self.get_blobs_db_path() {
|
||||
Some(blobs_db_path) => Ok(Some(ensure_dir_exists(blobs_db_path)?)),
|
||||
None => Ok(None),
|
||||
}
|
||||
}
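// Illustrative usage (hypothetical call site, outside this impl): the client builder can
// resolve the optional blobs DB path up front, creating its directory only when one was
// supplied via the CLI, e.g.:
//
//     let blobs_db_path: Option<PathBuf> = client_config.create_blobs_db_path()?;
//
// and then thread it through to the store-opening method above, which now takes
// `blobs_path: Option<PathBuf>`.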
|
||||
|
||||
/// Returns the "modern" path to the data_dir.
|
||||
///
|
||||
/// See `Self::get_data_dir` documentation for more info.
|
||||
|
@ -25,6 +25,7 @@ hex = { workspace = true }
|
||||
ethereum_ssz = { workspace = true }
|
||||
ssz_types = { workspace = true }
|
||||
eth2 = { workspace = true }
|
||||
kzg = { workspace = true }
|
||||
state_processing = { workspace = true }
|
||||
superstruct = { workspace = true }
|
||||
lru = { workspace = true }
|
||||
|
@ -7,7 +7,7 @@ use ethers_core::utils::rlp::RlpStream;
|
||||
use keccak_hash::KECCAK_EMPTY_LIST_RLP;
|
||||
use triehash::ordered_trie_root;
|
||||
use types::{
|
||||
map_execution_block_header_fields_except_withdrawals, Address, EthSpec, ExecutionBlockHash,
|
||||
map_execution_block_header_fields_base, Address, BeaconBlockRef, EthSpec, ExecutionBlockHash,
|
||||
ExecutionBlockHeader, ExecutionPayloadRef, Hash256, Hash64, Uint256,
|
||||
};
|
||||
|
||||
@ -18,6 +18,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
/// transactions.
|
||||
pub fn calculate_execution_block_hash(
|
||||
payload: ExecutionPayloadRef<T>,
|
||||
parent_beacon_block_root: Hash256,
|
||||
) -> (ExecutionBlockHash, Hash256) {
|
||||
// Calculate the transactions root.
|
||||
// We're currently using a deprecated Parity library for this. We should move to a
|
||||
@ -37,12 +38,23 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
None
|
||||
};
|
||||
|
||||
let rlp_blob_gas_used = payload.blob_gas_used().ok();
|
||||
let rlp_excess_blob_gas = payload.excess_blob_gas().ok();
|
||||
|
||||
// Calculate parent beacon block root (post-Deneb).
|
||||
let rlp_parent_beacon_block_root = rlp_excess_blob_gas
|
||||
.as_ref()
|
||||
.map(|_| parent_beacon_block_root);
|
||||
|
||||
// Construct the block header.
|
||||
let exec_block_header = ExecutionBlockHeader::from_payload(
|
||||
payload,
|
||||
KECCAK_EMPTY_LIST_RLP.as_fixed_bytes().into(),
|
||||
rlp_transactions_root,
|
||||
rlp_withdrawals_root,
|
||||
rlp_blob_gas_used,
|
||||
rlp_excess_blob_gas,
|
||||
rlp_parent_beacon_block_root,
|
||||
);
|
||||
|
||||
// Hash the RLP encoding of the block header.
|
||||
@ -56,10 +68,14 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
/// Verify `payload.block_hash` locally within Lighthouse.
|
||||
///
|
||||
/// No remote calls to the execution client will be made, so this is quite a cheap check.
|
||||
pub fn verify_payload_block_hash(&self, payload: ExecutionPayloadRef<T>) -> Result<(), Error> {
|
||||
pub fn verify_payload_block_hash(&self, block: BeaconBlockRef<T>) -> Result<(), Error> {
|
||||
let payload = block.execution_payload()?.execution_payload_ref();
|
||||
let parent_beacon_block_root = block.parent_root();
|
||||
|
||||
let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH);
|
||||
|
||||
let (header_hash, rlp_transactions_root) = Self::calculate_execution_block_hash(payload);
|
||||
let (header_hash, rlp_transactions_root) =
|
||||
Self::calculate_execution_block_hash(payload, parent_beacon_block_root);
|
||||
|
||||
if header_hash != payload.block_hash() {
|
||||
return Err(Error::BlockHashMismatch {
|
||||
@ -88,12 +104,21 @@ pub fn rlp_encode_withdrawal(withdrawal: &JsonWithdrawal) -> Vec<u8> {
|
||||
pub fn rlp_encode_block_header(header: &ExecutionBlockHeader) -> Vec<u8> {
|
||||
let mut rlp_header_stream = RlpStream::new();
|
||||
rlp_header_stream.begin_unbounded_list();
|
||||
map_execution_block_header_fields_except_withdrawals!(&header, |_, field| {
|
||||
map_execution_block_header_fields_base!(&header, |_, field| {
|
||||
rlp_header_stream.append(field);
|
||||
});
|
||||
if let Some(withdrawals_root) = &header.withdrawals_root {
|
||||
rlp_header_stream.append(withdrawals_root);
|
||||
}
|
||||
if let Some(blob_gas_used) = &header.blob_gas_used {
|
||||
rlp_header_stream.append(blob_gas_used);
|
||||
}
|
||||
if let Some(excess_blob_gas) = &header.excess_blob_gas {
|
||||
rlp_header_stream.append(excess_blob_gas);
|
||||
}
|
||||
if let Some(parent_beacon_block_root) = &header.parent_beacon_block_root {
|
||||
rlp_header_stream.append(parent_beacon_block_root);
|
||||
}
|
||||
rlp_header_stream.finalize_unbounded_list();
|
||||
rlp_header_stream.out().into()
|
||||
}
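// Illustrative sketch (hypothetical helper, not part of this change): the trailing
// optional fields above must be appended in fork order (withdrawals_root for Capella,
// then blob_gas_used, excess_blob_gas and parent_beacon_block_root for Deneb). The
// "append only if present" pattern generalises as follows, assuming the `Encodable`
// trait re-exported from `ethers_core::utils::rlp`:
fn append_if_some<T: Encodable>(stream: &mut RlpStream, field: &Option<T>) {
    if let Some(value) = field {
        stream.append(value);
    }
}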
|
||||
@ -140,6 +165,9 @@ mod test {
|
||||
nonce: Hash64::zero(),
|
||||
base_fee_per_gas: 0x036b_u64.into(),
|
||||
withdrawals_root: None,
|
||||
blob_gas_used: None,
|
||||
excess_blob_gas: None,
|
||||
parent_beacon_block_root: None,
|
||||
};
|
||||
let expected_rlp = "f90200a0e0a94a7a3c9617401586b1a27025d2d9671332d22d540e0af72b069170380f2aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0ec3c94b18b8a1cff7d60f8d258ec723312932928626b4c9355eb4ab3568ec7f7a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000000000088000000000000000082036b";
|
||||
let expected_hash =
|
||||
@ -168,6 +196,9 @@ mod test {
|
||||
nonce: Hash64::zero(),
|
||||
base_fee_per_gas: 0x036b_u64.into(),
|
||||
withdrawals_root: None,
|
||||
blob_gas_used: None,
|
||||
excess_blob_gas: None,
|
||||
parent_beacon_block_root: None,
|
||||
};
|
||||
let expected_rlp = "f901fda0927ca537f06c783a3a2635b8805eef1c8c2124f7444ad4a3389898dd832f2dbea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0e97859b065bd8dbbb4519c7cb935024de2484c2b7f881181b4360492f0b06b82a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000002000088000000000000000082036b";
let expected_hash =
@ -197,10 +228,43 @@ mod test {
nonce: Hash64::zero(),
base_fee_per_gas: 0x34187b238_u64.into(),
withdrawals_root: None,
blob_gas_used: None,
excess_blob_gas: None,
parent_beacon_block_root: None,
};
let expected_hash =
Hash256::from_str("6da69709cd5a34079b6604d29cd78fc01dacd7c6268980057ad92a2bede87351")
.unwrap();
test_rlp_encoding(&header, None, expected_hash);
}

#[test]
fn test_rlp_encode_block_deneb() {
let header = ExecutionBlockHeader {
parent_hash: Hash256::from_str("172864416698b842f4c92f7b476be294b4ef720202779df194cd225f531053ab").unwrap(),
ommers_hash: Hash256::from_str("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").unwrap(),
beneficiary: Address::from_str("878705ba3f8bc32fcf7f4caa1a35e72af65cf766").unwrap(),
state_root: Hash256::from_str("c6457d0df85c84c62d1c68f68138b6e796e8a44fb44de221386fb2d5611c41e0").unwrap(),
transactions_root: Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(),
receipts_root: Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(),
logs_bloom:<[u8; 256]>::from_hex("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap().into(),
difficulty: 0.into(),
number: 97.into(),
gas_limit: 27482534.into(),
gas_used: 0.into(),
timestamp: 1692132829u64,
extra_data: hex::decode("d883010d00846765746888676f312e32302e37856c696e7578").unwrap(),
mix_hash: Hash256::from_str("0b493c22d2ad4ca76c77ae6ad916af429b42b1dc98fdcb8e5ddbd049bbc5d623").unwrap(),
nonce: Hash64::zero(),
base_fee_per_gas: 2374u64.into(),
withdrawals_root: Some(Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap()),
blob_gas_used: Some(0x0u64),
excess_blob_gas: Some(0x0u64),
parent_beacon_block_root: Some(Hash256::from_str("f7d327d2c04e4f12e9cdd492e53d39a1d390f8b1571e3b2a22ac6e1e170e5b1a").unwrap()),
};
let expected_hash =
Hash256::from_str("a7448e600ead0a23d16f96aa46e8dea9eef8a7c5669a5f0a5ff32709afe9c408")
.unwrap();
test_rlp_encoding(&header, None, expected_hash);
}
}
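
For reference, the `expected_hash` values in these tests are the Keccak-256 digest of the RLP bytes, which is how an execution block hash is defined. A hedged sketch, assuming the `keccak256` helper from ethers-core (any Keccak-256 implementation works the same way):

use ethers_core::utils::keccak256;

// e.g. keccak256(hex::decode(expected_rlp).unwrap()) should equal expected_hash.
fn block_hash_from_rlp(rlp_bytes: &[u8]) -> [u8; 32] {
    keccak256(rlp_bytes)
}
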

@ -1,26 +1,35 @@
use crate::engines::ForkchoiceState;
use crate::http::{
ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2,
ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_FORKCHOICE_UPDATED_V3,
ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1,
ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2,
ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_GET_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V1,
ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3,
};
use eth2::types::{SsePayloadAttributes, SsePayloadAttributesV1, SsePayloadAttributesV2};
pub use ethers_core::types::Transaction;
use ethers_core::utils::rlp::{self, Decodable, Rlp};
use eth2::types::{
BlobsBundle, SsePayloadAttributes, SsePayloadAttributesV1, SsePayloadAttributesV2,
SsePayloadAttributesV3,
};
use ethers_core::types::Transaction;
use ethers_core::utils::rlp;
use ethers_core::utils::rlp::{Decodable, Rlp};
use http::deposit_methods::RpcError;
pub use json_structures::{JsonWithdrawal, TransitionConfigurationV1};
use pretty_reqwest_error::PrettyReqwestError;
use reqwest::StatusCode;
use serde::{Deserialize, Serialize};
use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash;
use std::convert::TryFrom;
use strum::IntoStaticStr;
use superstruct::superstruct;
pub use types::{
Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader,
Address, BeaconBlockRef, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader,
ExecutionPayloadRef, FixedVector, ForkName, Hash256, Transactions, Uint256, VariableList,
Withdrawal, Withdrawals,
};
use types::{ExecutionPayloadCapella, ExecutionPayloadMerge};
use types::{
BeaconStateError, ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadMerge,
KzgProofs, VersionedHash,
};

pub mod auth;
pub mod http;
@ -48,14 +57,12 @@ pub enum Error {
PayloadIdUnavailable,
TransitionConfigurationMismatch,
PayloadConversionLogicFlaw,
DeserializeTransaction(ssz_types::Error),
DeserializeTransactions(ssz_types::Error),
SszError(ssz_types::Error),
DeserializeWithdrawals(ssz_types::Error),
BuilderApi(builder_client::Error),
IncorrectStateVariant,
RequiredMethodUnsupported(&'static str),
UnsupportedForkVariant(String),
BadConversion(String),
RlpDecoderError(rlp::DecoderError),
}

@ -96,6 +103,12 @@ impl From<rlp::DecoderError> for Error {
}
}

impl From<ssz_types::Error> for Error {
fn from(e: ssz_types::Error) -> Self {
Error::SszError(e)
}
}

#[derive(Clone, Copy, Debug, PartialEq, IntoStaticStr)]
#[strum(serialize_all = "snake_case")]
pub enum PayloadStatusV1Status {
@ -137,7 +150,7 @@ pub struct ExecutionBlock {

/// Representation of an execution block with enough detail to reconstruct a payload.
#[superstruct(
variants(Merge, Capella),
variants(Merge, Capella, Deneb),
variant_attributes(
derive(Clone, Debug, PartialEq, Serialize, Deserialize,),
serde(bound = "T: EthSpec", rename_all = "camelCase"),
@ -171,8 +184,14 @@ pub struct ExecutionBlockWithTransactions<T: EthSpec> {
#[serde(rename = "hash")]
pub block_hash: ExecutionBlockHash,
pub transactions: Vec<Transaction>,
#[superstruct(only(Capella))]
#[superstruct(only(Capella, Deneb))]
pub withdrawals: Vec<JsonWithdrawal>,
#[superstruct(only(Deneb))]
#[serde(with = "serde_utils::u64_hex_be")]
pub blob_gas_used: u64,
#[superstruct(only(Deneb))]
#[serde(with = "serde_utils::u64_hex_be")]
pub excess_blob_gas: u64,
}

impl<T: EthSpec> TryFrom<ExecutionPayload<T>> for ExecutionBlockWithTransactions<T> {
@ -226,13 +245,39 @@ impl<T: EthSpec> TryFrom<ExecutionPayload<T>> for ExecutionBlockWithTransactions
.collect(),
})
}
ExecutionPayload::Deneb(block) => Self::Deneb(ExecutionBlockWithTransactionsDeneb {
parent_hash: block.parent_hash,
fee_recipient: block.fee_recipient,
state_root: block.state_root,
receipts_root: block.receipts_root,
logs_bloom: block.logs_bloom,
prev_randao: block.prev_randao,
block_number: block.block_number,
gas_limit: block.gas_limit,
gas_used: block.gas_used,
timestamp: block.timestamp,
extra_data: block.extra_data,
base_fee_per_gas: block.base_fee_per_gas,
block_hash: block.block_hash,
transactions: block
.transactions
.iter()
.map(|tx| Transaction::decode(&Rlp::new(tx)))
.collect::<Result<Vec<_>, _>>()?,
withdrawals: Vec::from(block.withdrawals)
.into_iter()
.map(|withdrawal| withdrawal.into())
.collect(),
blob_gas_used: block.blob_gas_used,
excess_blob_gas: block.excess_blob_gas,
}),
};
Ok(json_payload)
}
}

#[superstruct(
variants(V1, V2),
variants(V1, V2, V3),
variant_attributes(derive(Clone, Debug, Eq, Hash, PartialEq),),
cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"),
partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant")
@ -245,8 +290,10 @@ pub struct PayloadAttributes {
pub prev_randao: Hash256,
#[superstruct(getter(copy))]
pub suggested_fee_recipient: Address,
#[superstruct(only(V2))]
#[superstruct(only(V2, V3))]
pub withdrawals: Vec<Withdrawal>,
#[superstruct(only(V3), partial_getter(copy))]
pub parent_beacon_block_root: Hash256,
}
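
The constructor below picks the variant from which optional arguments are supplied: withdrawals alone select V2 (Capella), withdrawals plus a parent beacon block root select V3 (Deneb), and no withdrawals selects V1. A standalone sketch of that rule, with simplified placeholder types rather than the real structs:

#[derive(Debug, PartialEq)]
enum AttributesVersion {
    V1,
    V2,
    V3,
}

// Mirrors the selection logic in `PayloadAttributes::new`: the parent beacon
// block root only upgrades the version when withdrawals are also present.
fn select_version(withdrawals: bool, parent_beacon_block_root: bool) -> AttributesVersion {
    match (withdrawals, parent_beacon_block_root) {
        (true, true) => AttributesVersion::V3,
        (true, false) => AttributesVersion::V2,
        (false, _) => AttributesVersion::V1,
    }
}

fn main() {
    assert_eq!(select_version(true, true), AttributesVersion::V3); // Deneb
    assert_eq!(select_version(true, false), AttributesVersion::V2); // Capella
    assert_eq!(select_version(false, false), AttributesVersion::V1); // Bellatrix
}
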

impl PayloadAttributes {
@ -255,14 +302,24 @@ impl PayloadAttributes {
prev_randao: Hash256,
suggested_fee_recipient: Address,
withdrawals: Option<Vec<Withdrawal>>,
parent_beacon_block_root: Option<Hash256>,
) -> Self {
match withdrawals {
Some(withdrawals) => PayloadAttributes::V2(PayloadAttributesV2 {
timestamp,
prev_randao,
suggested_fee_recipient,
withdrawals,
}),
Some(withdrawals) => match parent_beacon_block_root {
Some(parent_beacon_block_root) => PayloadAttributes::V3(PayloadAttributesV3 {
timestamp,
prev_randao,
suggested_fee_recipient,
withdrawals,
parent_beacon_block_root,
}),
None => PayloadAttributes::V2(PayloadAttributesV2 {
timestamp,
prev_randao,
suggested_fee_recipient,
withdrawals,
}),
},
None => PayloadAttributes::V1(PayloadAttributesV1 {
timestamp,
prev_randao,
@ -295,6 +352,19 @@ impl From<PayloadAttributes> for SsePayloadAttributes {
suggested_fee_recipient,
withdrawals,
}),
PayloadAttributes::V3(PayloadAttributesV3 {
timestamp,
prev_randao,
suggested_fee_recipient,
withdrawals,
parent_beacon_block_root,
}) => Self::V3(SsePayloadAttributesV3 {
timestamp,
prev_randao,
suggested_fee_recipient,
withdrawals,
parent_beacon_block_root,
}),
}
}
}
@ -320,7 +390,7 @@ pub struct ProposeBlindedBlockResponse {
}

#[superstruct(
variants(Merge, Capella),
variants(Merge, Capella, Deneb),
variant_attributes(derive(Clone, Debug, PartialEq),),
map_into(ExecutionPayload),
map_ref_into(ExecutionPayloadRef),
@ -333,7 +403,27 @@ pub struct GetPayloadResponse<T: EthSpec> {
pub execution_payload: ExecutionPayloadMerge<T>,
#[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))]
pub execution_payload: ExecutionPayloadCapella<T>,
#[superstruct(only(Deneb), partial_getter(rename = "execution_payload_deneb"))]
pub execution_payload: ExecutionPayloadDeneb<T>,
pub block_value: Uint256,
#[superstruct(only(Deneb))]
pub blobs_bundle: BlobsBundle<T>,
#[superstruct(only(Deneb), partial_getter(copy))]
pub should_override_builder: bool,
}

impl<E: EthSpec> GetPayloadResponse<E> {
pub fn fee_recipient(&self) -> Address {
ExecutionPayloadRef::from(self.to_ref()).fee_recipient()
}

pub fn block_hash(&self) -> ExecutionBlockHash {
ExecutionPayloadRef::from(self.to_ref()).block_hash()
}

pub fn block_number(&self) -> u64 {
ExecutionPayloadRef::from(self.to_ref()).block_number()
}
}

impl<'a, T: EthSpec> From<GetPayloadResponseRef<'a, T>> for ExecutionPayloadRef<'a, T> {
@ -352,16 +442,25 @@ impl<T: EthSpec> From<GetPayloadResponse<T>> for ExecutionPayload<T> {
}
}

impl<T: EthSpec> From<GetPayloadResponse<T>> for (ExecutionPayload<T>, Uint256) {
impl<T: EthSpec> From<GetPayloadResponse<T>>
for (ExecutionPayload<T>, Uint256, Option<BlobsBundle<T>>)
{
fn from(response: GetPayloadResponse<T>) -> Self {
match response {
GetPayloadResponse::Merge(inner) => (
ExecutionPayload::Merge(inner.execution_payload),
inner.block_value,
None,
),
GetPayloadResponse::Capella(inner) => (
ExecutionPayload::Capella(inner.execution_payload),
inner.block_value,
None,
),
GetPayloadResponse::Deneb(inner) => (
ExecutionPayload::Deneb(inner.execution_payload),
inner.block_value,
Some(inner.blobs_bundle),
),
}
}
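
A hypothetical call site for this conversion; only the Deneb variant carries a blobs bundle, so the third tuple element is an `Option` (the function name here is illustrative, not part of the crate):

fn split_response<T: EthSpec>(
    response: GetPayloadResponse<T>,
) -> (ExecutionPayload<T>, Uint256, Option<BlobsBundle<T>>) {
    // Pre-Deneb responses yield `None` for the bundle.
    response.into()
}
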
@ -435,6 +534,138 @@ impl<E: EthSpec> ExecutionPayloadBodyV1<E> {
))
}
}
ExecutionPayloadHeader::Deneb(header) => {
if let Some(withdrawals) = self.withdrawals {
Ok(ExecutionPayload::Deneb(ExecutionPayloadDeneb {
parent_hash: header.parent_hash,
fee_recipient: header.fee_recipient,
state_root: header.state_root,
receipts_root: header.receipts_root,
logs_bloom: header.logs_bloom,
prev_randao: header.prev_randao,
block_number: header.block_number,
gas_limit: header.gas_limit,
gas_used: header.gas_used,
timestamp: header.timestamp,
extra_data: header.extra_data,
base_fee_per_gas: header.base_fee_per_gas,
block_hash: header.block_hash,
transactions: self.transactions,
withdrawals,
blob_gas_used: header.blob_gas_used,
excess_blob_gas: header.excess_blob_gas,
}))
} else {
Err(format!(
"block {} is post capella but payload body doesn't have withdrawals",
header.block_hash
))
}
}
}
}
}

#[superstruct(
variants(Merge, Capella, Deneb),
variant_attributes(derive(Clone, Debug, PartialEq),),
map_into(ExecutionPayload),
map_ref_into(ExecutionPayloadRef),
cast_error(
ty = "BeaconStateError",
expr = "BeaconStateError::IncorrectStateVariant"
),
partial_getter_error(
ty = "BeaconStateError",
expr = "BeaconStateError::IncorrectStateVariant"
)
)]
#[derive(Clone, Debug, PartialEq)]
pub struct NewPayloadRequest<E: EthSpec> {
#[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))]
pub execution_payload: ExecutionPayloadMerge<E>,
#[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))]
pub execution_payload: ExecutionPayloadCapella<E>,
#[superstruct(only(Deneb), partial_getter(rename = "execution_payload_deneb"))]
pub execution_payload: ExecutionPayloadDeneb<E>,
#[superstruct(only(Deneb))]
pub versioned_hashes: Vec<VersionedHash>,
#[superstruct(only(Deneb))]
pub parent_beacon_block_root: Hash256,
}

impl<E: EthSpec> NewPayloadRequest<E> {
pub fn parent_hash(&self) -> ExecutionBlockHash {
match self {
Self::Merge(payload) => payload.execution_payload.parent_hash,
Self::Capella(payload) => payload.execution_payload.parent_hash,
Self::Deneb(payload) => payload.execution_payload.parent_hash,
}
}

pub fn block_hash(&self) -> ExecutionBlockHash {
match self {
Self::Merge(payload) => payload.execution_payload.block_hash,
Self::Capella(payload) => payload.execution_payload.block_hash,
Self::Deneb(payload) => payload.execution_payload.block_hash,
}
}

pub fn block_number(&self) -> u64 {
match self {
Self::Merge(payload) => payload.execution_payload.block_number,
Self::Capella(payload) => payload.execution_payload.block_number,
Self::Deneb(payload) => payload.execution_payload.block_number,
}
}

pub fn into_execution_payload(self) -> ExecutionPayload<E> {
map_new_payload_request_into_execution_payload!(self, |request, cons| {
cons(request.execution_payload)
})
}
}

impl<'a, E: EthSpec> TryFrom<BeaconBlockRef<'a, E>> for NewPayloadRequest<E> {
type Error = BeaconStateError;

fn try_from(block: BeaconBlockRef<'a, E>) -> Result<Self, Self::Error> {
match block {
BeaconBlockRef::Base(_) | BeaconBlockRef::Altair(_) => {
Err(Self::Error::IncorrectStateVariant)
}
BeaconBlockRef::Merge(block_ref) => Ok(Self::Merge(NewPayloadRequestMerge {
execution_payload: block_ref.body.execution_payload.execution_payload.clone(),
})),
BeaconBlockRef::Capella(block_ref) => Ok(Self::Capella(NewPayloadRequestCapella {
execution_payload: block_ref.body.execution_payload.execution_payload.clone(),
})),
BeaconBlockRef::Deneb(block_ref) => Ok(Self::Deneb(NewPayloadRequestDeneb {
execution_payload: block_ref.body.execution_payload.execution_payload.clone(),
versioned_hashes: block_ref
.body
.blob_kzg_commitments
.iter()
.map(kzg_commitment_to_versioned_hash)
.collect(),
parent_beacon_block_root: block_ref.parent_root,
})),
}
}
}
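
The versioned hashes passed to the engine are derived from the block's KZG commitments. An illustrative sketch of what EIP-4844 specifies for that mapping, not Lighthouse's own `kzg_commitment_to_versioned_hash` (which lives in `state_processing`, per the import above): SHA-256 the 48-byte commitment and overwrite the first byte with the KZG version byte.

use sha2::{Digest, Sha256};

const VERSIONED_HASH_VERSION_KZG: u8 = 0x01;

// versioned_hash = 0x01 || sha256(commitment)[1..32]
fn versioned_hash_sketch(commitment: &[u8; 48]) -> [u8; 32] {
    let mut hash: [u8; 32] = Sha256::digest(commitment).into();
    hash[0] = VERSIONED_HASH_VERSION_KZG;
    hash
}
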

impl<E: EthSpec> TryFrom<ExecutionPayload<E>> for NewPayloadRequest<E> {
type Error = BeaconStateError;

fn try_from(payload: ExecutionPayload<E>) -> Result<Self, Self::Error> {
match payload {
ExecutionPayload::Merge(payload) => Ok(Self::Merge(NewPayloadRequestMerge {
execution_payload: payload,
})),
ExecutionPayload::Capella(payload) => Ok(Self::Capella(NewPayloadRequestCapella {
execution_payload: payload,
})),
ExecutionPayload::Deneb(_) => Err(Self::Error::IncorrectStateVariant),
}
}
}
@ -443,12 +674,15 @@ impl<E: EthSpec> ExecutionPayloadBodyV1<E> {
pub struct EngineCapabilities {
pub new_payload_v1: bool,
pub new_payload_v2: bool,
pub new_payload_v3: bool,
pub forkchoice_updated_v1: bool,
pub forkchoice_updated_v2: bool,
pub forkchoice_updated_v3: bool,
pub get_payload_bodies_by_hash_v1: bool,
pub get_payload_bodies_by_range_v1: bool,
pub get_payload_v1: bool,
pub get_payload_v2: bool,
pub get_payload_v3: bool,
}
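
For instance, a Deneb-ready execution engine would be represented by setting every flag, in which case the response builder below advertises the V3 methods alongside their V1/V2 counterparts. The values here are purely illustrative:

fn deneb_ready_capabilities() -> EngineCapabilities {
    EngineCapabilities {
        new_payload_v1: true,
        new_payload_v2: true,
        new_payload_v3: true,
        forkchoice_updated_v1: true,
        forkchoice_updated_v2: true,
        forkchoice_updated_v3: true,
        get_payload_bodies_by_hash_v1: true,
        get_payload_bodies_by_range_v1: true,
        get_payload_v1: true,
        get_payload_v2: true,
        get_payload_v3: true,
    }
}
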

impl EngineCapabilities {
@ -460,12 +694,18 @@ impl EngineCapabilities {
if self.new_payload_v2 {
response.push(ENGINE_NEW_PAYLOAD_V2);
}
if self.new_payload_v3 {
response.push(ENGINE_NEW_PAYLOAD_V3);
}
if self.forkchoice_updated_v1 {
response.push(ENGINE_FORKCHOICE_UPDATED_V1);
}
if self.forkchoice_updated_v2 {
response.push(ENGINE_FORKCHOICE_UPDATED_V2);
}
if self.forkchoice_updated_v3 {
response.push(ENGINE_FORKCHOICE_UPDATED_V3);
}
if self.get_payload_bodies_by_hash_v1 {
response.push(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1);
}
@ -478,6 +718,9 @@ impl EngineCapabilities {
if self.get_payload_v2 {
response.push(ENGINE_GET_PAYLOAD_V2);
}
if self.get_payload_v3 {
response.push(ENGINE_GET_PAYLOAD_V3);
}

response
}

@ -32,14 +32,17 @@ pub const ETH_SYNCING_TIMEOUT: Duration = Duration::from_secs(1);

pub const ENGINE_NEW_PAYLOAD_V1: &str = "engine_newPayloadV1";
pub const ENGINE_NEW_PAYLOAD_V2: &str = "engine_newPayloadV2";
pub const ENGINE_NEW_PAYLOAD_V3: &str = "engine_newPayloadV3";
pub const ENGINE_NEW_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(8);

pub const ENGINE_GET_PAYLOAD_V1: &str = "engine_getPayloadV1";
pub const ENGINE_GET_PAYLOAD_V2: &str = "engine_getPayloadV2";
pub const ENGINE_GET_PAYLOAD_V3: &str = "engine_getPayloadV3";
pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2);

pub const ENGINE_FORKCHOICE_UPDATED_V1: &str = "engine_forkchoiceUpdatedV1";
pub const ENGINE_FORKCHOICE_UPDATED_V2: &str = "engine_forkchoiceUpdatedV2";
pub const ENGINE_FORKCHOICE_UPDATED_V3: &str = "engine_forkchoiceUpdatedV3";
pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_secs(8);

pub const ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1: &str = "engine_getPayloadBodiesByHashV1";
@ -58,10 +61,13 @@ pub const METHOD_NOT_FOUND_CODE: i64 = -32601;
pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[
ENGINE_NEW_PAYLOAD_V1,
ENGINE_NEW_PAYLOAD_V2,
ENGINE_NEW_PAYLOAD_V3,
ENGINE_GET_PAYLOAD_V1,
ENGINE_GET_PAYLOAD_V2,
ENGINE_GET_PAYLOAD_V3,
ENGINE_FORKCHOICE_UPDATED_V1,
ENGINE_FORKCHOICE_UPDATED_V2,
ENGINE_FORKCHOICE_UPDATED_V3,
ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1,
ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1,
];
@ -72,12 +78,15 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[
pub static PRE_CAPELLA_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities {
new_payload_v1: true,
new_payload_v2: false,
new_payload_v3: false,
forkchoice_updated_v1: true,
forkchoice_updated_v2: false,
forkchoice_updated_v3: false,
get_payload_bodies_by_hash_v1: false,
get_payload_bodies_by_range_v1: false,
get_payload_v1: true,
get_payload_v2: false,
get_payload_v3: false,
};

/// Contains methods to convert arbitrary bytes to an ETH2 deposit contract object.
@ -741,6 +750,14 @@ impl HttpJsonRpc {
)
.await?,
),
ForkName::Deneb => ExecutionBlockWithTransactions::Deneb(
self.rpc_request(
ETH_GET_BLOCK_BY_HASH,
params,
ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier,
)
.await?,
),
ForkName::Base | ForkName::Altair => {
return Err(Error::UnsupportedForkVariant(format!(
"called get_block_by_hash_with_txns with fork {:?}",
@ -784,6 +801,27 @@ impl HttpJsonRpc {
Ok(response.into())
}

pub async fn new_payload_v3<T: EthSpec>(
&self,
new_payload_request_deneb: NewPayloadRequestDeneb<T>,
) -> Result<PayloadStatusV1, Error> {
let params = json!([
JsonExecutionPayload::V3(new_payload_request_deneb.execution_payload.into()),
new_payload_request_deneb.versioned_hashes,
new_payload_request_deneb.parent_beacon_block_root,
]);

let response: JsonPayloadStatusV1 = self
.rpc_request(
ENGINE_NEW_PAYLOAD_V3,
params,
ENGINE_NEW_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier,
)
.await?;

Ok(response.into())
}

pub async fn get_payload_v1<T: EthSpec>(
&self,
payload_id: PayloadId,
@ -835,10 +873,33 @@ impl HttpJsonRpc {
.await?;
Ok(JsonGetPayloadResponse::V2(response).into())
}
ForkName::Base | ForkName::Altair => Err(Error::UnsupportedForkVariant(format!(
"called get_payload_v2 with {}",
fork_name
))),
ForkName::Base | ForkName::Altair | ForkName::Deneb => Err(
Error::UnsupportedForkVariant(format!("called get_payload_v2 with {}", fork_name)),
),
}
}

pub async fn get_payload_v3<T: EthSpec>(
&self,
fork_name: ForkName,
payload_id: PayloadId,
) -> Result<GetPayloadResponse<T>, Error> {
let params = json!([JsonPayloadIdRequest::from(payload_id)]);

match fork_name {
ForkName::Deneb => {
let response: JsonGetPayloadResponseV3<T> = self
.rpc_request(
ENGINE_GET_PAYLOAD_V3,
params,
ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier,
)
.await?;
Ok(JsonGetPayloadResponse::V3(response).into())
}
ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => Err(
Error::UnsupportedForkVariant(format!("called get_payload_v3 with {}", fork_name)),
),
}
}

@ -884,6 +945,27 @@ impl HttpJsonRpc {
Ok(response.into())
}

pub async fn forkchoice_updated_v3(
&self,
forkchoice_state: ForkchoiceState,
payload_attributes: Option<PayloadAttributes>,
) -> Result<ForkchoiceUpdatedResponse, Error> {
let params = json!([
JsonForkchoiceStateV1::from(forkchoice_state),
payload_attributes.map(JsonPayloadAttributes::from)
]);

let response: JsonForkchoiceUpdatedV1Response = self
.rpc_request(
ENGINE_FORKCHOICE_UPDATED_V3,
params,
ENGINE_FORKCHOICE_UPDATED_TIMEOUT * self.execution_timeout_multiplier,
)
.await?;

Ok(response.into())
}

pub async fn get_payload_bodies_by_hash_v1<E: EthSpec>(
&self,
block_hashes: Vec<ExecutionBlockHash>,
@ -950,14 +1032,17 @@ impl HttpJsonRpc {
Ok(capabilities) => Ok(EngineCapabilities {
new_payload_v1: capabilities.contains(ENGINE_NEW_PAYLOAD_V1),
new_payload_v2: capabilities.contains(ENGINE_NEW_PAYLOAD_V2),
new_payload_v3: capabilities.contains(ENGINE_NEW_PAYLOAD_V3),
forkchoice_updated_v1: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V1),
forkchoice_updated_v2: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V2),
forkchoice_updated_v3: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V3),
get_payload_bodies_by_hash_v1: capabilities
.contains(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1),
get_payload_bodies_by_range_v1: capabilities
.contains(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1),
get_payload_v1: capabilities.contains(ENGINE_GET_PAYLOAD_V1),
get_payload_v2: capabilities.contains(ENGINE_GET_PAYLOAD_V2),
get_payload_v3: capabilities.contains(ENGINE_GET_PAYLOAD_V3),
}),
}
}
@ -994,15 +1079,28 @@ impl HttpJsonRpc {
// new_payload that the execution engine supports
pub async fn new_payload<T: EthSpec>(
&self,
execution_payload: ExecutionPayload<T>,
new_payload_request: NewPayloadRequest<T>,
) -> Result<PayloadStatusV1, Error> {
let engine_capabilities = self.get_engine_capabilities(None).await?;
if engine_capabilities.new_payload_v2 {
self.new_payload_v2(execution_payload).await
} else if engine_capabilities.new_payload_v1 {
self.new_payload_v1(execution_payload).await
} else {
Err(Error::RequiredMethodUnsupported("engine_newPayload"))
match new_payload_request {
NewPayloadRequest::Merge(_) | NewPayloadRequest::Capella(_) => {
if engine_capabilities.new_payload_v2 {
self.new_payload_v2(new_payload_request.into_execution_payload())
.await
} else if engine_capabilities.new_payload_v1 {
self.new_payload_v1(new_payload_request.into_execution_payload())
.await
} else {
Err(Error::RequiredMethodUnsupported("engine_newPayload"))
}
}
NewPayloadRequest::Deneb(new_payload_request_deneb) => {
if engine_capabilities.new_payload_v3 {
self.new_payload_v3(new_payload_request_deneb).await
} else {
Err(Error::RequiredMethodUnsupported("engine_newPayloadV3"))
}
}
}
}

@ -1014,12 +1112,27 @@ impl HttpJsonRpc {
payload_id: PayloadId,
) -> Result<GetPayloadResponse<T>, Error> {
let engine_capabilities = self.get_engine_capabilities(None).await?;
if engine_capabilities.get_payload_v2 {
self.get_payload_v2(fork_name, payload_id).await
} else if engine_capabilities.new_payload_v1 {
self.get_payload_v1(payload_id).await
} else {
Err(Error::RequiredMethodUnsupported("engine_getPayload"))
match fork_name {
ForkName::Merge | ForkName::Capella => {
if engine_capabilities.get_payload_v2 {
self.get_payload_v2(fork_name, payload_id).await
} else if engine_capabilities.new_payload_v1 {
self.get_payload_v1(payload_id).await
} else {
Err(Error::RequiredMethodUnsupported("engine_getPayload"))
}
}
ForkName::Deneb => {
if engine_capabilities.get_payload_v3 {
self.get_payload_v3(fork_name, payload_id).await
} else {
Err(Error::RequiredMethodUnsupported("engine_getPayloadV3"))
}
}
ForkName::Base | ForkName::Altair => Err(Error::UnsupportedForkVariant(format!(
"called get_payload with {}",
fork_name
))),
}
}

@ -1028,14 +1141,41 @@ impl HttpJsonRpc {
pub async fn forkchoice_updated(
&self,
forkchoice_state: ForkchoiceState,
payload_attributes: Option<PayloadAttributes>,
maybe_payload_attributes: Option<PayloadAttributes>,
) -> Result<ForkchoiceUpdatedResponse, Error> {
let engine_capabilities = self.get_engine_capabilities(None).await?;
if engine_capabilities.forkchoice_updated_v2 {
self.forkchoice_updated_v2(forkchoice_state, payload_attributes)
if let Some(payload_attributes) = maybe_payload_attributes.as_ref() {
match payload_attributes {
PayloadAttributes::V1(_) | PayloadAttributes::V2(_) => {
if engine_capabilities.forkchoice_updated_v2 {
self.forkchoice_updated_v2(forkchoice_state, maybe_payload_attributes)
.await
} else if engine_capabilities.forkchoice_updated_v1 {
self.forkchoice_updated_v1(forkchoice_state, maybe_payload_attributes)
.await
} else {
Err(Error::RequiredMethodUnsupported("engine_forkchoiceUpdated"))
}
}
PayloadAttributes::V3(_) => {
if engine_capabilities.forkchoice_updated_v3 {
self.forkchoice_updated_v3(forkchoice_state, maybe_payload_attributes)
.await
} else {
Err(Error::RequiredMethodUnsupported(
"engine_forkchoiceUpdatedV3",
))
}
}
}
} else if engine_capabilities.forkchoice_updated_v3 {
self.forkchoice_updated_v3(forkchoice_state, maybe_payload_attributes)
.await
} else if engine_capabilities.forkchoice_updated_v2 {
self.forkchoice_updated_v2(forkchoice_state, maybe_payload_attributes)
.await
} else if engine_capabilities.forkchoice_updated_v1 {
self.forkchoice_updated_v1(forkchoice_state, payload_attributes)
self.forkchoice_updated_v1(forkchoice_state, maybe_payload_attributes)
.await
} else {
Err(Error::RequiredMethodUnsupported("engine_forkchoiceUpdated"))

@ -2,10 +2,12 @@ use super::*;
use serde::{Deserialize, Serialize};
use strum::EnumString;
use superstruct::superstruct;
use types::beacon_block_body::KzgCommitments;
use types::blob_sidecar::BlobsList;
use types::{
EthSpec, ExecutionBlockHash, FixedVector, Transactions, Unsigned, VariableList, Withdrawal,
EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadDeneb,
ExecutionPayloadMerge, FixedVector, Transactions, Unsigned, VariableList, Withdrawal,
};
use types::{ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
@ -61,7 +63,7 @@ pub struct JsonPayloadIdResponse {
}

#[superstruct(
variants(V1, V2),
variants(V1, V2, V3),
variant_attributes(
derive(Debug, PartialEq, Default, Serialize, Deserialize,),
serde(bound = "T: EthSpec", rename_all = "camelCase"),
@ -94,8 +96,14 @@ pub struct JsonExecutionPayload<T: EthSpec> {
pub block_hash: ExecutionBlockHash,
#[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")]
pub transactions: Transactions<T>,
#[superstruct(only(V2))]
#[superstruct(only(V2, V3))]
pub withdrawals: VariableList<JsonWithdrawal, T::MaxWithdrawalsPerPayload>,
#[superstruct(only(V3))]
#[serde(with = "serde_utils::u64_hex_be")]
pub blob_gas_used: u64,
#[superstruct(only(V3))]
#[serde(with = "serde_utils::u64_hex_be")]
pub excess_blob_gas: u64,
}

impl<T: EthSpec> From<ExecutionPayloadMerge<T>> for JsonExecutionPayloadV1<T> {
@ -144,12 +152,41 @@ impl<T: EthSpec> From<ExecutionPayloadCapella<T>> for JsonExecutionPayloadV2<T>
}
}
}
impl<T: EthSpec> From<ExecutionPayloadDeneb<T>> for JsonExecutionPayloadV3<T> {
fn from(payload: ExecutionPayloadDeneb<T>) -> Self {
JsonExecutionPayloadV3 {
parent_hash: payload.parent_hash,
fee_recipient: payload.fee_recipient,
state_root: payload.state_root,
receipts_root: payload.receipts_root,
logs_bloom: payload.logs_bloom,
prev_randao: payload.prev_randao,
block_number: payload.block_number,
gas_limit: payload.gas_limit,
gas_used: payload.gas_used,
timestamp: payload.timestamp,
extra_data: payload.extra_data,
base_fee_per_gas: payload.base_fee_per_gas,
block_hash: payload.block_hash,
transactions: payload.transactions,
withdrawals: payload
.withdrawals
.into_iter()
.map(Into::into)
.collect::<Vec<_>>()
.into(),
blob_gas_used: payload.blob_gas_used,
excess_blob_gas: payload.excess_blob_gas,
}
}
}

impl<T: EthSpec> From<ExecutionPayload<T>> for JsonExecutionPayload<T> {
fn from(execution_payload: ExecutionPayload<T>) -> Self {
match execution_payload {
ExecutionPayload::Merge(payload) => JsonExecutionPayload::V1(payload.into()),
ExecutionPayload::Capella(payload) => JsonExecutionPayload::V2(payload.into()),
ExecutionPayload::Deneb(payload) => JsonExecutionPayload::V3(payload.into()),
}
}
}
@ -200,18 +237,47 @@ impl<T: EthSpec> From<JsonExecutionPayloadV2<T>> for ExecutionPayloadCapella<T>
}
}
}
impl<T: EthSpec> From<JsonExecutionPayloadV3<T>> for ExecutionPayloadDeneb<T> {
fn from(payload: JsonExecutionPayloadV3<T>) -> Self {
ExecutionPayloadDeneb {
parent_hash: payload.parent_hash,
fee_recipient: payload.fee_recipient,
state_root: payload.state_root,
receipts_root: payload.receipts_root,
logs_bloom: payload.logs_bloom,
prev_randao: payload.prev_randao,
block_number: payload.block_number,
gas_limit: payload.gas_limit,
gas_used: payload.gas_used,
timestamp: payload.timestamp,
extra_data: payload.extra_data,
base_fee_per_gas: payload.base_fee_per_gas,
block_hash: payload.block_hash,
transactions: payload.transactions,
withdrawals: payload
.withdrawals
.into_iter()
.map(Into::into)
.collect::<Vec<_>>()
.into(),
blob_gas_used: payload.blob_gas_used,
excess_blob_gas: payload.excess_blob_gas,
}
}
}

impl<T: EthSpec> From<JsonExecutionPayload<T>> for ExecutionPayload<T> {
fn from(json_execution_payload: JsonExecutionPayload<T>) -> Self {
match json_execution_payload {
JsonExecutionPayload::V1(payload) => ExecutionPayload::Merge(payload.into()),
JsonExecutionPayload::V2(payload) => ExecutionPayload::Capella(payload.into()),
JsonExecutionPayload::V3(payload) => ExecutionPayload::Deneb(payload.into()),
}
}
}

#[superstruct(
variants(V1, V2),
variants(V1, V2, V3),
variant_attributes(
derive(Debug, PartialEq, Serialize, Deserialize),
serde(bound = "T: EthSpec", rename_all = "camelCase")
@ -226,8 +292,14 @@ pub struct JsonGetPayloadResponse<T: EthSpec> {
pub execution_payload: JsonExecutionPayloadV1<T>,
#[superstruct(only(V2), partial_getter(rename = "execution_payload_v2"))]
pub execution_payload: JsonExecutionPayloadV2<T>,
#[superstruct(only(V3), partial_getter(rename = "execution_payload_v3"))]
pub execution_payload: JsonExecutionPayloadV3<T>,
#[serde(with = "serde_utils::u256_hex_be")]
pub block_value: Uint256,
#[superstruct(only(V3))]
pub blobs_bundle: JsonBlobsBundleV1<T>,
#[superstruct(only(V3))]
pub should_override_builder: bool,
}

impl<T: EthSpec> From<JsonGetPayloadResponse<T>> for GetPayloadResponse<T> {
@ -245,6 +317,14 @@ impl<T: EthSpec> From<JsonGetPayloadResponse<T>> for GetPayloadResponse<T> {
block_value: response.block_value,
})
}
JsonGetPayloadResponse::V3(response) => {
GetPayloadResponse::Deneb(GetPayloadResponseDeneb {
execution_payload: response.execution_payload.into(),
block_value: response.block_value,
blobs_bundle: response.blobs_bundle.into(),
should_override_builder: response.should_override_builder,
})
}
}
}
}
@ -284,7 +364,7 @@ impl From<JsonWithdrawal> for Withdrawal {
}

#[superstruct(
variants(V1, V2),
variants(V1, V2, V3),
variant_attributes(
derive(Debug, Clone, PartialEq, Serialize, Deserialize),
serde(rename_all = "camelCase")
@ -299,8 +379,10 @@ pub struct JsonPayloadAttributes {
pub timestamp: u64,
pub prev_randao: Hash256,
pub suggested_fee_recipient: Address,
#[superstruct(only(V2))]
#[superstruct(only(V2, V3))]
pub withdrawals: Vec<JsonWithdrawal>,
#[superstruct(only(V3))]
pub parent_beacon_block_root: Hash256,
}

impl From<PayloadAttributes> for JsonPayloadAttributes {
@ -317,6 +399,13 @@ impl From<PayloadAttributes> for JsonPayloadAttributes {
suggested_fee_recipient: pa.suggested_fee_recipient,
withdrawals: pa.withdrawals.into_iter().map(Into::into).collect(),
}),
PayloadAttributes::V3(pa) => Self::V3(JsonPayloadAttributesV3 {
timestamp: pa.timestamp,
prev_randao: pa.prev_randao,
suggested_fee_recipient: pa.suggested_fee_recipient,
withdrawals: pa.withdrawals.into_iter().map(Into::into).collect(),
parent_beacon_block_root: pa.parent_beacon_block_root,
}),
}
}
}
@ -335,6 +424,41 @@ impl From<JsonPayloadAttributes> for PayloadAttributes {
suggested_fee_recipient: jpa.suggested_fee_recipient,
withdrawals: jpa.withdrawals.into_iter().map(Into::into).collect(),
}),
JsonPayloadAttributes::V3(jpa) => Self::V3(PayloadAttributesV3 {
timestamp: jpa.timestamp,
prev_randao: jpa.prev_randao,
suggested_fee_recipient: jpa.suggested_fee_recipient,
withdrawals: jpa.withdrawals.into_iter().map(Into::into).collect(),
parent_beacon_block_root: jpa.parent_beacon_block_root,
}),
}
}
}

#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(bound = "E: EthSpec", rename_all = "camelCase")]
pub struct JsonBlobsBundleV1<E: EthSpec> {
pub commitments: KzgCommitments<E>,
pub proofs: KzgProofs<E>,
#[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")]
pub blobs: BlobsList<E>,
}

impl<E: EthSpec> From<BlobsBundle<E>> for JsonBlobsBundleV1<E> {
fn from(blobs_bundle: BlobsBundle<E>) -> Self {
Self {
commitments: blobs_bundle.commitments,
proofs: blobs_bundle.proofs,
blobs: blobs_bundle.blobs,
}
}
}
impl<E: EthSpec> From<JsonBlobsBundleV1<E>> for BlobsBundle<E> {
fn from(json_blobs_bundle: JsonBlobsBundleV1<E>) -> Self {
Self {
commitments: json_blobs_bundle.commitments,
proofs: json_blobs_bundle.proofs,
blobs: json_blobs_bundle.blobs,
}
}
}

@ -14,7 +14,9 @@ pub use engine_api::*;
pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc};
use engines::{Engine, EngineError};
pub use engines::{EngineState, ForkchoiceState};
use eth2::types::builder_bid::SignedBuilderBid;
use eth2::types::{builder_bid::SignedBuilderBid, BlobsBundle, ForkVersionedResponse};
use eth2::types::{FullPayloadContents, SignedBlockContents};
use ethers_core::types::Transaction as EthersTransaction;
use fork_choice::ForkchoiceUpdateParameters;
use lru::LruCache;
use payload_status::process_payload_status;
@ -27,7 +29,6 @@ use std::collections::HashMap;
use std::fmt;
use std::future::Future;
use std::io::Write;
use std::marker::PhantomData;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
@ -39,12 +40,15 @@ use tokio::{
};
use tokio_stream::wrappers::WatchStream;
use tree_hash::TreeHash;
use types::{AbstractExecPayload, BeaconStateError, ExecPayload};
use types::beacon_block_body::KzgCommitments;
use types::builder_bid::BuilderBid;
use types::sidecar::{BlobItems, Sidecar};
use types::KzgProofs;
use types::{
BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionPayloadCapella, ExecutionPayloadMerge,
ForkVersionedResponse, ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock,
Slot,
AbstractExecPayload, BeaconStateError, BlindedPayload, BlockType, ChainSpec, Epoch,
ExecPayload, ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadMerge,
};
use types::{ProposerPreparationData, PublicKeyBytes, Signature, Slot};

mod block_hash;
mod engine_api;
@ -83,6 +87,40 @@ pub enum ProvenancedPayload<P> {
Builder(P),
}

impl<E: EthSpec, Payload: AbstractExecPayload<E>> TryFrom<BuilderBid<E>>
for ProvenancedPayload<BlockProposalContents<E, Payload>>
{
type Error = Error;

fn try_from(value: BuilderBid<E>) -> Result<Self, Error> {
let block_proposal_contents = match value {
BuilderBid::Merge(builder_bid) => BlockProposalContents::Payload {
payload: ExecutionPayloadHeader::Merge(builder_bid.header)
.try_into()
.map_err(|_| Error::InvalidPayloadConversion)?,
block_value: builder_bid.value,
},
BuilderBid::Capella(builder_bid) => BlockProposalContents::Payload {
payload: ExecutionPayloadHeader::Capella(builder_bid.header)
.try_into()
.map_err(|_| Error::InvalidPayloadConversion)?,
block_value: builder_bid.value,
},
BuilderBid::Deneb(builder_bid) => BlockProposalContents::PayloadAndBlobs {
payload: ExecutionPayloadHeader::Deneb(builder_bid.header)
.try_into()
.map_err(|_| Error::InvalidPayloadConversion)?,
block_value: builder_bid.value,
kzg_commitments: builder_bid.blinded_blobs_bundle.commitments,
blobs: BlobItems::try_from_blob_roots(builder_bid.blinded_blobs_bundle.blob_roots)
.map_err(Error::InvalidBlobConversion)?,
proofs: builder_bid.blinded_blobs_bundle.proofs,
},
};
Ok(ProvenancedPayload::Builder(block_proposal_contents))
}
}

#[derive(Debug)]
pub enum Error {
NoEngine,
@ -104,6 +142,8 @@ pub enum Error {
InvalidJWTSecret(String),
InvalidForkForPayload,
InvalidPayloadBody(String),
InvalidPayloadConversion,
InvalidBlobConversion(String),
BeaconStateError(BeaconStateError),
}

@ -123,37 +163,81 @@ pub enum BlockProposalContents<T: EthSpec, Payload: AbstractExecPayload<T>> {
Payload {
payload: Payload,
block_value: Uint256,
// TODO: remove for 4844, since it appears in PayloadAndBlobs
_phantom: PhantomData<T>,
},
PayloadAndBlobs {
payload: Payload,
block_value: Uint256,
kzg_commitments: KzgCommitments<T>,
blobs: <Payload::Sidecar as Sidecar<T>>::BlobItems,
proofs: KzgProofs<T>,
},
}

impl<E: EthSpec, Payload: AbstractExecPayload<E>> TryFrom<GetPayloadResponse<E>>
for BlockProposalContents<E, Payload>
{
type Error = Error;

fn try_from(response: GetPayloadResponse<E>) -> Result<Self, Error> {
let (execution_payload, block_value, maybe_bundle) = response.into();
match maybe_bundle {
Some(bundle) => Ok(Self::PayloadAndBlobs {
payload: execution_payload.into(),
block_value,
kzg_commitments: bundle.commitments,
blobs: BlobItems::try_from_blobs(bundle.blobs)
.map_err(Error::InvalidBlobConversion)?,
proofs: bundle.proofs,
}),
None => Ok(Self::Payload {
payload: execution_payload.into(),
block_value,
}),
}
}
}

#[allow(clippy::type_complexity)]
impl<T: EthSpec, Payload: AbstractExecPayload<T>> BlockProposalContents<T, Payload> {
pub fn payload(&self) -> &Payload {
pub fn deconstruct(
self,
) -> (
Payload,
Option<KzgCommitments<T>>,
Option<<Payload::Sidecar as Sidecar<T>>::BlobItems>,
Option<KzgProofs<T>>,
) {
match self {
Self::Payload {
payload,
block_value: _,
_phantom: _,
} => payload,
} => (payload, None, None, None),
Self::PayloadAndBlobs {
payload,
block_value: _,
kzg_commitments,
blobs,
proofs,
} => (payload, Some(kzg_commitments), Some(blobs), Some(proofs)),
}
}

pub fn payload(&self) -> &Payload {
match self {
Self::Payload { payload, .. } => payload,
Self::PayloadAndBlobs { payload, .. } => payload,
}
}
pub fn to_payload(self) -> Payload {
match self {
Self::Payload {
payload,
block_value: _,
_phantom: _,
} => payload,
Self::Payload { payload, .. } => payload,
Self::PayloadAndBlobs { payload, .. } => payload,
}
}
pub fn block_value(&self) -> &Uint256 {
match self {
Self::Payload {
payload: _,
block_value,
_phantom: _,
} => block_value,
Self::Payload { block_value, .. } => block_value,
Self::PayloadAndBlobs { block_value, .. } => block_value,
}
}
pub fn default_at_fork(fork_name: ForkName) -> Result<Self, BeaconStateError> {
@ -162,9 +246,15 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> BlockProposalContents<T, Paylo
BlockProposalContents::Payload {
payload: Payload::default_at_fork(fork_name)?,
block_value: Uint256::zero(),
_phantom: PhantomData,
}
}
ForkName::Deneb => BlockProposalContents::PayloadAndBlobs {
payload: Payload::default_at_fork(fork_name)?,
block_value: Uint256::zero(),
blobs: Payload::default_blobs_at_fork(fork_name)?,
kzg_commitments: VariableList::default(),
proofs: VariableList::default(),
},
})
}
}
@ -208,6 +298,8 @@ pub enum FailedCondition {
EpochsSinceFinalization,
}

type PayloadContentsRefTuple<'a, T> = (ExecutionPayloadRef<'a, T>, Option<&'a BlobsBundle<T>>);

struct Inner<E: EthSpec> {
engine: Arc<Engine>,
builder: ArcSwapOption<BuilderHttpClient>,
@ -221,6 +313,7 @@ struct Inner<E: EthSpec> {
builder_profit_threshold: Uint256,
log: Logger,
always_prefer_builder_payload: bool,
ignore_builder_override_suggestion_threshold: f32,
/// Track whether the last `newPayload` call errored.
///
/// This is used *only* in the informational sync status endpoint, so that a VC using this
@ -251,6 +344,7 @@ pub struct Config {
pub builder_profit_threshold: u128,
pub execution_timeout_multiplier: Option<u32>,
pub always_prefer_builder_payload: bool,
pub ignore_builder_override_suggestion_threshold: f32,
}

/// Provides access to one execution engine and provides a neat interface for consumption by the
@ -260,6 +354,40 @@ pub struct ExecutionLayer<T: EthSpec> {
inner: Arc<Inner<T>>,
}

/// This function will return the percentage difference between 2 U256 values, using `base_value`
/// as the denominator. It is accurate to 7 decimal places which is about the precision of
/// an f32.
///
/// If some error is encountered in the calculation, None will be returned.
fn percentage_difference_u256(base_value: Uint256, comparison_value: Uint256) -> Option<f32> {
if base_value == Uint256::zero() {
return None;
}
// this is the total supply of ETH in WEI
let max_value = Uint256::from(12u8) * Uint256::exp10(25);
if base_value > max_value || comparison_value > max_value {
return None;
}

// Now we should be able to calculate the difference without division by zero or overflow
const PRECISION: usize = 7;
let precision_factor = Uint256::exp10(PRECISION);
let scaled_difference = if base_value <= comparison_value {
(comparison_value - base_value) * precision_factor
} else {
(base_value - comparison_value) * precision_factor
};
let scaled_proportion = scaled_difference / base_value;
// max value of scaled difference is 1.2 * 10^33, well below the max value of a u128 / f64 / f32
let percentage =
100.0f64 * scaled_proportion.low_u128() as f64 / precision_factor.low_u128() as f64;
if base_value <= comparison_value {
Some(percentage as f32)
} else {
Some(-percentage as f32)
}
}
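
A small worked example of the arithmetic above, with readable values in place of wei-denominated block values: for base_value = 100 and comparison_value = 103, scaled_difference = 3 * 10^7, scaled_proportion = 3 * 10^5, and the result is exactly Some(3.0); swapping the arguments divides by 103 instead and yields roughly Some(-2.9126), since the first argument is always the denominator. The function name in this sketch is illustrative only:

fn percentage_difference_example() {
    let base = Uint256::from(100u64);
    let comparison = Uint256::from(103u64);
    // (103 - 100) * 10^7 / 100 = 300_000; 100.0 * 300_000 / 10^7 = 3.0
    assert_eq!(percentage_difference_u256(base, comparison), Some(3.0));
}
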
|
||||
|
||||
impl<T: EthSpec> ExecutionLayer<T> {
|
||||
/// Instantiate `Self` with an Execution engine specified in `Config`, using JSON-RPC via HTTP.
|
||||
pub fn from_config(config: Config, executor: TaskExecutor, log: Logger) -> Result<Self, Error> {
|
||||
@ -275,6 +403,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
builder_profit_threshold,
|
||||
execution_timeout_multiplier,
|
||||
always_prefer_builder_payload,
|
||||
ignore_builder_override_suggestion_threshold,
|
||||
} = config;
|
||||
|
||||
if urls.len() > 1 {
|
||||
@ -338,6 +467,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
builder_profit_threshold: Uint256::from(builder_profit_threshold),
|
||||
log,
|
||||
always_prefer_builder_payload,
|
||||
ignore_builder_override_suggestion_threshold,
|
||||
last_new_payload_errored: RwLock::new(false),
|
||||
};
|
||||
|
||||
@ -383,12 +513,28 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
}
|
||||
|
||||
/// Cache a full payload, keyed on the `tree_hash_root` of the payload
|
||||
fn cache_payload(&self, payload: ExecutionPayloadRef<T>) -> Option<ExecutionPayload<T>> {
|
||||
self.inner.payload_cache.put(payload.clone_from_ref())
|
||||
fn cache_payload(
|
||||
&self,
|
||||
payload_and_blobs: PayloadContentsRefTuple<T>,
|
||||
) -> Option<FullPayloadContents<T>> {
|
||||
let (payload_ref, maybe_json_blobs_bundle) = payload_and_blobs;
|
||||
|
||||
let payload = payload_ref.clone_from_ref();
|
||||
let maybe_blobs_bundle = maybe_json_blobs_bundle
|
||||
.cloned()
|
||||
.map(|blobs_bundle| BlobsBundle {
|
||||
commitments: blobs_bundle.commitments,
|
||||
proofs: blobs_bundle.proofs,
|
||||
blobs: blobs_bundle.blobs,
|
||||
});
|
||||
|
||||
self.inner
|
||||
.payload_cache
|
||||
.put(FullPayloadContents::new(payload, maybe_blobs_bundle))
|
||||
}
|
||||
|
||||
/// Attempt to retrieve a full payload from the payload cache by the payload root
|
||||
pub fn get_payload_by_root(&self, root: &Hash256) -> Option<ExecutionPayload<T>> {
|
||||
pub fn get_payload_by_root(&self, root: &Hash256) -> Option<FullPayloadContents<T>> {
|
||||
self.inner.payload_cache.get(root)
|
||||
}
|
||||
|
||||
@ -686,6 +832,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
current_fork,
|
||||
)
|
||||
.await
|
||||
.and_then(GetPayloadResponse::try_into)
|
||||
.map(ProvenancedPayload::Local)
|
||||
}
|
||||
};
|
||||
@ -751,11 +898,11 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
let ((relay_result, relay_duration), (local_result, local_duration)) = tokio::join!(
|
||||
timed_future(metrics::GET_BLINDED_PAYLOAD_BUILDER, async {
|
||||
builder
|
||||
.get_builder_header::<T, Payload>(slot, parent_hash, &pubkey)
|
||||
.get_builder_header::<T>(slot, parent_hash, &pubkey)
|
||||
.await
|
||||
}),
|
||||
timed_future(metrics::GET_BLINDED_PAYLOAD_LOCAL, async {
|
||||
self.get_full_payload_caching::<Payload>(
|
||||
self.get_full_payload_caching(
|
||||
parent_hash,
|
||||
payload_attributes,
|
||||
forkchoice_update_params,
|
||||
@ -769,13 +916,13 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
self.log(),
|
||||
"Requested blinded execution payload";
|
||||
"relay_fee_recipient" => match &relay_result {
|
||||
Ok(Some(r)) => format!("{:?}", r.data.message.header.fee_recipient()),
|
||||
Ok(Some(r)) => format!("{:?}", r.data.message.header().fee_recipient()),
|
||||
Ok(None) => "empty response".to_string(),
|
||||
Err(_) => "request failed".to_string(),
|
||||
},
|
||||
"relay_response_ms" => relay_duration.as_millis(),
|
||||
"local_fee_recipient" => match &local_result {
|
||||
Ok(proposal_contents) => format!("{:?}", proposal_contents.payload().fee_recipient()),
|
||||
Ok(get_payload_response) => format!("{:?}", get_payload_response.fee_recipient()),
|
||||
Err(_) => "request failed".to_string()
|
||||
},
|
||||
"local_response_ms" => local_duration.as_millis(),
|
||||
@ -789,43 +936,61 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
"Builder error when requesting payload";
|
||||
"info" => "falling back to local execution client",
|
||||
"relay_error" => ?e,
|
||||
"local_block_hash" => ?local.payload().block_hash(),
|
||||
"local_block_hash" => ?local.block_hash(),
|
||||
"parent_hash" => ?parent_hash,
|
||||
);
|
||||
Ok(ProvenancedPayload::Local(local))
|
||||
Ok(ProvenancedPayload::Local(local.try_into()?))
|
||||
}
|
||||
(Ok(None), Ok(local)) => {
|
||||
info!(
|
||||
self.log(),
|
||||
"Builder did not return a payload";
|
||||
"info" => "falling back to local execution client",
|
||||
"local_block_hash" => ?local.payload().block_hash(),
|
||||
"local_block_hash" => ?local.block_hash(),
|
||||
"parent_hash" => ?parent_hash,
|
||||
);
|
||||
Ok(ProvenancedPayload::Local(local))
|
||||
Ok(ProvenancedPayload::Local(local.try_into()?))
|
||||
}
|
||||
(Ok(Some(relay)), Ok(local)) => {
    let header = &relay.data.message.header;
    let header = &relay.data.message.header();

    info!(
        self.log(),
        "Received local and builder payloads";
        "relay_block_hash" => ?header.block_hash(),
        "local_block_hash" => ?local.payload().block_hash(),
        "local_block_hash" => ?local.block_hash(),
        "parent_hash" => ?parent_hash,
    );

    let relay_value = relay.data.message.value;
    let relay_value = relay.data.message.value();
    let local_value = *local.block_value();

    if !self.inner.always_prefer_builder_payload {
        if local_value >= relay_value {
        if local_value >= *relay_value {
            info!(
                self.log(),
                "Local block is more profitable than relay block";
                "local_block_value" => %local_value,
                "relay_value" => %relay_value
            );
            return Ok(ProvenancedPayload::Local(local));
            return Ok(ProvenancedPayload::Local(local.try_into()?));
        } else if local.should_override_builder().unwrap_or(false) {
            let percentage_difference =
                percentage_difference_u256(local_value, *relay_value);
            if percentage_difference.map_or(false, |percentage| {
                percentage
                    < self
                        .inner
                        .ignore_builder_override_suggestion_threshold
            }) {
                info!(
                    self.log(),
                    "Using local payload because execution engine suggested we ignore builder payload";
                    "local_block_value" => %local_value,
                    "relay_value" => %relay_value
                );
                return Ok(ProvenancedPayload::Local(local.try_into()?));
            }
        } else {
            info!(
                self.log(),
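The hunk above encodes the payload-selection rule: keep the local payload whenever it is at least as valuable as the relay bid, and also when the execution engine flags `should_override_builder` and the relay's advantage stays below `ignore_builder_override_suggestion_threshold`. A self-contained sketch of that rule, using plain `u128`/`f32` values in place of `Uint256` (illustrative only, not the Lighthouse API):

```rust
/// Sketch of the local-vs-builder decision above. Field and threshold names
/// mirror the surrounding code; the numeric types are simplified stand-ins.
fn prefer_local_payload(
    local_value: u128,
    relay_value: u128,
    engine_says_override: bool,
    ignore_builder_override_suggestion_threshold: f32,
    always_prefer_builder_payload: bool,
) -> bool {
    if always_prefer_builder_payload {
        return false;
    }
    if local_value >= relay_value {
        return true;
    }
    if engine_says_override {
        // Percentage by which the relay bid exceeds the local payload's value.
        let advantage = (relay_value - local_value) as f32 / local_value as f32 * 100.0;
        // Honour the engine's suggestion only while the relay's edge is under the threshold.
        return advantage < ignore_builder_override_suggestion_threshold;
    }
    false
}

fn main() {
    // Local wins outright when it is at least as valuable.
    assert!(prefer_local_payload(10, 10, false, 10.0, false));
    // Engine override honoured only while the relay's edge is under the threshold.
    assert!(prefer_local_payload(100, 105, true, 10.0, false));
    assert!(!prefer_local_payload(100, 150, true, 10.0, false));
}
```

Expressing the rule as a pure function like this keeps the threshold behaviour easy to exercise in isolation.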
@ -840,18 +1005,12 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
&relay,
|
||||
parent_hash,
|
||||
payload_attributes,
|
||||
Some(local.payload().block_number()),
|
||||
Some(local.block_number()),
|
||||
self.inner.builder_profit_threshold,
|
||||
current_fork,
|
||||
spec,
|
||||
) {
|
||||
Ok(()) => Ok(ProvenancedPayload::Builder(
|
||||
BlockProposalContents::Payload {
|
||||
payload: relay.data.message.header,
|
||||
block_value: relay.data.message.value,
|
||||
_phantom: PhantomData,
|
||||
},
|
||||
)),
|
||||
Ok(()) => Ok(ProvenancedPayload::try_from(relay.data.message)?),
|
||||
Err(reason) if !reason.payload_invalid() => {
|
||||
info!(
|
||||
self.log(),
|
||||
@ -861,7 +1020,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
"relay_block_hash" => ?header.block_hash(),
|
||||
"parent_hash" => ?parent_hash,
|
||||
);
|
||||
Ok(ProvenancedPayload::Local(local))
|
||||
Ok(ProvenancedPayload::Local(local.try_into()?))
|
||||
}
|
||||
Err(reason) => {
|
||||
metrics::inc_counter_vec(
|
||||
@ -876,12 +1035,12 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
"relay_block_hash" => ?header.block_hash(),
|
||||
"parent_hash" => ?parent_hash,
|
||||
);
|
||||
Ok(ProvenancedPayload::Local(local))
|
||||
Ok(ProvenancedPayload::Local(local.try_into()?))
|
||||
}
|
||||
}
|
||||
}
|
||||
(Ok(Some(relay)), Err(local_error)) => {
|
||||
let header = &relay.data.message.header;
|
||||
let header = &relay.data.message.header();
|
||||
|
||||
info!(
|
||||
self.log(),
|
||||
@ -900,22 +1059,12 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
current_fork,
|
||||
spec,
|
||||
) {
|
||||
Ok(()) => Ok(ProvenancedPayload::Builder(
|
||||
BlockProposalContents::Payload {
|
||||
payload: relay.data.message.header,
|
||||
block_value: relay.data.message.value,
|
||||
_phantom: PhantomData,
|
||||
},
|
||||
)),
|
||||
Ok(()) => Ok(ProvenancedPayload::try_from(relay.data.message)?),
|
||||
// If the payload is valid then use it. The local EE failed
|
||||
// to produce a payload so we have no alternative.
|
||||
Err(e) if !e.payload_invalid() => Ok(ProvenancedPayload::Builder(
|
||||
BlockProposalContents::Payload {
|
||||
payload: relay.data.message.header,
|
||||
block_value: relay.data.message.value,
|
||||
_phantom: PhantomData,
|
||||
},
|
||||
)),
|
||||
Err(e) if !e.payload_invalid() => {
|
||||
Ok(ProvenancedPayload::try_from(relay.data.message)?)
|
||||
}
|
||||
Err(reason) => {
|
||||
metrics::inc_counter_vec(
|
||||
&metrics::EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS,
|
||||
@ -983,17 +1132,18 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
current_fork,
|
||||
)
|
||||
.await
|
||||
.and_then(GetPayloadResponse::try_into)
|
||||
.map(ProvenancedPayload::Local)
|
||||
}
|
||||
|
||||
/// Get a full payload without caching its result in the execution layer's payload cache.
|
||||
async fn get_full_payload<Payload: AbstractExecPayload<T>>(
|
||||
async fn get_full_payload(
|
||||
&self,
|
||||
parent_hash: ExecutionBlockHash,
|
||||
payload_attributes: &PayloadAttributes,
|
||||
forkchoice_update_params: ForkchoiceUpdateParameters,
|
||||
current_fork: ForkName,
|
||||
) -> Result<BlockProposalContents<T, Payload>, Error> {
|
||||
) -> Result<GetPayloadResponse<T>, Error> {
|
||||
self.get_full_payload_with(
|
||||
parent_hash,
|
||||
payload_attributes,
|
||||
@ -1005,13 +1155,13 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
}
|
||||
|
||||
/// Get a full payload and cache its result in the execution layer's payload cache.
|
||||
async fn get_full_payload_caching<Payload: AbstractExecPayload<T>>(
|
||||
async fn get_full_payload_caching(
|
||||
&self,
|
||||
parent_hash: ExecutionBlockHash,
|
||||
payload_attributes: &PayloadAttributes,
|
||||
forkchoice_update_params: ForkchoiceUpdateParameters,
|
||||
current_fork: ForkName,
|
||||
) -> Result<BlockProposalContents<T, Payload>, Error> {
|
||||
) -> Result<GetPayloadResponse<T>, Error> {
|
||||
self.get_full_payload_with(
|
||||
parent_hash,
|
||||
payload_attributes,
|
||||
@ -1022,14 +1172,17 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
.await
|
||||
}
|
||||
|
||||
async fn get_full_payload_with<Payload: AbstractExecPayload<T>>(
|
||||
async fn get_full_payload_with(
|
||||
&self,
|
||||
parent_hash: ExecutionBlockHash,
|
||||
payload_attributes: &PayloadAttributes,
|
||||
forkchoice_update_params: ForkchoiceUpdateParameters,
|
||||
current_fork: ForkName,
|
||||
f: fn(&ExecutionLayer<T>, ExecutionPayloadRef<T>) -> Option<ExecutionPayload<T>>,
|
||||
) -> Result<BlockProposalContents<T, Payload>, Error> {
|
||||
cache_fn: fn(
|
||||
&ExecutionLayer<T>,
|
||||
PayloadContentsRefTuple<T>,
|
||||
) -> Option<FullPayloadContents<T>>,
|
||||
) -> Result<GetPayloadResponse<T>, Error> {
|
||||
self.engine()
|
||||
.request(move |engine| async move {
|
||||
let payload_id = if let Some(id) = engine
|
||||
@ -1082,7 +1235,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
}
|
||||
};
|
||||
|
||||
let payload_fut = async {
|
||||
let payload_response = async {
|
||||
debug!(
|
||||
self.log(),
|
||||
"Issuing engine_getPayload";
|
||||
@ -1092,36 +1245,30 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
"parent_hash" => ?parent_hash,
|
||||
);
|
||||
engine.api.get_payload::<T>(current_fork, payload_id).await
|
||||
};
|
||||
let payload_response = payload_fut.await;
|
||||
let (execution_payload, block_value) = payload_response.map(|payload_response| {
|
||||
if payload_response.execution_payload_ref().fee_recipient() != payload_attributes.suggested_fee_recipient() {
|
||||
error!(
|
||||
self.log(),
|
||||
"Inconsistent fee recipient";
|
||||
"msg" => "The fee recipient returned from the Execution Engine differs \
|
||||
from the suggested_fee_recipient set on the beacon node. This could \
|
||||
indicate that fees are being diverted to another address. Please \
|
||||
ensure that the value of suggested_fee_recipient is set correctly and \
|
||||
that the Execution Engine is trusted.",
|
||||
"fee_recipient" => ?payload_response.execution_payload_ref().fee_recipient(),
|
||||
"suggested_fee_recipient" => ?payload_attributes.suggested_fee_recipient(),
|
||||
);
|
||||
}
|
||||
if f(self, payload_response.execution_payload_ref()).is_some() {
|
||||
warn!(
|
||||
self.log(),
|
||||
"Duplicate payload cached, this might indicate redundant proposal \
|
||||
attempts."
|
||||
);
|
||||
}
|
||||
payload_response.into()
|
||||
})?;
|
||||
Ok(BlockProposalContents::Payload {
|
||||
payload: execution_payload.into(),
|
||||
block_value,
|
||||
_phantom: PhantomData,
|
||||
})
|
||||
}.await?;
|
||||
|
||||
if payload_response.execution_payload_ref().fee_recipient() != payload_attributes.suggested_fee_recipient() {
|
||||
error!(
|
||||
self.log(),
|
||||
"Inconsistent fee recipient";
|
||||
"msg" => "The fee recipient returned from the Execution Engine differs \
|
||||
from the suggested_fee_recipient set on the beacon node. This could \
|
||||
indicate that fees are being diverted to another address. Please \
|
||||
ensure that the value of suggested_fee_recipient is set correctly and \
|
||||
that the Execution Engine is trusted.",
|
||||
"fee_recipient" => ?payload_response.execution_payload_ref().fee_recipient(),
|
||||
"suggested_fee_recipient" => ?payload_attributes.suggested_fee_recipient(),
|
||||
);
|
||||
}
|
||||
if cache_fn(self, (payload_response.execution_payload_ref(), payload_response.blobs_bundle().ok())).is_some() {
|
||||
warn!(
|
||||
self.log(),
|
||||
"Duplicate payload cached, this might indicate redundant proposal \
|
||||
attempts."
|
||||
);
|
||||
}
|
||||
|
||||
Ok(payload_response)
|
||||
})
|
||||
.await
|
||||
.map_err(Box::new)
|
||||
@ -1131,24 +1278,25 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
/// Maps to the `engine_newPayload` JSON-RPC call.
|
||||
pub async fn notify_new_payload(
|
||||
&self,
|
||||
execution_payload: &ExecutionPayload<T>,
|
||||
new_payload_request: NewPayloadRequest<T>,
|
||||
) -> Result<PayloadStatus, Error> {
|
||||
let _timer = metrics::start_timer_vec(
|
||||
&metrics::EXECUTION_LAYER_REQUEST_TIMES,
|
||||
&[metrics::NEW_PAYLOAD],
|
||||
);
|
||||
|
||||
let block_hash = new_payload_request.block_hash();
|
||||
trace!(
|
||||
self.log(),
|
||||
"Issuing engine_newPayload";
|
||||
"parent_hash" => ?execution_payload.parent_hash(),
|
||||
"block_hash" => ?execution_payload.block_hash(),
|
||||
"block_number" => execution_payload.block_number(),
|
||||
"parent_hash" => ?new_payload_request.parent_hash(),
|
||||
"block_hash" => ?block_hash,
|
||||
"block_number" => ?new_payload_request.block_number(),
|
||||
);
|
||||
|
||||
let result = self
|
||||
.engine()
|
||||
.request(|engine| engine.api.new_payload(execution_payload.clone()))
|
||||
.request(|engine| engine.api.new_payload(new_payload_request))
|
||||
.await;
|
||||
|
||||
if let Ok(status) = &result {
|
||||
@ -1159,7 +1307,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
}
|
||||
*self.inner.last_new_payload_errored.write().await = result.is_err();
|
||||
|
||||
process_payload_status(execution_payload.block_hash(), result, self.log())
|
||||
process_payload_status(block_hash, result, self.log())
|
||||
.map_err(Box::new)
|
||||
.map_err(Error::EngineError)
|
||||
}
|
||||
@ -1576,6 +1724,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
let payload = match fork {
|
||||
ForkName::Merge => ExecutionPayloadMerge::default().into(),
|
||||
ForkName::Capella => ExecutionPayloadCapella::default().into(),
|
||||
ForkName::Deneb => ExecutionPayloadDeneb::default().into(),
|
||||
ForkName::Base | ForkName::Altair => {
|
||||
return Err(Error::InvalidForkForPayload);
|
||||
}
|
||||
@ -1643,6 +1792,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
return match fork {
|
||||
ForkName::Merge => Ok(Some(ExecutionPayloadMerge::default().into())),
|
||||
ForkName::Capella => Ok(Some(ExecutionPayloadCapella::default().into())),
|
||||
ForkName::Deneb => Ok(Some(ExecutionPayloadDeneb::default().into())),
|
||||
ForkName::Base | ForkName::Altair => Err(ApiError::UnsupportedForkVariant(
|
||||
format!("called get_payload_by_hash_from_engine with {}", fork),
|
||||
)),
|
||||
@ -1659,15 +1809,15 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
let transactions = VariableList::new(
|
||||
block
|
||||
.transactions()
|
||||
.iter()
|
||||
.map(|transaction| VariableList::new(transaction.rlp().to_vec()))
|
||||
.collect::<Result<_, _>>()
|
||||
.map_err(ApiError::DeserializeTransaction)?,
|
||||
)
|
||||
.map_err(ApiError::DeserializeTransactions)?;
|
||||
let convert_transactions = |transactions: Vec<EthersTransaction>| {
|
||||
VariableList::new(
|
||||
transactions
|
||||
.into_iter()
|
||||
.map(|tx| VariableList::new(tx.rlp().to_vec()))
|
||||
.collect::<Result<Vec<_>, ssz_types::Error>>()?,
|
||||
)
|
||||
.map_err(ApiError::SszError)
|
||||
};
|
||||
|
||||
let payload = match block {
|
||||
ExecutionBlockWithTransactions::Merge(merge_block) => {
|
||||
@ -1685,7 +1835,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
extra_data: merge_block.extra_data,
|
||||
base_fee_per_gas: merge_block.base_fee_per_gas,
|
||||
block_hash: merge_block.block_hash,
|
||||
transactions,
|
||||
transactions: convert_transactions(merge_block.transactions)?,
|
||||
})
|
||||
}
|
||||
ExecutionBlockWithTransactions::Capella(capella_block) => {
|
||||
@ -1711,10 +1861,39 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
extra_data: capella_block.extra_data,
|
||||
base_fee_per_gas: capella_block.base_fee_per_gas,
|
||||
block_hash: capella_block.block_hash,
|
||||
transactions,
|
||||
transactions: convert_transactions(capella_block.transactions)?,
|
||||
withdrawals,
|
||||
})
|
||||
}
|
||||
ExecutionBlockWithTransactions::Deneb(deneb_block) => {
|
||||
let withdrawals = VariableList::new(
|
||||
deneb_block
|
||||
.withdrawals
|
||||
.into_iter()
|
||||
.map(Into::into)
|
||||
.collect(),
|
||||
)
|
||||
.map_err(ApiError::DeserializeWithdrawals)?;
|
||||
ExecutionPayload::Deneb(ExecutionPayloadDeneb {
|
||||
parent_hash: deneb_block.parent_hash,
|
||||
fee_recipient: deneb_block.fee_recipient,
|
||||
state_root: deneb_block.state_root,
|
||||
receipts_root: deneb_block.receipts_root,
|
||||
logs_bloom: deneb_block.logs_bloom,
|
||||
prev_randao: deneb_block.prev_randao,
|
||||
block_number: deneb_block.block_number,
|
||||
gas_limit: deneb_block.gas_limit,
|
||||
gas_used: deneb_block.gas_used,
|
||||
timestamp: deneb_block.timestamp,
|
||||
extra_data: deneb_block.extra_data,
|
||||
base_fee_per_gas: deneb_block.base_fee_per_gas,
|
||||
block_hash: deneb_block.block_hash,
|
||||
transactions: convert_transactions(deneb_block.transactions)?,
|
||||
withdrawals,
|
||||
blob_gas_used: deneb_block.blob_gas_used,
|
||||
excess_blob_gas: deneb_block.excess_blob_gas,
|
||||
})
|
||||
}
|
||||
};
|
||||
|
||||
Ok(Some(payload))
|
||||
@ -1723,8 +1902,8 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
pub async fn propose_blinded_beacon_block(
|
||||
&self,
|
||||
block_root: Hash256,
|
||||
block: &SignedBeaconBlock<T, BlindedPayload<T>>,
|
||||
) -> Result<ExecutionPayload<T>, Error> {
|
||||
block: &SignedBlockContents<T, BlindedPayload<T>>,
|
||||
) -> Result<FullPayloadContents<T>, Error> {
|
||||
debug!(
|
||||
self.log(),
|
||||
"Sending block to builder";
|
||||
@ -1743,11 +1922,12 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
.await;
|
||||
|
||||
match &payload_result {
|
||||
Ok(payload) => {
|
||||
Ok(unblinded_response) => {
|
||||
metrics::inc_counter_vec(
|
||||
&metrics::EXECUTION_LAYER_BUILDER_REVEAL_PAYLOAD_OUTCOME,
|
||||
&[metrics::SUCCESS],
|
||||
);
|
||||
let payload = unblinded_response.payload_ref();
|
||||
info!(
|
||||
self.log(),
|
||||
"Builder successfully revealed payload";
|
||||
@ -1771,6 +1951,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
"relay_response_ms" => duration.as_millis(),
|
||||
"block_root" => ?block_root,
|
||||
"parent_hash" => ?block
|
||||
.signed_block()
|
||||
.message()
|
||||
.execution_payload()
|
||||
.map(|payload| format!("{}", payload.parent_hash()))
|
||||
@ -1889,8 +2070,8 @@ impl fmt::Display for InvalidBuilderPayload {
|
||||
}
|
||||
|
||||
/// Perform some cursory, non-exhaustive validation of the bid returned from the builder.
|
||||
fn verify_builder_bid<T: EthSpec, Payload: AbstractExecPayload<T>>(
|
||||
bid: &ForkVersionedResponse<SignedBuilderBid<T, Payload>>,
|
||||
fn verify_builder_bid<T: EthSpec>(
|
||||
bid: &ForkVersionedResponse<SignedBuilderBid<T>>,
|
||||
parent_hash: ExecutionBlockHash,
|
||||
payload_attributes: &PayloadAttributes,
|
||||
block_number: Option<u64>,
|
||||
@ -1899,11 +2080,11 @@ fn verify_builder_bid<T: EthSpec, Payload: AbstractExecPayload<T>>(
|
||||
spec: &ChainSpec,
|
||||
) -> Result<(), Box<InvalidBuilderPayload>> {
|
||||
let is_signature_valid = bid.data.verify_signature(spec);
|
||||
let header = &bid.data.message.header;
|
||||
let payload_value = bid.data.message.value;
|
||||
let header = &bid.data.message.header();
|
||||
let payload_value = bid.data.message.value();
|
||||
|
||||
// Avoid logging values that we can't represent with our Prometheus library.
|
||||
let payload_value_gwei = bid.data.message.value / 1_000_000_000;
|
||||
let payload_value_gwei = bid.data.message.value() / 1_000_000_000;
|
||||
if payload_value_gwei <= Uint256::from(i64::max_value()) {
|
||||
metrics::set_gauge_vec(
|
||||
&metrics::EXECUTION_LAYER_PAYLOAD_BIDS,
|
||||
@ -1917,12 +2098,12 @@ fn verify_builder_bid<T: EthSpec, Payload: AbstractExecPayload<T>>(
|
||||
.ok()
|
||||
.cloned()
|
||||
.map(|withdrawals| Withdrawals::<T>::from(withdrawals).tree_hash_root());
|
||||
let payload_withdrawals_root = header.withdrawals_root().ok();
|
||||
let payload_withdrawals_root = header.withdrawals_root().ok().copied();
|
||||
|
||||
if payload_value < profit_threshold {
|
||||
if *payload_value < profit_threshold {
|
||||
Err(Box::new(InvalidBuilderPayload::LowValue {
|
||||
profit_threshold,
|
||||
payload_value,
|
||||
payload_value: *payload_value,
|
||||
}))
|
||||
} else if header.parent_hash() != parent_hash {
|
||||
Err(Box::new(InvalidBuilderPayload::ParentHash {
|
||||
@ -1952,7 +2133,7 @@ fn verify_builder_bid<T: EthSpec, Payload: AbstractExecPayload<T>>(
|
||||
} else if !is_signature_valid {
|
||||
Err(Box::new(InvalidBuilderPayload::Signature {
|
||||
signature: bid.data.signature.clone(),
|
||||
pubkey: bid.data.message.pubkey,
|
||||
pubkey: *bid.data.message.pubkey(),
|
||||
}))
|
||||
} else if payload_withdrawals_root != expected_withdrawals_root {
|
||||
Err(Box::new(InvalidBuilderPayload::WithdrawalsRoot {
|
||||
@ -1973,13 +2154,6 @@ async fn timed_future<F: Future<Output = T>, T>(metric: &str, future: F) -> (T,
|
||||
(result, duration)
|
||||
}
|
||||
|
||||
fn noop<T: EthSpec>(
    _: &ExecutionLayer<T>,
    _: ExecutionPayloadRef<T>,
) -> Option<ExecutionPayload<T>> {
    None
}

#[cfg(test)]
/// Returns the duration since the unix epoch.
fn timestamp_now() -> u64 {
@ -1989,6 +2163,13 @@ fn timestamp_now() -> u64 {
    .as_secs()
}

fn noop<T: EthSpec>(
    _: &ExecutionLayer<T>,
    _: PayloadContentsRefTuple<T>,
) -> Option<FullPayloadContents<T>> {
    None
}
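`noop` is the caching hook passed to `get_full_payload_with` when the caller does not want the freshly built payload cached; the caching variant stores the payload (and blobs bundle) in the payload cache instead. A minimal sketch of that fn-pointer hook pattern with stand-in types (none of these names are the real Lighthouse types):

```rust
// Stand-in cache type; the real code passes &ExecutionLayer and uses interior mutability.
struct Cache(Vec<String>);

fn cache_it(cache: &mut Cache, payload: &str) -> Option<String> {
    cache.0.push(payload.to_string());
    None
}

fn noop_hook(_cache: &mut Cache, _payload: &str) -> Option<String> {
    None
}

fn fetch_payload(
    cache: &mut Cache,
    hook: fn(&mut Cache, &str) -> Option<String>,
) -> String {
    let payload = String::from("payload");
    // The caller decides whether the freshly built payload is cached.
    let _previous = hook(cache, &payload);
    payload
}

fn main() {
    let mut cache = Cache(Vec::new());
    fetch_payload(&mut cache, cache_it); // caching variant
    fetch_payload(&mut cache, noop_hook); // non-caching variant
    assert_eq!(cache.0.len(), 1);
}
```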
#[cfg(test)]
mod test {
    use super::*;
@ -2134,4 +2315,42 @@ mod test {
        })
        .await;
    }

    #[tokio::test]
    async fn percentage_difference_u256_tests() {
        // ensure function returns `None` when base value is zero
        assert_eq!(percentage_difference_u256(0.into(), 1.into()), None);
        // ensure function returns `None` when either value is greater than 120 Million ETH
        let max_value = Uint256::from(12u8) * Uint256::exp10(25);
        assert_eq!(
            percentage_difference_u256(1u8.into(), max_value + Uint256::from(1u8)),
            None
        );
        assert_eq!(
            percentage_difference_u256(max_value + Uint256::from(1u8), 1u8.into()),
            None
        );
        // it should work up to max value
        assert_eq!(
            percentage_difference_u256(max_value, max_value / Uint256::from(2u8)),
            Some(-50f32)
        );
        // should work when base value is greater than comparison value
        assert_eq!(
            percentage_difference_u256(4u8.into(), 3u8.into()),
            Some(-25f32)
        );
        // should work when comparison value is greater than base value
        assert_eq!(
            percentage_difference_u256(4u8.into(), 5u8.into()),
            Some(25f32)
        );
        // should be accurate to 7 decimal places
        let result =
            percentage_difference_u256(Uint256::from(31415926u64), Uint256::from(13371337u64))
                .expect("should get percentage");
        // result = -57.4377116
        assert!(result > -57.43772);
        assert!(result <= -57.43771);
    }
}
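The assertions above pin down `percentage_difference_u256`'s contract: `None` for a zero base or for values above roughly 120 million ETH in wei, otherwise the signed percentage change from the first argument to the second. A sketch consistent with those assertions, written over `u128` so it runs standalone (not the Lighthouse implementation, which operates on `Uint256`):

```rust
/// Signed percentage change from `base` to `comparison`.
/// Returns `None` for a zero base or for values beyond ~120M ETH in wei,
/// so the float conversion stays meaningful.
fn percentage_difference_sketch(base: u128, comparison: u128) -> Option<f32> {
    let max_wei: u128 = 12 * 10u128.pow(25);
    if base == 0 || base > max_wei || comparison > max_wei {
        return None;
    }
    let base_f = base as f64;
    let comparison_f = comparison as f64;
    Some(((comparison_f - base_f) / base_f * 100.0) as f32)
}

fn main() {
    assert_eq!(percentage_difference_sketch(0, 1), None);
    assert_eq!(percentage_difference_sketch(4, 3), Some(-25.0));
    assert_eq!(percentage_difference_sketch(4, 5), Some(25.0));
}
```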
@ -1,13 +1,14 @@
use eth2::types::FullPayloadContents;
use lru::LruCache;
use parking_lot::Mutex;
use tree_hash::TreeHash;
use types::{EthSpec, ExecutionPayload, Hash256};
use types::{EthSpec, Hash256};

pub const DEFAULT_PAYLOAD_CACHE_SIZE: usize = 10;

/// A cache mapping execution payloads by tree hash roots.
pub struct PayloadCache<T: EthSpec> {
    payloads: Mutex<LruCache<PayloadCacheId, ExecutionPayload<T>>>,
    payloads: Mutex<LruCache<PayloadCacheId, FullPayloadContents<T>>>,
}

#[derive(Hash, PartialEq, Eq)]
@ -22,16 +23,16 @@ impl<T: EthSpec> Default for PayloadCache<T> {
}

impl<T: EthSpec> PayloadCache<T> {
    pub fn put(&self, payload: ExecutionPayload<T>) -> Option<ExecutionPayload<T>> {
        let root = payload.tree_hash_root();
    pub fn put(&self, payload: FullPayloadContents<T>) -> Option<FullPayloadContents<T>> {
        let root = payload.payload_ref().tree_hash_root();
        self.payloads.lock().put(PayloadCacheId(root), payload)
    }

    pub fn pop(&self, root: &Hash256) -> Option<ExecutionPayload<T>> {
    pub fn pop(&self, root: &Hash256) -> Option<FullPayloadContents<T>> {
        self.payloads.lock().pop(&PayloadCacheId(*root))
    }

    pub fn get(&self, hash: &Hash256) -> Option<ExecutionPayload<T>> {
    pub fn get(&self, hash: &Hash256) -> Option<FullPayloadContents<T>> {
        self.payloads.lock().get(&PayloadCacheId(*hash)).cloned()
    }
}
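The cache now stores `FullPayloadContents` (payload plus any blobs bundle) keyed by the payload's tree hash root. A minimal sketch of the same root-keyed cache pattern using only std types, with a `String` standing in for the payload and the caller supplying the root; the real cache is a bounded LRU and derives the root via `tree_hash_root()`:

```rust
use std::collections::HashMap;
use std::sync::Mutex;

// Simplified stand-in: an unbounded map instead of an LRU, keyed by a 32-byte root.
#[derive(Default)]
struct MiniPayloadCache {
    payloads: Mutex<HashMap<[u8; 32], String>>,
}

impl MiniPayloadCache {
    fn put(&self, root: [u8; 32], payload: String) -> Option<String> {
        self.payloads.lock().unwrap().insert(root, payload)
    }

    fn pop(&self, root: &[u8; 32]) -> Option<String> {
        self.payloads.lock().unwrap().remove(root)
    }

    fn get(&self, root: &[u8; 32]) -> Option<String> {
        self.payloads.lock().unwrap().get(root).cloned()
    }
}

fn main() {
    let cache = MiniPayloadCache::default();
    let root = [7u8; 32];
    assert!(cache.put(root, "payload".to_string()).is_none());
    assert!(cache.get(&root).is_some());
    assert!(cache.pop(&root).is_some());
    assert!(cache.get(&root).is_none());
}
```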
@ -1,4 +1,5 @@
|
||||
use crate::engines::ForkchoiceState;
|
||||
use crate::EthersTransaction;
|
||||
use crate::{
|
||||
engine_api::{
|
||||
json_structures::{
|
||||
@ -8,15 +9,24 @@ use crate::{
|
||||
},
|
||||
ExecutionBlockWithTransactions,
|
||||
};
|
||||
use eth2::types::BlobsBundle;
|
||||
use kzg::Kzg;
|
||||
use parking_lot::Mutex;
|
||||
use rand::{rngs::StdRng, Rng, SeedableRng};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use ssz_types::VariableList;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use tree_hash::TreeHash;
|
||||
use tree_hash_derive::TreeHash;
|
||||
use types::{
|
||||
EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge,
|
||||
ForkName, Hash256, Uint256,
|
||||
BlobSidecar, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella,
|
||||
ExecutionPayloadDeneb, ExecutionPayloadHeader, ExecutionPayloadMerge, ForkName, Hash256,
|
||||
Transaction, Transactions, Uint256,
|
||||
};
|
||||
|
||||
use super::DEFAULT_TERMINAL_BLOCK;
|
||||
|
||||
const GAS_LIMIT: u64 = 16384;
|
||||
const GAS_USED: u64 = GAS_LIMIT - 1;
|
||||
|
||||
@ -118,6 +128,19 @@ pub struct ExecutionBlockGenerator<T: EthSpec> {
|
||||
* Post-merge fork triggers
|
||||
*/
|
||||
pub shanghai_time: Option<u64>, // withdrawals
|
||||
pub cancun_time: Option<u64>, // deneb
|
||||
/*
|
||||
* deneb stuff
|
||||
*/
|
||||
pub blobs_bundles: HashMap<PayloadId, BlobsBundle<T>>,
|
||||
pub kzg: Option<Arc<Kzg<T::Kzg>>>,
|
||||
rng: Arc<Mutex<StdRng>>,
|
||||
}
|
||||
|
||||
fn make_rng() -> Arc<Mutex<StdRng>> {
|
||||
// Nondeterminism in tests is a highly undesirable thing. Seed the RNG to some arbitrary
|
||||
// but fixed value for reproducibility.
|
||||
Arc::new(Mutex::new(StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64)))
|
||||
}
|
||||
|
||||
impl<T: EthSpec> ExecutionBlockGenerator<T> {
|
||||
@ -126,6 +149,8 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
|
||||
terminal_block_number: u64,
|
||||
terminal_block_hash: ExecutionBlockHash,
|
||||
shanghai_time: Option<u64>,
|
||||
cancun_time: Option<u64>,
|
||||
kzg: Option<Kzg<T::Kzg>>,
|
||||
) -> Self {
|
||||
let mut gen = Self {
|
||||
head_block: <_>::default(),
|
||||
@ -139,6 +164,10 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
|
||||
next_payload_id: 0,
|
||||
payload_ids: <_>::default(),
|
||||
shanghai_time,
|
||||
cancun_time,
|
||||
blobs_bundles: <_>::default(),
|
||||
kzg: kzg.map(Arc::new),
|
||||
rng: make_rng(),
|
||||
};
|
||||
|
||||
gen.insert_pow_block(0).unwrap();
|
||||
@ -171,9 +200,12 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
|
||||
}
|
||||
|
||||
pub fn get_fork_at_timestamp(&self, timestamp: u64) -> ForkName {
    match self.shanghai_time {
        Some(fork_time) if timestamp >= fork_time => ForkName::Capella,
        _ => ForkName::Merge,
    match self.cancun_time {
        Some(fork_time) if timestamp >= fork_time => ForkName::Deneb,
        _ => match self.shanghai_time {
            Some(fork_time) if timestamp >= fork_time => ForkName::Capella,
            _ => ForkName::Merge,
        },
    }
}
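The updated `get_fork_at_timestamp` nests the Capella (Shanghai) check inside the Deneb (Cancun) check so the newest activated fork wins. A standalone sketch of that selection, with a simplified fork enum and hypothetical activation times:

```rust
#[derive(Clone, Copy, PartialEq, Debug)]
enum Fork {
    Merge,
    Capella,
    Deneb,
}

/// Pick the newest fork whose activation timestamp has passed.
/// `shanghai_time` / `cancun_time` mirror the generator's optional fork triggers.
fn fork_at_timestamp(timestamp: u64, shanghai_time: Option<u64>, cancun_time: Option<u64>) -> Fork {
    match cancun_time {
        Some(t) if timestamp >= t => Fork::Deneb,
        _ => match shanghai_time {
            Some(t) if timestamp >= t => Fork::Capella,
            _ => Fork::Merge,
        },
    }
}

fn main() {
    // Capella at t=10, Deneb at t=20.
    assert_eq!(fork_at_timestamp(5, Some(10), Some(20)), Fork::Merge);
    assert_eq!(fork_at_timestamp(15, Some(10), Some(20)), Fork::Capella);
    assert_eq!(fork_at_timestamp(25, Some(10), Some(20)), Fork::Deneb);
}
```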
@ -249,10 +281,15 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
|
||||
finalized_block_hash
|
||||
));
|
||||
}
|
||||
let parent_hash = if block_number == 0 {
|
||||
ExecutionBlockHash::zero()
|
||||
let block = if block_number == 0 {
|
||||
generate_genesis_block(self.terminal_total_difficulty, self.terminal_block_number)?
|
||||
} else if let Some(block) = self.block_by_number(block_number - 1) {
|
||||
block.block_hash()
|
||||
generate_pow_block(
|
||||
self.terminal_total_difficulty,
|
||||
self.terminal_block_number,
|
||||
block_number,
|
||||
block.block_hash(),
|
||||
)?
|
||||
} else {
|
||||
return Err(format!(
|
||||
"parent with block number {} not found",
|
||||
@ -260,13 +297,6 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
|
||||
));
|
||||
};
|
||||
|
||||
let block = generate_pow_block(
|
||||
self.terminal_total_difficulty,
|
||||
self.terminal_block_number,
|
||||
block_number,
|
||||
parent_hash,
|
||||
)?;
|
||||
|
||||
// Insert block into block tree
|
||||
self.insert_block(Block::PoW(block))?;
|
||||
|
||||
@ -327,10 +357,10 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
|
||||
Ok(hash)
|
||||
}
|
||||
|
||||
// This does not reject duplicate blocks inserted. This lets us re-use the same execution
|
||||
// block generator for multiple beacon chains which is useful in testing.
|
||||
pub fn insert_block(&mut self, block: Block<T>) -> Result<ExecutionBlockHash, String> {
|
||||
if self.blocks.contains_key(&block.block_hash()) {
|
||||
return Err(format!("{:?} is already known", block.block_hash()));
|
||||
} else if block.parent_hash() != ExecutionBlockHash::zero()
|
||||
if block.parent_hash() != ExecutionBlockHash::zero()
|
||||
&& !self.blocks.contains_key(&block.parent_hash())
|
||||
{
|
||||
return Err(format!("parent block {:?} is unknown", block.parent_hash()));
|
||||
@ -388,6 +418,10 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
|
||||
self.payload_ids.get(id).cloned()
|
||||
}
|
||||
|
||||
pub fn get_blobs_bundle(&mut self, id: &PayloadId) -> Option<BlobsBundle<T>> {
|
||||
self.blobs_bundles.get(id).cloned()
|
||||
}
|
||||
|
||||
pub fn new_payload(&mut self, payload: ExecutionPayload<T>) -> PayloadStatusV1 {
|
||||
let parent = if let Some(parent) = self.blocks.get(&payload.parent_hash()) {
|
||||
parent
|
||||
@ -424,14 +458,20 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
|
||||
forkchoice_state: ForkchoiceState,
|
||||
payload_attributes: Option<PayloadAttributes>,
|
||||
) -> Result<JsonForkchoiceUpdatedV1Response, String> {
|
||||
if let Some(payload) = self
|
||||
.pending_payloads
|
||||
.remove(&forkchoice_state.head_block_hash)
|
||||
{
|
||||
// This is meant to cover starting post-merge transition at genesis. Useful for
|
||||
// testing Capella forks and later.
|
||||
let head_block_hash = forkchoice_state.head_block_hash;
|
||||
if let Some(genesis_pow_block) = self.block_by_number(0) {
|
||||
if genesis_pow_block.block_hash() == head_block_hash {
|
||||
self.terminal_block_hash = head_block_hash;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(payload) = self.pending_payloads.remove(&head_block_hash) {
|
||||
self.insert_block(Block::PoS(payload))?;
|
||||
}
|
||||
|
||||
let unknown_head_block_hash = !self.blocks.contains_key(&forkchoice_state.head_block_hash);
|
||||
let unknown_head_block_hash = !self.blocks.contains_key(&head_block_hash);
|
||||
let unknown_safe_block_hash = forkchoice_state.safe_block_hash
|
||||
!= ExecutionBlockHash::zero()
|
||||
&& !self.blocks.contains_key(&forkchoice_state.safe_block_hash);
|
||||
@ -464,75 +504,15 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
|
||||
|
||||
let parent = self
|
||||
.blocks
|
||||
.get(&forkchoice_state.head_block_hash)
|
||||
.ok_or_else(|| {
|
||||
format!(
|
||||
"unknown parent block {:?}",
|
||||
forkchoice_state.head_block_hash
|
||||
)
|
||||
})?;
|
||||
.get(&head_block_hash)
|
||||
.cloned()
|
||||
.ok_or_else(|| format!("unknown parent block {head_block_hash:?}"))?;
|
||||
|
||||
let id = payload_id_from_u64(self.next_payload_id);
|
||||
self.next_payload_id += 1;
|
||||
|
||||
let mut execution_payload = match &attributes {
|
||||
PayloadAttributes::V1(pa) => ExecutionPayload::Merge(ExecutionPayloadMerge {
|
||||
parent_hash: forkchoice_state.head_block_hash,
|
||||
fee_recipient: pa.suggested_fee_recipient,
|
||||
receipts_root: Hash256::repeat_byte(42),
|
||||
state_root: Hash256::repeat_byte(43),
|
||||
logs_bloom: vec![0; 256].into(),
|
||||
prev_randao: pa.prev_randao,
|
||||
block_number: parent.block_number() + 1,
|
||||
gas_limit: GAS_LIMIT,
|
||||
gas_used: GAS_USED,
|
||||
timestamp: pa.timestamp,
|
||||
extra_data: "block gen was here".as_bytes().to_vec().into(),
|
||||
base_fee_per_gas: Uint256::one(),
|
||||
block_hash: ExecutionBlockHash::zero(),
|
||||
transactions: vec![].into(),
|
||||
}),
|
||||
PayloadAttributes::V2(pa) => match self.get_fork_at_timestamp(pa.timestamp) {
|
||||
ForkName::Merge => ExecutionPayload::Merge(ExecutionPayloadMerge {
|
||||
parent_hash: forkchoice_state.head_block_hash,
|
||||
fee_recipient: pa.suggested_fee_recipient,
|
||||
receipts_root: Hash256::repeat_byte(42),
|
||||
state_root: Hash256::repeat_byte(43),
|
||||
logs_bloom: vec![0; 256].into(),
|
||||
prev_randao: pa.prev_randao,
|
||||
block_number: parent.block_number() + 1,
|
||||
gas_limit: GAS_LIMIT,
|
||||
gas_used: GAS_USED,
|
||||
timestamp: pa.timestamp,
|
||||
extra_data: "block gen was here".as_bytes().to_vec().into(),
|
||||
base_fee_per_gas: Uint256::one(),
|
||||
block_hash: ExecutionBlockHash::zero(),
|
||||
transactions: vec![].into(),
|
||||
}),
|
||||
ForkName::Capella => ExecutionPayload::Capella(ExecutionPayloadCapella {
|
||||
parent_hash: forkchoice_state.head_block_hash,
|
||||
fee_recipient: pa.suggested_fee_recipient,
|
||||
receipts_root: Hash256::repeat_byte(42),
|
||||
state_root: Hash256::repeat_byte(43),
|
||||
logs_bloom: vec![0; 256].into(),
|
||||
prev_randao: pa.prev_randao,
|
||||
block_number: parent.block_number() + 1,
|
||||
gas_limit: GAS_LIMIT,
|
||||
gas_used: GAS_USED,
|
||||
timestamp: pa.timestamp,
|
||||
extra_data: "block gen was here".as_bytes().to_vec().into(),
|
||||
base_fee_per_gas: Uint256::one(),
|
||||
block_hash: ExecutionBlockHash::zero(),
|
||||
transactions: vec![].into(),
|
||||
withdrawals: pa.withdrawals.clone().into(),
|
||||
}),
|
||||
_ => unreachable!(),
|
||||
},
|
||||
};
|
||||
|
||||
*execution_payload.block_hash_mut() =
|
||||
ExecutionBlockHash::from_root(execution_payload.tree_hash_root());
|
||||
|
||||
let execution_payload =
|
||||
self.build_new_execution_payload(head_block_hash, &parent, id, &attributes)?;
|
||||
self.payload_ids.insert(id, execution_payload);
|
||||
|
||||
Some(id)
|
||||
@ -559,12 +539,224 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
|
||||
payload_id: id.map(Into::into),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn build_new_execution_payload(
|
||||
&mut self,
|
||||
head_block_hash: ExecutionBlockHash,
|
||||
parent: &Block<T>,
|
||||
id: PayloadId,
|
||||
attributes: &PayloadAttributes,
|
||||
) -> Result<ExecutionPayload<T>, String> {
|
||||
let mut execution_payload = match attributes {
|
||||
PayloadAttributes::V1(pa) => ExecutionPayload::Merge(ExecutionPayloadMerge {
|
||||
parent_hash: head_block_hash,
|
||||
fee_recipient: pa.suggested_fee_recipient,
|
||||
receipts_root: Hash256::repeat_byte(42),
|
||||
state_root: Hash256::repeat_byte(43),
|
||||
logs_bloom: vec![0; 256].into(),
|
||||
prev_randao: pa.prev_randao,
|
||||
block_number: parent.block_number() + 1,
|
||||
gas_limit: GAS_LIMIT,
|
||||
gas_used: GAS_USED,
|
||||
timestamp: pa.timestamp,
|
||||
extra_data: "block gen was here".as_bytes().to_vec().into(),
|
||||
base_fee_per_gas: Uint256::one(),
|
||||
block_hash: ExecutionBlockHash::zero(),
|
||||
transactions: vec![].into(),
|
||||
}),
|
||||
PayloadAttributes::V2(pa) => match self.get_fork_at_timestamp(pa.timestamp) {
|
||||
ForkName::Merge => ExecutionPayload::Merge(ExecutionPayloadMerge {
|
||||
parent_hash: head_block_hash,
|
||||
fee_recipient: pa.suggested_fee_recipient,
|
||||
receipts_root: Hash256::repeat_byte(42),
|
||||
state_root: Hash256::repeat_byte(43),
|
||||
logs_bloom: vec![0; 256].into(),
|
||||
prev_randao: pa.prev_randao,
|
||||
block_number: parent.block_number() + 1,
|
||||
gas_limit: GAS_LIMIT,
|
||||
gas_used: GAS_USED,
|
||||
timestamp: pa.timestamp,
|
||||
extra_data: "block gen was here".as_bytes().to_vec().into(),
|
||||
base_fee_per_gas: Uint256::one(),
|
||||
block_hash: ExecutionBlockHash::zero(),
|
||||
transactions: vec![].into(),
|
||||
}),
|
||||
ForkName::Capella => ExecutionPayload::Capella(ExecutionPayloadCapella {
|
||||
parent_hash: head_block_hash,
|
||||
fee_recipient: pa.suggested_fee_recipient,
|
||||
receipts_root: Hash256::repeat_byte(42),
|
||||
state_root: Hash256::repeat_byte(43),
|
||||
logs_bloom: vec![0; 256].into(),
|
||||
prev_randao: pa.prev_randao,
|
||||
block_number: parent.block_number() + 1,
|
||||
gas_limit: GAS_LIMIT,
|
||||
gas_used: GAS_USED,
|
||||
timestamp: pa.timestamp,
|
||||
extra_data: "block gen was here".as_bytes().to_vec().into(),
|
||||
base_fee_per_gas: Uint256::one(),
|
||||
block_hash: ExecutionBlockHash::zero(),
|
||||
transactions: vec![].into(),
|
||||
withdrawals: pa.withdrawals.clone().into(),
|
||||
}),
|
||||
_ => unreachable!(),
|
||||
},
|
||||
PayloadAttributes::V3(pa) => ExecutionPayload::Deneb(ExecutionPayloadDeneb {
|
||||
parent_hash: head_block_hash,
|
||||
fee_recipient: pa.suggested_fee_recipient,
|
||||
receipts_root: Hash256::repeat_byte(42),
|
||||
state_root: Hash256::repeat_byte(43),
|
||||
logs_bloom: vec![0; 256].into(),
|
||||
prev_randao: pa.prev_randao,
|
||||
block_number: parent.block_number() + 1,
|
||||
gas_limit: GAS_LIMIT,
|
||||
gas_used: GAS_USED,
|
||||
timestamp: pa.timestamp,
|
||||
extra_data: "block gen was here".as_bytes().to_vec().into(),
|
||||
base_fee_per_gas: Uint256::one(),
|
||||
block_hash: ExecutionBlockHash::zero(),
|
||||
transactions: vec![].into(),
|
||||
withdrawals: pa.withdrawals.clone().into(),
|
||||
blob_gas_used: 0,
|
||||
excess_blob_gas: 0,
|
||||
}),
|
||||
};
|
||||
|
||||
match execution_payload.fork_name() {
|
||||
ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => {}
|
||||
ForkName::Deneb => {
|
||||
// get random number between 0 and Max Blobs
|
||||
let mut rng = self.rng.lock();
|
||||
let num_blobs = rng.gen::<usize>() % (T::max_blobs_per_block() + 1);
|
||||
let kzg = self.kzg.as_ref().ok_or("kzg not initialized")?;
|
||||
let (bundle, transactions) = generate_random_blobs(num_blobs, kzg, &mut *rng)?;
|
||||
for tx in Vec::from(transactions) {
|
||||
execution_payload
|
||||
.transactions_mut()
|
||||
.push(tx)
|
||||
.map_err(|_| "transactions are full".to_string())?;
|
||||
}
|
||||
self.blobs_bundles.insert(id, bundle);
|
||||
}
|
||||
}
|
||||
|
||||
*execution_payload.block_hash_mut() =
|
||||
ExecutionBlockHash::from_root(execution_payload.tree_hash_root());
|
||||
Ok(execution_payload)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn generate_random_blobs<T: EthSpec, R: Rng>(
    n_blobs: usize,
    kzg: &Kzg<T::Kzg>,
    rng: &mut R,
) -> Result<(BlobsBundle<T>, Transactions<T>), String> {
    let mut bundle = BlobsBundle::<T>::default();
    let mut transactions = vec![];
    for blob_index in 0..n_blobs {
        let random_valid_sidecar = BlobSidecar::<T>::random_valid(rng, kzg)?;

        let BlobSidecar {
            blob,
            kzg_commitment,
            kzg_proof,
            ..
        } = random_valid_sidecar;

        let tx = static_valid_tx::<T>()
            .map_err(|e| format!("error creating valid tx SSZ bytes: {:?}", e))?;

        transactions.push(tx);
        bundle
            .blobs
            .push(blob)
            .map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?;
        bundle
            .commitments
            .push(kzg_commitment)
            .map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?;
        bundle
            .proofs
            .push(kzg_proof)
            .map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?;
    }

    Ok((bundle, transactions.into()))
}

fn static_valid_tx<T: EthSpec>() -> Result<Transaction<T::MaxBytesPerTransaction>, String> {
    // This is a real transaction hex encoded, but we don't care about the contents of the transaction.
    let transaction: EthersTransaction = serde_json::from_str(
        r#"{
            "blockHash":"0x1d59ff54b1eb26b013ce3cb5fc9dab3705b415a67127a003c3e61eb445bb8df2",
            "blockNumber":"0x5daf3b",
            "from":"0xa7d9ddbe1f17865597fbd27ec712455208b6b76d",
            "gas":"0xc350",
            "gasPrice":"0x4a817c800",
            "hash":"0x88df016429689c079f3b2f6ad39fa052532c56795b733da78a91ebe6a713944b",
            "input":"0x68656c6c6f21",
            "nonce":"0x15",
            "to":"0xf02c1c8e6114b1dbe8937a39260b5b0a374432bb",
            "transactionIndex":"0x41",
            "value":"0xf3dbb76162000",
            "v":"0x25",
            "r":"0x1b5e176d927f8e9ab405058b2d2457392da3e20f328b16ddabcebc33eaac5fea",
            "s":"0x4ba69724e8f69de52f0125ad8b3c5c2cef33019bac3249e2c0a2192766d1721c"
        }"#,
    )
    .unwrap();
    VariableList::new(transaction.rlp().to_vec())
        .map_err(|e| format!("Failed to convert transaction to SSZ: {:?}", e))
}

fn payload_id_from_u64(n: u64) -> PayloadId {
    n.to_le_bytes()
}
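`generate_random_blobs` relies on the bundle's `blobs`, `commitments` and `proofs` lists staying index-aligned with the blob transactions it appends. A simplified sketch of that invariant with plain `Vec`s in place of the fixed-capacity SSZ lists (types here are illustrative):

```rust
/// Simplified stand-in for the mock EL's per-payload blob bookkeeping.
#[derive(Default)]
struct MiniBundle {
    blobs: Vec<Vec<u8>>,
    commitments: Vec<[u8; 48]>,
    proofs: Vec<[u8; 48]>,
}

impl MiniBundle {
    fn push(&mut self, blob: Vec<u8>, commitment: [u8; 48], proof: [u8; 48]) {
        self.blobs.push(blob);
        self.commitments.push(commitment);
        self.proofs.push(proof);
        // The three lists must stay index-aligned: commitment i and proof i belong to blob i.
        debug_assert_eq!(self.blobs.len(), self.commitments.len());
        debug_assert_eq!(self.blobs.len(), self.proofs.len());
    }
}

fn main() {
    let mut bundle = MiniBundle::default();
    bundle.push(vec![0u8; 32], [1u8; 48], [2u8; 48]);
    assert_eq!(bundle.blobs.len(), 1);
}
```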
pub fn generate_genesis_header<T: EthSpec>(
|
||||
spec: &ChainSpec,
|
||||
post_transition_merge: bool,
|
||||
) -> Option<ExecutionPayloadHeader<T>> {
|
||||
let genesis_fork = spec.fork_name_at_slot::<T>(spec.genesis_slot);
|
||||
let genesis_block_hash =
|
||||
generate_genesis_block(spec.terminal_total_difficulty, DEFAULT_TERMINAL_BLOCK)
|
||||
.ok()
|
||||
.map(|block| block.block_hash);
|
||||
match genesis_fork {
|
||||
ForkName::Base | ForkName::Altair => None,
|
||||
ForkName::Merge => {
|
||||
if post_transition_merge {
|
||||
let mut header = ExecutionPayloadHeader::Merge(<_>::default());
|
||||
*header.block_hash_mut() = genesis_block_hash.unwrap_or_default();
|
||||
Some(header)
|
||||
} else {
|
||||
Some(ExecutionPayloadHeader::<T>::Merge(<_>::default()))
|
||||
}
|
||||
}
|
||||
ForkName::Capella => {
|
||||
let mut header = ExecutionPayloadHeader::Capella(<_>::default());
|
||||
*header.block_hash_mut() = genesis_block_hash.unwrap_or_default();
|
||||
Some(header)
|
||||
}
|
||||
ForkName::Deneb => {
|
||||
let mut header = ExecutionPayloadHeader::Deneb(<_>::default());
|
||||
*header.block_hash_mut() = genesis_block_hash.unwrap_or_default();
|
||||
Some(header)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn generate_genesis_block(
|
||||
terminal_total_difficulty: Uint256,
|
||||
terminal_block_number: u64,
|
||||
) -> Result<PoWBlock, String> {
|
||||
generate_pow_block(
|
||||
terminal_total_difficulty,
|
||||
terminal_block_number,
|
||||
0,
|
||||
ExecutionBlockHash::zero(),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn generate_pow_block(
|
||||
terminal_total_difficulty: Uint256,
|
||||
terminal_block_number: u64,
|
||||
@ -618,6 +810,8 @@ mod test {
|
||||
TERMINAL_BLOCK,
|
||||
ExecutionBlockHash::zero(),
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
);
|
||||
|
||||
for i in 0..=TERMINAL_BLOCK {
|
||||
|
@ -93,7 +93,7 @@ pub async fn handle_rpc<T: EthSpec>(
|
||||
.unwrap())
|
||||
}
|
||||
}
|
||||
ENGINE_NEW_PAYLOAD_V1 | ENGINE_NEW_PAYLOAD_V2 => {
|
||||
ENGINE_NEW_PAYLOAD_V1 | ENGINE_NEW_PAYLOAD_V2 | ENGINE_NEW_PAYLOAD_V3 => {
|
||||
let request = match method {
|
||||
ENGINE_NEW_PAYLOAD_V1 => JsonExecutionPayload::V1(
|
||||
get_param::<JsonExecutionPayloadV1<T>>(params, 0)
|
||||
@ -106,7 +106,17 @@ pub async fn handle_rpc<T: EthSpec>(
|
||||
.map(|jep| JsonExecutionPayload::V1(jep))
|
||||
})
|
||||
.map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?,
|
||||
// TODO(4844) add that here..
|
||||
ENGINE_NEW_PAYLOAD_V3 => get_param::<JsonExecutionPayloadV3<T>>(params, 0)
|
||||
.map(|jep| JsonExecutionPayload::V3(jep))
|
||||
.or_else(|_| {
|
||||
get_param::<JsonExecutionPayloadV2<T>>(params, 0)
|
||||
.map(|jep| JsonExecutionPayload::V2(jep))
|
||||
.or_else(|_| {
|
||||
get_param::<JsonExecutionPayloadV1<T>>(params, 0)
|
||||
.map(|jep| JsonExecutionPayload::V1(jep))
|
||||
})
|
||||
})
|
||||
.map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
@ -144,7 +154,32 @@ pub async fn handle_rpc<T: EthSpec>(
|
||||
));
|
||||
}
|
||||
}
|
||||
// TODO(4844) add 4844 error checking here
ForkName::Deneb => {
    if method == ENGINE_NEW_PAYLOAD_V1 || method == ENGINE_NEW_PAYLOAD_V2 {
        return Err((
            format!("{} called after deneb fork!", method),
            GENERIC_ERROR_CODE,
        ));
    }
    if matches!(request, JsonExecutionPayload::V1(_)) {
        return Err((
            format!(
                "{} called with `ExecutionPayloadV1` after deneb fork!",
                method
            ),
            GENERIC_ERROR_CODE,
        ));
    }
    if matches!(request, JsonExecutionPayload::V2(_)) {
        return Err((
            format!(
                "{} called with `ExecutionPayloadV2` after deneb fork!",
                method
            ),
            GENERIC_ERROR_CODE,
        ));
    }
}
_ => unreachable!(),
};
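The mock server rejects `engine_newPayload` calls whose version is older than the fork in force at the payload's timestamp (V1/V2 after Deneb, V1 payloads after Capella, and so on). A compact sketch of that version-versus-fork gate, with illustrative names rather than the mock's real constants:

```rust
enum Fork {
    Merge,
    Capella,
    Deneb,
}

/// Reject newPayload calls made with a method version older than the active fork requires.
fn check_new_payload_version(fork: Fork, method_version: u8) -> Result<(), String> {
    let required = match fork {
        Fork::Merge => 1,
        Fork::Capella => 2,
        Fork::Deneb => 3,
    };
    if method_version < required {
        return Err(format!(
            "engine_newPayloadV{method_version} called for a fork that requires at least V{required}"
        ));
    }
    Ok(())
}

fn main() {
    assert!(check_new_payload_version(Fork::Merge, 1).is_ok());
    assert!(check_new_payload_version(Fork::Capella, 1).is_err());
    assert!(check_new_payload_version(Fork::Deneb, 2).is_err());
    assert!(check_new_payload_version(Fork::Deneb, 3).is_ok());
}
```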
@ -180,7 +215,7 @@ pub async fn handle_rpc<T: EthSpec>(
|
||||
|
||||
Ok(serde_json::to_value(JsonPayloadStatusV1::from(response)).unwrap())
|
||||
}
|
||||
ENGINE_GET_PAYLOAD_V1 | ENGINE_GET_PAYLOAD_V2 => {
|
||||
ENGINE_GET_PAYLOAD_V1 | ENGINE_GET_PAYLOAD_V2 | ENGINE_GET_PAYLOAD_V3 => {
|
||||
let request: JsonPayloadIdRequest =
|
||||
get_param(params, 0).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?;
|
||||
let id = request.into();
|
||||
@ -196,6 +231,8 @@ pub async fn handle_rpc<T: EthSpec>(
|
||||
)
|
||||
})?;
|
||||
|
||||
let maybe_blobs = ctx.execution_block_generator.write().get_blobs_bundle(&id);
|
||||
|
||||
// validate method called correctly according to shanghai fork time
|
||||
if ctx
|
||||
.execution_block_generator
|
||||
@ -209,7 +246,19 @@ pub async fn handle_rpc<T: EthSpec>(
|
||||
FORK_REQUEST_MISMATCH_ERROR_CODE,
|
||||
));
|
||||
}
|
||||
// TODO(4844) add 4844 error checking here
|
||||
// validate method called correctly according to deneb fork time
|
||||
if ctx
|
||||
.execution_block_generator
|
||||
.read()
|
||||
.get_fork_at_timestamp(response.timestamp())
|
||||
== ForkName::Deneb
|
||||
&& (method == ENGINE_GET_PAYLOAD_V1 || method == ENGINE_GET_PAYLOAD_V2)
|
||||
{
|
||||
return Err((
|
||||
format!("{} called after deneb fork!", method),
|
||||
FORK_REQUEST_MISMATCH_ERROR_CODE,
|
||||
));
|
||||
}
|
||||
|
||||
match method {
|
||||
ENGINE_GET_PAYLOAD_V1 => {
|
||||
@ -230,11 +279,31 @@ pub async fn handle_rpc<T: EthSpec>(
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}),
|
||||
ENGINE_GET_PAYLOAD_V3 => Ok(match JsonExecutionPayload::from(response) {
|
||||
JsonExecutionPayload::V3(execution_payload) => {
|
||||
serde_json::to_value(JsonGetPayloadResponseV3 {
|
||||
execution_payload,
|
||||
block_value: DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI.into(),
|
||||
blobs_bundle: maybe_blobs
|
||||
.ok_or((
|
||||
"No blobs returned despite V3 Payload".to_string(),
|
||||
GENERIC_ERROR_CODE,
|
||||
))?
|
||||
.into(),
|
||||
should_override_builder: false,
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}),
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
ENGINE_FORKCHOICE_UPDATED_V1 | ENGINE_FORKCHOICE_UPDATED_V2 => {
|
||||
ENGINE_FORKCHOICE_UPDATED_V1
|
||||
| ENGINE_FORKCHOICE_UPDATED_V2
|
||||
| ENGINE_FORKCHOICE_UPDATED_V3 => {
|
||||
let forkchoice_state: JsonForkchoiceStateV1 =
|
||||
get_param(params, 0).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?;
|
||||
let payload_attributes = match method {
|
||||
@ -260,7 +329,7 @@ pub async fn handle_rpc<T: EthSpec>(
|
||||
.map(|opt| opt.map(JsonPayloadAttributes::V1))
|
||||
.transpose()
|
||||
}
|
||||
ForkName::Capella => {
|
||||
ForkName::Capella | ForkName::Deneb => {
|
||||
get_param::<Option<JsonPayloadAttributesV2>>(params, 1)
|
||||
.map(|opt| opt.map(JsonPayloadAttributes::V2))
|
||||
.transpose()
|
||||
@ -272,10 +341,15 @@ pub async fn handle_rpc<T: EthSpec>(
|
||||
})
|
||||
.map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?
|
||||
}
|
||||
ENGINE_FORKCHOICE_UPDATED_V3 => {
|
||||
get_param::<Option<JsonPayloadAttributesV3>>(params, 1)
|
||||
.map(|opt| opt.map(JsonPayloadAttributes::V3))
|
||||
.map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?
|
||||
}
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
// validate method called correctly according to shanghai fork time
|
||||
// validate method called correctly according to fork time
|
||||
if let Some(pa) = payload_attributes.as_ref() {
|
||||
match ctx
|
||||
.execution_block_generator
|
||||
@ -300,6 +374,15 @@ pub async fn handle_rpc<T: EthSpec>(
|
||||
FORK_REQUEST_MISMATCH_ERROR_CODE,
|
||||
));
|
||||
}
|
||||
if method == ENGINE_FORKCHOICE_UPDATED_V3 {
|
||||
return Err((
|
||||
format!(
|
||||
"{} called with `JsonPayloadAttributesV3` before Deneb fork!",
|
||||
method
|
||||
),
|
||||
GENERIC_ERROR_CODE,
|
||||
));
|
||||
}
|
||||
if matches!(pa, JsonPayloadAttributes::V1(_)) {
|
||||
return Err((
|
||||
format!(
|
||||
@ -310,7 +393,20 @@ pub async fn handle_rpc<T: EthSpec>(
|
||||
));
|
||||
}
|
||||
}
|
||||
// TODO(4844) add 4844 error checking here
|
||||
ForkName::Deneb => {
|
||||
if method == ENGINE_FORKCHOICE_UPDATED_V1 {
|
||||
return Err((
|
||||
format!("{} called after Deneb fork!", method),
|
||||
FORK_REQUEST_MISMATCH_ERROR_CODE,
|
||||
));
|
||||
}
|
||||
if method == ENGINE_FORKCHOICE_UPDATED_V2 {
|
||||
return Err((
|
||||
format!("{} called after Deneb fork!", method),
|
||||
FORK_REQUEST_MISMATCH_ERROR_CODE,
|
||||
));
|
||||
}
|
||||
}
|
||||
_ => unreachable!(),
|
||||
};
|
||||
}
|
||||
|
@ -1,6 +1,6 @@
|
||||
use crate::test_utils::{DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_JWT_SECRET};
|
||||
use crate::{Config, ExecutionLayer, PayloadAttributes};
|
||||
use eth2::types::{BlockId, StateId, ValidatorId};
|
||||
use eth2::types::{BlobsBundle, BlockId, StateId, ValidatorId};
|
||||
use eth2::{BeaconNodeHttpClient, Timeouts};
|
||||
use fork_choice::ForkchoiceUpdateParameters;
|
||||
use parking_lot::RwLock;
|
||||
@ -14,12 +14,14 @@ use std::time::Duration;
|
||||
use task_executor::TaskExecutor;
|
||||
use tempfile::NamedTempFile;
|
||||
use tree_hash::TreeHash;
|
||||
use types::builder_bid::{BuilderBid, SignedBuilderBid};
|
||||
use types::payload::BlindedPayloadRefMut;
|
||||
use types::builder_bid::{
|
||||
BuilderBid, BuilderBidCapella, BuilderBidDeneb, BuilderBidMerge, SignedBuilderBid,
|
||||
};
|
||||
use types::{
|
||||
Address, BeaconState, BlindedPayload, ChainSpec, EthSpec, ExecPayload, ForkName,
|
||||
ForkVersionedResponse, Hash256, PublicKeyBytes, Signature, SignedBlindedBeaconBlock,
|
||||
SignedRoot, SignedValidatorRegistrationData, Slot, Uint256,
|
||||
Address, BeaconState, ChainSpec, EthSpec, ExecPayload, ExecutionPayload,
|
||||
ExecutionPayloadHeaderRefMut, ForkName, ForkVersionedResponse, Hash256, PublicKeyBytes,
|
||||
Signature, SignedBlindedBeaconBlock, SignedRoot, SignedValidatorRegistrationData, Slot,
|
||||
Uint256,
|
||||
};
|
||||
use types::{ExecutionBlockHash, SecretKey};
|
||||
use warp::{Filter, Rejection};
|
||||
@ -69,82 +71,108 @@ pub trait BidStuff<E: EthSpec> {
|
||||
|
||||
fn sign_builder_message(&mut self, sk: &SecretKey, spec: &ChainSpec) -> Signature;
|
||||
|
||||
fn to_signed_bid(self, signature: Signature) -> SignedBuilderBid<E, BlindedPayload<E>>;
|
||||
fn to_signed_bid(self, signature: Signature) -> SignedBuilderBid<E>;
|
||||
}
|
||||
|
||||
impl<E: EthSpec> BidStuff<E> for BuilderBid<E, BlindedPayload<E>> {
|
||||
impl<E: EthSpec> BidStuff<E> for BuilderBid<E> {
|
||||
fn set_fee_recipient(&mut self, fee_recipient: Address) {
|
||||
match self.header.to_mut() {
|
||||
BlindedPayloadRefMut::Merge(payload) => {
|
||||
payload.execution_payload_header.fee_recipient = fee_recipient;
|
||||
match self.to_mut().header_mut() {
|
||||
ExecutionPayloadHeaderRefMut::Merge(header) => {
|
||||
header.fee_recipient = fee_recipient;
|
||||
}
|
||||
BlindedPayloadRefMut::Capella(payload) => {
|
||||
payload.execution_payload_header.fee_recipient = fee_recipient;
|
||||
ExecutionPayloadHeaderRefMut::Capella(header) => {
|
||||
header.fee_recipient = fee_recipient;
|
||||
}
|
||||
ExecutionPayloadHeaderRefMut::Deneb(header) => {
|
||||
header.fee_recipient = fee_recipient;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn set_gas_limit(&mut self, gas_limit: u64) {
|
||||
match self.header.to_mut() {
|
||||
BlindedPayloadRefMut::Merge(payload) => {
|
||||
payload.execution_payload_header.gas_limit = gas_limit;
|
||||
match self.to_mut().header_mut() {
|
||||
ExecutionPayloadHeaderRefMut::Merge(header) => {
|
||||
header.gas_limit = gas_limit;
|
||||
}
|
||||
BlindedPayloadRefMut::Capella(payload) => {
|
||||
payload.execution_payload_header.gas_limit = gas_limit;
|
||||
ExecutionPayloadHeaderRefMut::Capella(header) => {
|
||||
header.gas_limit = gas_limit;
|
||||
}
|
||||
ExecutionPayloadHeaderRefMut::Deneb(header) => {
|
||||
header.gas_limit = gas_limit;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn set_value(&mut self, value: Uint256) {
|
||||
self.value = value;
|
||||
*self.value_mut() = value;
|
||||
}
|
||||
|
||||
fn set_parent_hash(&mut self, parent_hash: Hash256) {
|
||||
match self.header.to_mut() {
|
||||
BlindedPayloadRefMut::Merge(payload) => {
|
||||
payload.execution_payload_header.parent_hash =
|
||||
ExecutionBlockHash::from_root(parent_hash);
|
||||
match self.to_mut().header_mut() {
|
||||
ExecutionPayloadHeaderRefMut::Merge(header) => {
|
||||
header.parent_hash = ExecutionBlockHash::from_root(parent_hash);
|
||||
}
|
||||
BlindedPayloadRefMut::Capella(payload) => {
|
||||
payload.execution_payload_header.parent_hash =
|
||||
ExecutionBlockHash::from_root(parent_hash);
|
||||
ExecutionPayloadHeaderRefMut::Capella(header) => {
|
||||
header.parent_hash = ExecutionBlockHash::from_root(parent_hash);
|
||||
}
|
||||
ExecutionPayloadHeaderRefMut::Deneb(header) => {
|
||||
header.parent_hash = ExecutionBlockHash::from_root(parent_hash);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn set_prev_randao(&mut self, prev_randao: Hash256) {
|
||||
match self.header.to_mut() {
|
||||
BlindedPayloadRefMut::Merge(payload) => {
|
||||
payload.execution_payload_header.prev_randao = prev_randao;
|
||||
match self.to_mut().header_mut() {
|
||||
ExecutionPayloadHeaderRefMut::Merge(header) => {
|
||||
header.prev_randao = prev_randao;
|
||||
}
|
||||
BlindedPayloadRefMut::Capella(payload) => {
|
||||
payload.execution_payload_header.prev_randao = prev_randao;
|
||||
ExecutionPayloadHeaderRefMut::Capella(header) => {
|
||||
header.prev_randao = prev_randao;
|
||||
}
|
||||
ExecutionPayloadHeaderRefMut::Deneb(header) => {
|
||||
header.prev_randao = prev_randao;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn set_block_number(&mut self, block_number: u64) {
|
||||
match self.header.to_mut() {
|
||||
BlindedPayloadRefMut::Merge(payload) => {
|
||||
payload.execution_payload_header.block_number = block_number;
|
||||
match self.to_mut().header_mut() {
|
||||
ExecutionPayloadHeaderRefMut::Merge(header) => {
|
||||
header.block_number = block_number;
|
||||
}
|
||||
BlindedPayloadRefMut::Capella(payload) => {
|
||||
payload.execution_payload_header.block_number = block_number;
|
||||
ExecutionPayloadHeaderRefMut::Capella(header) => {
|
||||
header.block_number = block_number;
|
||||
}
|
||||
ExecutionPayloadHeaderRefMut::Deneb(header) => {
|
||||
header.block_number = block_number;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn set_timestamp(&mut self, timestamp: u64) {
|
||||
match self.header.to_mut() {
|
||||
BlindedPayloadRefMut::Merge(payload) => {
|
||||
payload.execution_payload_header.timestamp = timestamp;
|
||||
match self.to_mut().header_mut() {
|
||||
ExecutionPayloadHeaderRefMut::Merge(header) => {
|
||||
header.timestamp = timestamp;
|
||||
}
|
||||
BlindedPayloadRefMut::Capella(payload) => {
|
||||
payload.execution_payload_header.timestamp = timestamp;
|
||||
ExecutionPayloadHeaderRefMut::Capella(header) => {
|
||||
header.timestamp = timestamp;
|
||||
}
|
||||
ExecutionPayloadHeaderRefMut::Deneb(header) => {
|
||||
header.timestamp = timestamp;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn set_withdrawals_root(&mut self, withdrawals_root: Hash256) {
|
||||
match self.header.to_mut() {
|
||||
BlindedPayloadRefMut::Merge(_) => {
|
||||
match self.to_mut().header_mut() {
|
||||
ExecutionPayloadHeaderRefMut::Merge(_) => {
|
||||
panic!("no withdrawals before capella")
|
||||
}
|
||||
BlindedPayloadRefMut::Capella(payload) => {
|
||||
payload.execution_payload_header.withdrawals_root = withdrawals_root;
|
||||
ExecutionPayloadHeaderRefMut::Capella(header) => {
|
||||
header.withdrawals_root = withdrawals_root;
|
||||
}
|
||||
ExecutionPayloadHeaderRefMut::Deneb(header) => {
|
||||
header.withdrawals_root = withdrawals_root;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -155,7 +183,7 @@ impl<E: EthSpec> BidStuff<E> for BuilderBid<E, BlindedPayload<E>> {
|
||||
sk.sign(message)
|
||||
}
|
||||
|
||||
fn to_signed_bid(self, signature: Signature) -> SignedBuilderBid<E, BlindedPayload<E>> {
|
||||
fn to_signed_bid(self, signature: Signature) -> SignedBuilderBid<E> {
|
||||
SignedBuilderBid {
|
||||
message: self,
|
||||
signature,
|
||||
@ -297,6 +325,9 @@ pub fn serve<E: EthSpec>(
|
||||
SignedBlindedBeaconBlock::Capella(block) => {
|
||||
block.message.body.execution_payload.tree_hash_root()
|
||||
}
|
||||
SignedBlindedBeaconBlock::Deneb(block) => {
|
||||
block.message.body.execution_payload.tree_hash_root()
|
||||
}
|
||||
};
|
||||
|
||||
let fork_name = builder.spec.fork_name_at_slot::<E>(slot);
|
||||
@ -429,15 +460,37 @@ pub fn serve<E: EthSpec>(
|
||||
let prev_randao = head_state
|
||||
.get_randao_mix(head_state.current_epoch())
|
||||
.map_err(|_| reject("couldn't get prev randao"))?;
|
||||
let expected_withdrawals = match fork {
|
||||
ForkName::Base | ForkName::Altair | ForkName::Merge => None,
|
||||
ForkName::Capella | ForkName::Deneb => Some(
|
||||
builder
|
||||
.beacon_client
|
||||
.get_expected_withdrawals(&StateId::Head)
|
||||
.await
|
||||
.unwrap()
|
||||
.data,
|
||||
),
|
||||
};
|
||||
|
||||
let payload_attributes = match fork {
|
||||
ForkName::Merge => {
|
||||
PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, None)
|
||||
}
|
||||
// the withdrawals root is filled in by operations
|
||||
ForkName::Capella => {
|
||||
PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, Some(vec![]))
|
||||
}
|
||||
// the withdrawals root is filled in by operations, but we supply the valid withdrawals
|
||||
// first to avoid polluting the execution block generator with invalid payload attributes
|
||||
// NOTE: this was part of an effort to add payload attribute uniqueness checks,
|
||||
// which was abandoned because it broke too many tests in subtle ways.
|
||||
ForkName::Merge | ForkName::Capella => PayloadAttributes::new(
|
||||
timestamp,
|
||||
*prev_randao,
|
||||
fee_recipient,
|
||||
expected_withdrawals,
|
||||
None,
|
||||
),
|
||||
ForkName::Deneb => PayloadAttributes::new(
|
||||
timestamp,
|
||||
*prev_randao,
|
||||
fee_recipient,
|
||||
expected_withdrawals,
|
||||
Some(head_block_root),
|
||||
),
|
||||
ForkName::Base | ForkName::Altair => {
|
||||
return Err(reject("invalid fork"));
|
||||
}
|
||||
@ -455,9 +508,13 @@ pub fn serve<E: EthSpec>(
|
||||
finalized_hash: Some(finalized_execution_hash),
|
||||
};
|
||||
|
||||
let payload = builder
|
||||
let (payload, _block_value, maybe_blobs_bundle): (
|
||||
ExecutionPayload<E>,
|
||||
Uint256,
|
||||
Option<BlobsBundle<E>>,
|
||||
) = builder
|
||||
.el
|
||||
.get_full_payload_caching::<BlindedPayload<E>>(
|
||||
.get_full_payload_caching(
|
||||
head_execution_hash,
|
||||
&payload_attributes,
|
||||
forkchoice_update_params,
|
||||
@ -465,15 +522,39 @@ pub fn serve<E: EthSpec>(
|
||||
)
|
||||
.await
|
||||
.map_err(|_| reject("couldn't get payload"))?
|
||||
.to_payload()
|
||||
.to_execution_payload_header();
|
||||
.into();
|
||||
|
||||
let mut message = BuilderBid {
|
||||
header: BlindedPayload::from(payload),
|
||||
value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI),
|
||||
pubkey: builder.builder_sk.public_key().compress(),
|
||||
_phantom_data: std::marker::PhantomData,
|
||||
let mut message = match fork {
|
||||
ForkName::Deneb => BuilderBid::Deneb(BuilderBidDeneb {
|
||||
header: payload
|
||||
.as_deneb()
|
||||
.map_err(|_| reject("incorrect payload variant"))?
|
||||
.into(),
|
||||
blinded_blobs_bundle: maybe_blobs_bundle
|
||||
.map(Into::into)
|
||||
.unwrap_or_default(),
|
||||
value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI),
|
||||
pubkey: builder.builder_sk.public_key().compress(),
|
||||
}),
|
||||
ForkName::Capella => BuilderBid::Capella(BuilderBidCapella {
|
||||
header: payload
|
||||
.as_capella()
|
||||
.map_err(|_| reject("incorrect payload variant"))?
|
||||
.into(),
|
||||
value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI),
|
||||
pubkey: builder.builder_sk.public_key().compress(),
|
||||
}),
|
||||
ForkName::Merge => BuilderBid::Merge(BuilderBidMerge {
|
||||
header: payload
|
||||
.as_merge()
|
||||
.map_err(|_| reject("incorrect payload variant"))?
|
||||
.into(),
|
||||
value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI),
|
||||
pubkey: builder.builder_sk.public_key().compress(),
|
||||
}),
|
||||
ForkName::Base | ForkName::Altair => return Err(reject("invalid fork")),
|
||||
};
|
||||
|
||||
message.set_gas_limit(cached_data.gas_limit);
|
||||
|
||||
builder.apply_operations(&mut message);
|
||||
|
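The mock builder hunks above switch to the five-argument `PayloadAttributes::new` (withdrawals, plus a head block root from Deneb onwards) and to a per-fork `BuilderBid` enum that carries a blinded blobs bundle on Deneb. The stand-in sketch below uses made-up types rather than the real Lighthouse ones and only shows the argument shape implied by those calls; the field names here (e.g. `parent_beacon_block_root`) are assumptions for illustration, not taken from this diff.

// Stand-in types: illustrate the five-field shape used by the calls above.
struct PayloadAttributesSketch {
    timestamp: u64,
    prev_randao: [u8; 32],
    fee_recipient: [u8; 20],
    withdrawals: Option<Vec<()>>,               // None before Capella, Some(..) afterwards
    parent_beacon_block_root: Option<[u8; 32]>, // Some(..) only from Deneb onwards
}

fn sketch_attributes(capella_or_later: bool, deneb: bool) -> PayloadAttributesSketch {
    PayloadAttributesSketch {
        timestamp: 0,
        prev_randao: [0u8; 32],
        fee_recipient: [0u8; 20],
        // Mirrors the `Some(vec![])` / `expected_withdrawals` argument above.
        withdrawals: if capella_or_later { Some(vec![]) } else { None },
        // Mirrors the `Some(head_block_root)` passed only in the Deneb arm.
        parent_beacon_block_root: if deneb { Some([0u8; 32]) } else { None },
    }
}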
@ -5,6 +5,7 @@ use crate::{
|
||||
},
|
||||
Config, *,
|
||||
};
|
||||
use kzg::Kzg;
|
||||
use sensitive_url::SensitiveUrl;
|
||||
use task_executor::TaskExecutor;
|
||||
use tempfile::NamedTempFile;
|
||||
@ -29,8 +30,10 @@ impl<T: EthSpec> MockExecutionLayer<T> {
|
||||
DEFAULT_TERMINAL_BLOCK,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()),
|
||||
spec,
|
||||
None,
|
||||
)
|
||||
}
|
||||
|
||||
@ -39,9 +42,11 @@ impl<T: EthSpec> MockExecutionLayer<T> {
|
||||
executor: TaskExecutor,
|
||||
terminal_block: u64,
|
||||
shanghai_time: Option<u64>,
|
||||
cancun_time: Option<u64>,
|
||||
builder_threshold: Option<u128>,
|
||||
jwt_key: Option<JwtKey>,
|
||||
spec: ChainSpec,
|
||||
kzg: Option<Kzg<T::Kzg>>,
|
||||
) -> Self {
|
||||
let handle = executor.handle().unwrap();
|
||||
|
||||
@ -53,6 +58,8 @@ impl<T: EthSpec> MockExecutionLayer<T> {
|
||||
terminal_block,
|
||||
spec.terminal_block_hash,
|
||||
shanghai_time,
|
||||
cancun_time,
|
||||
kzg,
|
||||
);
|
||||
|
||||
let url = SensitiveUrl::parse(&server.url()).unwrap();
|
||||
@ -96,13 +103,8 @@ impl<T: EthSpec> MockExecutionLayer<T> {
|
||||
justified_hash: None,
|
||||
finalized_hash: None,
|
||||
};
|
||||
let payload_attributes = PayloadAttributes::new(
|
||||
timestamp,
|
||||
prev_randao,
|
||||
Address::repeat_byte(42),
|
||||
// FIXME: think about how to handle different forks / withdrawals here..
|
||||
None,
|
||||
);
|
||||
let payload_attributes =
|
||||
PayloadAttributes::new(timestamp, prev_randao, Address::repeat_byte(42), None, None);
|
||||
|
||||
// Insert a proposer to ensure the fork choice updated command works.
|
||||
let slot = Slot::new(0);
|
||||
@ -130,7 +132,7 @@ impl<T: EthSpec> MockExecutionLayer<T> {
|
||||
};
|
||||
let suggested_fee_recipient = self.el.get_suggested_fee_recipient(validator_index).await;
|
||||
let payload_attributes =
|
||||
PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None);
|
||||
PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None, None);
|
||||
let payload: ExecutionPayload<T> = self
|
||||
.el
|
||||
.get_payload::<FullPayload<T>>(
|
||||
@ -138,7 +140,6 @@ impl<T: EthSpec> MockExecutionLayer<T> {
|
||||
&payload_attributes,
|
||||
forkchoice_update_params,
|
||||
builder_params,
|
||||
// FIXME: do we need to consider other forks somehow? What about withdrawals?
|
||||
ForkName::Merge,
|
||||
&self.spec,
|
||||
)
|
||||
@ -165,7 +166,7 @@ impl<T: EthSpec> MockExecutionLayer<T> {
|
||||
};
|
||||
let suggested_fee_recipient = self.el.get_suggested_fee_recipient(validator_index).await;
|
||||
let payload_attributes =
|
||||
PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None);
|
||||
PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None, None);
|
||||
let payload_header = self
|
||||
.el
|
||||
.get_payload::<BlindedPayload<T>>(
|
||||
@ -173,7 +174,6 @@ impl<T: EthSpec> MockExecutionLayer<T> {
|
||||
&payload_attributes,
|
||||
forkchoice_update_params,
|
||||
builder_params,
|
||||
// FIXME: do we need to consider other forks somehow? What about withdrawals?
|
||||
ForkName::Merge,
|
||||
&self.spec,
|
||||
)
|
||||
@ -191,10 +191,15 @@ impl<T: EthSpec> MockExecutionLayer<T> {
|
||||
assert_eq!(
|
||||
self.el
|
||||
.get_payload_by_root(&payload_header.tree_hash_root()),
|
||||
Some(payload.clone())
|
||||
Some(FullPayloadContents::Payload(payload.clone()))
|
||||
);
|
||||
|
||||
let status = self.el.notify_new_payload(&payload).await.unwrap();
|
||||
// TODO: again consider forks
|
||||
let status = self
|
||||
.el
|
||||
.notify_new_payload(payload.try_into().unwrap())
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(status, PayloadStatus::Valid);
|
||||
|
||||
// Use junk values for slot/head-root to ensure there is no payload supplied.
|
||||
|
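The two hunks above thread `cancun_time` and an optional KZG instance from `MockExecutionLayer` down to the execution block generator. A hedged sketch of the resulting call shape follows; it assumes the parameter list in the second hunk belongs to the crate's full constructor (its name sits outside the visible context), and the placeholder values are illustrative only.

// Illustrative call only; the names come from this repo's test utilities.
let mock_el = MockExecutionLayer::new(
    executor,                                                // TaskExecutor
    DEFAULT_TERMINAL_BLOCK,                                  // terminal_block: u64
    Some(shanghai_time),                                     // shanghai_time: Option<u64>
    Some(cancun_time),                                       // cancun_time: Option<u64> (new in this diff)
    None,                                                    // builder_threshold: Option<u128>
    Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()),  // jwt_key
    spec,                                                    // ChainSpec
    None,                                                    // kzg: Option<Kzg<T::Kzg>> (new in this diff)
);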
@ -8,6 +8,7 @@ use bytes::Bytes;
|
||||
use environment::null_logger;
|
||||
use execution_block_generator::PoWBlock;
|
||||
use handle_rpc::handle_rpc;
|
||||
use kzg::Kzg;
|
||||
use parking_lot::{Mutex, RwLock, RwLockWriteGuard};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::json;
|
||||
@ -23,7 +24,10 @@ use types::{EthSpec, ExecutionBlockHash, Uint256};
|
||||
use warp::{http::StatusCode, Filter, Rejection};
|
||||
|
||||
use crate::EngineCapabilities;
|
||||
pub use execution_block_generator::{generate_pow_block, Block, ExecutionBlockGenerator};
|
||||
pub use execution_block_generator::{
|
||||
generate_genesis_block, generate_genesis_header, generate_pow_block, generate_random_blobs,
|
||||
Block, ExecutionBlockGenerator,
|
||||
};
|
||||
pub use hook::Hook;
|
||||
pub use mock_builder::{MockBuilder, Operation};
|
||||
pub use mock_execution_layer::MockExecutionLayer;
|
||||
@ -37,12 +41,15 @@ pub const DEFAULT_BUILDER_PAYLOAD_VALUE_WEI: u128 = 20_000_000_000_000_000;
|
||||
pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities {
|
||||
new_payload_v1: true,
|
||||
new_payload_v2: true,
|
||||
new_payload_v3: true,
|
||||
forkchoice_updated_v1: true,
|
||||
forkchoice_updated_v2: true,
|
||||
forkchoice_updated_v3: true,
|
||||
get_payload_bodies_by_hash_v1: true,
|
||||
get_payload_bodies_by_range_v1: true,
|
||||
get_payload_v1: true,
|
||||
get_payload_v2: true,
|
||||
get_payload_v3: true,
|
||||
};
|
||||
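The expanded capability set above advertises the V3 engine methods needed for Deneb. The small, self-contained illustration below shows how a caller might gate on those flags; the method-name strings follow the public Engine API naming and the flag names follow the struct fields above, but the selection logic itself is only a sketch.

// Illustration only: choose an Engine API method based on the advertised capabilities.
fn pick_get_payload_method(get_payload_v3: bool, get_payload_v2: bool, is_deneb: bool) -> &'static str {
    if is_deneb && get_payload_v3 {
        "engine_getPayloadV3" // required for blobs bundles on Deneb
    } else if get_payload_v2 {
        "engine_getPayloadV2"
    } else {
        "engine_getPayloadV1"
    }
}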
|
||||
mod execution_block_generator;
|
||||
@ -59,6 +66,7 @@ pub struct MockExecutionConfig {
|
||||
pub terminal_block: u64,
|
||||
pub terminal_block_hash: ExecutionBlockHash,
|
||||
pub shanghai_time: Option<u64>,
|
||||
pub cancun_time: Option<u64>,
|
||||
}
|
||||
|
||||
impl Default for MockExecutionConfig {
|
||||
@ -70,6 +78,7 @@ impl Default for MockExecutionConfig {
|
||||
terminal_block_hash: ExecutionBlockHash::zero(),
|
||||
server_config: Config::default(),
|
||||
shanghai_time: None,
|
||||
cancun_time: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -90,10 +99,16 @@ impl<T: EthSpec> MockServer<T> {
|
||||
DEFAULT_TERMINAL_BLOCK,
|
||||
ExecutionBlockHash::zero(),
|
||||
None, // FIXME(capella): should this be the default?
|
||||
None, // FIXME(deneb): should this be the default?
|
||||
None, // FIXME(deneb): should this be the default?
|
||||
)
|
||||
}
|
||||
|
||||
pub fn new_with_config(handle: &runtime::Handle, config: MockExecutionConfig) -> Self {
|
||||
pub fn new_with_config(
|
||||
handle: &runtime::Handle,
|
||||
config: MockExecutionConfig,
|
||||
kzg: Option<Kzg<T::Kzg>>,
|
||||
) -> Self {
|
||||
let MockExecutionConfig {
|
||||
jwt_key,
|
||||
terminal_difficulty,
|
||||
@ -101,6 +116,7 @@ impl<T: EthSpec> MockServer<T> {
|
||||
terminal_block_hash,
|
||||
server_config,
|
||||
shanghai_time,
|
||||
cancun_time,
|
||||
} = config;
|
||||
let last_echo_request = Arc::new(RwLock::new(None));
|
||||
let preloaded_responses = Arc::new(Mutex::new(vec![]));
|
||||
@ -109,6 +125,8 @@ impl<T: EthSpec> MockServer<T> {
|
||||
terminal_block,
|
||||
terminal_block_hash,
|
||||
shanghai_time,
|
||||
cancun_time,
|
||||
kzg,
|
||||
);
|
||||
|
||||
let ctx: Arc<Context<T>> = Arc::new(Context {
|
||||
@ -161,6 +179,7 @@ impl<T: EthSpec> MockServer<T> {
|
||||
*self.ctx.engine_capabilities.write() = engine_capabilities;
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn new(
|
||||
handle: &runtime::Handle,
|
||||
jwt_key: JwtKey,
|
||||
@ -168,6 +187,8 @@ impl<T: EthSpec> MockServer<T> {
|
||||
terminal_block: u64,
|
||||
terminal_block_hash: ExecutionBlockHash,
|
||||
shanghai_time: Option<u64>,
|
||||
cancun_time: Option<u64>,
|
||||
kzg: Option<Kzg<T::Kzg>>,
|
||||
) -> Self {
|
||||
Self::new_with_config(
|
||||
handle,
|
||||
@ -178,7 +199,9 @@ impl<T: EthSpec> MockServer<T> {
|
||||
terminal_block,
|
||||
terminal_block_hash,
|
||||
shanghai_time,
|
||||
cancun_time,
|
||||
},
|
||||
kzg,
|
||||
)
|
||||
}
|
||||

@ -1,10 +1,11 @@
use crate::{state_id::checkpoint_slot_and_execution_optimistic, ExecutionOptimistic};
use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped};
use eth2::types::BlobIndicesQuery;
use eth2::types::BlockId as CoreBlockId;
use std::fmt;
use std::str::FromStr;
use std::sync::Arc;
use types::{EthSpec, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot};
use types::{BlobSidecarList, EthSpec, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot};

/// Wraps `eth2::types::BlockId` and provides a simple way to obtain a block or root for a given
/// `BlockId`.
@ -250,6 +251,37 @@ impl BlockId {
}
}
}

/// Return the `BlobSidecarList` identified by `self`.
pub fn blob_sidecar_list<T: BeaconChainTypes>(
&self,
chain: &BeaconChain<T>,
) -> Result<BlobSidecarList<T::EthSpec>, warp::Rejection> {
let root = self.root(chain)?.0;
chain
.get_blobs(&root)
.map_err(warp_utils::reject::beacon_chain_error)
}

pub fn blob_sidecar_list_filtered<T: BeaconChainTypes>(
&self,
indices: BlobIndicesQuery,
chain: &BeaconChain<T>,
) -> Result<BlobSidecarList<T::EthSpec>, warp::Rejection> {
let blob_sidecar_list = self.blob_sidecar_list(chain)?;
let blob_sidecar_list_filtered = match indices.indices {
Some(vec) => {
let list = blob_sidecar_list
.into_iter()
.filter(|blob_sidecar| vec.contains(&blob_sidecar.index))
.collect();
BlobSidecarList::new(list)
.map_err(|e| warp_utils::reject::custom_server_error(format!("{:?}", e)))?
}
None => blob_sidecar_list,
};
Ok(blob_sidecar_list_filtered)
}
}

impl FromStr for BlockId {

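`blob_sidecar_list_filtered` above keeps only the sidecars whose `index` appears in the optional `indices` query and returns the full list otherwise. A minimal, dependency-free illustration of that behaviour, with plain integers standing in for sidecars:

fn filter_by_index(wanted: Option<Vec<u64>>, all_indices: Vec<u64>) -> Vec<u64> {
    match wanted {
        // Keep only the requested indices, as the handler above does.
        Some(wanted) => all_indices
            .into_iter()
            .filter(|i| wanted.contains(i))
            .collect(),
        // No query parameter: return everything.
        None => all_indices,
    }
}

fn main() {
    assert_eq!(filter_by_index(Some(vec![0, 2]), vec![0, 1, 2, 3]), vec![0, 2]);
    assert_eq!(filter_by_index(None, vec![0, 1]), vec![0, 1]);
}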
62
beacon_node/http_api/src/build_block_contents.rs
Normal file
62
beacon_node/http_api/src/build_block_contents.rs
Normal file
@ -0,0 +1,62 @@
use beacon_chain::BlockProductionError;
use eth2::types::{BeaconBlockAndBlobSidecars, BlindedBeaconBlockAndBlobSidecars, BlockContents};
use types::{
BeaconBlock, BlindedBlobSidecarList, BlindedPayload, BlobSidecarList, EthSpec, ForkName,
FullPayload,
};

type Error = warp::reject::Rejection;
type FullBlockContents<E> = BlockContents<E, FullPayload<E>>;
type BlindedBlockContents<E> = BlockContents<E, BlindedPayload<E>>;

pub fn build_block_contents<E: EthSpec>(
fork_name: ForkName,
block: BeaconBlock<E, FullPayload<E>>,
maybe_blobs: Option<BlobSidecarList<E>>,
) -> Result<FullBlockContents<E>, Error> {
match fork_name {
ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => {
Ok(BlockContents::Block(block))
}
ForkName::Deneb => {
if let Some(blob_sidecars) = maybe_blobs {
let block_and_blobs = BeaconBlockAndBlobSidecars {
block,
blob_sidecars,
};

Ok(BlockContents::BlockAndBlobSidecars(block_and_blobs))
} else {
Err(warp_utils::reject::block_production_error(
BlockProductionError::MissingBlobs,
))
}
}
}
}

pub fn build_blinded_block_contents<E: EthSpec>(
fork_name: ForkName,
block: BeaconBlock<E, BlindedPayload<E>>,
maybe_blobs: Option<BlindedBlobSidecarList<E>>,
) -> Result<BlindedBlockContents<E>, Error> {
match fork_name {
ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => {
Ok(BlockContents::Block(block))
}
ForkName::Deneb => {
if let Some(blinded_blob_sidecars) = maybe_blobs {
let block_and_blobs = BlindedBeaconBlockAndBlobSidecars {
blinded_block: block,
blinded_blob_sidecars,
};

Ok(BlockContents::BlindedBlockAndBlobSidecars(block_and_blobs))
} else {
Err(warp_utils::reject::block_production_error(
BlockProductionError::MissingBlobs,
))
}
}
}
}

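This new module is consumed by the block-production handlers later in the diff. The stand-in enum below (not the real `eth2` `BlockContents`) mirrors the two full-block variants built above and shows how a caller distinguishes them:

// Stand-in enum for illustration; the real type also has a blinded variant.
enum BlockContentsSketch<Block, Blobs> {
    Block(Block),
    BlockAndBlobSidecars(Block, Blobs),
}

fn describe<Block, Blobs>(contents: &BlockContentsSketch<Block, Blobs>) -> &'static str {
    match contents {
        // Pre-Deneb forks: the bare block is returned as before.
        BlockContentsSketch::Block(_) => "block only",
        // Deneb: block and blob sidecars travel together; a missing blob list
        // surfaces earlier as BlockProductionError::MissingBlobs.
        BlockContentsSketch::BlockAndBlobSidecars(_, _) => "block + blob sidecars",
    }
}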
@ -1,8 +1,7 @@
use beacon_chain::store::{metadata::CURRENT_SCHEMA_VERSION, AnchorInfo};
use beacon_chain::store::metadata::CURRENT_SCHEMA_VERSION;
use beacon_chain::{BeaconChain, BeaconChainTypes};
use eth2::lighthouse::DatabaseInfo;
use std::sync::Arc;
use types::SignedBlindedBeaconBlock;

pub fn info<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>,
@ -11,25 +10,13 @@ pub fn info<T: BeaconChainTypes>(
let split = store.get_split_info();
let config = store.get_config().clone();
let anchor = store.get_anchor_info();
let blob_info = store.get_blob_info();

Ok(DatabaseInfo {
schema_version: CURRENT_SCHEMA_VERSION.as_u64(),
config,
split,
anchor,
blob_info,
})
}

pub fn historical_blocks<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>,
blocks: Vec<Arc<SignedBlindedBeaconBlock<T::EthSpec>>>,
) -> Result<AnchorInfo, warp::Rejection> {
chain
.import_historical_block_batch(blocks)
.map_err(warp_utils::reject::beacon_chain_error)?;

let anchor = chain.store.get_anchor_info().ok_or_else(|| {
warp_utils::reject::custom_bad_request("node is not checkpoint synced".to_string())
})?;
Ok(anchor)
}

@ -10,6 +10,7 @@ mod attester_duties;
|
||||
mod block_id;
|
||||
mod block_packing_efficiency;
|
||||
mod block_rewards;
|
||||
mod build_block_contents;
|
||||
mod builder_states;
|
||||
mod database;
|
||||
mod metrics;
|
||||
@ -38,7 +39,8 @@ use bytes::Bytes;
|
||||
use directory::DEFAULT_ROOT_DIR;
|
||||
use eth2::types::{
|
||||
self as api_types, BroadcastValidation, EndpointVersion, ForkChoice, ForkChoiceNode,
|
||||
SkipRandaoVerification, ValidatorId, ValidatorStatus,
|
||||
SignedBlindedBlockContents, SignedBlockContents, SkipRandaoVerification, ValidatorId,
|
||||
ValidatorStatus,
|
||||
};
|
||||
use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage};
|
||||
use lighthouse_version::version_with_platform;
|
||||
@ -75,9 +77,8 @@ use types::{
|
||||
Attestation, AttestationData, AttestationShufflingId, AttesterSlashing, BeaconStateError,
|
||||
BlindedPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload,
|
||||
ProposerPreparationData, ProposerSlashing, RelativeEpoch, SignedAggregateAndProof,
|
||||
SignedBeaconBlock, SignedBlindedBeaconBlock, SignedBlsToExecutionChange,
|
||||
SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot,
|
||||
SyncCommitteeMessage, SyncContributionData,
|
||||
SignedBlsToExecutionChange, SignedContributionAndProof, SignedValidatorRegistrationData,
|
||||
SignedVoluntaryExit, Slot, SyncCommitteeMessage, SyncContributionData,
|
||||
};
|
||||
use validator::pubkey_to_validator_index;
|
||||
use version::{
|
||||
@ -1289,7 +1290,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.and(network_tx_filter.clone())
|
||||
.and(log_filter.clone())
|
||||
.then(
|
||||
move |block: Arc<SignedBeaconBlock<T::EthSpec>>,
|
||||
move |block_contents: SignedBlockContents<T::EthSpec>,
|
||||
task_spawner: TaskSpawner<T::EthSpec>,
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
|
||||
@ -1297,7 +1298,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
|
||||
publish_blocks::publish_block(
|
||||
None,
|
||||
ProvenancedBlock::local(block),
|
||||
ProvenancedBlock::local(block_contents),
|
||||
chain,
|
||||
&network_tx,
|
||||
log,
|
||||
@ -1325,16 +1326,16 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
|
||||
log: Logger| {
|
||||
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
|
||||
let block =
|
||||
SignedBeaconBlock::<T::EthSpec>::from_ssz_bytes(&block_bytes, &chain.spec)
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_bad_request(format!(
|
||||
"invalid SSZ: {e:?}"
|
||||
))
|
||||
})?;
|
||||
let block_contents = SignedBlockContents::<T::EthSpec>::from_ssz_bytes(
|
||||
&block_bytes,
|
||||
&chain.spec,
|
||||
)
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}"))
|
||||
})?;
|
||||
publish_blocks::publish_block(
|
||||
None,
|
||||
ProvenancedBlock::local(Arc::new(block)),
|
||||
ProvenancedBlock::local(block_contents),
|
||||
chain,
|
||||
&network_tx,
|
||||
log,
|
||||
@ -1358,7 +1359,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.and(log_filter.clone())
|
||||
.then(
|
||||
move |validation_level: api_types::BroadcastValidationQuery,
|
||||
block: Arc<SignedBeaconBlock<T::EthSpec>>,
|
||||
block_contents: SignedBlockContents<T::EthSpec>,
|
||||
task_spawner: TaskSpawner<T::EthSpec>,
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
|
||||
@ -1366,7 +1367,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
|
||||
publish_blocks::publish_block(
|
||||
None,
|
||||
ProvenancedBlock::local(block),
|
||||
ProvenancedBlock::local(block_contents),
|
||||
chain,
|
||||
&network_tx,
|
||||
log,
|
||||
@ -1396,16 +1397,16 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
|
||||
log: Logger| {
|
||||
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
|
||||
let block =
|
||||
SignedBeaconBlock::<T::EthSpec>::from_ssz_bytes(&block_bytes, &chain.spec)
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_bad_request(format!(
|
||||
"invalid SSZ: {e:?}"
|
||||
))
|
||||
})?;
|
||||
let block_contents = SignedBlockContents::<T::EthSpec>::from_ssz_bytes(
|
||||
&block_bytes,
|
||||
&chain.spec,
|
||||
)
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}"))
|
||||
})?;
|
||||
publish_blocks::publish_block(
|
||||
None,
|
||||
ProvenancedBlock::local(Arc::new(block)),
|
||||
ProvenancedBlock::local(block_contents),
|
||||
chain,
|
||||
&network_tx,
|
||||
log,
|
||||
@ -1432,14 +1433,14 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.and(network_tx_filter.clone())
|
||||
.and(log_filter.clone())
|
||||
.then(
|
||||
move |block: SignedBlindedBeaconBlock<T::EthSpec>,
|
||||
move |block_contents: SignedBlindedBlockContents<T::EthSpec>,
|
||||
task_spawner: TaskSpawner<T::EthSpec>,
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
|
||||
log: Logger| {
|
||||
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
|
||||
publish_blocks::publish_blinded_block(
|
||||
block,
|
||||
block_contents,
|
||||
chain,
|
||||
&network_tx,
|
||||
log,
|
||||
@ -1468,13 +1469,14 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
|
||||
log: Logger| {
|
||||
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
|
||||
let block = SignedBlindedBeaconBlock::<T::EthSpec>::from_ssz_bytes(
|
||||
&block_bytes,
|
||||
&chain.spec,
|
||||
)
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}"))
|
||||
})?;
|
||||
let block =
|
||||
SignedBlockContents::<T::EthSpec, BlindedPayload<_>>::from_ssz_bytes(
|
||||
&block_bytes,
|
||||
&chain.spec,
|
||||
)
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}"))
|
||||
})?;
|
||||
publish_blocks::publish_blinded_block(
|
||||
block,
|
||||
chain,
|
||||
@ -1500,14 +1502,14 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.and(log_filter.clone())
|
||||
.then(
|
||||
move |validation_level: api_types::BroadcastValidationQuery,
|
||||
block: SignedBlindedBeaconBlock<T::EthSpec>,
|
||||
block_contents: SignedBlindedBlockContents<T::EthSpec>,
|
||||
task_spawner: TaskSpawner<T::EthSpec>,
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
|
||||
log: Logger| {
|
||||
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
|
||||
publish_blocks::publish_blinded_block(
|
||||
block,
|
||||
block_contents,
|
||||
chain,
|
||||
&network_tx,
|
||||
log,
|
||||
@ -1537,13 +1539,14 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
|
||||
log: Logger| {
|
||||
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
|
||||
let block = SignedBlindedBeaconBlock::<T::EthSpec>::from_ssz_bytes(
|
||||
&block_bytes,
|
||||
&chain.spec,
|
||||
)
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}"))
|
||||
})?;
|
||||
let block =
|
||||
SignedBlockContents::<T::EthSpec, BlindedPayload<_>>::from_ssz_bytes(
|
||||
&block_bytes,
|
||||
&chain.spec,
|
||||
)
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}"))
|
||||
})?;
|
||||
publish_blocks::publish_blinded_block(
|
||||
block,
|
||||
chain,
|
||||
@ -1715,6 +1718,49 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
},
|
||||
);

/*
* beacon/blob_sidecars
*/

// GET beacon/blob_sidecars/{block_id}
let get_blobs = eth_v1
.and(warp::path("beacon"))
.and(warp::path("blob_sidecars"))
.and(block_id_or_err)
.and(warp::query::<api_types::BlobIndicesQuery>())
.and(warp::path::end())
.and(task_spawner_filter.clone())
.and(chain_filter.clone())
.and(warp::header::optional::<api_types::Accept>("accept"))
.then(
|block_id: BlockId,
indices: api_types::BlobIndicesQuery,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
accept_header: Option<api_types::Accept>| {
task_spawner.blocking_response_task(Priority::P1, move || {
let blob_sidecar_list_filtered =
block_id.blob_sidecar_list_filtered(indices, &chain)?;
match accept_header {
Some(api_types::Accept::Ssz) => Response::builder()
.status(200)
.header("Content-Type", "application/octet-stream")
.body(blob_sidecar_list_filtered.as_ssz_bytes().into())
.map_err(|e| {
warp_utils::reject::custom_server_error(format!(
"failed to create response: {}",
e
))
}),
_ => Ok(warp::reply::json(&api_types::GenericResponse::from(
blob_sidecar_list_filtered,
))
.into_response()),
}
})
},
);

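For orientation, the route above serves GET requests under the beacon API namespace and honours an SSZ `accept` header. The client sketch below is illustrative only: the URL prefix, port and the use of `reqwest` are assumptions, not part of this diff.

// Hedged sketch of a client hitting the new route.
async fn fetch_blob_sidecars_ssz() -> Result<bytes::Bytes, reqwest::Error> {
    let url = "http://localhost:5052/eth/v1/beacon/blob_sidecars/head";
    let response = reqwest::Client::new()
        .get(url)
        // Requesting SSZ takes the `Accept::Ssz` branch above; omit this header for JSON.
        .header("accept", "application/octet-stream")
        .send()
        .await?;
    response.bytes().await
}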
/*
|
||||
* beacon/pool
|
||||
*/
|
||||
@ -3038,16 +3084,16 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
if query.skip_randao_verification == SkipRandaoVerification::Yes {
|
||||
if !randao_reveal.is_infinity() {
|
||||
return Err(warp_utils::reject::custom_bad_request(
|
||||
"randao_reveal must be point-at-infinity if verification is skipped"
|
||||
.into(),
|
||||
));
|
||||
"randao_reveal must be point-at-infinity if verification is skipped"
|
||||
.into(),
|
||||
));
|
||||
}
|
||||
ProduceBlockVerification::NoVerification
|
||||
} else {
|
||||
ProduceBlockVerification::VerifyRandao
|
||||
};
|
||||
|
||||
let (block, _) = chain
|
||||
let (block, _, maybe_blobs) = chain
|
||||
.produce_block_with_verification::<FullPayload<T::EthSpec>>(
|
||||
randao_reveal,
|
||||
slot,
|
||||
@ -3061,11 +3107,14 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.fork_name(&chain.spec)
|
||||
.map_err(inconsistent_fork_rejection)?;
|
||||
|
||||
let block_contents =
|
||||
build_block_contents::build_block_contents(fork_name, block, maybe_blobs)?;
|
||||
|
||||
match accept_header {
|
||||
Some(api_types::Accept::Ssz) => Response::builder()
|
||||
.status(200)
|
||||
.header("Content-Type", "application/octet-stream")
|
||||
.body(block.as_ssz_bytes().into())
|
||||
.body(block_contents.as_ssz_bytes().into())
|
||||
.map(|res: Response<Bytes>| {
|
||||
add_consensus_version_header(res, fork_name)
|
||||
})
|
||||
@ -3075,7 +3124,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
e
|
||||
))
|
||||
}),
|
||||
_ => fork_versioned_response(endpoint_version, fork_name, block)
|
||||
_ => fork_versioned_response(endpoint_version, fork_name, block_contents)
|
||||
.map(|response| warp::reply::json(&response).into_response())
|
||||
.map(|res| add_consensus_version_header(res, fork_name)),
|
||||
}
|
||||
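For readers unfamiliar with `fork_versioned_response`: the JSON branch above wraps the produced block contents in the beacon-API version envelope, roughly of the shape sketched below. This is a simplified stand-in assuming `serde` with the derive feature; the real type lives in the `eth2` crate.

// Simplified stand-in for the version envelope produced by `fork_versioned_response`.
#[derive(serde::Serialize)]
struct ForkVersionedSketch<T: serde::Serialize> {
    version: String, // e.g. "capella" or "deneb"; also echoed in the consensus-version header
    data: T,         // the produced block contents (block, or block plus blob sidecars on Deneb)
}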
@ -3125,7 +3174,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
ProduceBlockVerification::VerifyRandao
|
||||
};
|
||||
|
||||
let (block, _) = chain
|
||||
let (block, _, maybe_blobs) = chain
|
||||
.produce_block_with_verification::<BlindedPayload<T::EthSpec>>(
|
||||
randao_reveal,
|
||||
slot,
|
||||
@ -3139,11 +3188,17 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.fork_name(&chain.spec)
|
||||
.map_err(inconsistent_fork_rejection)?;
|
||||
|
||||
let block_contents = build_block_contents::build_blinded_block_contents(
|
||||
fork_name,
|
||||
block,
|
||||
maybe_blobs,
|
||||
)?;
|
||||
|
||||
match accept_header {
|
||||
Some(api_types::Accept::Ssz) => Response::builder()
|
||||
.status(200)
|
||||
.header("Content-Type", "application/octet-stream")
|
||||
.body(block.as_ssz_bytes().into())
|
||||
.body(block_contents.as_ssz_bytes().into())
|
||||
.map(|res: Response<Bytes>| {
|
||||
add_consensus_version_header(res, fork_name)
|
||||
})
|
||||
@ -3154,7 +3209,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
))
|
||||
}),
|
||||
// Pose as a V2 endpoint so we return the fork `version`.
|
||||
_ => fork_versioned_response(V2, fork_name, block)
|
||||
_ => fork_versioned_response(V2, fork_name, block_contents)
|
||||
.map(|response| warp::reply::json(&response).into_response())
|
||||
.map(|res| add_consensus_version_header(res, fork_name)),
|
||||
}
|
||||
@ -4268,31 +4323,6 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
},
|
||||
);
|
||||
|
||||
// POST lighthouse/database/historical_blocks
|
||||
let post_lighthouse_database_historical_blocks = database_path
|
||||
.and(warp::path("historical_blocks"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(task_spawner_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
.and(log_filter.clone())
|
||||
.then(
|
||||
|blocks: Vec<Arc<SignedBlindedBeaconBlock<T::EthSpec>>>,
|
||||
task_spawner: TaskSpawner<T::EthSpec>,
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
log: Logger| {
|
||||
info!(
|
||||
log,
|
||||
"Importing historical blocks";
|
||||
"count" => blocks.len(),
|
||||
"source" => "http_api"
|
||||
);
|
||||
task_spawner.blocking_json_task(Priority::P1, move || {
|
||||
database::historical_blocks(chain, blocks)
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
// GET lighthouse/analysis/block_rewards
|
||||
let get_lighthouse_block_rewards = warp::path("lighthouse")
|
||||
.and(warp::path("analysis"))
|
||||
@ -4397,6 +4427,9 @@ pub fn serve<T: BeaconChainTypes>(
let receiver = match topic {
api_types::EventTopic::Head => event_handler.subscribe_head(),
api_types::EventTopic::Block => event_handler.subscribe_block(),
api_types::EventTopic::BlobSidecar => {
event_handler.subscribe_blob_sidecar()
}
api_types::EventTopic::Attestation => {
event_handler.subscribe_attestation()
}
@ -4526,6 +4559,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.uor(get_beacon_block_attestations)
|
||||
.uor(get_beacon_blinded_block)
|
||||
.uor(get_beacon_block_root)
|
||||
.uor(get_blobs)
|
||||
.uor(get_beacon_pool_attestations)
|
||||
.uor(get_beacon_pool_attester_slashings)
|
||||
.uor(get_beacon_pool_proposer_slashings)
|
||||
@ -4611,7 +4645,6 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.uor(post_validator_liveness_epoch)
|
||||
.uor(post_lighthouse_liveness)
|
||||
.uor(post_lighthouse_database_reconstruct)
|
||||
.uor(post_lighthouse_database_historical_blocks)
|
||||
.uor(post_lighthouse_block_rewards)
|
||||
.uor(post_lighthouse_ui_validator_metrics)
|
||||
.uor(post_lighthouse_ui_validator_info)
|
||||
|
@ -1,10 +1,13 @@
|
||||
use crate::metrics;
|
||||
|
||||
use beacon_chain::block_verification_types::{AsBlock, BlockContentsError};
|
||||
use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now};
|
||||
use beacon_chain::{
|
||||
BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, IntoGossipVerifiedBlock,
|
||||
NotifyExecutionLayer,
|
||||
AvailabilityProcessingStatus, BeaconChain, BeaconChainError, BeaconChainTypes, BlockError,
|
||||
IntoGossipVerifiedBlockContents, NotifyExecutionLayer,
|
||||
};
|
||||
use eth2::types::{BroadcastValidation, ErrorMessage};
|
||||
use eth2::types::{FullPayloadContents, SignedBlockContents};
|
||||
use execution_layer::ProvenancedPayload;
|
||||
use lighthouse_network::PubsubMessage;
|
||||
use network::NetworkMessage;
|
||||
@ -17,12 +20,12 @@ use tokio::sync::mpsc::UnboundedSender;
|
||||
use tree_hash::TreeHash;
|
||||
use types::{
|
||||
AbstractExecPayload, BeaconBlockRef, BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash,
|
||||
FullPayload, Hash256, SignedBeaconBlock,
|
||||
ForkName, FullPayload, FullPayloadMerge, Hash256, SignedBeaconBlock, SignedBlobSidecarList,
|
||||
};
|
||||
use warp::http::StatusCode;
|
||||
use warp::{reply::Response, Rejection, Reply};
|
||||
|
||||
pub enum ProvenancedBlock<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>> {
|
||||
pub enum ProvenancedBlock<T: BeaconChainTypes, B: IntoGossipVerifiedBlockContents<T>> {
|
||||
/// The payload was built using a local EE.
|
||||
Local(B, PhantomData<T>),
|
||||
/// The payload was build using a remote builder (e.g., via a mev-boost
|
||||
@ -30,7 +33,7 @@ pub enum ProvenancedBlock<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>> {
|
||||
Builder(B, PhantomData<T>),
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>> ProvenancedBlock<T, B> {
|
||||
impl<T: BeaconChainTypes, B: IntoGossipVerifiedBlockContents<T>> ProvenancedBlock<T, B> {
|
||||
pub fn local(block: B) -> Self {
|
||||
Self::Local(block, PhantomData)
|
||||
}
|
||||
@ -41,7 +44,7 @@ impl<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>> ProvenancedBlock<T, B>
|
||||
}
|
||||
|
||||
/// Handles a request from the HTTP API for full blocks.
|
||||
pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>(
|
||||
pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlockContents<T>>(
|
||||
block_root: Option<Hash256>,
|
||||
provenanced_block: ProvenancedBlock<T, B>,
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
@ -51,16 +54,18 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>(
|
||||
duplicate_status_code: StatusCode,
|
||||
) -> Result<Response, Rejection> {
|
||||
let seen_timestamp = timestamp_now();
|
||||
let (block, is_locally_built_block) = match provenanced_block {
|
||||
ProvenancedBlock::Local(block, _) => (block, true),
|
||||
ProvenancedBlock::Builder(block, _) => (block, false),
|
||||
|
||||
let (block_contents, is_locally_built_block) = match provenanced_block {
|
||||
ProvenancedBlock::Local(block_contents, _) => (block_contents, true),
|
||||
ProvenancedBlock::Builder(block_contents, _) => (block_contents, false),
|
||||
};
|
||||
let beacon_block = block.inner();
|
||||
let delay = get_block_delay_ms(seen_timestamp, beacon_block.message(), &chain.slot_clock);
|
||||
debug!(log, "Signed block received in HTTP API"; "slot" => beacon_block.slot());
|
||||
let block = block_contents.inner_block();
|
||||
let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock);
|
||||
debug!(log, "Signed block received in HTTP API"; "slot" => block.slot());
|
||||
|
||||
/* actually publish a block */
|
||||
let publish_block = move |block: Arc<SignedBeaconBlock<T::EthSpec>>,
|
||||
blobs_opt: Option<SignedBlobSidecarList<T::EthSpec>>,
|
||||
sender,
|
||||
log,
|
||||
seen_timestamp| {
|
||||
@ -71,60 +76,99 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>(

info!(log, "Signed block published to network via HTTP API"; "slot" => block.slot(), "publish_delay" => ?publish_delay);

let message = PubsubMessage::BeaconBlock(block);
crate::publish_pubsub_message(&sender, message)
.map_err(|_| BeaconChainError::UnableToPublish.into())
match block.as_ref() {
SignedBeaconBlock::Base(_)
| SignedBeaconBlock::Altair(_)
| SignedBeaconBlock::Merge(_)
| SignedBeaconBlock::Capella(_) => {
crate::publish_pubsub_message(&sender, PubsubMessage::BeaconBlock(block.clone()))
.map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))?;
}
SignedBeaconBlock::Deneb(_) => {
crate::publish_pubsub_message(&sender, PubsubMessage::BeaconBlock(block.clone()))
.map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))?;
if let Some(signed_blobs) = blobs_opt {
for (blob_index, blob) in signed_blobs.into_iter().enumerate() {
crate::publish_pubsub_message(
&sender,
PubsubMessage::BlobSidecar(Box::new((blob_index as u64, blob))),
)
.map_err(|_| {
BlockError::BeaconChainError(BeaconChainError::UnableToPublish)
})?;
}
}
}
};
Ok(())
};

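The Deneb arm above publishes the block first and then each signed blob sidecar as its own gossip message keyed by blob index. A dependency-free sketch of that loop's shape, where `publish` stands in for `crate::publish_pubsub_message`:

fn publish_blobs_sketch<B>(
    blobs: Vec<B>,
    mut publish: impl FnMut(u64, B) -> Result<(), ()>,
) -> Result<(), ()> {
    for (blob_index, blob) in blobs.into_iter().enumerate() {
        // Each sidecar is paired with its index, mirroring
        // PubsubMessage::BlobSidecar(Box::new((blob_index as u64, blob))).
        publish(blob_index as u64, blob)?;
    }
    Ok(())
}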
/* only publish if gossip- and consensus-valid and equivocation-free */
|
||||
let chain_clone = chain.clone();
|
||||
let slot = block.message().slot();
|
||||
let proposer_index = block.message().proposer_index();
|
||||
let sender_clone = network_tx.clone();
|
||||
let log_clone = log.clone();
|
||||
|
||||
// We can clone this because the blobs are `Arc`'d in `BlockContents`, but the block is not,
|
||||
// so we avoid cloning the block at this point.
|
||||
let blobs_opt = block_contents.inner_blobs();
|
||||
|
||||
/* if we can form a `GossipVerifiedBlock`, we've passed our basic gossip checks */
|
||||
let gossip_verified_block = match block.into_gossip_verified_block(&chain) {
|
||||
Ok(b) => b,
|
||||
Err(BlockError::BlockIsAlreadyKnown) => {
|
||||
// Allow the status code for duplicate blocks to be overridden based on config.
|
||||
return Ok(warp::reply::with_status(
|
||||
warp::reply::json(&ErrorMessage {
|
||||
code: duplicate_status_code.as_u16(),
|
||||
message: "duplicate block".to_string(),
|
||||
stacktraces: vec![],
|
||||
}),
|
||||
duplicate_status_code,
|
||||
)
|
||||
.into_response());
|
||||
}
|
||||
Err(e) => {
|
||||
warn!(
|
||||
log,
|
||||
"Not publishing block - not gossip verified";
|
||||
"slot" => beacon_block.slot(),
|
||||
"error" => ?e
|
||||
);
|
||||
return Err(warp_utils::reject::custom_bad_request(e.to_string()));
|
||||
}
|
||||
};
|
||||
let (gossip_verified_block, gossip_verified_blobs) =
|
||||
match block_contents.into_gossip_verified_block(&chain) {
|
||||
Ok(b) => b,
|
||||
Err(BlockContentsError::BlockError(BlockError::BlockIsAlreadyKnown)) => {
|
||||
// Allow the status code for duplicate blocks to be overridden based on config.
|
||||
return Ok(warp::reply::with_status(
|
||||
warp::reply::json(&ErrorMessage {
|
||||
code: duplicate_status_code.as_u16(),
|
||||
message: "duplicate block".to_string(),
|
||||
stacktraces: vec![],
|
||||
}),
|
||||
duplicate_status_code,
|
||||
)
|
||||
.into_response());
|
||||
}
|
||||
Err(e) => {
|
||||
warn!(
|
||||
log,
|
||||
"Not publishing block - not gossip verified";
|
||||
"slot" => slot,
|
||||
"error" => ?e
|
||||
);
|
||||
return Err(warp_utils::reject::custom_bad_request(e.to_string()));
|
||||
}
|
||||
};
|
||||
|
||||
// Clone here, so we can take advantage of the `Arc`. The block in `BlockContents` is not,
|
||||
// `Arc`'d but blobs are.
|
||||
let block = gossip_verified_block.block.block_cloned();
|
||||
|
||||
let block_root = block_root.unwrap_or(gossip_verified_block.block_root);
|
||||
|
||||
if let BroadcastValidation::Gossip = validation_level {
|
||||
publish_block(
|
||||
beacon_block.clone(),
|
||||
network_tx.clone(),
|
||||
block.clone(),
|
||||
blobs_opt.clone(),
|
||||
sender_clone.clone(),
|
||||
log.clone(),
|
||||
seen_timestamp,
|
||||
)
|
||||
.map_err(|_| warp_utils::reject::custom_server_error("unable to publish".into()))?;
|
||||
}
|
||||
|
||||
/* only publish if gossip- and consensus-valid and equivocation-free */
|
||||
let chain_clone = chain.clone();
|
||||
let block_clone = beacon_block.clone();
|
||||
let log_clone = log.clone();
|
||||
let sender_clone = network_tx.clone();
|
||||
let block_clone = block.clone();
|
||||
|
||||
let publish_fn = move || match validation_level {
|
||||
BroadcastValidation::Gossip => Ok(()),
|
||||
BroadcastValidation::Consensus => {
|
||||
publish_block(block_clone, sender_clone, log_clone, seen_timestamp)
|
||||
}
|
||||
BroadcastValidation::Consensus => publish_block(
|
||||
block_clone,
|
||||
blobs_opt,
|
||||
sender_clone,
|
||||
log_clone,
|
||||
seen_timestamp,
|
||||
),
|
||||
BroadcastValidation::ConsensusAndEquivocation => {
|
||||
if chain_clone
|
||||
.observed_block_producers
|
||||
@ -140,11 +184,35 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>(
|
||||
);
|
||||
Err(BlockError::Slashable)
|
||||
} else {
|
||||
publish_block(block_clone, sender_clone, log_clone, seen_timestamp)
|
||||
publish_block(
|
||||
block_clone,
|
||||
blobs_opt,
|
||||
sender_clone,
|
||||
log_clone,
|
||||
seen_timestamp,
|
||||
)
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
if let Some(gossip_verified_blobs) = gossip_verified_blobs {
|
||||
for blob in gossip_verified_blobs {
|
||||
if let Err(e) = chain.process_gossip_blob(blob).await {
|
||||
let msg = format!("Invalid blob: {e}");
|
||||
return if let BroadcastValidation::Gossip = validation_level {
|
||||
Err(warp_utils::reject::broadcast_without_import(msg))
|
||||
} else {
|
||||
error!(
|
||||
log,
|
||||
"Invalid blob provided to HTTP API";
|
||||
"reason" => &msg
|
||||
);
|
||||
Err(warp_utils::reject::custom_bad_request(msg))
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
match chain
|
||||
.process_block(
|
||||
block_root,
|
||||
@ -154,20 +222,20 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>(
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(root) => {
|
||||
Ok(AvailabilityProcessingStatus::Imported(root)) => {
|
||||
info!(
|
||||
log,
|
||||
"Valid block from HTTP API";
|
||||
"block_delay" => ?delay,
|
||||
"root" => format!("{}", root),
|
||||
"proposer_index" => beacon_block.message().proposer_index(),
|
||||
"slot" => beacon_block.slot(),
|
||||
"proposer_index" => proposer_index,
|
||||
"slot" =>slot,
|
||||
);
|
||||
|
||||
// Notify the validator monitor.
|
||||
chain.validator_monitor.read().register_api_block(
|
||||
seen_timestamp,
|
||||
beacon_block.message(),
|
||||
block.message(),
|
||||
root,
|
||||
&chain.slot_clock,
|
||||
);
|
||||
@ -180,17 +248,23 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>(
|
||||
// blocks built with builders we consider the broadcast time to be
|
||||
// when the blinded block is published to the builder.
|
||||
if is_locally_built_block {
|
||||
late_block_logging(
|
||||
&chain,
|
||||
seen_timestamp,
|
||||
beacon_block.message(),
|
||||
root,
|
||||
"local",
|
||||
&log,
|
||||
)
|
||||
late_block_logging(&chain, seen_timestamp, block.message(), root, "local", &log)
|
||||
}
|
||||
Ok(warp::reply().into_response())
|
||||
}
|
||||
Ok(AvailabilityProcessingStatus::MissingComponents(_, block_root)) => {
|
||||
let msg = format!("Missing parts of block with root {:?}", block_root);
|
||||
if let BroadcastValidation::Gossip = validation_level {
|
||||
Err(warp_utils::reject::broadcast_without_import(msg))
|
||||
} else {
|
||||
error!(
|
||||
log,
|
||||
"Invalid block provided to HTTP API";
|
||||
"reason" => &msg
|
||||
);
|
||||
Err(warp_utils::reject::custom_bad_request(msg))
|
||||
}
|
||||
}
|
||||
Err(BlockError::BeaconChainError(BeaconChainError::UnableToPublish)) => {
|
||||
Err(warp_utils::reject::custom_server_error(
|
||||
"unable to publish to network channel".to_string(),
|
||||
@ -220,16 +294,16 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>(
|
||||
/// Handles a request from the HTTP API for blinded blocks. This converts blinded blocks into full
|
||||
/// blocks before publishing.
|
||||
pub async fn publish_blinded_block<T: BeaconChainTypes>(
|
||||
block: SignedBeaconBlock<T::EthSpec, BlindedPayload<T::EthSpec>>,
|
||||
block_contents: SignedBlockContents<T::EthSpec, BlindedPayload<T::EthSpec>>,
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>,
|
||||
log: Logger,
|
||||
validation_level: BroadcastValidation,
|
||||
duplicate_status_code: StatusCode,
|
||||
) -> Result<Response, Rejection> {
|
||||
let block_root = block.canonical_root();
|
||||
let full_block: ProvenancedBlock<T, Arc<SignedBeaconBlock<T::EthSpec>>> =
|
||||
reconstruct_block(chain.clone(), block_root, block, log.clone()).await?;
|
||||
let block_root = block_contents.signed_block().canonical_root();
|
||||
let full_block: ProvenancedBlock<T, SignedBlockContents<T::EthSpec>> =
|
||||
reconstruct_block(chain.clone(), block_root, block_contents, log.clone()).await?;
|
||||
publish_block::<T, _>(
|
||||
Some(block_root),
|
||||
full_block,
|
||||
@ -248,28 +322,28 @@ pub async fn publish_blinded_block<T: BeaconChainTypes>(
|
||||
pub async fn reconstruct_block<T: BeaconChainTypes>(
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
block_root: Hash256,
|
||||
block: SignedBeaconBlock<T::EthSpec, BlindedPayload<T::EthSpec>>,
|
||||
block_contents: SignedBlockContents<T::EthSpec, BlindedPayload<T::EthSpec>>,
|
||||
log: Logger,
|
||||
) -> Result<ProvenancedBlock<T, Arc<SignedBeaconBlock<T::EthSpec>>>, Rejection> {
|
||||
) -> Result<ProvenancedBlock<T, SignedBlockContents<T::EthSpec>>, Rejection> {
|
||||
let block = block_contents.signed_block();
|
||||
let full_payload_opt = if let Ok(payload_header) = block.message().body().execution_payload() {
|
||||
let el = chain.execution_layer.as_ref().ok_or_else(|| {
|
||||
warp_utils::reject::custom_server_error("Missing execution layer".to_string())
|
||||
})?;
|
||||
|
||||
// If the execution block hash is zero, use an empty payload.
|
||||
let full_payload = if payload_header.block_hash() == ExecutionBlockHash::zero() {
|
||||
let payload = FullPayload::default_at_fork(
|
||||
chain
|
||||
.spec
|
||||
.fork_name_at_epoch(block.slot().epoch(T::EthSpec::slots_per_epoch())),
|
||||
)
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_server_error(format!(
|
||||
"Default payload construction error: {e:?}"
|
||||
))
|
||||
})?
|
||||
.into();
|
||||
ProvenancedPayload::Local(payload)
|
||||
let full_payload_contents = if payload_header.block_hash() == ExecutionBlockHash::zero() {
|
||||
let fork_name = chain
|
||||
.spec
|
||||
.fork_name_at_epoch(block.slot().epoch(T::EthSpec::slots_per_epoch()));
|
||||
if fork_name == ForkName::Merge {
|
||||
let payload: FullPayload<T::EthSpec> = FullPayloadMerge::default().into();
|
||||
ProvenancedPayload::Local(FullPayloadContents::Payload(payload.into()))
|
||||
} else {
|
||||
Err(warp_utils::reject::custom_server_error(
|
||||
"Failed to construct full payload - block hash must be non-zero after Bellatrix.".to_string()
|
||||
))?
|
||||
}
|
||||
// If we already have an execution payload with this transactions root cached, use it.
|
||||
} else if let Some(cached_payload) =
|
||||
el.get_payload_by_root(&payload_header.tree_hash_root())
|
||||
@ -293,7 +367,7 @@ pub async fn reconstruct_block<T: BeaconChainTypes>(
|
||||
);
|
||||
|
||||
let full_payload = el
|
||||
.propose_blinded_beacon_block(block_root, &block)
|
||||
.propose_blinded_beacon_block(block_root, &block_contents)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_server_error(format!(
|
||||
@ -305,7 +379,7 @@ pub async fn reconstruct_block<T: BeaconChainTypes>(
|
||||
ProvenancedPayload::Builder(full_payload)
|
||||
};
|
||||
|
||||
Some(full_payload)
|
||||
Some(full_payload_contents)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
@ -313,21 +387,18 @@ pub async fn reconstruct_block<T: BeaconChainTypes>(
|
||||
match full_payload_opt {
|
||||
// A block without a payload is pre-merge and we consider it locally
|
||||
// built.
|
||||
None => block
|
||||
.try_into_full_block(None)
|
||||
.map(Arc::new)
|
||||
None => block_contents
|
||||
.try_into_full_block_and_blobs(None)
|
||||
.map(ProvenancedBlock::local),
|
||||
Some(ProvenancedPayload::Local(full_payload)) => block
|
||||
.try_into_full_block(Some(full_payload))
|
||||
.map(Arc::new)
|
||||
Some(ProvenancedPayload::Local(full_payload_contents)) => block_contents
|
||||
.try_into_full_block_and_blobs(Some(full_payload_contents))
|
||||
.map(ProvenancedBlock::local),
|
||||
Some(ProvenancedPayload::Builder(full_payload)) => block
|
||||
.try_into_full_block(Some(full_payload))
|
||||
.map(Arc::new)
|
||||
Some(ProvenancedPayload::Builder(full_payload_contents)) => block_contents
|
||||
.try_into_full_block_and_blobs(Some(full_payload_contents))
|
||||
.map(ProvenancedBlock::builder),
|
||||
}
|
||||
.ok_or_else(|| {
|
||||
warp_utils::reject::custom_server_error("Unable to add payload to block".to_string())
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_server_error(format!("Unable to add payload to block: {e:?}"))
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -1,8 +1,6 @@
|
||||
use crate::{Config, Context};
|
||||
use beacon_chain::{
|
||||
test_utils::{
|
||||
BeaconChainHarness, BoxedMutator, Builder as HarnessBuilder, EphemeralHarnessType,
|
||||
},
|
||||
test_utils::{BeaconChainHarness, BoxedMutator, Builder, EphemeralHarnessType},
|
||||
BeaconChain, BeaconChainTypes,
|
||||
};
|
||||
use beacon_processor::{BeaconProcessor, BeaconProcessorChannels, BeaconProcessorConfig};
|
||||
@ -53,9 +51,8 @@ pub struct ApiServer<E: EthSpec, SFut: Future<Output = ()>> {
|
||||
pub external_peer_id: PeerId,
|
||||
}
|
||||
|
||||
type Initializer<E> = Box<
|
||||
dyn FnOnce(HarnessBuilder<EphemeralHarnessType<E>>) -> HarnessBuilder<EphemeralHarnessType<E>>,
|
||||
>;
|
||||
type HarnessBuilder<E> = Builder<EphemeralHarnessType<E>>;
|
||||
type Initializer<E> = Box<dyn FnOnce(HarnessBuilder<E>) -> HarnessBuilder<E>>;
|
||||
type Mutator<E> = BoxedMutator<E, MemoryStore<E>, MemoryStore<E>>;
|
||||
|
||||
impl<E: EthSpec> InteractiveTester<E> {
|
||||
|
@ -1,12 +1,19 @@
|
||||
use beacon_chain::{
|
||||
test_utils::{AttestationStrategy, BlockStrategy},
|
||||
GossipVerifiedBlock,
|
||||
GossipVerifiedBlock, IntoGossipVerifiedBlockContents,
|
||||
};
|
||||
use eth2::types::{
|
||||
BroadcastValidation, SignedBeaconBlock, SignedBlindedBeaconBlock, SignedBlockContents,
|
||||
SignedBlockContentsTuple,
|
||||
};
|
||||
use eth2::types::{BroadcastValidation, SignedBeaconBlock, SignedBlindedBeaconBlock};
|
||||
use http_api::test_utils::InteractiveTester;
|
||||
use http_api::{publish_blinded_block, publish_block, reconstruct_block, ProvenancedBlock};
|
||||
use std::sync::Arc;
|
||||
use tree_hash::TreeHash;
|
||||
use types::{Hash256, MainnetEthSpec, Slot};
|
||||
use types::{
|
||||
BlindedBlobSidecar, BlindedPayload, BlobSidecar, FullPayload, Hash256, MainnetEthSpec,
|
||||
SignedSidecarList, Slot,
|
||||
};
|
||||
use warp::Rejection;
|
||||
use warp_utils::reject::CustomBadRequest;
|
||||
|
||||
@ -63,7 +70,7 @@ pub async fn gossip_invalid() {
|
||||
|
||||
tester.harness.advance_slot();
|
||||
|
||||
let (block, _): (SignedBeaconBlock<E>, _) = tester
|
||||
let ((block, blobs), _): ((SignedBeaconBlock<E>, _), _) = tester
|
||||
.harness
|
||||
.make_block_with_modifier(chain_state_before, slot, |b| {
|
||||
*b.state_root_mut() = Hash256::zero();
|
||||
@ -73,7 +80,7 @@ pub async fn gossip_invalid() {
|
||||
|
||||
let response: Result<(), eth2::Error> = tester
|
||||
.client
|
||||
.post_beacon_blocks_v2(&block, validation_level)
|
||||
.post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level)
|
||||
.await;
|
||||
assert!(response.is_err());
|
||||
|
||||
@ -83,7 +90,7 @@ pub async fn gossip_invalid() {
|
||||
assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST));
|
||||
|
||||
assert!(
|
||||
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string())
|
||||
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string())
|
||||
);
|
||||
}
|
||||
|
||||
@ -115,7 +122,7 @@ pub async fn gossip_partial_pass() {
|
||||
|
||||
tester.harness.advance_slot();
|
||||
|
||||
let (block, _): (SignedBeaconBlock<E>, _) = tester
|
||||
let ((block, blobs), _): ((SignedBeaconBlock<E>, _), _) = tester
|
||||
.harness
|
||||
.make_block_with_modifier(chain_state_before, slot, |b| {
|
||||
*b.state_root_mut() = Hash256::random()
|
||||
@ -124,7 +131,7 @@ pub async fn gossip_partial_pass() {
|
||||
|
||||
let response: Result<(), eth2::Error> = tester
|
||||
.client
|
||||
.post_beacon_blocks_v2(&block, validation_level)
|
||||
.post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level)
|
||||
.await;
|
||||
assert!(response.is_err());
|
||||
|
||||
@ -161,11 +168,15 @@ pub async fn gossip_full_pass() {
|
||||
let slot_b = slot_a + 1;
|
||||
|
||||
let state_a = tester.harness.get_current_state();
|
||||
let (block, _): (SignedBeaconBlock<E>, _) = tester.harness.make_block(state_a, slot_b).await;
|
||||
let ((block, blobs), _): ((SignedBeaconBlock<E>, _), _) =
|
||||
tester.harness.make_block(state_a, slot_b).await;
|
||||
|
||||
let response: Result<(), eth2::Error> = tester
|
||||
.client
|
||||
.post_beacon_blocks_v2(&block, validation_level)
|
||||
.post_beacon_blocks_v2(
|
||||
&SignedBlockContents::new(block.clone(), blobs),
|
||||
validation_level,
|
||||
)
|
||||
.await;
|
||||
|
||||
assert!(response.is_ok());
|
||||
@ -203,18 +214,19 @@ pub async fn gossip_full_pass_ssz() {
|
||||
let slot_b = slot_a + 1;
|
||||
|
||||
let state_a = tester.harness.get_current_state();
|
||||
let (block, _): (SignedBeaconBlock<E>, _) = tester.harness.make_block(state_a, slot_b).await;
|
||||
let (block_contents_tuple, _) = tester.harness.make_block(state_a, slot_b).await;
|
||||
let block_contents = block_contents_tuple.into();
|
||||
|
||||
let response: Result<(), eth2::Error> = tester
|
||||
.client
|
||||
.post_beacon_blocks_v2_ssz(&block, validation_level)
|
||||
.post_beacon_blocks_v2_ssz(&block_contents, validation_level)
|
||||
.await;
|
||||
|
||||
assert!(response.is_ok());
|
||||
assert!(tester
|
||||
.harness
|
||||
.chain
|
||||
.block_is_known_to_fork_choice(&block.canonical_root()));
|
||||
.block_is_known_to_fork_choice(&block_contents.signed_block().canonical_root()));
|
||||
}
|
||||
|
||||
/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`.
|
||||
@ -244,7 +256,7 @@ pub async fn consensus_invalid() {
|
||||
|
||||
tester.harness.advance_slot();
|
||||
|
||||
let (block, _): (SignedBeaconBlock<E>, _) = tester
|
||||
let ((block, blobs), _): ((SignedBeaconBlock<E>, _), _) = tester
|
||||
.harness
|
||||
.make_block_with_modifier(chain_state_before, slot, |b| {
|
||||
*b.state_root_mut() = Hash256::zero();
|
||||
@ -254,7 +266,7 @@ pub async fn consensus_invalid() {
|
||||
|
||||
let response: Result<(), eth2::Error> = tester
|
||||
.client
|
||||
.post_beacon_blocks_v2(&block, validation_level)
|
||||
.post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level)
|
||||
.await;
|
||||
assert!(response.is_err());
|
||||
|
||||
@ -264,7 +276,7 @@ pub async fn consensus_invalid() {
|
||||
assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST));
|
||||
|
||||
assert!(
|
||||
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string())
|
||||
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string())
|
||||
);
|
||||
}
|
||||
|
||||
@ -296,14 +308,14 @@ pub async fn consensus_gossip() {
|
||||
let slot_b = slot_a + 1;
|
||||
|
||||
let state_a = tester.harness.get_current_state();
|
||||
let (block, _): (SignedBeaconBlock<E>, _) = tester
|
||||
let ((block, blobs), _): ((SignedBeaconBlock<E>, _), _) = tester
|
||||
.harness
|
||||
.make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero())
|
||||
.await;
|
||||
|
||||
let response: Result<(), eth2::Error> = tester
|
||||
.client
|
||||
.post_beacon_blocks_v2(&block, validation_level)
|
||||
.post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level)
|
||||
.await;
|
||||
assert!(response.is_err());
|
||||
|
||||
@ -346,18 +358,20 @@ pub async fn consensus_partial_pass_only_consensus() {
|
||||
let slot_b = slot_a + 1;
|
||||
|
||||
let state_a = tester.harness.get_current_state();
|
||||
let (block_a, state_after_a): (SignedBeaconBlock<E>, _) =
|
||||
let ((block_a, _), state_after_a): ((SignedBeaconBlock<E>, _), _) =
|
||||
tester.harness.make_block(state_a.clone(), slot_b).await;
|
||||
let (block_b, state_after_b): (SignedBeaconBlock<E>, _) =
|
||||
let ((block_b, blobs_b), state_after_b): ((SignedBeaconBlock<E>, _), _) =
|
||||
tester.harness.make_block(state_a, slot_b).await;
|
||||
let block_b_root = block_b.canonical_root();
|
||||
|
||||
/* check for `make_block` curios */
|
||||
assert_eq!(block_a.state_root(), state_after_a.tree_hash_root());
|
||||
assert_eq!(block_b.state_root(), state_after_b.tree_hash_root());
|
||||
assert_ne!(block_a.state_root(), block_b.state_root());
|
||||
|
||||
let gossip_block_b = GossipVerifiedBlock::new(block_b.clone().into(), &tester.harness.chain);
|
||||
assert!(gossip_block_b.is_ok());
|
||||
let gossip_block_contents_b = SignedBlockContents::new(block_b, blobs_b)
|
||||
.into_gossip_verified_block(&tester.harness.chain);
|
||||
assert!(gossip_block_contents_b.is_ok());
|
||||
let gossip_block_a = GossipVerifiedBlock::new(block_a.clone().into(), &tester.harness.chain);
|
||||
assert!(gossip_block_a.is_err());
|
||||
|
||||
@ -366,7 +380,7 @@ pub async fn consensus_partial_pass_only_consensus() {
|
||||
|
||||
let publication_result = publish_block(
|
||||
None,
|
||||
ProvenancedBlock::local(gossip_block_b.unwrap()),
|
||||
ProvenancedBlock::local(gossip_block_contents_b.unwrap()),
|
||||
tester.harness.chain.clone(),
|
||||
&channel.0,
|
||||
test_logger,
|
||||
@ -379,7 +393,7 @@ pub async fn consensus_partial_pass_only_consensus() {
|
||||
assert!(tester
|
||||
.harness
|
||||
.chain
|
||||
.block_is_known_to_fork_choice(&block_b.canonical_root()));
|
||||
.block_is_known_to_fork_choice(&block_b_root));
|
||||
}
|
||||
|
||||
/// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=consensus`.
|
||||
@ -410,11 +424,15 @@ pub async fn consensus_full_pass() {
|
||||
let slot_b = slot_a + 1;
|
||||
|
||||
let state_a = tester.harness.get_current_state();
|
||||
let (block, _): (SignedBeaconBlock<E>, _) = tester.harness.make_block(state_a, slot_b).await;
|
||||
let ((block, blobs), _): ((SignedBeaconBlock<E>, _), _) =
|
||||
tester.harness.make_block(state_a, slot_b).await;
|
||||
|
||||
let response: Result<(), eth2::Error> = tester
|
||||
.client
|
||||
.post_beacon_blocks_v2(&block, validation_level)
|
||||
.post_beacon_blocks_v2(
|
||||
&SignedBlockContents::new(block.clone(), blobs),
|
||||
validation_level,
|
||||
)
|
||||
.await;
|
||||
|
||||
assert!(response.is_ok());
|
||||
@ -453,7 +471,7 @@ pub async fn equivocation_invalid() {
|
||||
|
||||
tester.harness.advance_slot();
|
||||
|
||||
let (block, _): (SignedBeaconBlock<E>, _) = tester
|
||||
let ((block, blobs), _): ((SignedBeaconBlock<E>, _), _) = tester
|
||||
.harness
|
||||
.make_block_with_modifier(chain_state_before, slot, |b| {
|
||||
*b.state_root_mut() = Hash256::zero();
|
||||
@ -463,7 +481,7 @@ pub async fn equivocation_invalid() {
|
||||
|
||||
let response: Result<(), eth2::Error> = tester
|
||||
.client
|
||||
.post_beacon_blocks_v2(&block, validation_level)
|
||||
.post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level)
|
||||
.await;
|
||||
assert!(response.is_err());
|
||||
|
||||
@ -473,7 +491,7 @@ pub async fn equivocation_invalid() {
|
||||
assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST));
|
||||
|
||||
assert!(
|
||||
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string())
|
||||
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string())
|
||||
);
|
||||
}
|
||||
|
||||
@ -506,9 +524,9 @@ pub async fn equivocation_consensus_early_equivocation() {
|
||||
let slot_b = slot_a + 1;
|
||||
|
||||
let state_a = tester.harness.get_current_state();
|
||||
let (block_a, state_after_a): (SignedBeaconBlock<E>, _) =
|
||||
let ((block_a, blobs_a), state_after_a): ((SignedBeaconBlock<E>, _), _) =
|
||||
tester.harness.make_block(state_a.clone(), slot_b).await;
|
||||
let (block_b, state_after_b): (SignedBeaconBlock<E>, _) =
|
||||
let ((block_b, blobs_b), state_after_b): ((SignedBeaconBlock<E>, _), _) =
|
||||
tester.harness.make_block(state_a, slot_b).await;
|
||||
|
||||
/* check for `make_block` curios */
|
||||
@ -519,7 +537,10 @@ pub async fn equivocation_consensus_early_equivocation() {
|
||||
/* submit `block_a` as valid */
|
||||
assert!(tester
|
||||
.client
|
||||
.post_beacon_blocks_v2(&block_a, validation_level)
|
||||
.post_beacon_blocks_v2(
|
||||
&SignedBlockContents::new(block_a.clone(), blobs_a),
|
||||
validation_level
|
||||
)
|
||||
.await
|
||||
.is_ok());
|
||||
assert!(tester
|
||||
@ -530,7 +551,10 @@ pub async fn equivocation_consensus_early_equivocation() {
|
||||
/* submit `block_b` which should induce equivocation */
|
||||
let response: Result<(), eth2::Error> = tester
|
||||
.client
|
||||
.post_beacon_blocks_v2(&block_b, validation_level)
|
||||
.post_beacon_blocks_v2(
|
||||
&SignedBlockContents::new(block_b.clone(), blobs_b),
|
||||
validation_level,
|
||||
)
|
||||
.await;
|
||||
assert!(response.is_err());
|
||||
|
||||
@ -539,7 +563,7 @@ pub async fn equivocation_consensus_early_equivocation() {
|
||||
assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST));
|
||||
|
||||
assert!(
|
||||
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Slashable".to_string())
|
||||
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(Slashable)".to_string())
|
||||
);
|
||||
}
|
||||
|
||||
@ -572,14 +596,14 @@ pub async fn equivocation_gossip() {
|
||||
let slot_b = slot_a + 1;
|
||||
|
||||
let state_a = tester.harness.get_current_state();
|
||||
let (block, _): (SignedBeaconBlock<E>, _) = tester
|
||||
let ((block, blobs), _): ((SignedBeaconBlock<E>, _), _) = tester
|
||||
.harness
|
||||
.make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero())
|
||||
.await;
|
||||
|
||||
let response: Result<(), eth2::Error> = tester
|
||||
.client
|
||||
.post_beacon_blocks_v2(&block, validation_level)
|
||||
.post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level)
|
||||
.await;
|
||||
assert!(response.is_err());
|
||||
|
||||
@@ -593,9 +617,11 @@ pub async fn equivocation_gossip() {
    );
}

/// This test checks that a block that is valid from both a gossip and consensus perspective but that equivocates **late** is rejected when using `broadcast_validation=consensus_and_equivocation`.
/// This test checks that a block that is valid from both a gossip and consensus perspective but
/// that equivocates **late** is rejected when using `broadcast_validation=consensus_and_equivocation`.
///
/// This test is unique in that we can't actually test the HTTP API directly, but instead have to hook into the `publish_blocks` code manually. This is in order to handle the late equivocation case.
/// This test is unique in that we can't actually test the HTTP API directly, but instead have to
/// hook into the `publish_blocks` code manually. This is in order to handle the late equivocation case.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
pub async fn equivocation_consensus_late_equivocation() {
    /* this test targets gossip-level validation */
@ -625,9 +651,9 @@ pub async fn equivocation_consensus_late_equivocation() {
|
||||
let slot_b = slot_a + 1;
|
||||
|
||||
let state_a = tester.harness.get_current_state();
|
||||
let (block_a, state_after_a): (SignedBeaconBlock<E>, _) =
|
||||
let ((block_a, blobs_a), state_after_a): ((SignedBeaconBlock<E>, _), _) =
|
||||
tester.harness.make_block(state_a.clone(), slot_b).await;
|
||||
let (block_b, state_after_b): (SignedBeaconBlock<E>, _) =
|
||||
let ((block_b, blobs_b), state_after_b): ((SignedBeaconBlock<E>, _), _) =
|
||||
tester.harness.make_block(state_a, slot_b).await;
|
||||
|
||||
/* check for `make_block` curios */
|
||||
@ -635,16 +661,18 @@ pub async fn equivocation_consensus_late_equivocation() {
|
||||
assert_eq!(block_b.state_root(), state_after_b.tree_hash_root());
|
||||
assert_ne!(block_a.state_root(), block_b.state_root());
|
||||
|
||||
let gossip_block_b = GossipVerifiedBlock::new(block_b.clone().into(), &tester.harness.chain);
|
||||
assert!(gossip_block_b.is_ok());
|
||||
let gossip_block_a = GossipVerifiedBlock::new(block_a.clone().into(), &tester.harness.chain);
|
||||
assert!(gossip_block_a.is_err());
|
||||
let gossip_block_contents_b = SignedBlockContents::new(block_b, blobs_b)
|
||||
.into_gossip_verified_block(&tester.harness.chain);
|
||||
assert!(gossip_block_contents_b.is_ok());
|
||||
let gossip_block_contents_a = SignedBlockContents::new(block_a, blobs_a)
|
||||
.into_gossip_verified_block(&tester.harness.chain);
|
||||
assert!(gossip_block_contents_a.is_err());
|
||||
|
||||
let channel = tokio::sync::mpsc::unbounded_channel();
|
||||
|
||||
let publication_result = publish_block(
|
||||
None,
|
||||
ProvenancedBlock::local(gossip_block_b.unwrap()),
|
||||
ProvenancedBlock::local(gossip_block_contents_b.unwrap()),
|
||||
tester.harness.chain,
|
||||
&channel.0,
|
||||
test_logger,
|
||||
@ -694,11 +722,15 @@ pub async fn equivocation_full_pass() {
|
||||
let slot_b = slot_a + 1;
|
||||
|
||||
let state_a = tester.harness.get_current_state();
|
||||
let (block, _): (SignedBeaconBlock<E>, _) = tester.harness.make_block(state_a, slot_b).await;
|
||||
let ((block, blobs), _): ((SignedBeaconBlock<E>, _), _) =
|
||||
tester.harness.make_block(state_a, slot_b).await;
|
||||
|
||||
let response: Result<(), eth2::Error> = tester
|
||||
.client
|
||||
.post_beacon_blocks_v2(&block, validation_level)
|
||||
.post_beacon_blocks_v2(
|
||||
&SignedBlockContents::new(block.clone(), blobs),
|
||||
validation_level,
|
||||
)
|
||||
.await;
|
||||
|
||||
assert!(response.is_ok());
|
||||
@ -736,7 +768,7 @@ pub async fn blinded_gossip_invalid() {
|
||||
|
||||
tester.harness.advance_slot();
|
||||
|
||||
let (block, _): (SignedBeaconBlock<E>, _) = tester
|
||||
let (block_contents_tuple, _) = tester
|
||||
.harness
|
||||
.make_block_with_modifier(chain_state_before, slot, |b| {
|
||||
*b.state_root_mut() = Hash256::zero();
|
||||
@ -744,11 +776,11 @@ pub async fn blinded_gossip_invalid() {
|
||||
})
|
||||
.await;
|
||||
|
||||
let blinded_block: SignedBlindedBeaconBlock<E> = block.into();
|
||||
let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple);
|
||||
|
||||
let response: Result<(), eth2::Error> = tester
|
||||
.client
|
||||
.post_beacon_blinded_blocks_v2(&blinded_block, validation_level)
|
||||
.post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level)
|
||||
.await;
|
||||
assert!(response.is_err());
|
||||
|
||||
@ -758,7 +790,7 @@ pub async fn blinded_gossip_invalid() {
|
||||
assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST));
|
||||
|
||||
assert!(
|
||||
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string())
|
||||
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string())
|
||||
);
|
||||
}
|
||||
|
||||
@ -790,18 +822,18 @@ pub async fn blinded_gossip_partial_pass() {
|
||||
|
||||
tester.harness.advance_slot();
|
||||
|
||||
let (block, _): (SignedBeaconBlock<E>, _) = tester
|
||||
let (block_contents_tuple, _) = tester
|
||||
.harness
|
||||
.make_block_with_modifier(chain_state_before, slot, |b| {
|
||||
*b.state_root_mut() = Hash256::zero()
|
||||
})
|
||||
.await;
|
||||
|
||||
let blinded_block: SignedBlindedBeaconBlock<E> = block.into();
|
||||
let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple);
|
||||
|
||||
let response: Result<(), eth2::Error> = tester
|
||||
.client
|
||||
.post_beacon_blinded_blocks_v2(&blinded_block, validation_level)
|
||||
.post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level)
|
||||
.await;
|
||||
assert!(response.is_err());
|
||||
|
||||
@ -838,19 +870,18 @@ pub async fn blinded_gossip_full_pass() {
|
||||
let slot_b = slot_a + 1;
|
||||
|
||||
let state_a = tester.harness.get_current_state();
|
||||
let (block, _): (SignedBlindedBeaconBlock<E>, _) =
|
||||
tester.harness.make_blinded_block(state_a, slot_b).await;
|
||||
|
||||
let (block_contents_tuple, _) = tester.harness.make_blinded_block(state_a, slot_b).await;
|
||||
let block_contents = block_contents_tuple.into();
|
||||
let response: Result<(), eth2::Error> = tester
|
||||
.client
|
||||
.post_beacon_blinded_blocks_v2(&block, validation_level)
|
||||
.post_beacon_blinded_blocks_v2(&block_contents, validation_level)
|
||||
.await;
|
||||
|
||||
assert!(response.is_ok());
|
||||
assert!(tester
|
||||
.harness
|
||||
.chain
|
||||
.block_is_known_to_fork_choice(&block.canonical_root()));
|
||||
.block_is_known_to_fork_choice(&block_contents.signed_block().canonical_root()));
|
||||
}
|
||||
|
||||
// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=gossip`.
|
||||
@ -881,19 +912,19 @@ pub async fn blinded_gossip_full_pass_ssz() {
|
||||
let slot_b = slot_a + 1;
|
||||
|
||||
let state_a = tester.harness.get_current_state();
|
||||
let (block, _): (SignedBlindedBeaconBlock<E>, _) =
|
||||
tester.harness.make_blinded_block(state_a, slot_b).await;
|
||||
let (block_contents_tuple, _) = tester.harness.make_blinded_block(state_a, slot_b).await;
|
||||
let block_contents = block_contents_tuple.into();
|
||||
|
||||
let response: Result<(), eth2::Error> = tester
|
||||
.client
|
||||
.post_beacon_blinded_blocks_v2_ssz(&block, validation_level)
|
||||
.post_beacon_blinded_blocks_v2_ssz(&block_contents, validation_level)
|
||||
.await;
|
||||
|
||||
assert!(response.is_ok());
|
||||
assert!(tester
|
||||
.harness
|
||||
.chain
|
||||
.block_is_known_to_fork_choice(&block.canonical_root()));
|
||||
.block_is_known_to_fork_choice(&block_contents.signed_block().canonical_root()));
|
||||
}
|
||||
|
||||
/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`.
|
||||
@ -924,7 +955,7 @@ pub async fn blinded_consensus_invalid() {
|
||||
|
||||
tester.harness.advance_slot();
|
||||
|
||||
let (block, _): (SignedBeaconBlock<E>, _) = tester
|
||||
let (block_contents_tuple, _) = tester
|
||||
.harness
|
||||
.make_block_with_modifier(chain_state_before, slot, |b| {
|
||||
*b.state_root_mut() = Hash256::zero();
|
||||
@ -932,11 +963,11 @@ pub async fn blinded_consensus_invalid() {
|
||||
})
|
||||
.await;
|
||||
|
||||
let blinded_block: SignedBlindedBeaconBlock<E> = block.into();
|
||||
let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple);
|
||||
|
||||
let response: Result<(), eth2::Error> = tester
|
||||
.client
|
||||
.post_beacon_blinded_blocks_v2(&blinded_block, validation_level)
|
||||
.post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level)
|
||||
.await;
|
||||
assert!(response.is_err());
|
||||
|
||||
@ -946,7 +977,7 @@ pub async fn blinded_consensus_invalid() {
|
||||
assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST));
|
||||
|
||||
assert!(
|
||||
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string())
|
||||
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string())
|
||||
);
|
||||
}
|
||||
|
||||
@ -978,16 +1009,16 @@ pub async fn blinded_consensus_gossip() {
|
||||
let slot_b = slot_a + 1;
|
||||
|
||||
let state_a = tester.harness.get_current_state();
|
||||
let (block, _): (SignedBeaconBlock<E>, _) = tester
|
||||
let (block_contents_tuple, _) = tester
|
||||
.harness
|
||||
.make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero())
|
||||
.await;
|
||||
|
||||
let blinded_block: SignedBlindedBeaconBlock<E> = block.into();
|
||||
let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple);
|
||||
|
||||
let response: Result<(), eth2::Error> = tester
|
||||
.client
|
||||
.post_beacon_blinded_blocks_v2(&blinded_block, validation_level)
|
||||
.post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level)
|
||||
.await;
|
||||
assert!(response.is_err());
|
||||
|
||||
@ -1029,19 +1060,19 @@ pub async fn blinded_consensus_full_pass() {
|
||||
let slot_b = slot_a + 1;
|
||||
|
||||
let state_a = tester.harness.get_current_state();
|
||||
let (block, _): (SignedBlindedBeaconBlock<E>, _) =
|
||||
tester.harness.make_blinded_block(state_a, slot_b).await;
|
||||
let (block_contents_tuple, _) = tester.harness.make_blinded_block(state_a, slot_b).await;
|
||||
|
||||
let block_contents = block_contents_tuple.into();
|
||||
let response: Result<(), eth2::Error> = tester
|
||||
.client
|
||||
.post_beacon_blinded_blocks_v2(&block, validation_level)
|
||||
.post_beacon_blinded_blocks_v2(&block_contents, validation_level)
|
||||
.await;
|
||||
|
||||
assert!(response.is_ok());
|
||||
assert!(tester
|
||||
.harness
|
||||
.chain
|
||||
.block_is_known_to_fork_choice(&block.canonical_root()));
|
||||
.block_is_known_to_fork_choice(&block_contents.signed_block().canonical_root()));
|
||||
}
|
||||
|
||||
/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus_and_equivocation`.
|
||||
@ -1073,7 +1104,7 @@ pub async fn blinded_equivocation_invalid() {
|
||||
|
||||
tester.harness.advance_slot();
|
||||
|
||||
let (block, _): (SignedBeaconBlock<E>, _) = tester
|
||||
let (block_contents_tuple, _) = tester
|
||||
.harness
|
||||
.make_block_with_modifier(chain_state_before, slot, |b| {
|
||||
*b.state_root_mut() = Hash256::zero();
|
||||
@ -1081,11 +1112,11 @@ pub async fn blinded_equivocation_invalid() {
|
||||
})
|
||||
.await;
|
||||
|
||||
let blinded_block: SignedBlindedBeaconBlock<E> = block.into();
|
||||
let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple);
|
||||
|
||||
let response: Result<(), eth2::Error> = tester
|
||||
.client
|
||||
.post_beacon_blinded_blocks_v2(&blinded_block, validation_level)
|
||||
.post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level)
|
||||
.await;
|
||||
assert!(response.is_err());
|
||||
|
||||
@ -1095,7 +1126,7 @@ pub async fn blinded_equivocation_invalid() {
|
||||
assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST));
|
||||
|
||||
assert!(
|
||||
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string())
|
||||
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string())
|
||||
);
|
||||
}
|
||||
|
||||
@ -1128,14 +1159,18 @@ pub async fn blinded_equivocation_consensus_early_equivocation() {
|
||||
let slot_b = slot_a + 1;
|
||||
|
||||
let state_a = tester.harness.get_current_state();
|
||||
let (block_a, state_after_a): (SignedBlindedBeaconBlock<E>, _) = tester
|
||||
let (block_contents_tuple_a, state_after_a) = tester
|
||||
.harness
|
||||
.make_blinded_block(state_a.clone(), slot_b)
|
||||
.await;
|
||||
let (block_b, state_after_b): (SignedBlindedBeaconBlock<E>, _) =
|
||||
let (block_contents_tuple_b, state_after_b) =
|
||||
tester.harness.make_blinded_block(state_a, slot_b).await;
|
||||
|
||||
/* check for `make_blinded_block` curios */
|
||||
let block_contents_a: SignedBlockContents<E, BlindedPayload<E>> = block_contents_tuple_a.into();
|
||||
let block_contents_b: SignedBlockContents<E, BlindedPayload<E>> = block_contents_tuple_b.into();
|
||||
let block_a = block_contents_a.signed_block();
|
||||
let block_b = block_contents_b.signed_block();
|
||||
assert_eq!(block_a.state_root(), state_after_a.tree_hash_root());
|
||||
assert_eq!(block_b.state_root(), state_after_b.tree_hash_root());
|
||||
assert_ne!(block_a.state_root(), block_b.state_root());
|
||||
@ -1143,7 +1178,7 @@ pub async fn blinded_equivocation_consensus_early_equivocation() {
|
||||
/* submit `block_a` as valid */
|
||||
assert!(tester
|
||||
.client
|
||||
.post_beacon_blinded_blocks_v2(&block_a, validation_level)
|
||||
.post_beacon_blinded_blocks_v2(&block_contents_a, validation_level)
|
||||
.await
|
||||
.is_ok());
|
||||
assert!(tester
|
||||
@ -1154,7 +1189,7 @@ pub async fn blinded_equivocation_consensus_early_equivocation() {
|
||||
/* submit `block_b` which should induce equivocation */
|
||||
let response: Result<(), eth2::Error> = tester
|
||||
.client
|
||||
.post_beacon_blinded_blocks_v2(&block_b, validation_level)
|
||||
.post_beacon_blinded_blocks_v2(&block_contents_b, validation_level)
|
||||
.await;
|
||||
assert!(response.is_err());
|
||||
|
||||
@ -1163,7 +1198,7 @@ pub async fn blinded_equivocation_consensus_early_equivocation() {
|
||||
assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST));
|
||||
|
||||
assert!(
|
||||
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Slashable".to_string())
|
||||
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(Slashable)".to_string())
|
||||
);
|
||||
}
|
||||
|
||||
@ -1196,16 +1231,16 @@ pub async fn blinded_equivocation_gossip() {
|
||||
let slot_b = slot_a + 1;
|
||||
|
||||
let state_a = tester.harness.get_current_state();
|
||||
let (block, _): (SignedBeaconBlock<E>, _) = tester
|
||||
let (block_contents_tuple, _) = tester
|
||||
.harness
|
||||
.make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero())
|
||||
.await;
|
||||
|
||||
let blinded_block: SignedBlindedBeaconBlock<E> = block.into();
|
||||
let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple);
|
||||
|
||||
let response: Result<(), eth2::Error> = tester
|
||||
.client
|
||||
.post_beacon_blinded_blocks_v2(&blinded_block, validation_level)
|
||||
.post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level)
|
||||
.await;
|
||||
assert!(response.is_err());
|
||||
|
||||
@ -1251,11 +1286,11 @@ pub async fn blinded_equivocation_consensus_late_equivocation() {
|
||||
let slot_b = slot_a + 1;
|
||||
|
||||
let state_a = tester.harness.get_current_state();
|
||||
let (block_a, state_after_a): (SignedBlindedBeaconBlock<E>, _) = tester
|
||||
let ((block_a, blobs_a), state_after_a): ((SignedBlindedBeaconBlock<E>, _), _) = tester
|
||||
.harness
|
||||
.make_blinded_block(state_a.clone(), slot_b)
|
||||
.await;
|
||||
let (block_b, state_after_b): (SignedBlindedBeaconBlock<E>, _) =
|
||||
let ((block_b, blobs_b), state_after_b): ((SignedBlindedBeaconBlock<E>, _), _) =
|
||||
tester.harness.make_blinded_block(state_a, slot_b).await;
|
||||
|
||||
/* check for `make_blinded_block` curios */
|
||||
@ -1265,16 +1300,16 @@ pub async fn blinded_equivocation_consensus_late_equivocation() {
|
||||
|
||||
let unblinded_block_a = reconstruct_block(
|
||||
tester.harness.chain.clone(),
|
||||
block_a.state_root(),
|
||||
block_a,
|
||||
block_a.canonical_root(),
|
||||
SignedBlockContents::new(block_a, blobs_a),
|
||||
test_logger.clone(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
let unblinded_block_b = reconstruct_block(
|
||||
tester.harness.chain.clone(),
|
||||
block_b.clone().state_root(),
|
||||
block_b.clone(),
|
||||
block_b.canonical_root(),
|
||||
SignedBlockContents::new(block_b.clone(), blobs_b.clone()),
|
||||
test_logger.clone(),
|
||||
)
|
||||
.await
|
||||
@ -1289,15 +1324,21 @@ pub async fn blinded_equivocation_consensus_late_equivocation() {
|
||||
ProvenancedBlock::Builder(b, _) => b,
|
||||
};
|
||||
|
||||
let gossip_block_b = GossipVerifiedBlock::new(inner_block_b, &tester.harness.chain);
|
||||
let gossip_block_b = GossipVerifiedBlock::new(
|
||||
Arc::new(inner_block_b.clone().deconstruct().0),
|
||||
&tester.harness.chain,
|
||||
);
|
||||
assert!(gossip_block_b.is_ok());
|
||||
let gossip_block_a = GossipVerifiedBlock::new(inner_block_a, &tester.harness.chain);
|
||||
let gossip_block_a = GossipVerifiedBlock::new(
|
||||
Arc::new(inner_block_a.clone().deconstruct().0),
|
||||
&tester.harness.chain,
|
||||
);
|
||||
assert!(gossip_block_a.is_err());
|
||||
|
||||
let channel = tokio::sync::mpsc::unbounded_channel();
|
||||
|
||||
let publication_result = publish_blinded_block(
|
||||
block_b,
|
||||
SignedBlockContents::new(block_b, blobs_b),
|
||||
tester.harness.chain,
|
||||
&channel.0,
|
||||
test_logger,
|
||||
@@ -1342,12 +1383,15 @@ pub async fn blinded_equivocation_full_pass() {
    let slot_b = slot_a + 1;

    let state_a = tester.harness.get_current_state();
    let (block, _): (SignedBlindedBeaconBlock<E>, _) =
    let ((block, blobs), _): ((SignedBlindedBeaconBlock<E>, _), _) =
        tester.harness.make_blinded_block(state_a, slot_b).await;

    let response: Result<(), eth2::Error> = tester
        .client
        .post_beacon_blocks_v2(&block, validation_level)
        .post_beacon_blocks_v2(
            &SignedBlockContents::new(block.clone(), blobs),
            validation_level,
        )
        .await;

    assert!(response.is_ok());
@@ -1356,3 +1400,20 @@ pub async fn blinded_equivocation_full_pass() {
        .chain
        .block_is_known_to_fork_choice(&block.canonical_root()));
}

fn into_signed_blinded_block_contents(
    block_contents_tuple: SignedBlockContentsTuple<E, FullPayload<E>>,
) -> SignedBlockContents<E, BlindedPayload<E>> {
    let (block, maybe_blobs) = block_contents_tuple;
    SignedBlockContents::new(block.into(), maybe_blobs.map(into_blinded_blob_sidecars))
}

fn into_blinded_blob_sidecars(
    blobs: SignedSidecarList<E, BlobSidecar<E>>,
) -> SignedSidecarList<E, BlindedBlobSidecar> {
    blobs
        .into_iter()
        .map(|blob| blob.into())
        .collect::<Vec<_>>()
        .into()
}
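The hunks above all apply the same Deneb-era change: block production now returns the block together with its blob sidecars, and the publish endpoints take a `SignedBlockContents` that bundles both (converted via `into_signed_blinded_block_contents` for the blinded variants). A minimal sketch of the full-block pattern, reusing the `tester` harness and `validation_level` from the tests above rather than standing alone:

    // Sketch only: `tester`, `validation_level` and the generic parameters come
    // from the surrounding test setup and are assumed here.
    let state = tester.harness.get_current_state();
    let slot = state.slot() + 1;

    // Post-Deneb, block production yields the block alongside its blob sidecars.
    let ((block, blobs), _) = tester.harness.make_block(state, slot).await;

    // Bundle block and blobs so gossip/consensus validation sees both together.
    let contents = SignedBlockContents::new(block, blobs);
    let response = tester
        .client
        .post_beacon_blocks_v2(&contents, validation_level)
        .await;
    assert!(response.is_ok());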
@ -4,6 +4,7 @@ use beacon_chain::{
|
||||
StateSkipConfig,
|
||||
};
|
||||
use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee};
|
||||
use execution_layer::test_utils::generate_genesis_header;
|
||||
use genesis::{bls_withdrawal_credentials, interop_genesis_state_with_withdrawal_credentials};
|
||||
use http_api::test_utils::*;
|
||||
use std::collections::HashSet;
|
||||
@ -354,12 +355,13 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() {
|
||||
.iter()
|
||||
.map(|keypair| bls_withdrawal_credentials(&keypair.as_ref().unwrap().pk, &spec))
|
||||
.collect::<Vec<_>>();
|
||||
let header = generate_genesis_header(&spec, true);
|
||||
let genesis_state = interop_genesis_state_with_withdrawal_credentials(
|
||||
&validator_keypairs,
|
||||
&withdrawal_credentials,
|
||||
HARNESS_GENESIS_TIME,
|
||||
Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH),
|
||||
None,
|
||||
header,
|
||||
&spec,
|
||||
)
|
||||
.unwrap();
|
||||
|
@@ -391,8 +391,8 @@ pub async fn proposer_boost_re_org_test(
) {
    assert!(head_slot > 0);

    // Test using Capella so that we simulate conditions as similar to mainnet as possible.
    let mut spec = ForkName::Capella.make_genesis_spec(E::default_spec());
    // Test using the latest fork so that we simulate conditions as similar to mainnet as possible.
    let mut spec = ForkName::latest().make_genesis_spec(E::default_spec());
    spec.terminal_total_difficulty = 1.into();

    // Ensure there are enough validators to have `attesters_per_slot`.
@ -551,7 +551,7 @@ pub async fn proposer_boost_re_org_test(
|
||||
|
||||
// Produce block B and process it halfway through the slot.
|
||||
let (block_b, mut state_b) = harness.make_block(state_a.clone(), slot_b).await;
|
||||
let block_b_root = block_b.canonical_root();
|
||||
let block_b_root = block_b.0.canonical_root();
|
||||
|
||||
let obs_time = slot_clock.start_of(slot_b).unwrap() + slot_clock.slot_duration() / 2;
|
||||
slot_clock.set_current_time(obs_time);
|
||||
@ -617,12 +617,13 @@ pub async fn proposer_boost_re_org_test(
|
||||
let randao_reveal = harness
|
||||
.sign_randao_reveal(&state_b, proposer_index, slot_c)
|
||||
.into();
|
||||
let unsigned_block_c = tester
|
||||
let unsigned_block_contents_c = tester
|
||||
.client
|
||||
.get_validator_blocks(slot_c, &randao_reveal, None)
|
||||
.await
|
||||
.unwrap()
|
||||
.data;
|
||||
let (unsigned_block_c, block_c_blobs) = unsigned_block_contents_c.deconstruct();
|
||||
let block_c = harness.sign_beacon_block(unsigned_block_c, &state_b);
|
||||
|
||||
if should_re_org {
|
||||
@ -633,9 +634,13 @@ pub async fn proposer_boost_re_org_test(
|
||||
assert_eq!(block_c.parent_root(), block_b_root);
|
||||
}
|
||||
|
||||
// Sign blobs.
|
||||
let block_c_signed_blobs =
|
||||
block_c_blobs.map(|blobs| harness.sign_blobs(blobs, &state_b, proposer_index));
|
||||
|
||||
// Applying block C should cause it to become head regardless (re-org or continuation).
|
||||
let block_root_c = harness
|
||||
.process_block_result(block_c.clone())
|
||||
.process_block_result((block_c.clone(), block_c_signed_blobs))
|
||||
.await
|
||||
.unwrap()
|
||||
.into();
|
||||
@ -643,8 +648,18 @@ pub async fn proposer_boost_re_org_test(
|
||||
|
||||
// Check the fork choice updates that were sent.
|
||||
let forkchoice_updates = forkchoice_updates.lock();
|
||||
let block_a_exec_hash = block_a.message().execution_payload().unwrap().block_hash();
|
||||
let block_b_exec_hash = block_b.message().execution_payload().unwrap().block_hash();
|
||||
let block_a_exec_hash = block_a
|
||||
.0
|
||||
.message()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
.block_hash();
|
||||
let block_b_exec_hash = block_b
|
||||
.0
|
||||
.message()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
.block_hash();
|
||||
|
||||
let block_c_timestamp = block_c.message().execution_payload().unwrap().timestamp();
|
||||
|
||||
@ -688,6 +703,11 @@ pub async fn proposer_boost_re_org_test(
|
||||
assert_ne!(expected_withdrawals, pre_advance_withdrawals);
|
||||
}
|
||||
|
||||
// Check that the `parent_beacon_block_root` of the payload attributes are correct.
|
||||
if let Ok(parent_beacon_block_root) = payload_attribs.parent_beacon_block_root() {
|
||||
assert_eq!(parent_beacon_block_root, block_c.parent_root());
|
||||
}
|
||||
|
||||
let lookahead = slot_clock
|
||||
.start_of(slot_c)
|
||||
.unwrap()
|
||||
@ -749,7 +769,7 @@ pub async fn fork_choice_before_proposal() {
|
||||
let state_a = harness.get_current_state();
|
||||
let (block_b, state_b) = harness.make_block(state_a.clone(), slot_b).await;
|
||||
let block_root_b = harness
|
||||
.process_block(slot_b, block_b.canonical_root(), block_b)
|
||||
.process_block(slot_b, block_b.0.canonical_root(), block_b)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
@ -764,7 +784,7 @@ pub async fn fork_choice_before_proposal() {
|
||||
|
||||
let (block_c, state_c) = harness.make_block(state_a, slot_c).await;
|
||||
let block_root_c = harness
|
||||
.process_block(slot_c, block_c.canonical_root(), block_c.clone())
|
||||
.process_block(slot_c, block_c.0.canonical_root(), block_c.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
@ -804,7 +824,9 @@ pub async fn fork_choice_before_proposal() {
|
||||
.get_validator_blocks::<E, FullPayload<E>>(slot_d, &randao_reveal, None)
|
||||
.await
|
||||
.unwrap()
|
||||
.data;
|
||||
.data
|
||||
.deconstruct()
|
||||
.0;
|
||||
|
||||
// Head is now B.
|
||||
assert_eq!(
|
||||
|
@ -100,9 +100,10 @@ async fn el_error_on_new_payload() {
|
||||
|
||||
// Make a block.
|
||||
let pre_state = harness.get_current_state();
|
||||
let (block, _) = harness
|
||||
let (block_contents, _) = harness
|
||||
.make_block(pre_state, Slot::new(num_blocks + 1))
|
||||
.await;
|
||||
let (block, blobs) = block_contents;
|
||||
let block_hash = block
|
||||
.message()
|
||||
.body()
|
||||
@ -118,7 +119,9 @@ async fn el_error_on_new_payload() {
|
||||
// Attempt to process the block, which should error.
|
||||
harness.advance_slot();
|
||||
assert!(matches!(
|
||||
harness.process_block_result(block.clone()).await,
|
||||
harness
|
||||
.process_block_result((block.clone(), blobs.clone()))
|
||||
.await,
|
||||
Err(BlockError::ExecutionPayloadError(_))
|
||||
));
|
||||
|
||||
@ -137,7 +140,7 @@ async fn el_error_on_new_payload() {
|
||||
validation_error: None,
|
||||
},
|
||||
);
|
||||
harness.process_block_result(block).await.unwrap();
|
||||
harness.process_block_result((block, blobs)).await.unwrap();
|
||||
|
||||
let api_response = tester.client.get_node_syncing().await.unwrap().data;
|
||||
assert_eq!(api_response.el_offline, Some(false));
|
||||
|
@ -62,8 +62,8 @@ struct ApiTester {
|
||||
harness: Arc<BeaconChainHarness<EphemeralHarnessType<E>>>,
|
||||
chain: Arc<BeaconChain<EphemeralHarnessType<E>>>,
|
||||
client: BeaconNodeHttpClient,
|
||||
next_block: SignedBeaconBlock<E>,
|
||||
reorg_block: SignedBeaconBlock<E>,
|
||||
next_block: SignedBlockContents<E>,
|
||||
reorg_block: SignedBlockContents<E>,
|
||||
attestations: Vec<Attestation<E>>,
|
||||
contribution_and_proofs: Vec<SignedContributionAndProof<E>>,
|
||||
attester_slashing: AttesterSlashing<E>,
|
||||
@ -171,11 +171,13 @@ impl ApiTester {
|
||||
let (next_block, _next_state) = harness
|
||||
.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap())
|
||||
.await;
|
||||
let next_block = SignedBlockContents::from(next_block);
|
||||
|
||||
// `make_block` adds random graffiti, so this will produce an alternate block
|
||||
let (reorg_block, _reorg_state) = harness
|
||||
.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap() + 1)
|
||||
.await;
|
||||
let reorg_block = SignedBlockContents::from(reorg_block);
|
||||
|
||||
let head_state_root = head.beacon_state_root();
|
||||
let attestations = harness
|
||||
@ -310,11 +312,13 @@ impl ApiTester {
|
||||
let (next_block, _next_state) = harness
|
||||
.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap())
|
||||
.await;
|
||||
let next_block = SignedBlockContents::from(next_block);
|
||||
|
||||
// `make_block` adds random graffiti, so this will produce an alternate block
|
||||
let (reorg_block, _reorg_state) = harness
|
||||
.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap())
|
||||
.await;
|
||||
let reorg_block = SignedBlockContents::from(reorg_block);
|
||||
|
||||
let head_state_root = head.beacon_state_root();
|
||||
let attestations = harness
|
||||
@ -1252,9 +1256,9 @@ impl ApiTester {
|
||||
}
|
||||
|
||||
pub async fn test_post_beacon_blocks_valid(mut self) -> Self {
|
||||
let next_block = &self.next_block;
|
||||
let next_block = self.next_block.clone();
|
||||
|
||||
self.client.post_beacon_blocks(next_block).await.unwrap();
|
||||
self.client.post_beacon_blocks(&next_block).await.unwrap();
|
||||
|
||||
assert!(
|
||||
self.network_rx.network_recv.recv().await.is_some(),
|
||||
@ -1293,7 +1297,11 @@ impl ApiTester {
|
||||
.await
|
||||
.0;
|
||||
|
||||
assert!(self.client.post_beacon_blocks(&block).await.is_err());
|
||||
assert!(self
|
||||
.client
|
||||
.post_beacon_blocks(&SignedBlockContents::from(block))
|
||||
.await
|
||||
.is_err());
|
||||
|
||||
assert!(
|
||||
self.network_rx.network_recv.recv().await.is_some(),
|
||||
@ -1316,7 +1324,11 @@ impl ApiTester {
|
||||
.await
|
||||
.0;
|
||||
|
||||
assert!(self.client.post_beacon_blocks_ssz(&block).await.is_err());
|
||||
assert!(self
|
||||
.client
|
||||
.post_beacon_blocks_ssz(&SignedBlockContents::from(block))
|
||||
.await
|
||||
.is_err());
|
||||
|
||||
assert!(
|
||||
self.network_rx.network_recv.recv().await.is_some(),
|
||||
@ -1327,48 +1339,56 @@ impl ApiTester {
|
||||
}
|
||||
|
||||
pub async fn test_post_beacon_blocks_duplicate(self) -> Self {
|
||||
let block = self
|
||||
let block_contents = self
|
||||
.harness
|
||||
.make_block(
|
||||
self.harness.get_current_state(),
|
||||
self.harness.get_current_slot(),
|
||||
)
|
||||
.await
|
||||
.0;
|
||||
.0
|
||||
.into();
|
||||
|
||||
assert!(self.client.post_beacon_blocks(&block).await.is_ok());
|
||||
assert!(self
|
||||
.client
|
||||
.post_beacon_blocks(&block_contents)
|
||||
.await
|
||||
.is_ok());
|
||||
|
||||
let blinded_block = block.clone_as_blinded();
|
||||
let blinded_block_contents = block_contents.clone_as_blinded();
|
||||
|
||||
// Test all the POST methods in sequence, they should all behave the same.
|
||||
let responses = vec![
|
||||
self.client.post_beacon_blocks(&block).await.unwrap_err(),
|
||||
self.client
|
||||
.post_beacon_blocks_v2(&block, None)
|
||||
.post_beacon_blocks(&block_contents)
|
||||
.await
|
||||
.unwrap_err(),
|
||||
self.client
|
||||
.post_beacon_blocks_ssz(&block)
|
||||
.post_beacon_blocks_v2(&block_contents, None)
|
||||
.await
|
||||
.unwrap_err(),
|
||||
self.client
|
||||
.post_beacon_blocks_v2_ssz(&block, None)
|
||||
.post_beacon_blocks_ssz(&block_contents)
|
||||
.await
|
||||
.unwrap_err(),
|
||||
self.client
|
||||
.post_beacon_blinded_blocks(&blinded_block)
|
||||
.post_beacon_blocks_v2_ssz(&block_contents, None)
|
||||
.await
|
||||
.unwrap_err(),
|
||||
self.client
|
||||
.post_beacon_blinded_blocks_v2(&blinded_block, None)
|
||||
.post_beacon_blinded_blocks(&blinded_block_contents)
|
||||
.await
|
||||
.unwrap_err(),
|
||||
self.client
|
||||
.post_beacon_blinded_blocks_ssz(&blinded_block)
|
||||
.post_beacon_blinded_blocks_v2(&blinded_block_contents, None)
|
||||
.await
|
||||
.unwrap_err(),
|
||||
self.client
|
||||
.post_beacon_blinded_blocks_v2_ssz(&blinded_block, None)
|
||||
.post_beacon_blinded_blocks_ssz(&blinded_block_contents)
|
||||
.await
|
||||
.unwrap_err(),
|
||||
self.client
|
||||
.post_beacon_blinded_blocks_v2_ssz(&blinded_block_contents, None)
|
||||
.await
|
||||
.unwrap_err(),
|
||||
];
|
||||
@ -1794,9 +1814,9 @@ impl ApiTester {
|
||||
pub async fn test_get_config_spec(self) -> Self {
|
||||
let result = self
|
||||
.client
|
||||
.get_config_spec::<ConfigAndPresetCapella>()
|
||||
.get_config_spec::<ConfigAndPresetDeneb>()
|
||||
.await
|
||||
.map(|res| ConfigAndPreset::Capella(res.data))
|
||||
.map(|res| ConfigAndPreset::Deneb(res.data))
|
||||
.unwrap();
|
||||
let expected = ConfigAndPreset::from_chain_spec::<E>(&self.chain.spec, None);
|
||||
|
||||
@ -2495,11 +2515,18 @@ impl ApiTester {
|
||||
.get_validator_blocks::<E, FullPayload<E>>(slot, &randao_reveal, None)
|
||||
.await
|
||||
.unwrap()
|
||||
.data;
|
||||
.data
|
||||
.deconstruct()
|
||||
.0;
|
||||
|
||||
let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec);
|
||||
let signed_block_contents =
|
||||
SignedBlockContents::try_from(signed_block.clone()).unwrap();
|
||||
|
||||
self.client.post_beacon_blocks(&signed_block).await.unwrap();
|
||||
self.client
|
||||
.post_beacon_blocks(&signed_block_contents)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(self.chain.head_beacon_block().as_ref(), &signed_block);
|
||||
|
||||
@ -2554,18 +2581,22 @@ impl ApiTester {
|
||||
.unwrap()
|
||||
.expect("block bytes");
|
||||
|
||||
let block =
|
||||
BeaconBlock::<E, FullPayload<E>>::from_ssz_bytes(&block_bytes, &self.chain.spec)
|
||||
.expect("block bytes can be decoded");
|
||||
let block_contents =
|
||||
BlockContents::<E, FullPayload<E>>::from_ssz_bytes(&block_bytes, &self.chain.spec)
|
||||
.expect("block contents bytes can be decoded");
|
||||
|
||||
let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec);
|
||||
let signed_block_contents =
|
||||
block_contents.sign(&sk, &fork, genesis_validators_root, &self.chain.spec);
|
||||
|
||||
self.client
|
||||
.post_beacon_blocks_ssz(&signed_block)
|
||||
.post_beacon_blocks_ssz(&signed_block_contents)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(self.chain.head_beacon_block().as_ref(), &signed_block);
|
||||
assert_eq!(
|
||||
self.chain.head_beacon_block().as_ref(),
|
||||
signed_block_contents.signed_block()
|
||||
);
|
||||
|
||||
self.chain.slot_clock.set_slot(slot.as_u64() + 1);
|
||||
}
|
||||
@ -2587,7 +2618,9 @@ impl ApiTester {
|
||||
)
|
||||
.await
|
||||
.unwrap()
|
||||
.data;
|
||||
.data
|
||||
.deconstruct()
|
||||
.0;
|
||||
assert_eq!(block.slot(), slot);
|
||||
self.chain.slot_clock.set_slot(slot.as_u64() + 1);
|
||||
}
|
||||
@ -2701,14 +2734,16 @@ impl ApiTester {
|
||||
.unwrap()
|
||||
.data;
|
||||
|
||||
let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec);
|
||||
let signed_block_contents =
|
||||
block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec);
|
||||
|
||||
self.client
|
||||
.post_beacon_blinded_blocks(&signed_block)
|
||||
.post_beacon_blinded_blocks(&signed_block_contents)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// This converts the generic `Payload` to a concrete type for comparison.
|
||||
let signed_block = signed_block_contents.deconstruct().0;
|
||||
let head_block = SignedBeaconBlock::from(signed_block.clone());
|
||||
assert_eq!(head_block, signed_block);
|
||||
|
||||
@ -2754,24 +2789,29 @@ impl ApiTester {
|
||||
sk.sign(message).into()
|
||||
};
|
||||
|
||||
let block_bytes = self
|
||||
let block_contents_bytes = self
|
||||
.client
|
||||
.get_validator_blinded_blocks_ssz::<E, Payload>(slot, &randao_reveal, None)
|
||||
.await
|
||||
.unwrap()
|
||||
.expect("block bytes");
|
||||
|
||||
let block = BeaconBlock::<E, Payload>::from_ssz_bytes(&block_bytes, &self.chain.spec)
|
||||
.expect("block bytes can be decoded");
|
||||
let block_contents = BlockContents::<E, Payload>::from_ssz_bytes(
|
||||
&block_contents_bytes,
|
||||
&self.chain.spec,
|
||||
)
|
||||
.expect("block contents bytes can be decoded");
|
||||
|
||||
let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec);
|
||||
let signed_block_contents =
|
||||
block_contents.sign(&sk, &fork, genesis_validators_root, &self.chain.spec);
|
||||
|
||||
self.client
|
||||
.post_beacon_blinded_blocks_ssz(&signed_block)
|
||||
.post_beacon_blinded_blocks_ssz(&signed_block_contents)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// This converts the generic `Payload` to a concrete type for comparison.
|
||||
let signed_block = signed_block_contents.deconstruct().0;
|
||||
let head_block = SignedBeaconBlock::from(signed_block.clone());
|
||||
assert_eq!(head_block, signed_block);
|
||||
|
||||
@ -2785,7 +2825,7 @@ impl ApiTester {
|
||||
for _ in 0..E::slots_per_epoch() {
|
||||
let slot = self.chain.slot().unwrap();
|
||||
|
||||
let block = self
|
||||
let block_contents = self
|
||||
.client
|
||||
.get_validator_blinded_blocks_modular::<E, Payload>(
|
||||
slot,
|
||||
@ -2796,7 +2836,7 @@ impl ApiTester {
|
||||
.await
|
||||
.unwrap()
|
||||
.data;
|
||||
assert_eq!(block.slot(), slot);
|
||||
assert_eq!(block_contents.block().slot(), slot);
|
||||
self.chain.slot_clock.set_slot(slot.as_u64() + 1);
|
||||
}
|
||||
|
||||
@ -3332,6 +3372,7 @@ impl ApiTester {
|
||||
.await
|
||||
.unwrap()
|
||||
.data
|
||||
.block()
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
@ -3372,6 +3413,7 @@ impl ApiTester {
|
||||
.await
|
||||
.unwrap()
|
||||
.data
|
||||
.block()
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
@ -3414,6 +3456,7 @@ impl ApiTester {
|
||||
.await
|
||||
.unwrap()
|
||||
.data
|
||||
.block()
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
@ -3462,6 +3505,7 @@ impl ApiTester {
|
||||
.await
|
||||
.unwrap()
|
||||
.data
|
||||
.block()
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
@ -3509,6 +3553,7 @@ impl ApiTester {
|
||||
.await
|
||||
.unwrap()
|
||||
.data
|
||||
.block()
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
@ -3555,6 +3600,7 @@ impl ApiTester {
|
||||
.await
|
||||
.unwrap()
|
||||
.data
|
||||
.block()
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
@ -3600,6 +3646,7 @@ impl ApiTester {
|
||||
.await
|
||||
.unwrap()
|
||||
.data
|
||||
.block()
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
@ -3632,6 +3679,7 @@ impl ApiTester {
|
||||
.await
|
||||
.unwrap()
|
||||
.data
|
||||
.block()
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
@ -3669,6 +3717,7 @@ impl ApiTester {
|
||||
.await
|
||||
.unwrap()
|
||||
.data
|
||||
.block()
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
@ -3712,6 +3761,7 @@ impl ApiTester {
|
||||
.await
|
||||
.unwrap()
|
||||
.data
|
||||
.block()
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
@ -3741,6 +3791,7 @@ impl ApiTester {
|
||||
.await
|
||||
.unwrap()
|
||||
.data
|
||||
.block()
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
@ -3790,6 +3841,7 @@ impl ApiTester {
|
||||
.await
|
||||
.unwrap()
|
||||
.data
|
||||
.block()
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
@ -3829,6 +3881,7 @@ impl ApiTester {
|
||||
.await
|
||||
.unwrap()
|
||||
.data
|
||||
.block()
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
@ -3872,6 +3925,7 @@ impl ApiTester {
|
||||
.await
|
||||
.unwrap()
|
||||
.data
|
||||
.block()
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
@ -3912,6 +3966,7 @@ impl ApiTester {
|
||||
.await
|
||||
.unwrap()
|
||||
.data
|
||||
.block()
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
@ -3948,6 +4003,7 @@ impl ApiTester {
|
||||
.await
|
||||
.unwrap()
|
||||
.data
|
||||
.block()
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
@ -3984,6 +4040,7 @@ impl ApiTester {
|
||||
.await
|
||||
.unwrap()
|
||||
.data
|
||||
.block()
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
@ -4020,6 +4077,7 @@ impl ApiTester {
|
||||
.await
|
||||
.unwrap()
|
||||
.data
|
||||
.block()
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
@ -4046,19 +4104,6 @@ impl ApiTester {
|
||||
)));
|
||||
|
||||
let slot = self.chain.slot().unwrap();
|
||||
let propose_state = self
|
||||
.harness
|
||||
.chain
|
||||
.state_at_slot(slot, StateSkipConfig::WithoutStateRoots)
|
||||
.unwrap();
|
||||
let withdrawals = get_expected_withdrawals(&propose_state, &self.chain.spec).unwrap();
|
||||
let withdrawals_root = withdrawals.tree_hash_root();
|
||||
// Set withdrawals root for builder
|
||||
self.mock_builder
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.add_operation(Operation::WithdrawalsRoot(withdrawals_root));
|
||||
|
||||
let epoch = self.chain.epoch().unwrap();
|
||||
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
@ -4068,6 +4113,7 @@ impl ApiTester {
|
||||
.await
|
||||
.unwrap()
|
||||
.data
|
||||
.block()
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
@ -4084,6 +4130,42 @@ impl ApiTester {
|
||||
self
|
||||
}
|
||||
|
||||
pub async fn test_builder_works_post_deneb(self) -> Self {
|
||||
// Ensure builder payload is chosen
|
||||
self.mock_builder
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.add_operation(Operation::Value(Uint256::from(
|
||||
DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1,
|
||||
)));
|
||||
|
||||
let slot = self.chain.slot().unwrap();
|
||||
let epoch = self.chain.epoch().unwrap();
|
||||
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let block_contents = self
|
||||
.client
|
||||
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
|
||||
.await
|
||||
.unwrap()
|
||||
.data;
|
||||
let (block, maybe_sidecars) = block_contents.deconstruct();
|
||||
|
||||
// Response should contain blob sidecars
|
||||
assert!(maybe_sidecars.is_some());
|
||||
|
||||
// The builder's payload should've been chosen, so this cache should not be populated
|
||||
let payload: BlindedPayload<E> = block.body().execution_payload().unwrap().into();
|
||||
assert!(self
|
||||
.chain
|
||||
.execution_layer
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.get_payload_by_root(&payload.tree_hash_root())
|
||||
.is_none());
|
||||
self
|
||||
}
|
||||
|
||||
pub async fn test_lighthouse_rejects_invalid_withdrawals_root(self) -> Self {
|
||||
// Ensure builder payload *would be* chosen
|
||||
self.mock_builder
|
||||
@ -4108,6 +4190,7 @@ impl ApiTester {
|
||||
.await
|
||||
.unwrap()
|
||||
.data
|
||||
.block()
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
@ -4363,12 +4446,12 @@ impl ApiTester {
|
||||
|
||||
// Submit the next block, which is on an epoch boundary, so this will produce a finalized
|
||||
// checkpoint event, head event, and block event
|
||||
let block_root = self.next_block.canonical_root();
|
||||
let block_root = self.next_block.signed_block().canonical_root();
|
||||
|
||||
// current_duty_dependent_root = block root because this is the first slot of the epoch
|
||||
let current_duty_dependent_root = self.chain.head_beacon_block_root();
|
||||
let current_slot = self.chain.slot().unwrap();
|
||||
let next_slot = self.next_block.slot();
|
||||
let next_slot = self.next_block.signed_block().slot();
|
||||
let finalization_distance = E::slots_per_epoch() * 2;
|
||||
|
||||
let expected_block = EventKind::Block(SseBlock {
|
||||
@ -4380,7 +4463,7 @@ impl ApiTester {
|
||||
let expected_head = EventKind::Head(SseHead {
|
||||
block: block_root,
|
||||
slot: next_slot,
|
||||
state: self.next_block.state_root(),
|
||||
state: self.next_block.signed_block().state_root(),
|
||||
current_duty_dependent_root,
|
||||
previous_duty_dependent_root: self
|
||||
.chain
|
||||
@ -4429,13 +4512,17 @@ impl ApiTester {
|
||||
.unwrap();
|
||||
|
||||
let expected_reorg = EventKind::ChainReorg(SseChainReorg {
|
||||
slot: self.reorg_block.slot(),
|
||||
slot: self.reorg_block.signed_block().slot(),
|
||||
depth: 1,
|
||||
old_head_block: self.next_block.canonical_root(),
|
||||
old_head_state: self.next_block.state_root(),
|
||||
new_head_block: self.reorg_block.canonical_root(),
|
||||
new_head_state: self.reorg_block.state_root(),
|
||||
epoch: self.next_block.slot().epoch(E::slots_per_epoch()),
|
||||
old_head_block: self.next_block.signed_block().canonical_root(),
|
||||
old_head_state: self.next_block.signed_block().state_root(),
|
||||
new_head_block: self.reorg_block.signed_block().canonical_root(),
|
||||
new_head_state: self.reorg_block.signed_block().state_root(),
|
||||
epoch: self
|
||||
.next_block
|
||||
.signed_block()
|
||||
.slot()
|
||||
.epoch(E::slots_per_epoch()),
|
||||
execution_optimistic: false,
|
||||
});
|
||||
|
||||
@ -4565,8 +4652,8 @@ impl ApiTester {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let block_root = self.next_block.canonical_root();
|
||||
let next_slot = self.next_block.slot();
|
||||
let block_root = self.next_block.signed_block().canonical_root();
|
||||
let next_slot = self.next_block.signed_block().slot();
|
||||
|
||||
let expected_block = EventKind::Block(SseBlock {
|
||||
block: block_root,
|
||||
@ -4577,7 +4664,7 @@ impl ApiTester {
|
||||
let expected_head = EventKind::Head(SseHead {
|
||||
block: block_root,
|
||||
slot: next_slot,
|
||||
state: self.next_block.state_root(),
|
||||
state: self.next_block.signed_block().state_root(),
|
||||
current_duty_dependent_root: self.chain.genesis_block_root,
|
||||
previous_duty_dependent_root: self.chain.genesis_block_root,
|
||||
epoch_transition: false,
|
||||
@@ -5280,6 +5367,26 @@ async fn builder_works_post_capella() {
        .await;
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn builder_works_post_deneb() {
    let mut config = ApiTesterConfig {
        builder_threshold: Some(0),
        retain_historic_states: false,
        spec: E::default_spec(),
    };
    config.spec.altair_fork_epoch = Some(Epoch::new(0));
    config.spec.bellatrix_fork_epoch = Some(Epoch::new(0));
    config.spec.capella_fork_epoch = Some(Epoch::new(0));
    config.spec.deneb_fork_epoch = Some(Epoch::new(0));

    ApiTester::new_from_config(config)
        .await
        .test_post_validator_register_validator()
        .await
        .test_builder_works_post_deneb()
        .await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn post_validator_liveness_epoch() {
|
||||
ApiTester::new()
|
||||
|
@ -10,7 +10,6 @@ unsigned-varint = { version = "0.6", features = ["codec"] }
|
||||
ssz_types = { workspace = true }
|
||||
types = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_derive = "1"
|
||||
ethereum_ssz = { workspace = true }
|
||||
ethereum_ssz_derive = { workspace = true }
|
||||
tree_hash = { workspace = true }
|
||||
|
@ -8,7 +8,7 @@ use directory::{
|
||||
use discv5::{Discv5Config, Discv5ConfigBuilder};
|
||||
use libp2p::gossipsub;
|
||||
use libp2p::Multiaddr;
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::net::{Ipv4Addr, Ipv6Addr};
|
||||
use std::num::NonZeroU16;
|
||||
@ -468,7 +468,7 @@ pub fn gossipsub_config(
|
||||
) -> Vec<u8> {
|
||||
let topic_bytes = message.topic.as_str().as_bytes();
|
||||
match fork_context.current_fork() {
|
||||
ForkName::Altair | ForkName::Merge | ForkName::Capella => {
|
||||
ForkName::Altair | ForkName::Merge | ForkName::Capella | ForkName::Deneb => {
|
||||
let topic_len_bytes = topic_bytes.len().to_le_bytes();
|
||||
let mut vec = Vec::with_capacity(
|
||||
prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(),
|
||||
|
@ -517,6 +517,11 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
    RPCError::ErrorResponse(code, _) => match code {
        RPCResponseErrorCode::Unknown => PeerAction::HighToleranceError,
        RPCResponseErrorCode::ResourceUnavailable => {
            // Don't ban on this because we want to retry with a block by root request.
            if matches!(protocol, Protocol::BlobsByRoot) {
                return;
            }

            // NOTE: This error only makes sense for the `BlocksByRange` and `BlocksByRoot`
            // protocols.
            //
@@ -545,11 +550,14 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
            Protocol::Ping => PeerAction::MidToleranceError,
            Protocol::BlocksByRange => PeerAction::MidToleranceError,
            Protocol::BlocksByRoot => PeerAction::MidToleranceError,
            Protocol::BlobsByRange => PeerAction::MidToleranceError,
            Protocol::LightClientBootstrap => PeerAction::LowToleranceError,
            Protocol::BlobsByRoot => PeerAction::MidToleranceError,
            Protocol::Goodbye => PeerAction::LowToleranceError,
            Protocol::MetaData => PeerAction::LowToleranceError,
            Protocol::Status => PeerAction::LowToleranceError,
        },
        RPCResponseErrorCode::BlobsNotFoundForBlock => PeerAction::LowToleranceError,
    },
    RPCError::SSZDecodeError(_) => PeerAction::Fatal,
    RPCError::UnsupportedProtocol => {
@@ -561,6 +569,8 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
        Protocol::Ping => PeerAction::Fatal,
        Protocol::BlocksByRange => return,
        Protocol::BlocksByRoot => return,
        Protocol::BlobsByRange => return,
        Protocol::BlobsByRoot => return,
        Protocol::Goodbye => return,
        Protocol::LightClientBootstrap => return,
        Protocol::MetaData => PeerAction::Fatal,
@@ -577,6 +587,8 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
        Protocol::Ping => PeerAction::LowToleranceError,
        Protocol::BlocksByRange => PeerAction::MidToleranceError,
        Protocol::BlocksByRoot => PeerAction::MidToleranceError,
        Protocol::BlobsByRange => PeerAction::MidToleranceError,
        Protocol::BlobsByRoot => PeerAction::MidToleranceError,
        Protocol::LightClientBootstrap => return,
        Protocol::Goodbye => return,
        Protocol::MetaData => return,
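The peer-manager hunk above wires the new `BlobsByRange`/`BlobsByRoot` protocols into RPC error scoring so they are penalised like their block counterparts, while `ResourceUnavailable` on `BlobsByRoot` is deliberately left unpunished so the data can be re-requested by root. A standalone illustration of the mid-tolerance arms with toy enums (not the real lighthouse_network types):

    // Toy model of the scoring arms added above; the real match lives in the
    // peer manager and covers many more error codes and protocols.
    #[derive(Debug, PartialEq)]
    enum Protocol {
        BlocksByRange,
        BlocksByRoot,
        BlobsByRange,
        BlobsByRoot,
        Goodbye,
    }

    #[derive(Debug, PartialEq)]
    enum PeerAction {
        MidToleranceError,
    }

    // Blob requests are scored exactly like block requests; a failing Goodbye
    // is not worth penalising at all (the peer is leaving anyway).
    fn error_penalty(protocol: &Protocol) -> Option<PeerAction> {
        match protocol {
            Protocol::BlocksByRange
            | Protocol::BlocksByRoot
            | Protocol::BlobsByRange
            | Protocol::BlobsByRoot => Some(PeerAction::MidToleranceError),
            Protocol::Goodbye => None,
        }
    }

    fn main() {
        assert_eq!(
            error_penalty(&Protocol::BlobsByRoot),
            Some(PeerAction::MidToleranceError)
        );
        assert_eq!(error_penalty(&Protocol::Goodbye), None);
    }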
@@ -194,16 +194,19 @@ mod tests {
        let altair_fork_epoch = Epoch::new(1);
        let merge_fork_epoch = Epoch::new(2);
        let capella_fork_epoch = Epoch::new(3);
        let deneb_fork_epoch = Epoch::new(4);

        chain_spec.altair_fork_epoch = Some(altair_fork_epoch);
        chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch);
        chain_spec.capella_fork_epoch = Some(capella_fork_epoch);
        chain_spec.deneb_fork_epoch = Some(deneb_fork_epoch);

        let current_slot = match fork_name {
            ForkName::Base => Slot::new(0),
            ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()),
            ForkName::Merge => merge_fork_epoch.start_slot(Spec::slots_per_epoch()),
            ForkName::Capella => capella_fork_epoch.start_slot(Spec::slots_per_epoch()),
            ForkName::Deneb => deneb_fork_epoch.start_slot(Spec::slots_per_epoch()),
        };
        ForkContext::new::<Spec>(current_slot, Hash256::zero(), &chain_spec)
    }
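The fixture above schedules Altair, Merge, Capella and Deneb at epochs 1-4 and picks each fork's first slot via `start_slot`. A quick standalone check of that arithmetic (plain integers rather than the Lighthouse `Epoch`/`Slot` types, and assuming the usual 32 slots per epoch):

    /// First slot of a fork activated at `fork_epoch`.
    fn fork_start_slot(fork_epoch: u64, slots_per_epoch: u64) -> u64 {
        fork_epoch * slots_per_epoch
    }

    fn main() {
        // Mirrors the fixture above: altair@1, merge@2, capella@3, deneb@4.
        assert_eq!(fork_start_slot(1, 32), 32);
        assert_eq!(fork_start_slot(3, 32), 96);
        assert_eq!(fork_start_slot(4, 32), 128);
    }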
@ -15,10 +15,11 @@ use std::io::{Read, Write};
|
||||
use std::marker::PhantomData;
|
||||
use std::sync::Arc;
|
||||
use tokio_util::codec::{Decoder, Encoder};
|
||||
use types::light_client_bootstrap::LightClientBootstrap;
|
||||
use types::{light_client_bootstrap::LightClientBootstrap, BlobSidecar};
|
||||
use types::{
|
||||
EthSpec, ForkContext, ForkName, Hash256, SignedBeaconBlock, SignedBeaconBlockAltair,
|
||||
SignedBeaconBlockBase, SignedBeaconBlockCapella, SignedBeaconBlockMerge,
|
||||
SignedBeaconBlockBase, SignedBeaconBlockCapella, SignedBeaconBlockDeneb,
|
||||
SignedBeaconBlockMerge,
|
||||
};
|
||||
use unsigned_varint::codec::Uvi;
|
||||
|
||||
@ -71,6 +72,8 @@ impl<TSpec: EthSpec> Encoder<RPCCodedResponse<TSpec>> for SSZSnappyInboundCodec<
|
||||
RPCResponse::Status(res) => res.as_ssz_bytes(),
|
||||
RPCResponse::BlocksByRange(res) => res.as_ssz_bytes(),
|
||||
RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(),
|
||||
RPCResponse::BlobsByRange(res) => res.as_ssz_bytes(),
|
||||
RPCResponse::BlobsByRoot(res) => res.as_ssz_bytes(),
|
||||
RPCResponse::LightClientBootstrap(res) => res.as_ssz_bytes(),
|
||||
RPCResponse::Pong(res) => res.data.as_ssz_bytes(),
|
||||
RPCResponse::MetaData(res) =>
|
||||
@ -222,6 +225,8 @@ impl<TSpec: EthSpec> Encoder<OutboundRequest<TSpec>> for SSZSnappyOutboundCodec<
|
||||
BlocksByRootRequest::V1(req) => req.block_roots.as_ssz_bytes(),
|
||||
BlocksByRootRequest::V2(req) => req.block_roots.as_ssz_bytes(),
|
||||
},
|
||||
OutboundRequest::BlobsByRange(req) => req.as_ssz_bytes(),
|
||||
OutboundRequest::BlobsByRoot(req) => req.blob_ids.as_ssz_bytes(),
|
||||
OutboundRequest::Ping(req) => req.as_ssz_bytes(),
|
||||
OutboundRequest::MetaData(_) => return Ok(()), // no metadata to encode
|
||||
};
|
||||
@ -284,8 +289,8 @@ impl<TSpec: EthSpec> Decoder for SSZSnappyOutboundCodec<TSpec> {
|
||||
.rpc_response_limits::<TSpec>(&self.fork_context);
|
||||
if ssz_limits.is_out_of_bounds(length, self.max_packet_size) {
|
||||
return Err(RPCError::InvalidData(format!(
|
||||
"RPC response length is out of bounds, length {}",
|
||||
length
|
||||
"RPC response length is out of bounds, length {}, max {}, min {}",
|
||||
length, ssz_limits.max, ssz_limits.min
|
||||
)));
|
||||
}
|
||||
// Calculate worst case compression length for given uncompressed length
|
||||
@ -396,22 +401,24 @@ fn context_bytes<T: EthSpec>(
|
||||
return match **ref_box_block {
|
||||
// NOTE: If you are adding another fork type here, be sure to modify the
|
||||
// `fork_context.to_context_bytes()` function to support it as well!
|
||||
SignedBeaconBlock::Deneb { .. } => {
|
||||
fork_context.to_context_bytes(ForkName::Deneb)
|
||||
}
|
||||
SignedBeaconBlock::Capella { .. } => {
|
||||
// Capella context being `None` implies that "merge never happened".
|
||||
fork_context.to_context_bytes(ForkName::Capella)
|
||||
}
|
||||
SignedBeaconBlock::Merge { .. } => {
|
||||
// Merge context being `None` implies that "merge never happened".
|
||||
fork_context.to_context_bytes(ForkName::Merge)
|
||||
}
|
||||
SignedBeaconBlock::Altair { .. } => {
|
||||
// Altair context being `None` implies that "altair never happened".
|
||||
// This code should be unreachable if altair is disabled since only Version::V1 would be valid in that case.
|
||||
fork_context.to_context_bytes(ForkName::Altair)
|
||||
}
|
||||
SignedBeaconBlock::Base { .. } => Some(fork_context.genesis_context_bytes()),
|
||||
};
|
||||
}
|
||||
if let RPCResponse::BlobsByRange(_) | RPCResponse::BlobsByRoot(_) = rpc_variant {
return fork_context.to_context_bytes(ForkName::Deneb);
}
}
}
None
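Editor note: blob responses above always resolve to the Deneb context bytes, since blob sidecars only exist from Deneb onward. The sketch below illustrates the context-bytes idea (each multi-fork response chunk is prefixed with a 4-byte fork digest so the reader knows which SSZ type to decode); the enum and digest values are stand-ins, and real digests are derived from the fork version and genesis data, which this hunk does not show.

use std::collections::HashMap;

// Illustrative only: map a fork name to its 4-byte context digest and look it up.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum ForkName { Capella, Deneb }

fn to_context_bytes(map: &HashMap<ForkName, [u8; 4]>, fork: ForkName) -> Option<[u8; 4]> {
    map.get(&fork).copied()
}

fn main() {
    let mut digests = HashMap::new();
    digests.insert(ForkName::Capella, [0xca, 0x9e, 0x11, 0xa0]); // placeholder digest
    digests.insert(ForkName::Deneb, [0xde, 0xad, 0xbe, 0xef]); // placeholder digest
    // Blob responses always resolve to the Deneb digest, as in the hunk above.
    assert_eq!(to_context_bytes(&digests, ForkName::Deneb), Some([0xde, 0xad, 0xbe, 0xef]));
}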
@ -472,6 +479,14 @@ fn handle_rpc_request<T: EthSpec>(
|
||||
block_roots: VariableList::from_ssz_bytes(decoded_buffer)?,
|
||||
}),
|
||||
))),
|
||||
SupportedProtocol::BlobsByRangeV1 => Ok(Some(InboundRequest::BlobsByRange(
|
||||
BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?,
|
||||
))),
|
||||
SupportedProtocol::BlobsByRootV1 => {
|
||||
Ok(Some(InboundRequest::BlobsByRoot(BlobsByRootRequest {
|
||||
blob_ids: VariableList::from_ssz_bytes(decoded_buffer)?,
|
||||
})))
|
||||
}
|
||||
SupportedProtocol::PingV1 => Ok(Some(InboundRequest::Ping(Ping {
|
||||
data: u64::from_ssz_bytes(decoded_buffer)?,
|
||||
}))),
|
||||
@ -526,6 +541,38 @@ fn handle_rpc_response<T: EthSpec>(
|
||||
SupportedProtocol::BlocksByRootV1 => Ok(Some(RPCResponse::BlocksByRoot(Arc::new(
|
||||
SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?),
|
||||
)))),
|
||||
SupportedProtocol::BlobsByRangeV1 => match fork_name {
|
||||
Some(ForkName::Deneb) => Ok(Some(RPCResponse::BlobsByRange(Arc::new(
|
||||
BlobSidecar::from_ssz_bytes(decoded_buffer)?,
|
||||
)))),
|
||||
Some(_) => Err(RPCError::ErrorResponse(
|
||||
RPCResponseErrorCode::InvalidRequest,
|
||||
"Invalid fork name for blobs by range".to_string(),
|
||||
)),
|
||||
None => Err(RPCError::ErrorResponse(
|
||||
RPCResponseErrorCode::InvalidRequest,
|
||||
format!(
|
||||
"No context bytes provided for {:?} response",
|
||||
versioned_protocol
|
||||
),
|
||||
)),
|
||||
},
|
||||
SupportedProtocol::BlobsByRootV1 => match fork_name {
|
||||
Some(ForkName::Deneb) => Ok(Some(RPCResponse::BlobsByRoot(Arc::new(
|
||||
BlobSidecar::from_ssz_bytes(decoded_buffer)?,
|
||||
)))),
|
||||
Some(_) => Err(RPCError::ErrorResponse(
|
||||
RPCResponseErrorCode::InvalidRequest,
|
||||
"Invalid fork name for blobs by root".to_string(),
|
||||
)),
|
||||
None => Err(RPCError::ErrorResponse(
|
||||
RPCResponseErrorCode::InvalidRequest,
|
||||
format!(
|
||||
"No context bytes provided for {:?} response",
|
||||
versioned_protocol
|
||||
),
|
||||
)),
|
||||
},
|
||||
SupportedProtocol::PingV1 => Ok(Some(RPCResponse::Pong(Ping {
|
||||
data: u64::from_ssz_bytes(decoded_buffer)?,
|
||||
}))),
|
||||
@ -555,6 +602,9 @@ fn handle_rpc_response<T: EthSpec>(
|
||||
decoded_buffer,
|
||||
)?),
|
||||
)))),
|
||||
Some(ForkName::Deneb) => Ok(Some(RPCResponse::BlocksByRange(Arc::new(
|
||||
SignedBeaconBlock::Deneb(SignedBeaconBlockDeneb::from_ssz_bytes(decoded_buffer)?),
|
||||
)))),
|
||||
None => Err(RPCError::ErrorResponse(
|
||||
RPCResponseErrorCode::InvalidRequest,
|
||||
format!(
|
||||
@ -578,6 +628,9 @@ fn handle_rpc_response<T: EthSpec>(
|
||||
decoded_buffer,
|
||||
)?),
|
||||
)))),
|
||||
Some(ForkName::Deneb) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new(
|
||||
SignedBeaconBlock::Deneb(SignedBeaconBlockDeneb::from_ssz_bytes(decoded_buffer)?),
|
||||
)))),
|
||||
None => Err(RPCError::ErrorResponse(
|
||||
RPCResponseErrorCode::InvalidRequest,
|
||||
format!(
|
||||
@ -598,9 +651,13 @@ fn context_bytes_to_fork_name(
|
||||
.from_context_bytes(context_bytes)
|
||||
.cloned()
|
||||
.ok_or_else(|| {
|
||||
let encoded = hex::encode(context_bytes);
|
||||
RPCError::ErrorResponse(
|
||||
RPCResponseErrorCode::InvalidRequest,
|
||||
"Context bytes does not correspond to a valid fork".to_string(),
|
||||
format!(
|
||||
"Context bytes {} do not correspond to a valid fork",
|
||||
encoded
|
||||
),
|
||||
)
|
||||
})
|
||||
}
|
||||
@ -615,8 +672,9 @@ mod tests {
|
||||
};
|
||||
use std::sync::Arc;
|
||||
use types::{
|
||||
BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, ChainSpec, EmptyBlock,
|
||||
Epoch, ForkContext, FullPayload, Hash256, Signature, SignedBeaconBlock, Slot,
|
||||
blob_sidecar::BlobIdentifier, BeaconBlock, BeaconBlockAltair, BeaconBlockBase,
|
||||
BeaconBlockMerge, ChainSpec, EmptyBlock, Epoch, ForkContext, FullPayload, Hash256,
|
||||
Signature, SignedBeaconBlock, Slot,
|
||||
};
|
||||
|
||||
use snap::write::FrameEncoder;
|
||||
@ -630,16 +688,19 @@ mod tests {
|
||||
let altair_fork_epoch = Epoch::new(1);
|
||||
let merge_fork_epoch = Epoch::new(2);
|
||||
let capella_fork_epoch = Epoch::new(3);
|
||||
let deneb_fork_epoch = Epoch::new(4);
|
||||
|
||||
chain_spec.altair_fork_epoch = Some(altair_fork_epoch);
|
||||
chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch);
|
||||
chain_spec.capella_fork_epoch = Some(capella_fork_epoch);
|
||||
chain_spec.deneb_fork_epoch = Some(deneb_fork_epoch);
|
||||
|
||||
let current_slot = match fork_name {
|
||||
ForkName::Base => Slot::new(0),
|
||||
ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()),
|
||||
ForkName::Merge => merge_fork_epoch.start_slot(Spec::slots_per_epoch()),
|
||||
ForkName::Capella => capella_fork_epoch.start_slot(Spec::slots_per_epoch()),
|
||||
ForkName::Deneb => deneb_fork_epoch.start_slot(Spec::slots_per_epoch()),
|
||||
};
|
||||
ForkContext::new::<Spec>(current_slot, Hash256::zero(), &chain_spec)
|
||||
}
|
||||
@ -657,6 +718,10 @@ mod tests {
|
||||
SignedBeaconBlock::from_block(full_block, Signature::empty())
|
||||
}
|
||||
|
||||
fn empty_blob_sidecar() -> Arc<BlobSidecar<Spec>> {
|
||||
Arc::new(BlobSidecar::empty())
|
||||
}
|
||||
|
||||
/// Merge block with length < max_rpc_size.
|
||||
fn merge_block_small(fork_context: &ForkContext, spec: &ChainSpec) -> SignedBeaconBlock<Spec> {
|
||||
let mut block: BeaconBlockMerge<_, FullPayload<Spec>> =
|
||||
@ -705,6 +770,13 @@ mod tests {
|
||||
OldBlocksByRangeRequest::new(0, 10, 1)
|
||||
}
|
||||
|
||||
fn blbrange_request() -> BlobsByRangeRequest {
|
||||
BlobsByRangeRequest {
|
||||
start_slot: 0,
|
||||
count: 10,
|
||||
}
|
||||
}
|
||||
|
||||
fn bbroot_request_v1() -> BlocksByRootRequest {
|
||||
BlocksByRootRequest::new_v1(vec![Hash256::zero()].into())
|
||||
}
|
||||
@ -713,6 +785,15 @@ mod tests {
|
||||
BlocksByRootRequest::new(vec![Hash256::zero()].into())
|
||||
}
|
||||
|
||||
fn blbroot_request() -> BlobsByRootRequest {
|
||||
BlobsByRootRequest {
|
||||
blob_ids: VariableList::from(vec![BlobIdentifier {
|
||||
block_root: Hash256::zero(),
|
||||
index: 0,
|
||||
}]),
|
||||
}
|
||||
}
|
||||
|
||||
fn ping_message() -> Ping {
|
||||
Ping { data: 1 }
|
||||
}
|
||||
@ -846,6 +927,12 @@ mod tests {
|
||||
OutboundRequest::BlocksByRoot(bbroot) => {
|
||||
assert_eq!(decoded, InboundRequest::BlocksByRoot(bbroot))
|
||||
}
|
||||
OutboundRequest::BlobsByRange(blbrange) => {
|
||||
assert_eq!(decoded, InboundRequest::BlobsByRange(blbrange))
|
||||
}
|
||||
OutboundRequest::BlobsByRoot(bbroot) => {
|
||||
assert_eq!(decoded, InboundRequest::BlobsByRoot(bbroot))
|
||||
}
|
||||
OutboundRequest::Ping(ping) => {
|
||||
assert_eq!(decoded, InboundRequest::Ping(ping))
|
||||
}
|
||||
@ -952,6 +1039,26 @@ mod tests {
|
||||
),
|
||||
Ok(Some(RPCResponse::MetaData(metadata()))),
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
encode_then_decode_response(
|
||||
SupportedProtocol::BlobsByRangeV1,
|
||||
RPCCodedResponse::Success(RPCResponse::BlobsByRange(empty_blob_sidecar())),
|
||||
ForkName::Deneb,
|
||||
&chain_spec
|
||||
),
|
||||
Ok(Some(RPCResponse::BlobsByRange(empty_blob_sidecar()))),
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
encode_then_decode_response(
|
||||
SupportedProtocol::BlobsByRootV1,
|
||||
RPCCodedResponse::Success(RPCResponse::BlobsByRoot(empty_blob_sidecar())),
|
||||
ForkName::Deneb,
|
||||
&chain_spec
|
||||
),
|
||||
Ok(Some(RPCResponse::BlobsByRoot(empty_blob_sidecar()))),
|
||||
);
|
||||
}
|
||||
|
||||
// Test RPCResponse encoding/decoding for V1 messages
|
||||
@ -1297,6 +1404,8 @@ mod tests {
|
||||
OutboundRequest::BlocksByRoot(bbroot_request_v1()),
|
||||
OutboundRequest::BlocksByRoot(bbroot_request_v2()),
|
||||
OutboundRequest::MetaData(MetadataRequest::new_v1()),
|
||||
OutboundRequest::BlobsByRange(blbrange_request()),
|
||||
OutboundRequest::BlobsByRoot(blbroot_request()),
|
||||
OutboundRequest::MetaData(MetadataRequest::new_v2()),
|
||||
];
|
||||
|
||||
|
@ -6,7 +6,7 @@ use std::{
|
||||
|
||||
use super::{methods, rate_limiter::Quota, Protocol};
|
||||
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Auxiliary struct to aid on configuration parsing.
|
||||
///
|
||||
@ -89,6 +89,8 @@ pub struct RateLimiterConfig {
|
||||
pub(super) goodbye_quota: Quota,
|
||||
pub(super) blocks_by_range_quota: Quota,
|
||||
pub(super) blocks_by_root_quota: Quota,
|
||||
pub(super) blobs_by_range_quota: Quota,
|
||||
pub(super) blobs_by_root_quota: Quota,
|
||||
pub(super) light_client_bootstrap_quota: Quota,
|
||||
}
|
||||
|
||||
@ -100,6 +102,9 @@ impl RateLimiterConfig {
pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota =
Quota::n_every(methods::MAX_REQUEST_BLOCKS, 10);
pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10);
pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota =
Quota::n_every(methods::MAX_REQUEST_BLOB_SIDECARS, 10);
pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10);
pub const DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA: Quota = Quota::one_every(10);
}
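Editor note: the defaults above read as "at most N items per peer every T seconds" for each protocol. Below is a minimal stand-in for the Quota type under that reading of n_every and one_every; it is illustrative only, not the real rate-limiter implementation.

use std::time::Duration;

// Illustrative stand-in for Quota: `n_every(n, secs)` means at most n items per peer
// over a window of secs seconds; `one_every(secs)` is the single-item case.
#[derive(Clone, Copy, Debug)]
struct Quota {
    max_tokens: u64,
    replenish_all_every: Duration,
}

impl Quota {
    const fn n_every(n: u64, secs: u64) -> Self {
        Self { max_tokens: n, replenish_all_every: Duration::from_secs(secs) }
    }
    const fn one_every(secs: u64) -> Self {
        Self { max_tokens: 1, replenish_all_every: Duration::from_secs(secs) }
    }
}

fn main() {
    // Mirrors DEFAULT_BLOBS_BY_RANGE_QUOTA above: 768 sidecars every 10 seconds.
    let by_range = Quota::n_every(768, 10);
    let bootstrap = Quota::one_every(10);
    println!("{by_range:?} {bootstrap:?}");
}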
|
||||
@ -112,6 +117,8 @@ impl Default for RateLimiterConfig {
|
||||
goodbye_quota: Self::DEFAULT_GOODBYE_QUOTA,
|
||||
blocks_by_range_quota: Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA,
|
||||
blocks_by_root_quota: Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA,
|
||||
blobs_by_range_quota: Self::DEFAULT_BLOBS_BY_RANGE_QUOTA,
|
||||
blobs_by_root_quota: Self::DEFAULT_BLOBS_BY_ROOT_QUOTA,
|
||||
light_client_bootstrap_quota: Self::DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA,
|
||||
}
|
||||
}
|
||||
@ -136,6 +143,8 @@ impl Debug for RateLimiterConfig {
|
||||
.field("goodbye", fmt_q!(&self.goodbye_quota))
|
||||
.field("blocks_by_range", fmt_q!(&self.blocks_by_range_quota))
|
||||
.field("blocks_by_root", fmt_q!(&self.blocks_by_root_quota))
|
||||
.field("blobs_by_range", fmt_q!(&self.blobs_by_range_quota))
|
||||
.field("blobs_by_root", fmt_q!(&self.blobs_by_root_quota))
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
@ -154,6 +163,8 @@ impl FromStr for RateLimiterConfig {
|
||||
let mut goodbye_quota = None;
|
||||
let mut blocks_by_range_quota = None;
|
||||
let mut blocks_by_root_quota = None;
|
||||
let mut blobs_by_range_quota = None;
|
||||
let mut blobs_by_root_quota = None;
|
||||
let mut light_client_bootstrap_quota = None;
|
||||
|
||||
for proto_def in s.split(';') {
|
||||
@ -164,6 +175,8 @@ impl FromStr for RateLimiterConfig {
|
||||
Protocol::Goodbye => goodbye_quota = goodbye_quota.or(quota),
|
||||
Protocol::BlocksByRange => blocks_by_range_quota = blocks_by_range_quota.or(quota),
|
||||
Protocol::BlocksByRoot => blocks_by_root_quota = blocks_by_root_quota.or(quota),
|
||||
Protocol::BlobsByRange => blobs_by_range_quota = blobs_by_range_quota.or(quota),
|
||||
Protocol::BlobsByRoot => blobs_by_root_quota = blobs_by_root_quota.or(quota),
|
||||
Protocol::Ping => ping_quota = ping_quota.or(quota),
|
||||
Protocol::MetaData => meta_data_quota = meta_data_quota.or(quota),
|
||||
Protocol::LightClientBootstrap => {
|
||||
@ -180,6 +193,9 @@ impl FromStr for RateLimiterConfig {
|
||||
.unwrap_or(Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA),
|
||||
blocks_by_root_quota: blocks_by_root_quota
|
||||
.unwrap_or(Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA),
|
||||
blobs_by_range_quota: blobs_by_range_quota
|
||||
.unwrap_or(Self::DEFAULT_BLOBS_BY_RANGE_QUOTA),
|
||||
blobs_by_root_quota: blobs_by_root_quota.unwrap_or(Self::DEFAULT_BLOBS_BY_ROOT_QUOTA),
|
||||
light_client_bootstrap_quota: light_client_bootstrap_quota
|
||||
.unwrap_or(Self::DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA),
|
||||
})
|
||||
|
@ -1,7 +1,7 @@
|
||||
#![allow(clippy::type_complexity)]
|
||||
#![allow(clippy::cognitive_complexity)]
|
||||
|
||||
use super::methods::{GoodbyeReason, RPCCodedResponse, RPCResponseErrorCode, ResponseTermination};
|
||||
use super::methods::{GoodbyeReason, RPCCodedResponse, RPCResponseErrorCode};
|
||||
use super::outbound::OutboundRequestContainer;
|
||||
use super::protocol::{InboundOutput, InboundRequest, Protocol, RPCError, RPCProtocol};
|
||||
use super::{RPCReceived, RPCSend, ReqId};
|
||||
@ -42,6 +42,12 @@ const MAX_INBOUND_SUBSTREAMS: usize = 32;
|
||||
#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)]
|
||||
pub struct SubstreamId(usize);
|
||||
|
||||
impl SubstreamId {
|
||||
pub fn new(id: usize) -> Self {
|
||||
Self(id)
|
||||
}
|
||||
}
|
||||
|
||||
type InboundSubstream<TSpec> = InboundFramed<Stream, TSpec>;
|
||||
|
||||
/// Events the handler emits to the behaviour.
|
||||
@ -593,6 +599,9 @@ where
|
||||
if matches!(info.protocol, Protocol::BlocksByRange) {
|
||||
debug!(self.log, "BlocksByRange Response sent"; "duration" => Instant::now().duration_since(info.request_start_time).as_secs());
|
||||
}
|
||||
if matches!(info.protocol, Protocol::BlobsByRange) {
|
||||
debug!(self.log, "BlobsByRange Response sent"; "duration" => Instant::now().duration_since(info.request_start_time).as_secs());
|
||||
}
|
||||
|
||||
// There is nothing more to process on this substream as it has
|
||||
// been closed. Move on to the next one.
|
||||
@ -616,6 +625,9 @@ where
|
||||
if matches!(info.protocol, Protocol::BlocksByRange) {
|
||||
debug!(self.log, "BlocksByRange Response failed"; "duration" => info.request_start_time.elapsed().as_secs());
|
||||
}
|
||||
if matches!(info.protocol, Protocol::BlobsByRange) {
|
||||
debug!(self.log, "BlobsByRange Response failed"; "duration" => info.request_start_time.elapsed().as_secs());
|
||||
}
|
||||
break;
|
||||
}
|
||||
// The sending future has not completed. Leave the state as busy and
|
||||
@ -777,13 +789,8 @@ where
|
||||
// continue sending responses beyond what we would expect. Here
|
||||
// we simply terminate the stream and report a stream
|
||||
// termination to the application
|
||||
let termination = match protocol {
|
||||
Protocol::BlocksByRange => Some(ResponseTermination::BlocksByRange),
|
||||
Protocol::BlocksByRoot => Some(ResponseTermination::BlocksByRoot),
|
||||
_ => None, // all other protocols are do not have multiple responses and we do not inform the user, we simply drop the stream.
|
||||
};
|
||||
|
||||
if let Some(termination) = termination {
|
||||
if let Some(termination) = protocol.terminator() {
|
||||
return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Ok(
|
||||
RPCReceived::EndOfStream(request_id, termination),
|
||||
)));
|
||||
|
@ -6,7 +6,7 @@ use serde::Serialize;
|
||||
use ssz::Encode;
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use ssz_types::{
|
||||
typenum::{U1024, U256},
|
||||
typenum::{U1024, U128, U256, U768},
|
||||
VariableList,
|
||||
};
|
||||
use std::marker::PhantomData;
|
||||
@ -14,8 +14,11 @@ use std::ops::Deref;
|
||||
use std::sync::Arc;
|
||||
use strum::IntoStaticStr;
|
||||
use superstruct::superstruct;
|
||||
use types::blob_sidecar::BlobIdentifier;
|
||||
use types::consts::deneb::MAX_BLOBS_PER_BLOCK;
|
||||
use types::{
|
||||
light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot,
|
||||
blob_sidecar::BlobSidecar, light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec,
|
||||
Hash256, SignedBeaconBlock, Slot,
|
||||
};
|
||||
|
||||
/// Maximum number of blocks in a single request.
|
||||
@ -26,6 +29,12 @@ pub const MAX_REQUEST_BLOCKS: u64 = 1024;
|
||||
pub type MaxErrorLen = U256;
|
||||
pub const MAX_ERROR_LEN: u64 = 256;

pub type MaxRequestBlocksDeneb = U128;
pub const MAX_REQUEST_BLOCKS_DENEB: u64 = 128;

pub type MaxRequestBlobSidecars = U768;
pub const MAX_REQUEST_BLOB_SIDECARS: u64 = MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK;
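Editor note: the U768 list bound and the constant above are tied together: 768 is 128 blocks times the per-block blob maximum, which implies MAX_BLOBS_PER_BLOCK = 6 (that constant is defined elsewhere, not in this hunk). A small check of the arithmetic, outside the diff:

// Sketch verifying the bound stays consistent; MAX_BLOBS_PER_BLOCK = 6 is inferred
// from 768 / 128, not shown in this hunk.
const MAX_REQUEST_BLOCKS_DENEB: u64 = 128;
const MAX_BLOBS_PER_BLOCK: u64 = 6; // assumed Deneb preset value
const MAX_REQUEST_BLOB_SIDECARS: u64 = MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK;

fn main() {
    // 128 * 6 = 768, which is why the SSZ list bound uses typenum U768.
    assert_eq!(MAX_REQUEST_BLOB_SIDECARS, 768);
}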
|
||||
/// Wrapper over SSZ List to represent error message in rpc responses.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ErrorType(pub VariableList<u8, MaxErrorLen>);
|
||||
@ -278,6 +287,22 @@ impl BlocksByRangeRequest {
|
||||
}
|
||||
}

/// Request a number of beacon blobs from a peer.
#[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct BlobsByRangeRequest {
/// The starting slot to request blobs.
pub start_slot: u64,

/// The number of slots from the start slot.
pub count: u64,
}

impl BlobsByRangeRequest {
pub fn max_blobs_requested<E: EthSpec>(&self) -> u64 {
self.count.saturating_mul(E::max_blobs_per_block() as u64)
}
}
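Editor note: max_blobs_requested multiplies a peer-supplied count by the per-block blob limit, and uses saturating_mul so an adversarial count cannot wrap around u64. A minimal sketch of that logic, assuming 6 blobs per block as implied by the constants above:

// Sketch of the request-sizing arithmetic; 6 is an assumed per-block blob limit.
fn max_blobs_requested(count: u64, max_blobs_per_block: u64) -> u64 {
    // saturating_mul keeps an adversarial `count` from wrapping past u64::MAX.
    count.saturating_mul(max_blobs_per_block)
}

fn main() {
    assert_eq!(max_blobs_requested(10, 6), 60);
    assert_eq!(max_blobs_requested(u64::MAX, 6), u64::MAX); // saturates instead of wrapping
}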
|
||||
/// Request a number of beacon block roots from a peer.
|
||||
#[superstruct(
|
||||
variants(V1, V2),
|
||||
@ -319,7 +344,10 @@ impl OldBlocksByRangeRequest {
|
||||
}
|
||||
|
||||
/// Request a number of beacon block bodies from a peer.
|
||||
#[superstruct(variants(V1, V2), variant_attributes(derive(Clone, Debug, PartialEq)))]
|
||||
#[superstruct(
|
||||
variants(V1, V2),
|
||||
variant_attributes(derive(Encode, Decode, Clone, Debug, PartialEq))
|
||||
)]
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub struct BlocksByRootRequest {
|
||||
/// The list of beacon block bodies being requested.
|
||||
@ -336,6 +364,13 @@ impl BlocksByRootRequest {
|
||||
}
|
||||
}
|
||||
|
||||
/// Request a number of beacon blocks and blobs from a peer.
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub struct BlobsByRootRequest {
|
||||
/// The list of beacon block roots being requested.
|
||||
pub blob_ids: VariableList<BlobIdentifier, MaxRequestBlobSidecars>,
|
||||
}
|
||||
|
||||
/* RPC Handling and Grouping */
|
||||
// Collection of enums and structs used by the Codecs to encode/decode RPC messages
|
||||
|
||||
@ -351,9 +386,15 @@ pub enum RPCResponse<T: EthSpec> {
|
||||
/// A response to a get BLOCKS_BY_ROOT request.
|
||||
BlocksByRoot(Arc<SignedBeaconBlock<T>>),
|
||||
|
||||
/// A response to a get BLOBS_BY_RANGE request
|
||||
BlobsByRange(Arc<BlobSidecar<T>>),
|
||||
|
||||
/// A response to a get LIGHTCLIENT_BOOTSTRAP request.
|
||||
LightClientBootstrap(LightClientBootstrap<T>),
|
||||
|
||||
/// A response to a get BLOBS_BY_ROOT request.
|
||||
BlobsByRoot(Arc<BlobSidecar<T>>),
|
||||
|
||||
/// A PONG response to a PING request.
|
||||
Pong(Ping),
|
||||
|
||||
@ -369,6 +410,12 @@ pub enum ResponseTermination {
|
||||
|
||||
/// Blocks by root stream termination.
|
||||
BlocksByRoot,
|
||||
|
||||
/// Blobs by range stream termination.
|
||||
BlobsByRange,
|
||||
|
||||
/// Blobs by root stream termination.
|
||||
BlobsByRoot,
|
||||
}
|
||||
|
||||
/// The structured response containing a result/code indicating success or failure
|
||||
@ -395,6 +442,7 @@ pub struct LightClientBootstrapRequest {
|
||||
#[strum(serialize_all = "snake_case")]
|
||||
pub enum RPCResponseErrorCode {
|
||||
RateLimited,
|
||||
BlobsNotFoundForBlock,
|
||||
InvalidRequest,
|
||||
ServerError,
|
||||
/// Error spec'd to indicate that a peer does not have blocks on a requested range.
|
||||
@ -424,6 +472,7 @@ impl<T: EthSpec> RPCCodedResponse<T> {
|
||||
2 => RPCResponseErrorCode::ServerError,
|
||||
3 => RPCResponseErrorCode::ResourceUnavailable,
|
||||
139 => RPCResponseErrorCode::RateLimited,
|
||||
140 => RPCResponseErrorCode::BlobsNotFoundForBlock,
|
||||
_ => RPCResponseErrorCode::Unknown,
|
||||
};
|
||||
RPCCodedResponse::Error(code, err)
|
||||
@ -436,6 +485,8 @@ impl<T: EthSpec> RPCCodedResponse<T> {
|
||||
RPCResponse::Status(_) => false,
|
||||
RPCResponse::BlocksByRange(_) => true,
|
||||
RPCResponse::BlocksByRoot(_) => true,
|
||||
RPCResponse::BlobsByRange(_) => true,
|
||||
RPCResponse::BlobsByRoot(_) => true,
|
||||
RPCResponse::Pong(_) => false,
|
||||
RPCResponse::MetaData(_) => false,
|
||||
RPCResponse::LightClientBootstrap(_) => false,
|
||||
@ -460,6 +511,7 @@ impl RPCResponseErrorCode {
|
||||
RPCResponseErrorCode::ResourceUnavailable => 3,
|
||||
RPCResponseErrorCode::Unknown => 255,
|
||||
RPCResponseErrorCode::RateLimited => 139,
|
||||
RPCResponseErrorCode::BlobsNotFoundForBlock => 140,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -471,6 +523,8 @@ impl<T: EthSpec> RPCResponse<T> {
|
||||
RPCResponse::Status(_) => Protocol::Status,
|
||||
RPCResponse::BlocksByRange(_) => Protocol::BlocksByRange,
|
||||
RPCResponse::BlocksByRoot(_) => Protocol::BlocksByRoot,
|
||||
RPCResponse::BlobsByRange(_) => Protocol::BlobsByRange,
|
||||
RPCResponse::BlobsByRoot(_) => Protocol::BlobsByRoot,
|
||||
RPCResponse::Pong(_) => Protocol::Ping,
|
||||
RPCResponse::MetaData(_) => Protocol::MetaData,
|
||||
RPCResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap,
|
||||
@ -486,6 +540,7 @@ impl std::fmt::Display for RPCResponseErrorCode {
|
||||
RPCResponseErrorCode::ServerError => "Server error occurred",
|
||||
RPCResponseErrorCode::Unknown => "Unknown error occurred",
|
||||
RPCResponseErrorCode::RateLimited => "Rate limited",
|
||||
RPCResponseErrorCode::BlobsNotFoundForBlock => "No blobs for the given root",
|
||||
};
|
||||
f.write_str(repr)
|
||||
}
|
||||
@ -507,6 +562,12 @@ impl<T: EthSpec> std::fmt::Display for RPCResponse<T> {
|
||||
RPCResponse::BlocksByRoot(block) => {
|
||||
write!(f, "BlocksByRoot: Block slot: {}", block.slot())
|
||||
}
|
||||
RPCResponse::BlobsByRange(blob) => {
|
||||
write!(f, "BlobsByRange: Blob slot: {}", blob.slot)
|
||||
}
|
||||
RPCResponse::BlobsByRoot(sidecar) => {
|
||||
write!(f, "BlobsByRoot: Blob slot: {}", sidecar.slot)
|
||||
}
|
||||
RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data),
|
||||
RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()),
|
||||
RPCResponse::LightClientBootstrap(bootstrap) => {
|
||||
@ -565,6 +626,26 @@ impl std::fmt::Display for OldBlocksByRangeRequest {
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for BlobsByRootRequest {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"Request: BlobsByRoot: Number of Requested Roots: {}",
|
||||
self.blob_ids.len()
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for BlobsByRangeRequest {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"Request: BlobsByRange: Start Slot: {}, Count: {}",
|
||||
self.start_slot, self.count
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl slog::KV for StatusMessage {
|
||||
fn serialize(
|
||||
&self,
|
||||
|
@ -324,8 +324,10 @@ where
|
||||
Err(RateLimitedErr::TooLarge) => {
|
||||
// we set the batch sizes, so this is a coding/config err for most protocols
|
||||
let protocol = req.versioned_protocol().protocol();
|
||||
if matches!(protocol, Protocol::BlocksByRange) {
|
||||
debug!(self.log, "Blocks by range request will never be processed"; "request" => %req);
|
||||
if matches!(protocol, Protocol::BlocksByRange)
|
||||
|| matches!(protocol, Protocol::BlobsByRange)
|
||||
{
|
||||
debug!(self.log, "By range request will never be processed"; "request" => %req, "protocol" => %protocol);
|
||||
} else {
|
||||
crit!(self.log, "Request size too large to ever be processed"; "protocol" => %protocol);
|
||||
}
|
||||
@ -416,6 +418,8 @@ where
|
||||
match end {
|
||||
ResponseTermination::BlocksByRange => Protocol::BlocksByRange,
|
||||
ResponseTermination::BlocksByRoot => Protocol::BlocksByRoot,
|
||||
ResponseTermination::BlobsByRange => Protocol::BlobsByRange,
|
||||
ResponseTermination::BlobsByRoot => Protocol::BlobsByRoot,
|
||||
},
|
||||
),
|
||||
},
|
||||
|
@ -35,6 +35,8 @@ pub enum OutboundRequest<TSpec: EthSpec> {
|
||||
Goodbye(GoodbyeReason),
|
||||
BlocksByRange(OldBlocksByRangeRequest),
|
||||
BlocksByRoot(BlocksByRootRequest),
|
||||
BlobsByRange(BlobsByRangeRequest),
|
||||
BlobsByRoot(BlobsByRootRequest),
|
||||
Ping(Ping),
|
||||
MetaData(MetadataRequest<TSpec>),
|
||||
}
|
||||
@ -70,6 +72,14 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
|
||||
ProtocolId::new(SupportedProtocol::BlocksByRootV2, Encoding::SSZSnappy),
|
||||
ProtocolId::new(SupportedProtocol::BlocksByRootV1, Encoding::SSZSnappy),
|
||||
],
|
||||
OutboundRequest::BlobsByRange(_) => vec![ProtocolId::new(
|
||||
SupportedProtocol::BlobsByRangeV1,
|
||||
Encoding::SSZSnappy,
|
||||
)],
|
||||
OutboundRequest::BlobsByRoot(_) => vec![ProtocolId::new(
|
||||
SupportedProtocol::BlobsByRootV1,
|
||||
Encoding::SSZSnappy,
|
||||
)],
|
||||
OutboundRequest::Ping(_) => vec![ProtocolId::new(
|
||||
SupportedProtocol::PingV1,
|
||||
Encoding::SSZSnappy,
|
||||
@ -89,6 +99,8 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
|
||||
OutboundRequest::Goodbye(_) => 0,
|
||||
OutboundRequest::BlocksByRange(req) => *req.count(),
|
||||
OutboundRequest::BlocksByRoot(req) => req.block_roots().len() as u64,
|
||||
OutboundRequest::BlobsByRange(req) => req.max_blobs_requested::<TSpec>(),
|
||||
OutboundRequest::BlobsByRoot(req) => req.blob_ids.len() as u64,
|
||||
OutboundRequest::Ping(_) => 1,
|
||||
OutboundRequest::MetaData(_) => 1,
|
||||
}
|
||||
@ -107,6 +119,8 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
|
||||
BlocksByRootRequest::V1(_) => SupportedProtocol::BlocksByRootV1,
|
||||
BlocksByRootRequest::V2(_) => SupportedProtocol::BlocksByRootV2,
|
||||
},
|
||||
OutboundRequest::BlobsByRange(_) => SupportedProtocol::BlobsByRangeV1,
|
||||
OutboundRequest::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1,
|
||||
OutboundRequest::Ping(_) => SupportedProtocol::PingV1,
|
||||
OutboundRequest::MetaData(req) => match req {
|
||||
MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1,
|
||||
@ -123,6 +137,8 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
|
||||
// variants that have `multiple_responses()` can have values.
|
||||
OutboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange,
|
||||
OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot,
|
||||
OutboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange,
|
||||
OutboundRequest::BlobsByRoot(_) => ResponseTermination::BlobsByRoot,
|
||||
OutboundRequest::Status(_) => unreachable!(),
|
||||
OutboundRequest::Goodbye(_) => unreachable!(),
|
||||
OutboundRequest::Ping(_) => unreachable!(),
|
||||
@ -178,6 +194,8 @@ impl<TSpec: EthSpec> std::fmt::Display for OutboundRequest<TSpec> {
|
||||
OutboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason),
|
||||
OutboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req),
|
||||
OutboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req),
|
||||
OutboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req),
|
||||
OutboundRequest::BlobsByRoot(req) => write!(f, "Blobs by root: {:?}", req),
|
||||
OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data),
|
||||
OutboundRequest::MetaData(_) => write!(f, "MetaData request"),
|
||||
}
|
||||
|
@ -22,7 +22,7 @@ use tokio_util::{
|
||||
};
|
||||
use types::{
|
||||
BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockMerge,
|
||||
EmptyBlock, EthSpec, ForkContext, ForkName, Hash256, MainnetEthSpec, Signature,
|
||||
BlobSidecar, EmptyBlock, EthSpec, ForkContext, ForkName, Hash256, MainnetEthSpec, Signature,
|
||||
SignedBeaconBlock,
|
||||
};
|
||||
|
||||
@ -83,6 +83,12 @@ lazy_static! {
|
||||
+ types::ExecutionPayload::<MainnetEthSpec>::max_execution_payload_capella_size() // adding max size of execution payload (~16gb)
|
||||
+ ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field
|
||||
|
||||
pub static ref SIGNED_BEACON_BLOCK_DENEB_MAX: usize = *SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD
|
||||
+ types::ExecutionPayload::<MainnetEthSpec>::max_execution_payload_deneb_size() // adding max size of execution payload (~16gb)
|
||||
+ ssz::BYTES_PER_LENGTH_OFFSET // Adding the additional offsets for the `ExecutionPayload`
|
||||
+ (<types::KzgCommitment as Encode>::ssz_fixed_len() * <MainnetEthSpec>::max_blobs_per_block())
|
||||
+ ssz::BYTES_PER_LENGTH_OFFSET; // Length offset for the blob commitments field.
|
||||
|
||||
pub static ref BLOCKS_BY_ROOT_REQUEST_MIN: usize =
|
||||
VariableList::<Hash256, MaxRequestBlocks>::from(Vec::<Hash256>::new())
|
||||
.as_ssz_bytes()
|
||||
@ -95,6 +101,20 @@ lazy_static! {
|
||||
])
|
||||
.as_ssz_bytes()
|
||||
.len();
|
||||
|
||||
pub static ref BLOBS_BY_ROOT_REQUEST_MIN: usize =
|
||||
VariableList::<Hash256, MaxRequestBlobSidecars>::from(Vec::<Hash256>::new())
|
||||
.as_ssz_bytes()
|
||||
.len();
|
||||
pub static ref BLOBS_BY_ROOT_REQUEST_MAX: usize =
|
||||
VariableList::<Hash256, MaxRequestBlobSidecars>::from(vec![
|
||||
Hash256::zero();
|
||||
MAX_REQUEST_BLOB_SIDECARS
|
||||
as usize
|
||||
])
|
||||
.as_ssz_bytes()
|
||||
.len();
|
||||
|
||||
pub static ref ERROR_TYPE_MIN: usize =
|
||||
VariableList::<u8, MaxErrorLen>::from(Vec::<u8>::new())
|
||||
.as_ssz_bytes()
|
||||
@ -121,6 +141,7 @@ pub fn max_rpc_size(fork_context: &ForkContext, max_chunk_size: usize) -> usize
|
||||
ForkName::Altair | ForkName::Base => max_chunk_size / 10,
|
||||
ForkName::Merge => max_chunk_size,
|
||||
ForkName::Capella => max_chunk_size,
|
||||
ForkName::Deneb => max_chunk_size,
|
||||
}
|
||||
}
|
||||
|
||||
@ -145,6 +166,10 @@ pub fn rpc_block_limits_by_fork(current_fork: ForkName) -> RpcLimits {
|
||||
*SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks
|
||||
*SIGNED_BEACON_BLOCK_CAPELLA_MAX, // Capella block is larger than base, altair and merge blocks
|
||||
),
|
||||
ForkName::Deneb => RpcLimits::new(
|
||||
*SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks
|
||||
*SIGNED_BEACON_BLOCK_DENEB_MAX, // EIP 4844 block is larger than all prior fork blocks
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
@ -162,6 +187,12 @@ pub enum Protocol {
|
||||
/// The `BlocksByRoot` protocol name.
|
||||
#[strum(serialize = "beacon_blocks_by_root")]
|
||||
BlocksByRoot,
|
||||
/// The `BlobsByRange` protocol name.
|
||||
#[strum(serialize = "blob_sidecars_by_range")]
|
||||
BlobsByRange,
|
||||
/// The `BlobsByRoot` protocol name.
|
||||
#[strum(serialize = "blob_sidecars_by_root")]
|
||||
BlobsByRoot,
|
||||
/// The `Ping` protocol name.
|
||||
Ping,
|
||||
/// The `MetaData` protocol name.
|
||||
@ -172,6 +203,22 @@ pub enum Protocol {
|
||||
LightClientBootstrap,
|
||||
}

impl Protocol {
pub(crate) fn terminator(self) -> Option<ResponseTermination> {
match self {
Protocol::Status => None,
Protocol::Goodbye => None,
Protocol::BlocksByRange => Some(ResponseTermination::BlocksByRange),
Protocol::BlocksByRoot => Some(ResponseTermination::BlocksByRoot),
Protocol::BlobsByRange => Some(ResponseTermination::BlobsByRange),
Protocol::BlobsByRoot => Some(ResponseTermination::BlobsByRoot),
Protocol::Ping => None,
Protocol::MetaData => None,
Protocol::LightClientBootstrap => None,
}
}
}
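Editor note: this helper centralises the termination mapping that the handler previously computed with an inline match (see the handler hunk earlier in this diff). A self-contained sketch of the call-site pattern, with the surrounding handler state machine elided:

// Sketch only: how terminator() is consumed; multi-response protocols report an
// end-of-stream to the application, single-response protocols just drop the stream.
#[derive(Clone, Copy, Debug)]
enum ResponseTermination { BlocksByRange, BlobsByRange }

#[derive(Clone, Copy, Debug)]
enum Protocol { Status, BlocksByRange, BlobsByRange }

impl Protocol {
    fn terminator(self) -> Option<ResponseTermination> {
        match self {
            Protocol::Status => None,
            Protocol::BlocksByRange => Some(ResponseTermination::BlocksByRange),
            Protocol::BlobsByRange => Some(ResponseTermination::BlobsByRange),
        }
    }
}

fn main() {
    for protocol in [Protocol::Status, Protocol::BlocksByRange, Protocol::BlobsByRange] {
        match protocol.terminator() {
            Some(t) => println!("{protocol:?}: report end-of-stream {t:?}"),
            None => println!("{protocol:?}: single response, drop the stream"),
        }
    }
}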
|
||||
/// RPC Encodings supported.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum Encoding {
|
||||
@ -187,6 +234,8 @@ pub enum SupportedProtocol {
|
||||
BlocksByRangeV2,
|
||||
BlocksByRootV1,
|
||||
BlocksByRootV2,
|
||||
BlobsByRangeV1,
|
||||
BlobsByRootV1,
|
||||
PingV1,
|
||||
MetaDataV1,
|
||||
MetaDataV2,
|
||||
@ -202,6 +251,8 @@ impl SupportedProtocol {
|
||||
SupportedProtocol::BlocksByRangeV2 => "2",
|
||||
SupportedProtocol::BlocksByRootV1 => "1",
|
||||
SupportedProtocol::BlocksByRootV2 => "2",
|
||||
SupportedProtocol::BlobsByRangeV1 => "1",
|
||||
SupportedProtocol::BlobsByRootV1 => "1",
|
||||
SupportedProtocol::PingV1 => "1",
|
||||
SupportedProtocol::MetaDataV1 => "1",
|
||||
SupportedProtocol::MetaDataV2 => "2",
|
||||
@ -217,6 +268,8 @@ impl SupportedProtocol {
|
||||
SupportedProtocol::BlocksByRangeV2 => Protocol::BlocksByRange,
|
||||
SupportedProtocol::BlocksByRootV1 => Protocol::BlocksByRoot,
|
||||
SupportedProtocol::BlocksByRootV2 => Protocol::BlocksByRoot,
|
||||
SupportedProtocol::BlobsByRangeV1 => Protocol::BlobsByRange,
|
||||
SupportedProtocol::BlobsByRootV1 => Protocol::BlobsByRoot,
|
||||
SupportedProtocol::PingV1 => Protocol::Ping,
|
||||
SupportedProtocol::MetaDataV1 => Protocol::MetaData,
|
||||
SupportedProtocol::MetaDataV2 => Protocol::MetaData,
|
||||
@ -224,8 +277,8 @@ impl SupportedProtocol {
|
||||
}
|
||||
}
|
||||
|
||||
fn currently_supported() -> Vec<ProtocolId> {
|
||||
vec![
|
||||
fn currently_supported(fork_context: &ForkContext) -> Vec<ProtocolId> {
|
||||
let mut supported = vec![
|
||||
ProtocolId::new(Self::StatusV1, Encoding::SSZSnappy),
|
||||
ProtocolId::new(Self::GoodbyeV1, Encoding::SSZSnappy),
|
||||
// V2 variants have higher preference than V1
|
||||
@ -236,7 +289,14 @@ impl SupportedProtocol {
|
||||
ProtocolId::new(Self::PingV1, Encoding::SSZSnappy),
|
||||
ProtocolId::new(Self::MetaDataV2, Encoding::SSZSnappy),
|
||||
ProtocolId::new(Self::MetaDataV1, Encoding::SSZSnappy),
|
||||
]
|
||||
];
|
||||
if fork_context.fork_exists(ForkName::Deneb) {
|
||||
supported.extend_from_slice(&[
|
||||
ProtocolId::new(SupportedProtocol::BlobsByRootV1, Encoding::SSZSnappy),
|
||||
ProtocolId::new(SupportedProtocol::BlobsByRangeV1, Encoding::SSZSnappy),
|
||||
]);
|
||||
}
|
||||
supported
|
||||
}
|
||||
}
|
||||
|
||||
@ -264,7 +324,7 @@ impl<TSpec: EthSpec> UpgradeInfo for RPCProtocol<TSpec> {
|
||||
|
||||
/// The list of supported RPC protocols for Lighthouse.
|
||||
fn protocol_info(&self) -> Self::InfoIter {
|
||||
let mut supported_protocols = SupportedProtocol::currently_supported();
|
||||
let mut supported_protocols = SupportedProtocol::currently_supported(&self.fork_context);
|
||||
if self.enable_light_client_server {
|
||||
supported_protocols.push(ProtocolId::new(
|
||||
SupportedProtocol::LightClientBootstrapV1,
|
||||
@ -333,6 +393,13 @@ impl ProtocolId {
|
||||
Protocol::BlocksByRoot => {
|
||||
RpcLimits::new(*BLOCKS_BY_ROOT_REQUEST_MIN, *BLOCKS_BY_ROOT_REQUEST_MAX)
|
||||
}
|
||||
Protocol::BlobsByRange => RpcLimits::new(
|
||||
<BlobsByRangeRequest as Encode>::ssz_fixed_len(),
|
||||
<BlobsByRangeRequest as Encode>::ssz_fixed_len(),
|
||||
),
|
||||
Protocol::BlobsByRoot => {
|
||||
RpcLimits::new(*BLOBS_BY_ROOT_REQUEST_MIN, *BLOBS_BY_ROOT_REQUEST_MAX)
|
||||
}
|
||||
Protocol::Ping => RpcLimits::new(
|
||||
<Ping as Encode>::ssz_fixed_len(),
|
||||
<Ping as Encode>::ssz_fixed_len(),
|
||||
@ -355,6 +422,8 @@ impl ProtocolId {
|
||||
Protocol::Goodbye => RpcLimits::new(0, 0), // Goodbye request has no response
|
||||
Protocol::BlocksByRange => rpc_block_limits_by_fork(fork_context.current_fork()),
|
||||
Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork()),
|
||||
Protocol::BlobsByRange => rpc_blob_limits::<T>(),
|
||||
Protocol::BlobsByRoot => rpc_blob_limits::<T>(),
|
||||
Protocol::Ping => RpcLimits::new(
|
||||
<Ping as Encode>::ssz_fixed_len(),
|
||||
<Ping as Encode>::ssz_fixed_len(),
|
||||
@ -376,6 +445,8 @@ impl ProtocolId {
|
||||
match self.versioned_protocol {
|
||||
SupportedProtocol::BlocksByRangeV2
|
||||
| SupportedProtocol::BlocksByRootV2
|
||||
| SupportedProtocol::BlobsByRangeV1
|
||||
| SupportedProtocol::BlobsByRootV1
|
||||
| SupportedProtocol::LightClientBootstrapV1 => true,
|
||||
SupportedProtocol::StatusV1
|
||||
| SupportedProtocol::BlocksByRootV1
|
||||
@ -407,6 +478,13 @@ impl ProtocolId {
|
||||
}
|
||||
}

pub fn rpc_blob_limits<T: EthSpec>() -> RpcLimits {
RpcLimits::new(
BlobSidecar::<T>::empty().as_ssz_bytes().len(),
BlobSidecar::<T>::max_size(),
)
}
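Editor note: blob responses are size-checked against these limits before decompression, in the same way as the is_out_of_bounds check and the max/min fields referenced earlier in the codec hunk. The stand-in below illustrates that check; the exact comparison is an assumption, not the real API.

// Illustrative stand-in for the RpcLimits bounds check used by the SSZ-snappy codec.
struct RpcLimits { min: usize, max: usize }

impl RpcLimits {
    fn new(min: usize, max: usize) -> Self { Self { min, max } }
    fn is_out_of_bounds(&self, length: usize, max_packet_size: usize) -> bool {
        // Reject a declared SSZ length outside [min, min(max, max_packet_size)].
        length < self.min || length > std::cmp::min(self.max, max_packet_size)
    }
}

fn main() {
    // A fixed-size request has min == max, so any other declared length is rejected early.
    let limits = RpcLimits::new(100, 100);
    assert!(limits.is_out_of_bounds(99, 1 << 20));
    assert!(!limits.is_out_of_bounds(100, 1 << 20));
}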
|
||||
/* Inbound upgrade */
|
||||
|
||||
// The inbound protocol reads the request, decodes it and returns the stream to the protocol
|
||||
@ -478,6 +556,8 @@ pub enum InboundRequest<TSpec: EthSpec> {
|
||||
Goodbye(GoodbyeReason),
|
||||
BlocksByRange(OldBlocksByRangeRequest),
|
||||
BlocksByRoot(BlocksByRootRequest),
|
||||
BlobsByRange(BlobsByRangeRequest),
|
||||
BlobsByRoot(BlobsByRootRequest),
|
||||
LightClientBootstrap(LightClientBootstrapRequest),
|
||||
Ping(Ping),
|
||||
MetaData(MetadataRequest<TSpec>),
|
||||
@ -494,6 +574,8 @@ impl<TSpec: EthSpec> InboundRequest<TSpec> {
|
||||
InboundRequest::Goodbye(_) => 0,
|
||||
InboundRequest::BlocksByRange(req) => *req.count(),
|
||||
InboundRequest::BlocksByRoot(req) => req.block_roots().len() as u64,
|
||||
InboundRequest::BlobsByRange(req) => req.max_blobs_requested::<TSpec>(),
|
||||
InboundRequest::BlobsByRoot(req) => req.blob_ids.len() as u64,
|
||||
InboundRequest::Ping(_) => 1,
|
||||
InboundRequest::MetaData(_) => 1,
|
||||
InboundRequest::LightClientBootstrap(_) => 1,
|
||||
@ -513,6 +595,8 @@ impl<TSpec: EthSpec> InboundRequest<TSpec> {
|
||||
BlocksByRootRequest::V1(_) => SupportedProtocol::BlocksByRootV1,
|
||||
BlocksByRootRequest::V2(_) => SupportedProtocol::BlocksByRootV2,
|
||||
},
|
||||
InboundRequest::BlobsByRange(_) => SupportedProtocol::BlobsByRangeV1,
|
||||
InboundRequest::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1,
|
||||
InboundRequest::Ping(_) => SupportedProtocol::PingV1,
|
||||
InboundRequest::MetaData(req) => match req {
|
||||
MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1,
|
||||
@ -530,6 +614,8 @@ impl<TSpec: EthSpec> InboundRequest<TSpec> {
|
||||
// variants that have `multiple_responses()` can have values.
|
||||
InboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange,
|
||||
InboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot,
|
||||
InboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange,
|
||||
InboundRequest::BlobsByRoot(_) => ResponseTermination::BlobsByRoot,
|
||||
InboundRequest::Status(_) => unreachable!(),
|
||||
InboundRequest::Goodbye(_) => unreachable!(),
|
||||
InboundRequest::Ping(_) => unreachable!(),
|
||||
@ -636,6 +722,8 @@ impl<TSpec: EthSpec> std::fmt::Display for InboundRequest<TSpec> {
|
||||
InboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason),
|
||||
InboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req),
|
||||
InboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req),
|
||||
InboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req),
|
||||
InboundRequest::BlobsByRoot(req) => write!(f, "Blobs by root: {:?}", req),
|
||||
InboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data),
|
||||
InboundRequest::MetaData(_) => write!(f, "MetaData request"),
|
||||
InboundRequest::LightClientBootstrap(bootstrap) => {
|
||||
|
@ -2,7 +2,7 @@ use super::config::RateLimiterConfig;
|
||||
use crate::rpc::Protocol;
|
||||
use fnv::FnvHashMap;
|
||||
use libp2p::PeerId;
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::convert::TryInto;
|
||||
use std::future::Future;
|
||||
use std::hash::Hash;
|
||||
@ -94,6 +94,10 @@ pub struct RPCRateLimiter {
|
||||
bbrange_rl: Limiter<PeerId>,
|
||||
/// BlocksByRoot rate limiter.
|
||||
bbroots_rl: Limiter<PeerId>,
|
||||
/// BlobsByRange rate limiter.
|
||||
blbrange_rl: Limiter<PeerId>,
|
||||
/// BlobsByRoot rate limiter.
|
||||
blbroot_rl: Limiter<PeerId>,
|
||||
/// LightClientBootstrap rate limiter.
|
||||
lcbootstrap_rl: Limiter<PeerId>,
|
||||
}
|
||||
@ -122,6 +126,10 @@ pub struct RPCRateLimiterBuilder {
|
||||
bbrange_quota: Option<Quota>,
|
||||
/// Quota for the BlocksByRoot protocol.
|
||||
bbroots_quota: Option<Quota>,
|
||||
/// Quota for the BlobsByRange protocol.
|
||||
blbrange_quota: Option<Quota>,
|
||||
/// Quota for the BlobsByRoot protocol.
|
||||
blbroot_quota: Option<Quota>,
|
||||
/// Quota for the LightClientBootstrap protocol.
|
||||
lcbootstrap_quota: Option<Quota>,
|
||||
}
|
||||
@ -137,6 +145,8 @@ impl RPCRateLimiterBuilder {
|
||||
Protocol::Goodbye => self.goodbye_quota = q,
|
||||
Protocol::BlocksByRange => self.bbrange_quota = q,
|
||||
Protocol::BlocksByRoot => self.bbroots_quota = q,
|
||||
Protocol::BlobsByRange => self.blbrange_quota = q,
|
||||
Protocol::BlobsByRoot => self.blbroot_quota = q,
|
||||
Protocol::LightClientBootstrap => self.lcbootstrap_quota = q,
|
||||
}
|
||||
self
|
||||
@ -158,6 +168,14 @@ impl RPCRateLimiterBuilder {
|
||||
.lcbootstrap_quota
|
||||
.ok_or("LightClientBootstrap quota not specified")?;
|
||||
|
||||
let blbrange_quota = self
|
||||
.blbrange_quota
|
||||
.ok_or("BlobsByRange quota not specified")?;
|
||||
|
||||
let blbroots_quota = self
|
||||
.blbroot_quota
|
||||
.ok_or("BlobsByRoot quota not specified")?;
|
||||
|
||||
// create the rate limiters
|
||||
let ping_rl = Limiter::from_quota(ping_quota)?;
|
||||
let metadata_rl = Limiter::from_quota(metadata_quota)?;
|
||||
@ -165,6 +183,8 @@ impl RPCRateLimiterBuilder {
|
||||
let goodbye_rl = Limiter::from_quota(goodbye_quota)?;
|
||||
let bbroots_rl = Limiter::from_quota(bbroots_quota)?;
|
||||
let bbrange_rl = Limiter::from_quota(bbrange_quota)?;
|
||||
let blbrange_rl = Limiter::from_quota(blbrange_quota)?;
|
||||
let blbroot_rl = Limiter::from_quota(blbroots_quota)?;
|
||||
let lcbootstrap_rl = Limiter::from_quota(lcbootstrap_quote)?;
|
||||
|
||||
// check for peers to prune every 30 seconds, starting in 30 seconds
|
||||
@ -179,6 +199,8 @@ impl RPCRateLimiterBuilder {
|
||||
goodbye_rl,
|
||||
bbroots_rl,
|
||||
bbrange_rl,
|
||||
blbrange_rl,
|
||||
blbroot_rl,
|
||||
lcbootstrap_rl,
|
||||
init_time: Instant::now(),
|
||||
})
|
||||
@ -219,6 +241,8 @@ impl RPCRateLimiter {
|
||||
goodbye_quota,
|
||||
blocks_by_range_quota,
|
||||
blocks_by_root_quota,
|
||||
blobs_by_range_quota,
|
||||
blobs_by_root_quota,
|
||||
light_client_bootstrap_quota,
|
||||
} = config;
|
||||
|
||||
@ -229,6 +253,8 @@ impl RPCRateLimiter {
|
||||
.set_quota(Protocol::Goodbye, goodbye_quota)
|
||||
.set_quota(Protocol::BlocksByRange, blocks_by_range_quota)
|
||||
.set_quota(Protocol::BlocksByRoot, blocks_by_root_quota)
|
||||
.set_quota(Protocol::BlobsByRange, blobs_by_range_quota)
|
||||
.set_quota(Protocol::BlobsByRoot, blobs_by_root_quota)
|
||||
.set_quota(Protocol::LightClientBootstrap, light_client_bootstrap_quota)
|
||||
.build()
|
||||
}
|
||||
@ -255,6 +281,8 @@ impl RPCRateLimiter {
|
||||
Protocol::Goodbye => &mut self.goodbye_rl,
|
||||
Protocol::BlocksByRange => &mut self.bbrange_rl,
|
||||
Protocol::BlocksByRoot => &mut self.bbroots_rl,
|
||||
Protocol::BlobsByRange => &mut self.blbrange_rl,
|
||||
Protocol::BlobsByRoot => &mut self.blbroot_rl,
|
||||
Protocol::LightClientBootstrap => &mut self.lcbootstrap_rl,
|
||||
};
|
||||
check(limiter)
|
||||
@ -268,6 +296,8 @@ impl RPCRateLimiter {
|
||||
self.goodbye_rl.prune(time_since_start);
|
||||
self.bbrange_rl.prune(time_since_start);
|
||||
self.bbroots_rl.prune(time_since_start);
|
||||
self.blbrange_rl.prune(time_since_start);
|
||||
self.blbroot_rl.prune(time_since_start);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2,8 +2,9 @@ use std::sync::Arc;
|
||||
|
||||
use libp2p::swarm::ConnectionId;
|
||||
use types::light_client_bootstrap::LightClientBootstrap;
|
||||
use types::{EthSpec, SignedBeaconBlock};
|
||||
use types::{BlobSidecar, EthSpec, SignedBeaconBlock};
|
||||
|
||||
use crate::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest};
|
||||
use crate::rpc::{
|
||||
methods::{
|
||||
BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest,
|
||||
@ -34,10 +35,14 @@ pub enum Request {
|
||||
Status(StatusMessage),
|
||||
/// A blocks by range request.
|
||||
BlocksByRange(BlocksByRangeRequest),
|
||||
/// A blobs by range request.
|
||||
BlobsByRange(BlobsByRangeRequest),
|
||||
/// A request blocks root request.
|
||||
BlocksByRoot(BlocksByRootRequest),
|
||||
// light client bootstrap request
|
||||
LightClientBootstrap(LightClientBootstrapRequest),
|
||||
/// A request blobs root request.
|
||||
BlobsByRoot(BlobsByRootRequest),
|
||||
}
|
||||
|
||||
impl<TSpec: EthSpec> std::convert::From<Request> for OutboundRequest<TSpec> {
|
||||
@ -63,6 +68,8 @@ impl<TSpec: EthSpec> std::convert::From<Request> for OutboundRequest<TSpec> {
|
||||
Request::LightClientBootstrap(_) => {
|
||||
unreachable!("Lighthouse never makes an outbound light client request")
|
||||
}
|
||||
Request::BlobsByRange(r) => OutboundRequest::BlobsByRange(r),
|
||||
Request::BlobsByRoot(r) => OutboundRequest::BlobsByRoot(r),
|
||||
Request::Status(s) => OutboundRequest::Status(s),
|
||||
}
|
||||
}
|
||||
@ -80,8 +87,12 @@ pub enum Response<TSpec: EthSpec> {
|
||||
Status(StatusMessage),
|
||||
/// A response to a get BLOCKS_BY_RANGE request. A None response signals the end of the batch.
|
||||
BlocksByRange(Option<Arc<SignedBeaconBlock<TSpec>>>),
|
||||
/// A response to a get BLOBS_BY_RANGE request. A None response signals the end of the batch.
|
||||
BlobsByRange(Option<Arc<BlobSidecar<TSpec>>>),
|
||||
/// A response to a get BLOCKS_BY_ROOT request.
|
||||
BlocksByRoot(Option<Arc<SignedBeaconBlock<TSpec>>>),
|
||||
/// A response to a get BLOBS_BY_ROOT request.
|
||||
BlobsByRoot(Option<Arc<BlobSidecar<TSpec>>>),
|
||||
/// A response to a LightClientUpdate request.
|
||||
LightClientBootstrap(LightClientBootstrap<TSpec>),
|
||||
}
|
||||
@ -97,6 +108,14 @@ impl<TSpec: EthSpec> std::convert::From<Response<TSpec>> for RPCCodedResponse<TS
|
||||
Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRange(b)),
|
||||
None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange),
|
||||
},
|
||||
Response::BlobsByRoot(r) => match r {
|
||||
Some(b) => RPCCodedResponse::Success(RPCResponse::BlobsByRoot(b)),
|
||||
None => RPCCodedResponse::StreamTermination(ResponseTermination::BlobsByRoot),
|
||||
},
|
||||
Response::BlobsByRange(r) => match r {
|
||||
Some(b) => RPCCodedResponse::Success(RPCResponse::BlobsByRange(b)),
|
||||
None => RPCCodedResponse::StreamTermination(ResponseTermination::BlobsByRange),
|
||||
},
|
||||
Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)),
|
||||
Response::LightClientBootstrap(b) => {
|
||||
RPCCodedResponse::Success(RPCResponse::LightClientBootstrap(b))
|
||||
|
@ -20,6 +20,8 @@ pub struct GossipCache {
|
||||
topic_msgs: HashMap<GossipTopic, HashMap<Vec<u8>, Key>>,
|
||||
/// Timeout for blocks.
|
||||
beacon_block: Option<Duration>,
|
||||
/// Timeout for blobs.
|
||||
blob_sidecar: Option<Duration>,
|
||||
/// Timeout for aggregate attestations.
|
||||
aggregates: Option<Duration>,
|
||||
/// Timeout for attestations.
|
||||
@ -47,6 +49,8 @@ pub struct GossipCacheBuilder {
|
||||
default_timeout: Option<Duration>,
|
||||
/// Timeout for blocks.
|
||||
beacon_block: Option<Duration>,
|
||||
/// Timeout for blob sidecars.
|
||||
blob_sidecar: Option<Duration>,
|
||||
/// Timeout for aggregate attestations.
|
||||
aggregates: Option<Duration>,
|
||||
/// Timeout for attestations.
|
||||
@ -147,6 +151,7 @@ impl GossipCacheBuilder {
|
||||
let GossipCacheBuilder {
|
||||
default_timeout,
|
||||
beacon_block,
|
||||
blob_sidecar,
|
||||
aggregates,
|
||||
attestation,
|
||||
voluntary_exit,
|
||||
@ -162,6 +167,7 @@ impl GossipCacheBuilder {
|
||||
expirations: DelayQueue::default(),
|
||||
topic_msgs: HashMap::default(),
|
||||
beacon_block: beacon_block.or(default_timeout),
|
||||
blob_sidecar: blob_sidecar.or(default_timeout),
|
||||
aggregates: aggregates.or(default_timeout),
|
||||
attestation: attestation.or(default_timeout),
|
||||
voluntary_exit: voluntary_exit.or(default_timeout),
|
||||
@ -187,6 +193,7 @@ impl GossipCache {
|
||||
pub fn insert(&mut self, topic: GossipTopic, data: Vec<u8>) {
|
||||
let expire_timeout = match topic.kind() {
|
||||
GossipKind::BeaconBlock => self.beacon_block,
|
||||
GossipKind::BlobSidecar(_) => self.blob_sidecar,
|
||||
GossipKind::BeaconAggregateAndProof => self.aggregates,
|
||||
GossipKind::Attestation(_) => self.attestation,
|
||||
GossipKind::VoluntaryExit => self.voluntary_exit,
|
||||
|
@ -15,7 +15,8 @@ use crate::service::behaviour::BehaviourEvent;
|
||||
pub use crate::service::behaviour::Gossipsub;
|
||||
use crate::types::{
|
||||
fork_core_topics, subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic,
|
||||
SnappyTransform, Subnet, SubnetDiscovery,
|
||||
SnappyTransform, Subnet, SubnetDiscovery, ALTAIR_CORE_TOPICS, BASE_CORE_TOPICS,
|
||||
CAPELLA_CORE_TOPICS, DENEB_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS,
|
||||
};
|
||||
use crate::EnrExt;
|
||||
use crate::Eth2Enr;
|
||||
@ -41,7 +42,8 @@ use std::{
|
||||
};
|
||||
use types::ForkName;
|
||||
use types::{
|
||||
consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, Slot, SubnetId,
|
||||
consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, consts::deneb::BLOB_SIDECAR_SUBNET_COUNT,
|
||||
EnrForkId, EthSpec, ForkContext, Slot, SubnetId,
|
||||
};
|
||||
use utils::{build_transport, strip_peer_id, Context as ServiceContext, MAX_CONNECTIONS_PER_PEER};
|
||||
|
||||
@ -70,6 +72,8 @@ pub enum NetworkEvent<AppReqId: ReqId, TSpec: EthSpec> {
|
||||
id: AppReqId,
|
||||
/// The peer to which this request was sent.
|
||||
peer_id: PeerId,
|
||||
/// The error of the failed request.
|
||||
error: RPCError,
|
||||
},
|
||||
RequestReceived {
|
||||
/// The peer that sent the request.
|
||||
@ -220,15 +224,27 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
|
||||
// Set up a scoring update interval
|
||||
let update_gossipsub_scores = tokio::time::interval(params.decay_interval);
|
||||
|
||||
let max_topics = ctx.chain_spec.attestation_subnet_count as usize
+ SYNC_COMMITTEE_SUBNET_COUNT as usize
+ BLOB_SIDECAR_SUBNET_COUNT as usize
+ BASE_CORE_TOPICS.len()
+ ALTAIR_CORE_TOPICS.len()
+ CAPELLA_CORE_TOPICS.len()
+ DENEB_CORE_TOPICS.len()
+ LIGHT_CLIENT_GOSSIP_TOPICS.len();

let possible_fork_digests = ctx.fork_context.all_fork_digests();
let filter = gossipsub::MaxCountSubscriptionFilter {
filter: utils::create_whitelist_filter(
possible_fork_digests,
ctx.chain_spec.attestation_subnet_count,
SYNC_COMMITTEE_SUBNET_COUNT,
BLOB_SIDECAR_SUBNET_COUNT,
),
max_subscribed_topics: 200,
max_subscriptions_per_request: 150, // 148 in theory = (64 attestation + 4 sync committee + 6 core topics) * 2
// during a fork we subscribe to both the old and new topics
max_subscribed_topics: max_topics * 4,
// 162 in theory = (64 attestation + 4 sync committee + 7 core topics + 6 blob topics) * 2
max_subscriptions_per_request: max_topics * 2,
};
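Editor note: a quick check of the "162 in theory" comment above, using only the numbers from that comment (64 attestation subnets, 4 sync-committee subnets, 7 core topics, 6 blob topics, doubled because the node subscribes under both the old and new fork digest during a fork):

// Sanity check of the topic-count comment; all figures come from the comment itself.
fn main() {
    let attestation_subnets = 64;
    let sync_committee_subnets = 4;
    let core_topics = 7;
    let blob_topics = 6;
    let per_fork = attestation_subnets + sync_committee_subnets + core_topics + blob_topics;
    // Old and new fork digests are both subscribed across a fork boundary.
    assert_eq!(per_fork * 2, 162);
}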
|
||||
let gossipsub_config_params = GossipsubConfigParams {
|
||||
@ -601,7 +617,7 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
|
||||
}
|
||||
|
||||
// Subscribe to core topics for the new fork
|
||||
for kind in fork_core_topics(&new_fork) {
|
||||
for kind in fork_core_topics::<TSpec>(&new_fork) {
|
||||
let topic = GossipTopic::new(kind, GossipEncoding::default(), new_fork_digest);
|
||||
self.subscribe(topic);
|
||||
}
|
||||
@ -1078,6 +1094,12 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
|
||||
Request::BlocksByRoot { .. } => {
|
||||
metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"])
|
||||
}
|
||||
Request::BlobsByRange { .. } => {
|
||||
metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_range"])
|
||||
}
|
||||
Request::BlobsByRoot { .. } => {
|
||||
metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_root"])
|
||||
}
|
||||
}
|
||||
NetworkEvent::RequestReceived {
|
||||
peer_id,
|
||||
@ -1256,9 +1278,9 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
|
||||
&error,
|
||||
ConnectionDirection::Outgoing,
|
||||
);
|
||||
// inform failures of requests comming outside the behaviour
|
||||
// inform failures of requests coming outside the behaviour
|
||||
if let RequestId::Application(id) = id {
|
||||
Some(NetworkEvent::RPCFailed { peer_id, id })
|
||||
Some(NetworkEvent::RPCFailed { peer_id, id, error })
|
||||
} else {
|
||||
None
|
||||
}
|
||||
@ -1341,6 +1363,19 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
|
||||
);
|
||||
Some(event)
|
||||
}
|
||||
InboundRequest::BlobsByRange(req) => {
|
||||
let event = self.build_request(
|
||||
peer_request_id,
|
||||
peer_id,
|
||||
Request::BlobsByRange(req),
|
||||
);
|
||||
Some(event)
|
||||
}
|
||||
InboundRequest::BlobsByRoot(req) => {
|
||||
let event =
|
||||
self.build_request(peer_request_id, peer_id, Request::BlobsByRoot(req));
|
||||
Some(event)
|
||||
}
|
||||
InboundRequest::LightClientBootstrap(req) => {
|
||||
let event = self.build_request(
|
||||
peer_request_id,
|
||||
@ -1373,9 +1408,15 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
|
||||
RPCResponse::BlocksByRange(resp) => {
|
||||
self.build_response(id, peer_id, Response::BlocksByRange(Some(resp)))
|
||||
}
|
||||
RPCResponse::BlobsByRange(resp) => {
|
||||
self.build_response(id, peer_id, Response::BlobsByRange(Some(resp)))
|
||||
}
|
||||
RPCResponse::BlocksByRoot(resp) => {
|
||||
self.build_response(id, peer_id, Response::BlocksByRoot(Some(resp)))
|
||||
}
|
||||
RPCResponse::BlobsByRoot(resp) => {
|
||||
self.build_response(id, peer_id, Response::BlobsByRoot(Some(resp)))
|
||||
}
|
||||
// Should never be reached
|
||||
RPCResponse::LightClientBootstrap(bootstrap) => {
|
||||
self.build_response(id, peer_id, Response::LightClientBootstrap(bootstrap))
|
||||
@ -1386,6 +1427,8 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
|
||||
let response = match termination {
|
||||
ResponseTermination::BlocksByRange => Response::BlocksByRange(None),
|
||||
ResponseTermination::BlocksByRoot => Response::BlocksByRoot(None),
|
||||
ResponseTermination::BlobsByRange => Response::BlobsByRange(None),
|
||||
ResponseTermination::BlobsByRoot => Response::BlobsByRoot(None),
|
||||
};
|
||||
self.build_response(id, peer_id, response)
|
||||
}
|
||||
|
@ -233,6 +233,7 @@ pub(crate) fn create_whitelist_filter(
|
||||
possible_fork_digests: Vec<[u8; 4]>,
|
||||
attestation_subnet_count: u64,
|
||||
sync_committee_subnet_count: u64,
|
||||
blob_sidecar_subnet_count: u64,
|
||||
) -> gossipsub::WhitelistSubscriptionFilter {
|
||||
let mut possible_hashes = HashSet::new();
|
||||
for fork_digest in possible_fork_digests {
|
||||
@ -258,6 +259,9 @@ pub(crate) fn create_whitelist_filter(
|
||||
for id in 0..sync_committee_subnet_count {
|
||||
add(SyncCommitteeMessage(SyncSubnetId::new(id)));
|
||||
}
|
||||
for id in 0..blob_sidecar_subnet_count {
|
||||
add(BlobSidecar(id));
|
||||
}
|
||||
}
|
||||
gossipsub::WhitelistSubscriptionFilter(possible_hashes)
|
||||
}
|
||||

@ -18,5 +18,6 @@ pub use subnet::{Subnet, SubnetDiscovery};
pub use sync_state::{BackFillState, SyncState};
pub use topics::{
    core_topics_to_subscribe, fork_core_topics, subnet_from_topic_hash, GossipEncoding, GossipKind,
    GossipTopic, LIGHT_CLIENT_GOSSIP_TOPICS,
    GossipTopic, ALTAIR_CORE_TOPICS, BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, DENEB_CORE_TOPICS,
    LIGHT_CLIENT_GOSSIP_TOPICS,
};

@ -12,14 +12,16 @@ use types::{
    Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, LightClientFinalityUpdate,
    LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock,
    SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella,
    SignedBeaconBlockMerge, SignedBlsToExecutionChange, SignedContributionAndProof,
    SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId,
    SignedBeaconBlockDeneb, SignedBeaconBlockMerge, SignedBlobSidecar, SignedBlsToExecutionChange,
    SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId,
};

#[derive(Debug, Clone, PartialEq)]
pub enum PubsubMessage<T: EthSpec> {
    /// Gossipsub message providing notification of a new block.
    BeaconBlock(Arc<SignedBeaconBlock<T>>),
    /// Gossipsub message providing notification of a [`SignedBlobSidecar`] along with the subnet id where it was received.
    BlobSidecar(Box<(u64, SignedBlobSidecar<T>)>),
    /// Gossipsub message providing notification of a Aggregate attestation and associated proof.
    AggregateAndProofAttestation(Box<SignedAggregateAndProof<T>>),
    /// Gossipsub message providing notification of a raw un-aggregated attestation with its shard id.
@ -113,6 +115,9 @@ impl<T: EthSpec> PubsubMessage<T> {
    pub fn kind(&self) -> GossipKind {
        match self {
            PubsubMessage::BeaconBlock(_) => GossipKind::BeaconBlock,
            PubsubMessage::BlobSidecar(blob_sidecar_data) => {
                GossipKind::BlobSidecar(blob_sidecar_data.0)
            }
            PubsubMessage::AggregateAndProofAttestation(_) => GossipKind::BeaconAggregateAndProof,
            PubsubMessage::Attestation(attestation_data) => {
                GossipKind::Attestation(attestation_data.0)
@ -183,6 +188,10 @@ impl<T: EthSpec> PubsubMessage<T> {
                            SignedBeaconBlockCapella::from_ssz_bytes(data)
                                .map_err(|e| format!("{:?}", e))?,
                        ),
                        Some(ForkName::Deneb) => SignedBeaconBlock::<T>::Deneb(
                            SignedBeaconBlockDeneb::from_ssz_bytes(data)
                                .map_err(|e| format!("{:?}", e))?,
                        ),
                        None => {
                            return Err(format!(
                                "Unknown gossipsub fork digest: {:?}",
@ -192,6 +201,28 @@ impl<T: EthSpec> PubsubMessage<T> {
                    };
                    Ok(PubsubMessage::BeaconBlock(Arc::new(beacon_block)))
                }
                GossipKind::BlobSidecar(blob_index) => {
                    match fork_context.from_context_bytes(gossip_topic.fork_digest) {
                        Some(ForkName::Deneb) => {
                            let blob_sidecar = SignedBlobSidecar::from_ssz_bytes(data)
                                .map_err(|e| format!("{:?}", e))?;
                            Ok(PubsubMessage::BlobSidecar(Box::new((
                                *blob_index,
                                blob_sidecar,
                            ))))
                        }
                        Some(
                            ForkName::Base
                            | ForkName::Altair
                            | ForkName::Merge
                            | ForkName::Capella,
                        )
                        | None => Err(format!(
                            "beacon_blobs_and_sidecar topic invalid for given fork digest {:?}",
                            gossip_topic.fork_digest
                        )),
                    }
                }
                GossipKind::VoluntaryExit => {
                    let voluntary_exit = SignedVoluntaryExit::from_ssz_bytes(data)
                        .map_err(|e| format!("{:?}", e))?;
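In the decode arm above, the blob subnet index is not part of the SSZ payload; it comes from the `blob_sidecar_{index}` topic and is paired with the decoded `SignedBlobSidecar`, mirroring the `BlobSidecar(Box<(u64, SignedBlobSidecar<T>)>)` variant. A minimal, editor-added sketch of that pairing with placeholder types (nothing below is the crate's real SSZ decoding):

```rust
// Placeholder types standing in for SignedBlobSidecar and its SSZ decoder.
#[derive(Debug, PartialEq)]
struct SidecarStub {
    slot: u64,
    index: u64,
}

fn decode_payload(data: &[u8]) -> Result<SidecarStub, String> {
    // Stand-in for SignedBlobSidecar::from_ssz_bytes(data).
    let bytes: [u8; 16] = data.try_into().map_err(|e| format!("{e:?}"))?;
    Ok(SidecarStub {
        slot: u64::from_le_bytes(bytes[..8].try_into().unwrap()),
        index: u64::from_le_bytes(bytes[8..].try_into().unwrap()),
    })
}

/// The subnet index comes from the topic name, the sidecar from the payload.
fn decode_blob_message(topic_blob_index: u64, data: &[u8]) -> Result<(u64, SidecarStub), String> {
    Ok((topic_blob_index, decode_payload(data)?))
}

fn main() {
    let mut payload = Vec::new();
    payload.extend_from_slice(&7u64.to_le_bytes()); // slot
    payload.extend_from_slice(&3u64.to_le_bytes()); // index
    let (subnet, sidecar) = decode_blob_message(3, &payload).unwrap();
    assert_eq!(subnet, sidecar.index);
    assert_eq!(sidecar.slot, 7);
}
```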
@ -260,6 +291,7 @@ impl<T: EthSpec> PubsubMessage<T> {
        // messages for us.
        match &self {
            PubsubMessage::BeaconBlock(data) => data.as_ssz_bytes(),
            PubsubMessage::BlobSidecar(data) => data.1.as_ssz_bytes(),
            PubsubMessage::AggregateAndProofAttestation(data) => data.as_ssz_bytes(),
            PubsubMessage::VoluntaryExit(data) => data.as_ssz_bytes(),
            PubsubMessage::ProposerSlashing(data) => data.as_ssz_bytes(),
@ -283,6 +315,11 @@ impl<T: EthSpec> std::fmt::Display for PubsubMessage<T> {
                block.slot(),
                block.message().proposer_index()
            ),
            PubsubMessage::BlobSidecar(data) => write!(
                f,
                "BlobSidecar: slot: {}, blob index: {}",
                data.1.message.slot, data.1.message.index,
            ),
            PubsubMessage::AggregateAndProofAttestation(att) => write!(
                f,
                "Aggregate and Proof: slot: {}, index: {}, aggregator_index: {}",

@ -1,7 +1,8 @@
use libp2p::gossipsub::{IdentTopic as Topic, TopicHash};
use serde_derive::{Deserialize, Serialize};
use serde::{Deserialize, Serialize};
use strum::AsRefStr;
use types::{ForkName, SubnetId, SyncSubnetId};
use types::consts::deneb::BLOB_SIDECAR_SUBNET_COUNT;
use types::{EthSpec, ForkName, SubnetId, SyncSubnetId};

use crate::Subnet;

@ -13,6 +14,7 @@ pub const SSZ_SNAPPY_ENCODING_POSTFIX: &str = "ssz_snappy";
pub const BEACON_BLOCK_TOPIC: &str = "beacon_block";
pub const BEACON_AGGREGATE_AND_PROOF_TOPIC: &str = "beacon_aggregate_and_proof";
pub const BEACON_ATTESTATION_PREFIX: &str = "beacon_attestation_";
pub const BLOB_SIDECAR_PREFIX: &str = "blob_sidecar_";
pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit";
pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing";
pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing";
@ -39,22 +41,34 @@ pub const LIGHT_CLIENT_GOSSIP_TOPICS: [GossipKind; 2] = [
    GossipKind::LightClientOptimisticUpdate,
];

pub const DENEB_CORE_TOPICS: [GossipKind; 0] = [];

/// Returns the core topics associated with each fork that are new to the previous fork
pub fn fork_core_topics(fork_name: &ForkName) -> Vec<GossipKind> {
pub fn fork_core_topics<T: EthSpec>(fork_name: &ForkName) -> Vec<GossipKind> {
    match fork_name {
        ForkName::Base => BASE_CORE_TOPICS.to_vec(),
        ForkName::Altair => ALTAIR_CORE_TOPICS.to_vec(),
        ForkName::Merge => vec![],
        ForkName::Capella => CAPELLA_CORE_TOPICS.to_vec(),
        ForkName::Deneb => {
            // All of deneb blob topics are core topics
            let mut deneb_blob_topics = Vec::new();
            for i in 0..BLOB_SIDECAR_SUBNET_COUNT {
                deneb_blob_topics.push(GossipKind::BlobSidecar(i));
            }
            let mut deneb_topics = DENEB_CORE_TOPICS.to_vec();
            deneb_topics.append(&mut deneb_blob_topics);
            deneb_topics
        }
    }
}

/// Returns all the topics that we need to subscribe to for a given fork
/// including topics from older forks and new topics for the current fork.
pub fn core_topics_to_subscribe(mut current_fork: ForkName) -> Vec<GossipKind> {
    let mut topics = fork_core_topics(&current_fork);
pub fn core_topics_to_subscribe<T: EthSpec>(mut current_fork: ForkName) -> Vec<GossipKind> {
    let mut topics = fork_core_topics::<T>(&current_fork);
    while let Some(previous_fork) = current_fork.previous_fork() {
        let previous_fork_topics = fork_core_topics(&previous_fork);
        let previous_fork_topics = fork_core_topics::<T>(&previous_fork);
        topics.extend(previous_fork_topics);
        current_fork = previous_fork;
    }
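`fork_core_topics` now returns only the topics that are new at a given fork (at Deneb, one `blob_sidecar_{i}` topic per subnet), and `core_topics_to_subscribe` walks `previous_fork()` to accumulate everything older. A standalone, editor-added sketch of that accumulation pattern (the `Fork`/`Topic` types, the per-fork topic lists, and the subnet count of 6 are illustrative assumptions, not the crate's definitions):

```rust
// Standalone sketch of the fork-topic accumulation pattern shown above.

#[derive(Debug, Clone, Copy)]
enum Fork { Base, Altair, Merge, Capella, Deneb }

#[derive(Debug, PartialEq)]
enum Topic { BeaconBlock, SyncCommitteeContributionAndProof, BlsToExecutionChange, BlobSidecar(u64) }

const BLOB_SIDECAR_SUBNET_COUNT: u64 = 6; // assumed value for illustration

impl Fork {
    fn previous(self) -> Option<Fork> {
        match self {
            Fork::Base => None,
            Fork::Altair => Some(Fork::Base),
            Fork::Merge => Some(Fork::Altair),
            Fork::Capella => Some(Fork::Merge),
            Fork::Deneb => Some(Fork::Capella),
        }
    }
}

/// Topics that are *new* at each fork (mirrors `fork_core_topics`).
fn fork_core_topics(fork: Fork) -> Vec<Topic> {
    match fork {
        Fork::Base => vec![Topic::BeaconBlock],
        Fork::Altair => vec![Topic::SyncCommitteeContributionAndProof],
        Fork::Merge => vec![],
        Fork::Capella => vec![Topic::BlsToExecutionChange],
        // Every blob subnet becomes a core topic at Deneb.
        Fork::Deneb => (0..BLOB_SIDECAR_SUBNET_COUNT).map(Topic::BlobSidecar).collect(),
    }
}

/// Union of the current fork's topics and every earlier fork's topics
/// (mirrors `core_topics_to_subscribe`).
fn core_topics_to_subscribe(mut fork: Fork) -> Vec<Topic> {
    let mut topics = fork_core_topics(fork);
    while let Some(previous) = fork.previous() {
        topics.extend(fork_core_topics(previous));
        fork = previous;
    }
    topics
}

fn main() {
    let topics = core_topics_to_subscribe(Fork::Deneb);
    // Expect the blob-sidecar topics plus the older core topics.
    println!("{} topics at Deneb: {:?}", topics.len(), topics);
}
```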
@ -82,6 +96,8 @@ pub enum GossipKind {
    BeaconBlock,
    /// Topic for publishing aggregate attestations and proofs.
    BeaconAggregateAndProof,
    /// Topic for publishing BlobSidecars.
    BlobSidecar(u64),
    /// Topic for publishing raw attestations on a particular subnet.
    #[strum(serialize = "beacon_attestation")]
    Attestation(SubnetId),
@ -111,6 +127,9 @@ impl std::fmt::Display for GossipKind {
            GossipKind::SyncCommitteeMessage(subnet_id) => {
                write!(f, "sync_committee_{}", **subnet_id)
            }
            GossipKind::BlobSidecar(blob_index) => {
                write!(f, "{}{}", BLOB_SIDECAR_PREFIX, blob_index)
            }
            x => f.write_str(x.as_ref()),
        }
    }
@ -178,11 +197,8 @@ impl GossipTopic {
            BLS_TO_EXECUTION_CHANGE_TOPIC => GossipKind::BlsToExecutionChange,
            LIGHT_CLIENT_FINALITY_UPDATE => GossipKind::LightClientFinalityUpdate,
            LIGHT_CLIENT_OPTIMISTIC_UPDATE => GossipKind::LightClientOptimisticUpdate,
            topic => match committee_topic_index(topic) {
                Some(subnet) => match subnet {
                    Subnet::Attestation(s) => GossipKind::Attestation(s),
                    Subnet::SyncCommittee(s) => GossipKind::SyncCommitteeMessage(s),
                },
            topic => match subnet_topic_index(topic) {
                Some(kind) => kind,
                None => return Err(format!("Unknown topic: {}", topic)),
            },
        };
@ -236,6 +252,9 @@ impl std::fmt::Display for GossipTopic {
            GossipKind::SyncCommitteeMessage(index) => {
                format!("{}{}", SYNC_COMMITTEE_PREFIX_TOPIC, *index)
            }
            GossipKind::BlobSidecar(blob_index) => {
                format!("{}{}", BLOB_SIDECAR_PREFIX, blob_index)
            }
            GossipKind::BlsToExecutionChange => BLS_TO_EXECUTION_CHANGE_TOPIC.into(),
            GossipKind::LightClientFinalityUpdate => LIGHT_CLIENT_FINALITY_UPDATE.into(),
            GossipKind::LightClientOptimisticUpdate => LIGHT_CLIENT_OPTIMISTIC_UPDATE.into(),
@ -267,28 +286,26 @@ pub fn subnet_from_topic_hash(topic_hash: &TopicHash) -> Option<Subnet> {
    GossipTopic::decode(topic_hash.as_str()).ok()?.subnet_id()
}

// Determines if a string is an attestation or sync committee topic.
fn committee_topic_index(topic: &str) -> Option<Subnet> {
    if topic.starts_with(BEACON_ATTESTATION_PREFIX) {
        return Some(Subnet::Attestation(SubnetId::new(
            topic
                .trim_start_matches(BEACON_ATTESTATION_PREFIX)
                .parse::<u64>()
                .ok()?,
// Determines if the topic name is of an indexed topic.
fn subnet_topic_index(topic: &str) -> Option<GossipKind> {
    if let Some(index) = topic.strip_prefix(BEACON_ATTESTATION_PREFIX) {
        return Some(GossipKind::Attestation(SubnetId::new(
            index.parse::<u64>().ok()?,
        )));
    } else if topic.starts_with(SYNC_COMMITTEE_PREFIX_TOPIC) {
        return Some(Subnet::SyncCommittee(SyncSubnetId::new(
            topic
                .trim_start_matches(SYNC_COMMITTEE_PREFIX_TOPIC)
                .parse::<u64>()
                .ok()?,
    } else if let Some(index) = topic.strip_prefix(SYNC_COMMITTEE_PREFIX_TOPIC) {
        return Some(GossipKind::SyncCommitteeMessage(SyncSubnetId::new(
            index.parse::<u64>().ok()?,
        )));
    } else if let Some(index) = topic.strip_prefix(BLOB_SIDECAR_PREFIX) {
        return Some(GossipKind::BlobSidecar(index.parse::<u64>().ok()?));
    }
    None
}
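The refactor above folds attestation, sync-committee, and the new blob-sidecar topics into a single `subnet_topic_index` helper that returns a `GossipKind` directly, using `str::strip_prefix` to match and remove the prefix in one step instead of `starts_with` plus `trim_start_matches`. A small, editor-added illustration of that parsing pattern (simplified to one prefix and a bare `u64` return, not the crate's API):

```rust
// Sketch of the strip_prefix parsing pattern used by `subnet_topic_index`.
const BLOB_SIDECAR_PREFIX: &str = "blob_sidecar_";

fn blob_subnet_index(topic: &str) -> Option<u64> {
    // `strip_prefix` both checks the prefix and removes it, so the remainder
    // can be parsed directly; a missing prefix or non-numeric suffix yields None.
    topic.strip_prefix(BLOB_SIDECAR_PREFIX)?.parse::<u64>().ok()
}

fn main() {
    assert_eq!(blob_subnet_index("blob_sidecar_3"), Some(3));
    assert_eq!(blob_subnet_index("beacon_attestation_3"), None);
    assert_eq!(blob_subnet_index("blob_sidecar_x"), None);
}
```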

#[cfg(test)]
mod tests {
    use types::MainnetEthSpec;

    use super::GossipKind::*;
    use super::*;

@ -417,12 +434,15 @@ mod tests {

    #[test]
    fn test_core_topics_to_subscribe() {
        type E = MainnetEthSpec;
        let mut all_topics = Vec::new();
        let mut deneb_core_topics = fork_core_topics::<E>(&ForkName::Deneb);
        all_topics.append(&mut deneb_core_topics);
        all_topics.extend(CAPELLA_CORE_TOPICS);
        all_topics.extend(ALTAIR_CORE_TOPICS);
        all_topics.extend(BASE_CORE_TOPICS);

        let latest_fork = *ForkName::list_all().last().unwrap();
        assert_eq!(core_topics_to_subscribe(latest_fork), all_topics);
        assert_eq!(core_topics_to_subscribe::<E>(latest_fork), all_topics);
    }
}

@ -25,16 +25,19 @@ pub fn fork_context(fork_name: ForkName) -> ForkContext {
    let altair_fork_epoch = Epoch::new(1);
    let merge_fork_epoch = Epoch::new(2);
    let capella_fork_epoch = Epoch::new(3);
    let deneb_fork_epoch = Epoch::new(4);

    chain_spec.altair_fork_epoch = Some(altair_fork_epoch);
    chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch);
    chain_spec.capella_fork_epoch = Some(capella_fork_epoch);
    chain_spec.deneb_fork_epoch = Some(deneb_fork_epoch);

    let current_slot = match fork_name {
        ForkName::Base => Slot::new(0),
        ForkName::Altair => altair_fork_epoch.start_slot(E::slots_per_epoch()),
        ForkName::Merge => merge_fork_epoch.start_slot(E::slots_per_epoch()),
        ForkName::Capella => capella_fork_epoch.start_slot(E::slots_per_epoch()),
        ForkName::Deneb => deneb_fork_epoch.start_slot(E::slots_per_epoch()),
    };
    ForkContext::new::<E>(current_slot, Hash256::zero(), &chain_spec)
}
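The test helper above schedules each fork one epoch apart and converts the selected fork's activation epoch into its start slot. A quick, editor-added sketch of the epoch-to-slot arithmetic it relies on (32 slots per epoch is the mainnet preset, assumed here for illustration):

```rust
// start_slot(epoch) = epoch * SLOTS_PER_EPOCH
const SLOTS_PER_EPOCH: u64 = 32; // mainnet preset, assumed for illustration

fn start_slot(epoch: u64) -> u64 {
    epoch * SLOTS_PER_EPOCH
}

fn main() {
    // Deneb is activated at epoch 4 in the test helper above.
    assert_eq!(start_slot(4), 128);
    println!("deneb fork starts at slot {}", start_slot(4));
}
```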
Some files were not shown because too many files have changed in this diff.