Merge branch 'capella' into unstable

This commit is contained in:
Michael Sproul 2023-02-22 10:25:45 +11:00
commit fa8b920dd8
No known key found for this signature in database
GPG Key ID: 77B1309D2E54E914
267 changed files with 11892 additions and 2096 deletions

View File

@ -5,6 +5,7 @@ on:
branches: branches:
- unstable - unstable
- stable - stable
- capella
tags: tags:
- v* - v*
@ -34,6 +35,11 @@ jobs:
run: | run: |
echo "VERSION=latest" >> $GITHUB_ENV echo "VERSION=latest" >> $GITHUB_ENV
echo "VERSION_SUFFIX=-unstable" >> $GITHUB_ENV echo "VERSION_SUFFIX=-unstable" >> $GITHUB_ENV
- name: Extract version (if capella)
if: github.event.ref == 'refs/heads/capella'
run: |
echo "VERSION=capella" >> $GITHUB_ENV
echo "VERSION_SUFFIX=" >> $GITHUB_ENV
- name: Extract version (if tagged release) - name: Extract version (if tagged release)
if: startsWith(github.event.ref, 'refs/tags') if: startsWith(github.event.ref, 'refs/tags')
run: | run: |

76
Cargo.lock generated
View File

@ -211,9 +211,8 @@ checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800"
[[package]] [[package]]
name = "arbitrary" name = "arbitrary"
version = "1.2.3" version = "1.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "git+https://github.com/michaelsproul/arbitrary?rev=a572fd8743012a4f1ada5ee5968b1b3619c427ba#a572fd8743012a4f1ada5ee5968b1b3619c427ba"
checksum = "3e90af4de65aa7b293ef2d09daff88501eb254f58edde2e1ac02c82d873eadad"
dependencies = [ dependencies = [
"derive_arbitrary", "derive_arbitrary",
] ]
@ -545,7 +544,7 @@ checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf"
[[package]] [[package]]
name = "beacon-api-client" name = "beacon-api-client"
version = "0.1.0" version = "0.1.0"
source = "git+https://github.com/ralexstokes/beacon-api-client?rev=7d5d8dad1648f771573f42585ad8080a45b05689#7d5d8dad1648f771573f42585ad8080a45b05689" source = "git+https://github.com/ralexstokes/beacon-api-client#53690a711e33614d59d4d44fb09762b4699e2a4e"
dependencies = [ dependencies = [
"ethereum-consensus", "ethereum-consensus",
"http", "http",
@ -607,7 +606,7 @@ dependencies = [
"state_processing", "state_processing",
"store", "store",
"strum", "strum",
"superstruct", "superstruct 0.5.0",
"task_executor", "task_executor",
"tempfile", "tempfile",
"tokio", "tokio",
@ -1057,8 +1056,10 @@ dependencies = [
"lazy_static", "lazy_static",
"lighthouse_metrics", "lighthouse_metrics",
"lighthouse_network", "lighthouse_network",
"logging",
"monitoring_api", "monitoring_api",
"network", "network",
"operation_pool",
"parking_lot 0.12.1", "parking_lot 0.12.1",
"sensitive_url", "sensitive_url",
"serde", "serde",
@ -1068,6 +1069,7 @@ dependencies = [
"slasher_service", "slasher_service",
"slog", "slog",
"slot_clock", "slot_clock",
"state_processing",
"store", "store",
"task_executor", "task_executor",
"time 0.3.17", "time 0.3.17",
@ -1678,10 +1680,10 @@ dependencies = [
[[package]] [[package]]
name = "derive_arbitrary" name = "derive_arbitrary"
version = "1.2.3" version = "1.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "git+https://github.com/michaelsproul/arbitrary?rev=a572fd8743012a4f1ada5ee5968b1b3619c427ba#a572fd8743012a4f1ada5ee5968b1b3619c427ba"
checksum = "8beee4701e2e229e8098bbdecdca12449bc3e322f137d269182fa1291e20bd00"
dependencies = [ dependencies = [
"darling 0.14.3",
"proc-macro2", "proc-macro2",
"quote", "quote",
"syn", "syn",
@ -2066,7 +2068,7 @@ dependencies = [
"slog", "slog",
"sloggers", "sloggers",
"state_processing", "state_processing",
"superstruct", "superstruct 0.5.0",
"task_executor", "task_executor",
"tokio", "tokio",
"tree_hash", "tree_hash",
@ -2219,9 +2221,10 @@ dependencies = [
[[package]] [[package]]
name = "eth2_ssz_derive" name = "eth2_ssz_derive"
version = "0.3.0" version = "0.3.1"
dependencies = [ dependencies = [
"darling 0.13.4", "darling 0.13.4",
"eth2_ssz",
"proc-macro2", "proc-macro2",
"quote", "quote",
"syn", "syn",
@ -2332,7 +2335,7 @@ dependencies = [
[[package]] [[package]]
name = "ethereum-consensus" name = "ethereum-consensus"
version = "0.1.1" version = "0.1.1"
source = "git+https://github.com/ralexstokes/ethereum-consensus?rev=a8110af76d97bf2bf27fb987a671808fcbdf1834#a8110af76d97bf2bf27fb987a671808fcbdf1834" source = "git+https://github.com/ralexstokes//ethereum-consensus?rev=9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d#9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d"
dependencies = [ dependencies = [
"async-stream", "async-stream",
"blst", "blst",
@ -2341,6 +2344,7 @@ dependencies = [
"hex", "hex",
"integer-sqrt", "integer-sqrt",
"multiaddr 0.14.0", "multiaddr 0.14.0",
"multihash",
"rand 0.8.5", "rand 0.8.5",
"serde", "serde",
"serde_json", "serde_json",
@ -2499,7 +2503,7 @@ dependencies = [
"lazy_static", "lazy_static",
"lighthouse_metrics", "lighthouse_metrics",
"lru 0.7.8", "lru 0.7.8",
"mev-build-rs", "mev-rs",
"parking_lot 0.12.1", "parking_lot 0.12.1",
"rand 0.8.5", "rand 0.8.5",
"reqwest", "reqwest",
@ -2511,6 +2515,7 @@ dependencies = [
"ssz-rs", "ssz-rs",
"state_processing", "state_processing",
"strum", "strum",
"superstruct 0.6.0",
"task_executor", "task_executor",
"tempfile", "tempfile",
"tokio", "tokio",
@ -3213,6 +3218,7 @@ dependencies = [
"eth2_ssz", "eth2_ssz",
"execution_layer", "execution_layer",
"futures", "futures",
"genesis",
"hex", "hex",
"lazy_static", "lazy_static",
"lighthouse_metrics", "lighthouse_metrics",
@ -3221,6 +3227,7 @@ dependencies = [
"logging", "logging",
"lru 0.7.8", "lru 0.7.8",
"network", "network",
"operation_pool",
"parking_lot 0.12.1", "parking_lot 0.12.1",
"proto_array", "proto_array",
"safe_arith", "safe_arith",
@ -4399,13 +4406,15 @@ dependencies = [
"smallvec", "smallvec",
"snap", "snap",
"strum", "strum",
"superstruct", "superstruct 0.5.0",
"task_executor", "task_executor",
"tempfile", "tempfile",
"tiny-keccak", "tiny-keccak",
"tokio", "tokio",
"tokio-io-timeout", "tokio-io-timeout",
"tokio-util 0.6.10", "tokio-util 0.6.10",
"tree_hash",
"tree_hash_derive",
"types", "types",
"unsigned-varint 0.6.0", "unsigned-varint 0.6.0",
"unused_port", "unused_port",
@ -4663,18 +4672,19 @@ dependencies = [
] ]
[[package]] [[package]]
name = "mev-build-rs" name = "mev-rs"
version = "0.2.1" version = "0.2.1"
source = "git+https://github.com/ralexstokes/mev-rs?rev=6c99b0fbdc0427b1625469d2e575303ce08de5b8#6c99b0fbdc0427b1625469d2e575303ce08de5b8" source = "git+https://github.com/ralexstokes//mev-rs?rev=7813d4a4a564e0754e9aaab2d95520ba437c3889#7813d4a4a564e0754e9aaab2d95520ba437c3889"
dependencies = [ dependencies = [
"async-trait", "async-trait",
"axum", "axum",
"beacon-api-client", "beacon-api-client",
"ethereum-consensus", "ethereum-consensus",
"hyper",
"serde", "serde",
"serde_json",
"ssz-rs", "ssz-rs",
"thiserror", "thiserror",
"tokio",
"tracing", "tracing",
] ]
@ -4997,6 +5007,7 @@ dependencies = [
"lru_cache", "lru_cache",
"matches", "matches",
"num_cpus", "num_cpus",
"operation_pool",
"rand 0.8.5", "rand 0.8.5",
"rlp", "rlp",
"slog", "slog",
@ -5332,6 +5343,7 @@ dependencies = [
"lighthouse_metrics", "lighthouse_metrics",
"maplit", "maplit",
"parking_lot 0.12.1", "parking_lot 0.12.1",
"rand 0.8.5",
"rayon", "rayon",
"serde", "serde",
"serde_derive", "serde_derive",
@ -6772,6 +6784,16 @@ dependencies = [
"serde_derive", "serde_derive",
] ]
[[package]]
name = "serde-big-array"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "18b20e7752957bbe9661cff4e0bb04d183d0948cdab2ea58cdb9df36a61dfe62"
dependencies = [
"serde",
"serde_derive",
]
[[package]] [[package]]
name = "serde_array_query" name = "serde_array_query"
version = "0.1.0" version = "0.1.0"
@ -7277,11 +7299,10 @@ dependencies = [
[[package]] [[package]]
name = "ssz-rs" name = "ssz-rs"
version = "0.8.0" version = "0.8.0"
source = "git+https://github.com/ralexstokes/ssz-rs?rev=cb08f1#cb08f18ca919cc1b685b861d0fa9e2daabe89737" source = "git+https://github.com/ralexstokes//ssz-rs?rev=adf1a0b14cef90b9536f28ef89da1fab316465e1#adf1a0b14cef90b9536f28ef89da1fab316465e1"
dependencies = [ dependencies = [
"bitvec 1.0.1", "bitvec 1.0.1",
"hex", "hex",
"lazy_static",
"num-bigint", "num-bigint",
"serde", "serde",
"sha2 0.9.9", "sha2 0.9.9",
@ -7292,7 +7313,7 @@ dependencies = [
[[package]] [[package]]
name = "ssz-rs-derive" name = "ssz-rs-derive"
version = "0.8.0" version = "0.8.0"
source = "git+https://github.com/ralexstokes/ssz-rs?rev=cb08f1#cb08f18ca919cc1b685b861d0fa9e2daabe89737" source = "git+https://github.com/ralexstokes//ssz-rs?rev=adf1a0b14cef90b9536f28ef89da1fab316465e1#adf1a0b14cef90b9536f28ef89da1fab316465e1"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",
@ -7451,6 +7472,20 @@ dependencies = [
"syn", "syn",
] ]
[[package]]
name = "superstruct"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75b9e5728aa1a87141cefd4e7509903fc01fa0dcb108022b1e841a67c5159fc5"
dependencies = [
"darling 0.13.4",
"itertools",
"proc-macro2",
"quote",
"smallvec",
"syn",
]
[[package]] [[package]]
name = "swap_or_not_shuffle" name = "swap_or_not_shuffle"
version = "0.2.0" version = "0.2.0"
@ -8266,6 +8301,7 @@ dependencies = [
"rusqlite", "rusqlite",
"safe_arith", "safe_arith",
"serde", "serde",
"serde-big-array",
"serde_derive", "serde_derive",
"serde_json", "serde_json",
"serde_with", "serde_with",
@ -8273,7 +8309,7 @@ dependencies = [
"slog", "slog",
"smallvec", "smallvec",
"state_processing", "state_processing",
"superstruct", "superstruct 0.6.0",
"swap_or_not_shuffle", "swap_or_not_shuffle",
"tempfile", "tempfile",
"test_random_derive", "test_random_derive",

View File

@ -100,6 +100,14 @@ eth2_hashing = { path = "crypto/eth2_hashing" }
tree_hash = { path = "consensus/tree_hash" } tree_hash = { path = "consensus/tree_hash" }
tree_hash_derive = { path = "consensus/tree_hash_derive" } tree_hash_derive = { path = "consensus/tree_hash_derive" }
eth2_serde_utils = { path = "consensus/serde_utils" } eth2_serde_utils = { path = "consensus/serde_utils" }
arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="a572fd8743012a4f1ada5ee5968b1b3619c427ba" }
[patch."https://github.com/ralexstokes/mev-rs"]
mev-rs = { git = "https://github.com/ralexstokes//mev-rs", rev = "7813d4a4a564e0754e9aaab2d95520ba437c3889" }
[patch."https://github.com/ralexstokes/ethereum-consensus"]
ethereum-consensus = { git = "https://github.com/ralexstokes//ethereum-consensus", rev = "9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d" }
[patch."https://github.com/ralexstokes/ssz-rs"]
ssz-rs = { git = "https://github.com/ralexstokes//ssz-rs", rev = "adf1a0b14cef90b9536f28ef89da1fab316465e1" }
[profile.maxperf] [profile.maxperf]
inherits = "release" inherits = "release"

View File

@ -1,4 +1,4 @@
FROM rust:1.62.1-bullseye AS builder FROM rust:1.65.0-bullseye AS builder
RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler
COPY . lighthouse COPY . lighthouse
ARG FEATURES ARG FEATURES

View File

@ -28,12 +28,15 @@ CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx,jemalloc
# Cargo profile for Cross builds. Default is for local builds, CI uses an override. # Cargo profile for Cross builds. Default is for local builds, CI uses an override.
CROSS_PROFILE ?= release CROSS_PROFILE ?= release
# List of features to use when running EF tests.
EF_TEST_FEATURES ?=
# Cargo profile for regular builds. # Cargo profile for regular builds.
PROFILE ?= release PROFILE ?= release
# List of all hard forks. This list is used to set env variables for several tests so that # List of all hard forks. This list is used to set env variables for several tests so that
# they run for different forks. # they run for different forks.
FORKS=phase0 altair merge FORKS=phase0 altair merge capella
# Builds the Lighthouse binary in release (optimized). # Builds the Lighthouse binary in release (optimized).
# #
@ -112,9 +115,9 @@ check-benches:
# Runs only the ef-test vectors. # Runs only the ef-test vectors.
run-ef-tests: run-ef-tests:
rm -rf $(EF_TESTS)/.accessed_file_log.txt rm -rf $(EF_TESTS)/.accessed_file_log.txt
cargo test --release -p ef_tests --features "ef_tests" cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES)"
cargo test --release -p ef_tests --features "ef_tests,fake_crypto" cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),fake_crypto"
cargo test --release -p ef_tests --features "ef_tests,milagro" cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),milagro"
./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests ./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests
# Run the tests in the `beacon_chain` crate for all known forks. # Run the tests in the `beacon_chain` crate for all known forks.

View File

@ -33,7 +33,7 @@ slot_clock = { path = "../../common/slot_clock" }
eth2_hashing = "0.3.0" eth2_hashing = "0.3.0"
eth2_ssz = "0.4.1" eth2_ssz = "0.4.1"
eth2_ssz_types = "0.2.2" eth2_ssz_types = "0.2.2"
eth2_ssz_derive = "0.3.0" eth2_ssz_derive = "0.3.1"
state_processing = { path = "../../consensus/state_processing" } state_processing = { path = "../../consensus/state_processing" }
tree_hash = "0.4.1" tree_hash = "0.4.1"
types = { path = "../../consensus/types" } types = { path = "../../consensus/types" }

View File

@ -15,12 +15,12 @@ use store::{
consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}, consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR},
RelativeEpoch, RelativeEpoch,
}; };
use types::{BeaconBlockRef, BeaconState, BeaconStateError, ExecPayload, Hash256}; use types::{AbstractExecPayload, BeaconBlockRef, BeaconState, BeaconStateError, Hash256};
type BeaconBlockSubRewardValue = u64; type BeaconBlockSubRewardValue = u64;
impl<T: BeaconChainTypes> BeaconChain<T> { impl<T: BeaconChainTypes> BeaconChain<T> {
pub fn compute_beacon_block_reward<Payload: ExecPayload<T::EthSpec>>( pub fn compute_beacon_block_reward<Payload: AbstractExecPayload<T::EthSpec>>(
&self, &self,
block: BeaconBlockRef<'_, T::EthSpec, Payload>, block: BeaconBlockRef<'_, T::EthSpec, Payload>,
block_root: Hash256, block_root: Hash256,
@ -97,7 +97,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
}) })
} }
fn compute_beacon_block_sync_aggregate_reward<Payload: ExecPayload<T::EthSpec>>( fn compute_beacon_block_sync_aggregate_reward<Payload: AbstractExecPayload<T::EthSpec>>(
&self, &self,
block: BeaconBlockRef<'_, T::EthSpec, Payload>, block: BeaconBlockRef<'_, T::EthSpec, Payload>,
state: &BeaconState<T::EthSpec>, state: &BeaconState<T::EthSpec>,
@ -111,7 +111,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
} }
} }
fn compute_beacon_block_proposer_slashing_reward<Payload: ExecPayload<T::EthSpec>>( fn compute_beacon_block_proposer_slashing_reward<Payload: AbstractExecPayload<T::EthSpec>>(
&self, &self,
block: BeaconBlockRef<'_, T::EthSpec, Payload>, block: BeaconBlockRef<'_, T::EthSpec, Payload>,
state: &BeaconState<T::EthSpec>, state: &BeaconState<T::EthSpec>,
@ -132,7 +132,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
Ok(proposer_slashing_reward) Ok(proposer_slashing_reward)
} }
fn compute_beacon_block_attester_slashing_reward<Payload: ExecPayload<T::EthSpec>>( fn compute_beacon_block_attester_slashing_reward<Payload: AbstractExecPayload<T::EthSpec>>(
&self, &self,
block: BeaconBlockRef<'_, T::EthSpec, Payload>, block: BeaconBlockRef<'_, T::EthSpec, Payload>,
state: &BeaconState<T::EthSpec>, state: &BeaconState<T::EthSpec>,
@ -155,7 +155,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
Ok(attester_slashing_reward) Ok(attester_slashing_reward)
} }
fn compute_beacon_block_attestation_reward_base<Payload: ExecPayload<T::EthSpec>>( fn compute_beacon_block_attestation_reward_base<Payload: AbstractExecPayload<T::EthSpec>>(
&self, &self,
block: BeaconBlockRef<'_, T::EthSpec, Payload>, block: BeaconBlockRef<'_, T::EthSpec, Payload>,
block_root: Hash256, block_root: Hash256,
@ -173,7 +173,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
Ok(block_attestation_reward) Ok(block_attestation_reward)
} }
fn compute_beacon_block_attestation_reward_altair<Payload: ExecPayload<T::EthSpec>>( fn compute_beacon_block_attestation_reward_altair<Payload: AbstractExecPayload<T::EthSpec>>(
&self, &self,
block: BeaconBlockRef<'_, T::EthSpec, Payload>, block: BeaconBlockRef<'_, T::EthSpec, Payload>,
state: &mut BeaconState<T::EthSpec>, state: &mut BeaconState<T::EthSpec>,

View File

@ -12,6 +12,7 @@ use crate::block_verification::{
signature_verify_chain_segment, BlockError, ExecutionPendingBlock, GossipVerifiedBlock, signature_verify_chain_segment, BlockError, ExecutionPendingBlock, GossipVerifiedBlock,
IntoExecutionPendingBlock, PayloadVerificationOutcome, POS_PANDA_BANNER, IntoExecutionPendingBlock, PayloadVerificationOutcome, POS_PANDA_BANNER,
}; };
pub use crate::canonical_head::{CanonicalHead, CanonicalHeadRwLock};
use crate::chain_config::ChainConfig; use crate::chain_config::ChainConfig;
use crate::early_attester_cache::EarlyAttesterCache; use crate::early_attester_cache::EarlyAttesterCache;
use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::errors::{BeaconChainError as Error, BlockProductionError};
@ -58,8 +59,10 @@ use crate::validator_pubkey_cache::ValidatorPubkeyCache;
use crate::{metrics, BeaconChainError, BeaconForkChoiceStore, BeaconSnapshot, CachedHead}; use crate::{metrics, BeaconChainError, BeaconForkChoiceStore, BeaconSnapshot, CachedHead};
use eth2::types::{EventKind, SseBlock, SyncDuty}; use eth2::types::{EventKind, SseBlock, SyncDuty};
use execution_layer::{ use execution_layer::{
BuilderParams, ChainHealth, ExecutionLayer, FailedCondition, PayloadAttributes, PayloadStatus, BlockProposalContents, BuilderParams, ChainHealth, ExecutionLayer, FailedCondition,
PayloadAttributes, PayloadStatus,
}; };
pub use fork_choice::CountUnrealized;
use fork_choice::{ use fork_choice::{
AttestationFromBlock, ExecutionStatus, ForkChoice, ForkchoiceUpdateParameters, AttestationFromBlock, ExecutionStatus, ForkChoice, ForkchoiceUpdateParameters,
InvalidationOperation, PayloadVerificationStatus, ResetPayloadStatuses, InvalidationOperation, PayloadVerificationStatus, ResetPayloadStatuses,
@ -67,7 +70,7 @@ use fork_choice::{
use futures::channel::mpsc::Sender; use futures::channel::mpsc::Sender;
use itertools::process_results; use itertools::process_results;
use itertools::Itertools; use itertools::Itertools;
use operation_pool::{AttestationRef, OperationPool, PersistedOperationPool}; use operation_pool::{AttestationRef, OperationPool, PersistedOperationPool, ReceivedPreCapella};
use parking_lot::{Mutex, RwLock}; use parking_lot::{Mutex, RwLock};
use proto_array::{CountUnrealizedFull, DoNotReOrg, ProposerHeadError}; use proto_array::{CountUnrealizedFull, DoNotReOrg, ProposerHeadError};
use safe_arith::SafeArith; use safe_arith::SafeArith;
@ -79,8 +82,8 @@ use state_processing::{
common::get_attesting_indices_from_state, common::get_attesting_indices_from_state,
per_block_processing, per_block_processing,
per_block_processing::{ per_block_processing::{
errors::AttestationValidationError, verify_attestation_for_block_inclusion, errors::AttestationValidationError, get_expected_withdrawals,
VerifySignatures, verify_attestation_for_block_inclusion, VerifySignatures,
}, },
per_slot_processing, per_slot_processing,
state_advance::{complete_state_advance, partial_state_advance}, state_advance::{complete_state_advance, partial_state_advance},
@ -103,9 +106,6 @@ use types::beacon_state::CloneConfig;
use types::consts::merge::INTERVALS_PER_SLOT; use types::consts::merge::INTERVALS_PER_SLOT;
use types::*; use types::*;
pub use crate::canonical_head::{CanonicalHead, CanonicalHeadRwLock};
pub use fork_choice::CountUnrealized;
pub type ForkChoiceError = fork_choice::Error<crate::ForkChoiceStoreError>; pub type ForkChoiceError = fork_choice::Error<crate::ForkChoiceStoreError>;
/// Alias to appease clippy. /// Alias to appease clippy.
@ -269,7 +269,7 @@ pub trait BeaconChainTypes: Send + Sync + 'static {
} }
/// Used internally to split block production into discrete functions. /// Used internally to split block production into discrete functions.
struct PartialBeaconBlock<E: EthSpec, Payload> { struct PartialBeaconBlock<E: EthSpec, Payload: AbstractExecPayload<E>> {
state: BeaconState<E>, state: BeaconState<E>,
slot: Slot, slot: Slot,
proposer_index: u64, proposer_index: u64,
@ -283,7 +283,8 @@ struct PartialBeaconBlock<E: EthSpec, Payload> {
deposits: Vec<Deposit>, deposits: Vec<Deposit>,
voluntary_exits: Vec<SignedVoluntaryExit>, voluntary_exits: Vec<SignedVoluntaryExit>,
sync_aggregate: Option<SyncAggregate<E>>, sync_aggregate: Option<SyncAggregate<E>>,
prepare_payload_handle: Option<PreparePayloadHandle<Payload>>, prepare_payload_handle: Option<PreparePayloadHandle<E, Payload>>,
bls_to_execution_changes: Vec<SignedBlsToExecutionChange>,
} }
pub type BeaconForkChoice<T> = ForkChoice< pub type BeaconForkChoice<T> = ForkChoice<
@ -360,6 +361,9 @@ pub struct BeaconChain<T: BeaconChainTypes> {
/// Maintains a record of which validators we've seen attester slashings for. /// Maintains a record of which validators we've seen attester slashings for.
pub(crate) observed_attester_slashings: pub(crate) observed_attester_slashings:
Mutex<ObservedOperations<AttesterSlashing<T::EthSpec>, T::EthSpec>>, Mutex<ObservedOperations<AttesterSlashing<T::EthSpec>, T::EthSpec>>,
/// Maintains a record of which validators we've seen BLS to execution changes for.
pub(crate) observed_bls_to_execution_changes:
Mutex<ObservedOperations<SignedBlsToExecutionChange, T::EthSpec>>,
/// The most recently validated light client finality update received on gossip. /// The most recently validated light client finality update received on gossip.
pub latest_seen_finality_update: Mutex<Option<LightClientFinalityUpdate<T::EthSpec>>>, pub latest_seen_finality_update: Mutex<Option<LightClientFinalityUpdate<T::EthSpec>>>,
/// The most recently validated light client optimistic update received on gossip. /// The most recently validated light client optimistic update received on gossip.
@ -959,21 +963,22 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
Some(DatabaseBlock::Blinded(block)) => block, Some(DatabaseBlock::Blinded(block)) => block,
None => return Ok(None), None => return Ok(None),
}; };
let fork = blinded_block.fork_name(&self.spec)?;
// If we only have a blinded block, load the execution payload from the EL. // If we only have a blinded block, load the execution payload from the EL.
let block_message = blinded_block.message(); let block_message = blinded_block.message();
let execution_payload_header = &block_message let execution_payload_header = block_message
.execution_payload() .execution_payload()
.map_err(|_| Error::BlockVariantLacksExecutionPayload(*block_root))? .map_err(|_| Error::BlockVariantLacksExecutionPayload(*block_root))?
.execution_payload_header; .to_execution_payload_header();
let exec_block_hash = execution_payload_header.block_hash; let exec_block_hash = execution_payload_header.block_hash();
let execution_payload = self let execution_payload = self
.execution_layer .execution_layer
.as_ref() .as_ref()
.ok_or(Error::ExecutionLayerMissing)? .ok_or(Error::ExecutionLayerMissing)?
.get_payload_by_block_hash(exec_block_hash) .get_payload_by_block_hash(exec_block_hash, fork)
.await .await
.map_err(|e| { .map_err(|e| {
Error::ExecutionLayerErrorPayloadReconstruction(exec_block_hash, Box::new(e)) Error::ExecutionLayerErrorPayloadReconstruction(exec_block_hash, Box::new(e))
@ -981,9 +986,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.ok_or(Error::BlockHashMissingFromExecutionLayer(exec_block_hash))?; .ok_or(Error::BlockHashMissingFromExecutionLayer(exec_block_hash))?;
// Verify payload integrity. // Verify payload integrity.
let header_from_payload = ExecutionPayloadHeader::from(&execution_payload); let header_from_payload = ExecutionPayloadHeader::from(execution_payload.to_ref());
if header_from_payload != *execution_payload_header { if header_from_payload != execution_payload_header {
for txn in &execution_payload.transactions { for txn in execution_payload.transactions() {
debug!( debug!(
self.log, self.log,
"Reconstructed txn"; "Reconstructed txn";
@ -994,8 +999,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
return Err(Error::InconsistentPayloadReconstructed { return Err(Error::InconsistentPayloadReconstructed {
slot: blinded_block.slot(), slot: blinded_block.slot(),
exec_block_hash, exec_block_hash,
canonical_transactions_root: execution_payload_header.transactions_root, canonical_transactions_root: execution_payload_header.transactions_root(),
reconstructed_transactions_root: header_from_payload.transactions_root, reconstructed_transactions_root: header_from_payload.transactions_root(),
}); });
} }
@ -2218,6 +2223,79 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
} }
} }
/// Verify a signed BLS to execution change before allowing it to propagate on the gossip network.
pub fn verify_bls_to_execution_change_for_http_api(
&self,
bls_to_execution_change: SignedBlsToExecutionChange,
) -> Result<ObservationOutcome<SignedBlsToExecutionChange, T::EthSpec>, Error> {
// Before checking the gossip duplicate filter, check that no prior change is already
// in our op pool. Ignore these messages: do not gossip, do not try to override the pool.
match self
.op_pool
.bls_to_execution_change_in_pool_equals(&bls_to_execution_change)
{
Some(true) => return Ok(ObservationOutcome::AlreadyKnown),
Some(false) => return Err(Error::BlsToExecutionConflictsWithPool),
None => (),
}
// Use the head state to save advancing to the wall-clock slot unnecessarily. The message is
// signed with respect to the genesis fork version, and the slot check for gossip is applied
// separately. This `Arc` clone of the head is nice and cheap.
let head_snapshot = self.head().snapshot;
let head_state = &head_snapshot.beacon_state;
Ok(self
.observed_bls_to_execution_changes
.lock()
.verify_and_observe(bls_to_execution_change, head_state, &self.spec)?)
}
/// Verify a signed BLS to execution change before allowing it to propagate on the gossip network.
pub fn verify_bls_to_execution_change_for_gossip(
&self,
bls_to_execution_change: SignedBlsToExecutionChange,
) -> Result<ObservationOutcome<SignedBlsToExecutionChange, T::EthSpec>, Error> {
// Ignore BLS to execution changes on gossip prior to Capella.
if !self.current_slot_is_post_capella()? {
return Err(Error::BlsToExecutionPriorToCapella);
}
self.verify_bls_to_execution_change_for_http_api(bls_to_execution_change)
.or_else(|e| {
// On gossip treat conflicts the same as duplicates [IGNORE].
match e {
Error::BlsToExecutionConflictsWithPool => Ok(ObservationOutcome::AlreadyKnown),
e => Err(e),
}
})
}
/// Check if the current slot is greater than or equal to the Capella fork epoch.
pub fn current_slot_is_post_capella(&self) -> Result<bool, Error> {
let current_fork = self.spec.fork_name_at_slot::<T::EthSpec>(self.slot()?);
if let ForkName::Base | ForkName::Altair | ForkName::Merge = current_fork {
Ok(false)
} else {
Ok(true)
}
}
/// Import a BLS to execution change to the op pool.
///
/// Return `true` if the change was added to the pool.
pub fn import_bls_to_execution_change(
&self,
bls_to_execution_change: SigVerifiedOp<SignedBlsToExecutionChange, T::EthSpec>,
received_pre_capella: ReceivedPreCapella,
) -> bool {
if self.eth1_chain.is_some() {
self.op_pool
.insert_bls_to_execution_change(bls_to_execution_change, received_pre_capella)
} else {
false
}
}
/// Attempt to obtain sync committee duties from the head. /// Attempt to obtain sync committee duties from the head.
pub fn sync_committee_duties_from_head( pub fn sync_committee_duties_from_head(
&self, &self,
@ -3444,7 +3522,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// ///
/// The produced block will not be inherently valid, it must be signed by a block producer. /// The produced block will not be inherently valid, it must be signed by a block producer.
/// Block signing is out of the scope of this function and should be done by a separate program. /// Block signing is out of the scope of this function and should be done by a separate program.
pub async fn produce_block<Payload: ExecPayload<T::EthSpec>>( pub async fn produce_block<Payload: AbstractExecPayload<T::EthSpec> + 'static>(
self: &Arc<Self>, self: &Arc<Self>,
randao_reveal: Signature, randao_reveal: Signature,
slot: Slot, slot: Slot,
@ -3460,7 +3538,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
} }
/// Same as `produce_block` but allowing for configuration of RANDAO-verification. /// Same as `produce_block` but allowing for configuration of RANDAO-verification.
pub async fn produce_block_with_verification<Payload: ExecPayload<T::EthSpec>>( pub async fn produce_block_with_verification<
Payload: AbstractExecPayload<T::EthSpec> + 'static,
>(
self: &Arc<Self>, self: &Arc<Self>,
randao_reveal: Signature, randao_reveal: Signature,
slot: Slot, slot: Slot,
@ -3980,7 +4060,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// The provided `state_root_opt` should only ever be set to `Some` if the contained value is /// The provided `state_root_opt` should only ever be set to `Some` if the contained value is
/// equal to the root of `state`. Providing this value will serve as an optimization to avoid /// equal to the root of `state`. Providing this value will serve as an optimization to avoid
/// performing a tree hash in some scenarios. /// performing a tree hash in some scenarios.
pub async fn produce_block_on_state<Payload: ExecPayload<T::EthSpec>>( pub async fn produce_block_on_state<Payload: AbstractExecPayload<T::EthSpec> + 'static>(
self: &Arc<Self>, self: &Arc<Self>,
state: BeaconState<T::EthSpec>, state: BeaconState<T::EthSpec>,
state_root_opt: Option<Hash256>, state_root_opt: Option<Hash256>,
@ -4015,16 +4095,20 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// //
// Wait for the execution layer to return an execution payload (if one is required). // Wait for the execution layer to return an execution payload (if one is required).
let prepare_payload_handle = partial_beacon_block.prepare_payload_handle.take(); let prepare_payload_handle = partial_beacon_block.prepare_payload_handle.take();
let execution_payload = if let Some(prepare_payload_handle) = prepare_payload_handle { let block_contents = if let Some(prepare_payload_handle) = prepare_payload_handle {
let execution_payload = prepare_payload_handle Some(
.await prepare_payload_handle
.map_err(BlockProductionError::TokioJoin)? .await
.ok_or(BlockProductionError::ShuttingDown)??; .map_err(BlockProductionError::TokioJoin)?
Some(execution_payload) .ok_or(BlockProductionError::ShuttingDown)??,
)
} else { } else {
None None
}; };
//FIXME(sean) waiting for the BN<>EE api for this to stabilize
let kzg_commitments = vec![];
// Part 3/3 (blocking) // Part 3/3 (blocking)
// //
// Perform the final steps of combining all the parts and computing the state root. // Perform the final steps of combining all the parts and computing the state root.
@ -4034,7 +4118,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
move || { move || {
chain.complete_partial_beacon_block( chain.complete_partial_beacon_block(
partial_beacon_block, partial_beacon_block,
execution_payload, block_contents,
kzg_commitments,
verification, verification,
) )
}, },
@ -4045,7 +4130,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.map_err(BlockProductionError::TokioJoin)? .map_err(BlockProductionError::TokioJoin)?
} }
fn produce_partial_beacon_block<Payload: ExecPayload<T::EthSpec>>( fn produce_partial_beacon_block<Payload: AbstractExecPayload<T::EthSpec> + 'static>(
self: &Arc<Self>, self: &Arc<Self>,
mut state: BeaconState<T::EthSpec>, mut state: BeaconState<T::EthSpec>,
state_root_opt: Option<Hash256>, state_root_opt: Option<Hash256>,
@ -4105,7 +4190,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// allows it to run concurrently with things like attestation packing. // allows it to run concurrently with things like attestation packing.
let prepare_payload_handle = match &state { let prepare_payload_handle = match &state {
BeaconState::Base(_) | BeaconState::Altair(_) => None, BeaconState::Base(_) | BeaconState::Altair(_) => None,
BeaconState::Merge(_) => { BeaconState::Merge(_) | BeaconState::Capella(_) | BeaconState::Eip4844(_) => {
let prepare_payload_handle = let prepare_payload_handle =
get_execution_payload(self.clone(), &state, proposer_index, builder_params)?; get_execution_payload(self.clone(), &state, proposer_index, builder_params)?;
Some(prepare_payload_handle) Some(prepare_payload_handle)
@ -4118,6 +4203,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let eth1_data = eth1_chain.eth1_data_for_block_production(&state, &self.spec)?; let eth1_data = eth1_chain.eth1_data_for_block_production(&state, &self.spec)?;
let deposits = eth1_chain.deposits_for_block_inclusion(&state, &eth1_data, &self.spec)?; let deposits = eth1_chain.deposits_for_block_inclusion(&state, &eth1_data, &self.spec)?;
let bls_to_execution_changes = self
.op_pool
.get_bls_to_execution_changes(&state, &self.spec);
// Iterate through the naive aggregation pool and ensure all the attestations from there // Iterate through the naive aggregation pool and ensure all the attestations from there
// are included in the operation pool. // are included in the operation pool.
let unagg_import_timer = let unagg_import_timer =
@ -4276,13 +4365,15 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
voluntary_exits, voluntary_exits,
sync_aggregate, sync_aggregate,
prepare_payload_handle, prepare_payload_handle,
bls_to_execution_changes,
}) })
} }
fn complete_partial_beacon_block<Payload: ExecPayload<T::EthSpec>>( fn complete_partial_beacon_block<Payload: AbstractExecPayload<T::EthSpec>>(
&self, &self,
partial_beacon_block: PartialBeaconBlock<T::EthSpec, Payload>, partial_beacon_block: PartialBeaconBlock<T::EthSpec, Payload>,
execution_payload: Option<Payload>, block_contents: Option<BlockProposalContents<T::EthSpec, Payload>>,
kzg_commitments: Vec<KzgCommitment>,
verification: ProduceBlockVerification, verification: ProduceBlockVerification,
) -> Result<BeaconBlockAndState<T::EthSpec, Payload>, BlockProductionError> { ) -> Result<BeaconBlockAndState<T::EthSpec, Payload>, BlockProductionError> {
let PartialBeaconBlock { let PartialBeaconBlock {
@ -4303,6 +4394,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// this function. We can assume that the handle has already been consumed in order to // this function. We can assume that the handle has already been consumed in order to
// produce said `execution_payload`. // produce said `execution_payload`.
prepare_payload_handle: _, prepare_payload_handle: _,
bls_to_execution_changes,
} = partial_beacon_block; } = partial_beacon_block;
let inner_block = match &state { let inner_block = match &state {
@ -4358,8 +4450,60 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
voluntary_exits: voluntary_exits.into(), voluntary_exits: voluntary_exits.into(),
sync_aggregate: sync_aggregate sync_aggregate: sync_aggregate
.ok_or(BlockProductionError::MissingSyncAggregate)?, .ok_or(BlockProductionError::MissingSyncAggregate)?,
execution_payload: execution_payload execution_payload: block_contents
.ok_or(BlockProductionError::MissingExecutionPayload)?, .ok_or(BlockProductionError::MissingExecutionPayload)?
.to_payload()
.try_into()
.map_err(|_| BlockProductionError::InvalidPayloadFork)?,
},
}),
BeaconState::Capella(_) => BeaconBlock::Capella(BeaconBlockCapella {
slot,
proposer_index,
parent_root,
state_root: Hash256::zero(),
body: BeaconBlockBodyCapella {
randao_reveal,
eth1_data,
graffiti,
proposer_slashings: proposer_slashings.into(),
attester_slashings: attester_slashings.into(),
attestations: attestations.into(),
deposits: deposits.into(),
voluntary_exits: voluntary_exits.into(),
sync_aggregate: sync_aggregate
.ok_or(BlockProductionError::MissingSyncAggregate)?,
execution_payload: block_contents
.ok_or(BlockProductionError::MissingExecutionPayload)?
.to_payload()
.try_into()
.map_err(|_| BlockProductionError::InvalidPayloadFork)?,
bls_to_execution_changes: bls_to_execution_changes.into(),
},
}),
BeaconState::Eip4844(_) => BeaconBlock::Eip4844(BeaconBlockEip4844 {
slot,
proposer_index,
parent_root,
state_root: Hash256::zero(),
body: BeaconBlockBodyEip4844 {
randao_reveal,
eth1_data,
graffiti,
proposer_slashings: proposer_slashings.into(),
attester_slashings: attester_slashings.into(),
attestations: attestations.into(),
deposits: deposits.into(),
voluntary_exits: voluntary_exits.into(),
sync_aggregate: sync_aggregate
.ok_or(BlockProductionError::MissingSyncAggregate)?,
execution_payload: block_contents
.ok_or(BlockProductionError::MissingExecutionPayload)?
.to_payload()
.try_into()
.map_err(|_| BlockProductionError::InvalidPayloadFork)?,
bls_to_execution_changes: bls_to_execution_changes.into(),
blob_kzg_commitments: VariableList::from(kzg_commitments),
}, },
}), }),
}; };
@ -4614,16 +4758,40 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
return Ok(()); return Ok(());
} }
let withdrawals = match self.spec.fork_name_at_slot::<T::EthSpec>(prepare_slot) {
ForkName::Base | ForkName::Altair | ForkName::Merge => None,
ForkName::Capella | ForkName::Eip4844 => {
// We must use the advanced state because balances can change at epoch boundaries
// and balances affect withdrawals.
// FIXME(mark)
// Might implement caching here in the future..
let prepare_state = self
.state_at_slot(prepare_slot, StateSkipConfig::WithoutStateRoots)
.map_err(|e| {
error!(self.log, "State advance for withdrawals failed"; "error" => ?e);
e
})?;
Some(get_expected_withdrawals(&prepare_state, &self.spec))
}
}
.transpose()
.map_err(|e| {
error!(self.log, "Error preparing beacon proposer"; "error" => ?e);
e
})
.map(|withdrawals_opt| withdrawals_opt.map(|w| w.into()))
.map_err(Error::PrepareProposerFailed)?;
let head_root = forkchoice_update_params.head_root; let head_root = forkchoice_update_params.head_root;
let payload_attributes = PayloadAttributes { let payload_attributes = PayloadAttributes::new(
timestamp: self self.slot_clock
.slot_clock
.start_of(prepare_slot) .start_of(prepare_slot)
.ok_or(Error::InvalidSlot(prepare_slot))? .ok_or(Error::InvalidSlot(prepare_slot))?
.as_secs(), .as_secs(),
prev_randao: pre_payload_attributes.prev_randao, pre_payload_attributes.prev_randao,
suggested_fee_recipient: execution_layer.get_suggested_fee_recipient(proposer).await, execution_layer.get_suggested_fee_recipient(proposer).await,
}; withdrawals,
);
debug!( debug!(
self.log, self.log,
@ -4772,7 +4940,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
{ {
// We are a proposer, check for terminal_pow_block_hash // We are a proposer, check for terminal_pow_block_hash
if let Some(terminal_pow_block_hash) = execution_layer if let Some(terminal_pow_block_hash) = execution_layer
.get_terminal_pow_block_hash(&self.spec, payload_attributes.timestamp) .get_terminal_pow_block_hash(&self.spec, payload_attributes.timestamp())
.await .await
.map_err(Error::ForkchoiceUpdate)? .map_err(Error::ForkchoiceUpdate)?
{ {
@ -4947,7 +5115,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// Returns `Ok(false)` if the block is pre-Bellatrix, or has `ExecutionStatus::Valid`. /// Returns `Ok(false)` if the block is pre-Bellatrix, or has `ExecutionStatus::Valid`.
/// Returns `Ok(true)` if the block has `ExecutionStatus::Optimistic` or has /// Returns `Ok(true)` if the block has `ExecutionStatus::Optimistic` or has
/// `ExecutionStatus::Invalid`. /// `ExecutionStatus::Invalid`.
pub fn is_optimistic_or_invalid_block<Payload: ExecPayload<T::EthSpec>>( pub fn is_optimistic_or_invalid_block<Payload: AbstractExecPayload<T::EthSpec>>(
&self, &self,
block: &SignedBeaconBlock<T::EthSpec, Payload>, block: &SignedBeaconBlock<T::EthSpec, Payload>,
) -> Result<bool, BeaconChainError> { ) -> Result<bool, BeaconChainError> {
@ -4973,7 +5141,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// ///
/// There is a potential race condition when syncing where the block_root of `head_block` could /// There is a potential race condition when syncing where the block_root of `head_block` could
/// be pruned from the fork choice store before being read. /// be pruned from the fork choice store before being read.
pub fn is_optimistic_or_invalid_head_block<Payload: ExecPayload<T::EthSpec>>( pub fn is_optimistic_or_invalid_head_block<Payload: AbstractExecPayload<T::EthSpec>>(
&self, &self,
head_block: &SignedBeaconBlock<T::EthSpec, Payload>, head_block: &SignedBeaconBlock<T::EthSpec, Payload>,
) -> Result<bool, BeaconChainError> { ) -> Result<bool, BeaconChainError> {

View File

@ -16,7 +16,7 @@ use std::sync::Arc;
use store::{Error as StoreError, HotColdDB, ItemStore}; use store::{Error as StoreError, HotColdDB, ItemStore};
use superstruct::superstruct; use superstruct::superstruct;
use types::{ use types::{
BeaconBlockRef, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, ExecPayload, AbstractExecPayload, BeaconBlockRef, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec,
Hash256, Slot, Hash256, Slot,
}; };
@ -260,7 +260,7 @@ where
self.time = slot self.time = slot
} }
fn on_verified_block<Payload: ExecPayload<E>>( fn on_verified_block<Payload: AbstractExecPayload<E>>(
&mut self, &mut self,
_block: BeaconBlockRef<E, Payload>, _block: BeaconBlockRef<E, Payload>,
block_root: Hash256, block_root: Hash256,

View File

@ -1,20 +1,20 @@
use serde_derive::Serialize; use serde_derive::Serialize;
use std::sync::Arc; use std::sync::Arc;
use types::{ use types::{
beacon_state::CloneConfig, BeaconState, EthSpec, ExecPayload, FullPayload, Hash256, beacon_state::CloneConfig, AbstractExecPayload, BeaconState, EthSpec, FullPayload, Hash256,
SignedBeaconBlock, SignedBeaconBlock,
}; };
/// Represents some block and its associated state. Generally, this will be used for tracking the /// Represents some block and its associated state. Generally, this will be used for tracking the
/// head, justified head and finalized head. /// head, justified head and finalized head.
#[derive(Clone, Serialize, PartialEq, Debug)] #[derive(Clone, Serialize, PartialEq, Debug)]
pub struct BeaconSnapshot<E: EthSpec, Payload: ExecPayload<E> = FullPayload<E>> { pub struct BeaconSnapshot<E: EthSpec, Payload: AbstractExecPayload<E> = FullPayload<E>> {
pub beacon_block: Arc<SignedBeaconBlock<E, Payload>>, pub beacon_block: Arc<SignedBeaconBlock<E, Payload>>,
pub beacon_block_root: Hash256, pub beacon_block_root: Hash256,
pub beacon_state: BeaconState<E>, pub beacon_state: BeaconState<E>,
} }
impl<E: EthSpec, Payload: ExecPayload<E>> BeaconSnapshot<E, Payload> { impl<E: EthSpec, Payload: AbstractExecPayload<E>> BeaconSnapshot<E, Payload> {
/// Create a new checkpoint. /// Create a new checkpoint.
pub fn new( pub fn new(
beacon_block: Arc<SignedBeaconBlock<E, Payload>>, beacon_block: Arc<SignedBeaconBlock<E, Payload>>,

View File

@ -0,0 +1,136 @@
use derivative::Derivative;
use slot_clock::SlotClock;
use crate::beacon_chain::{BeaconChain, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY};
use crate::BeaconChainError;
use bls::PublicKey;
use types::{consts::eip4844::BLS_MODULUS, BeaconStateError, BlobsSidecar, Slot};
pub enum BlobError {
/// The blob sidecar is from a slot that is later than the current slot (with respect to the
/// gossip clock disparity).
///
/// ## Peer scoring
///
/// Assuming the local clock is correct, the peer has sent an invalid message.
FutureSlot {
message_slot: Slot,
latest_permissible_slot: Slot,
},
/// The blob sidecar is from a slot that is prior to the earliest permissible slot (with
/// respect to the gossip clock disparity).
///
/// ## Peer scoring
///
/// Assuming the local clock is correct, the peer has sent an invalid message.
PastSlot {
message_slot: Slot,
earliest_permissible_slot: Slot,
},
/// The blob sidecar contains an incorrectly formatted `BLSFieldElement` > `BLS_MODULUS`.
///
///
/// ## Peer scoring
///
/// The peer has sent an invalid message.
BlobOutOfRange { blob_index: usize },
/// The blob sidecar contains a KZGCommitment that is not a valid G1 point on
/// the bls curve.
///
/// ## Peer scoring
///
/// The peer has sent an invalid message.
InvalidKZGCommitment,
/// The proposal signature in invalid.
///
/// ## Peer scoring
///
/// The signature on the blob sidecar invalid and the peer is faulty.
ProposalSignatureInvalid,
/// A blob sidecar for this proposer and slot has already been observed.
///
/// ## Peer scoring
///
/// The `proposer` has already proposed a sidecar at this slot. The existing sidecar may or may not
/// be equal to the given sidecar.
RepeatSidecar { proposer: u64, slot: Slot },
/// There was an error whilst processing the sync contribution. It is not known if it is valid or invalid.
///
/// ## Peer scoring
///
/// We were unable to process this sync committee message due to an internal error. It's unclear if the
/// sync committee message is valid.
BeaconChainError(BeaconChainError),
}
impl From<BeaconChainError> for BlobError {
fn from(e: BeaconChainError) -> Self {
BlobError::BeaconChainError(e)
}
}
impl From<BeaconStateError> for BlobError {
fn from(e: BeaconStateError) -> Self {
BlobError::BeaconChainError(BeaconChainError::BeaconStateError(e))
}
}
/// A wrapper around a `BlobsSidecar` that indicates it has been verified w.r.t the corresponding
/// `SignedBeaconBlock`.
#[derive(Derivative)]
#[derivative(Debug(bound = "T: BeaconChainTypes"))]
pub struct VerifiedBlobsSidecar<'a, T: BeaconChainTypes> {
pub blob_sidecar: &'a BlobsSidecar<T::EthSpec>,
}
impl<'a, T: BeaconChainTypes> VerifiedBlobsSidecar<'a, T> {
pub fn verify(
blob_sidecar: &'a BlobsSidecar<T::EthSpec>,
chain: &BeaconChain<T>,
) -> Result<Self, BlobError> {
let blob_slot = blob_sidecar.beacon_block_slot;
// Do not gossip or process blobs from future or past slots.
let latest_permissible_slot = chain
.slot_clock
.now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
.ok_or(BeaconChainError::UnableToReadSlot)?;
if blob_slot > latest_permissible_slot {
return Err(BlobError::FutureSlot {
message_slot: latest_permissible_slot,
latest_permissible_slot: blob_slot,
});
}
let earliest_permissible_slot = chain
.slot_clock
.now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
.ok_or(BeaconChainError::UnableToReadSlot)?;
if blob_slot > earliest_permissible_slot {
return Err(BlobError::PastSlot {
message_slot: earliest_permissible_slot,
earliest_permissible_slot: blob_slot,
});
}
// Verify that blobs are properly formatted
//TODO: add the check while constructing a Blob type from bytes instead of after
for (i, blob) in blob_sidecar.blobs.iter().enumerate() {
if blob.iter().any(|b| *b >= *BLS_MODULUS) {
return Err(BlobError::BlobOutOfRange { blob_index: i });
}
}
// Verify that the KZG proof is a valid G1 point
if PublicKey::deserialize(&blob_sidecar.kzg_aggregate_proof.0).is_err() {
return Err(BlobError::InvalidKZGCommitment);
}
// TODO: Check that we have not already received a sidecar with a valid signature for this slot.
Ok(Self { blob_sidecar })
}
}

View File

@ -5,10 +5,10 @@ use state_processing::{
common::get_attesting_indices_from_state, common::get_attesting_indices_from_state,
per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards, per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards,
}; };
use types::{BeaconBlockRef, BeaconState, EthSpec, ExecPayload, Hash256}; use types::{AbstractExecPayload, BeaconBlockRef, BeaconState, EthSpec, Hash256};
impl<T: BeaconChainTypes> BeaconChain<T> { impl<T: BeaconChainTypes> BeaconChain<T> {
pub fn compute_block_reward<Payload: ExecPayload<T::EthSpec>>( pub fn compute_block_reward<Payload: AbstractExecPayload<T::EthSpec>>(
&self, &self,
block: BeaconBlockRef<'_, T::EthSpec, Payload>, block: BeaconBlockRef<'_, T::EthSpec, Payload>,
block_root: Hash256, block_root: Hash256,

View File

@ -88,6 +88,7 @@ use std::time::Duration;
use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp};
use task_executor::JoinHandle; use task_executor::JoinHandle;
use tree_hash::TreeHash; use tree_hash::TreeHash;
use types::ExecPayload;
use types::{ use types::{
BeaconBlockRef, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, CloneConfig, Epoch, BeaconBlockRef, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, CloneConfig, Epoch,
EthSpec, ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, EthSpec, ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes,
@ -1185,7 +1186,7 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
.message() .message()
.body() .body()
.execution_payload() .execution_payload()
.map(|full_payload| full_payload.execution_payload.block_hash); .map(|full_payload| full_payload.block_hash());
// Ensure the block is a candidate for optimistic import. // Ensure the block is a candidate for optimistic import.
if !is_optimistic_candidate_block(&chain, block.slot(), block.parent_root()).await? if !is_optimistic_candidate_block(&chain, block.slot(), block.parent_root()).await?
@ -1850,7 +1851,7 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(
} }
/// Obtains a read-locked `ValidatorPubkeyCache` from the `chain`. /// Obtains a read-locked `ValidatorPubkeyCache` from the `chain`.
fn get_validator_pubkey_cache<T: BeaconChainTypes>( pub fn get_validator_pubkey_cache<T: BeaconChainTypes>(
chain: &BeaconChain<T>, chain: &BeaconChain<T>,
) -> Result<RwLockReadGuard<ValidatorPubkeyCache<T>>, BlockError<T::EthSpec>> { ) -> Result<RwLockReadGuard<ValidatorPubkeyCache<T>>, BlockError<T::EthSpec>> {
chain chain

View File

@ -800,6 +800,7 @@ where
observed_voluntary_exits: <_>::default(), observed_voluntary_exits: <_>::default(),
observed_proposer_slashings: <_>::default(), observed_proposer_slashings: <_>::default(),
observed_attester_slashings: <_>::default(), observed_attester_slashings: <_>::default(),
observed_bls_to_execution_changes: <_>::default(),
latest_seen_finality_update: <_>::default(), latest_seen_finality_update: <_>::default(),
latest_seen_optimistic_update: <_>::default(), latest_seen_optimistic_update: <_>::default(),
eth1_chain: self.eth1_chain, eth1_chain: self.eth1_chain,

View File

@ -930,8 +930,12 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.execution_status .execution_status
.is_optimistic_or_invalid(); .is_optimistic_or_invalid();
self.op_pool self.op_pool.prune_all(
.prune_all(&new_snapshot.beacon_state, self.epoch()?); &new_snapshot.beacon_block,
&new_snapshot.beacon_state,
self.epoch()?,
&self.spec,
);
self.observed_block_producers.write().prune( self.observed_block_producers.write().prune(
new_view new_view

View File

@ -0,0 +1,122 @@
//! Provides tools for checking if a node is ready for the Capella upgrade and following merge
//! transition.
use crate::{BeaconChain, BeaconChainTypes};
use execution_layer::http::{
ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_GET_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V2,
};
use serde::{Deserialize, Serialize};
use std::fmt;
use std::time::Duration;
use types::*;
/// The time before the Capella fork when we will start issuing warnings about preparation.
use super::merge_readiness::SECONDS_IN_A_WEEK;
pub const CAPELLA_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2;
pub const ENGINE_CAPABILITIES_REFRESH_INTERVAL: u64 = 300;
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
#[serde(tag = "type")]
pub enum CapellaReadiness {
/// The execution engine is capella-enabled (as far as we can tell)
Ready,
/// We are connected to an execution engine which doesn't support the V2 engine api methods
V2MethodsNotSupported { error: String },
/// The transition configuration with the EL failed, there might be a problem with
/// connectivity, authentication or a difference in configuration.
ExchangeCapabilitiesFailed { error: String },
/// The user has not configured an execution endpoint
NoExecutionEndpoint,
}
impl fmt::Display for CapellaReadiness {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
CapellaReadiness::Ready => {
write!(f, "This node appears ready for Capella.")
}
CapellaReadiness::ExchangeCapabilitiesFailed { error } => write!(
f,
"Could not exchange capabilities with the \
execution endpoint: {}",
error
),
CapellaReadiness::NoExecutionEndpoint => write!(
f,
"The --execution-endpoint flag is not specified, this is a \
requirement post-merge"
),
CapellaReadiness::V2MethodsNotSupported { error } => write!(
f,
"Execution endpoint does not support Capella methods: {}",
error
),
}
}
}
impl<T: BeaconChainTypes> BeaconChain<T> {
/// Returns `true` if capella epoch is set and Capella fork has occurred or will
/// occur within `CAPELLA_READINESS_PREPARATION_SECONDS`
pub fn is_time_to_prepare_for_capella(&self, current_slot: Slot) -> bool {
if let Some(capella_epoch) = self.spec.capella_fork_epoch {
let capella_slot = capella_epoch.start_slot(T::EthSpec::slots_per_epoch());
let capella_readiness_preparation_slots =
CAPELLA_READINESS_PREPARATION_SECONDS / self.spec.seconds_per_slot;
// Return `true` if Capella has happened or is within the preparation time.
current_slot + capella_readiness_preparation_slots > capella_slot
} else {
// The Capella fork epoch has not been defined yet, no need to prepare.
false
}
}
/// Attempts to connect to the EL and confirm that it is ready for capella.
pub async fn check_capella_readiness(&self) -> CapellaReadiness {
if let Some(el) = self.execution_layer.as_ref() {
match el
.get_engine_capabilities(Some(Duration::from_secs(
ENGINE_CAPABILITIES_REFRESH_INTERVAL,
)))
.await
{
Err(e) => {
// The EL was either unreachable or responded with an error
CapellaReadiness::ExchangeCapabilitiesFailed {
error: format!("{:?}", e),
}
}
Ok(capabilities) => {
let mut missing_methods = String::from("Required Methods Unsupported:");
let mut all_good = true;
if !capabilities.get_payload_v2 {
missing_methods.push(' ');
missing_methods.push_str(ENGINE_GET_PAYLOAD_V2);
all_good = false;
}
if !capabilities.forkchoice_updated_v2 {
missing_methods.push(' ');
missing_methods.push_str(ENGINE_FORKCHOICE_UPDATED_V2);
all_good = false;
}
if !capabilities.new_payload_v2 {
missing_methods.push(' ');
missing_methods.push_str(ENGINE_NEW_PAYLOAD_V2);
all_good = false;
}
if all_good {
CapellaReadiness::Ready
} else {
CapellaReadiness::V2MethodsNotSupported {
error: missing_methods,
}
}
}
}
} else {
CapellaReadiness::NoExecutionEndpoint
}
}
}

View File

@ -91,6 +91,7 @@ impl Default for ChainConfig {
count_unrealized_full: CountUnrealizedFull::default(), count_unrealized_full: CountUnrealizedFull::default(),
checkpoint_sync_url_timeout: 60, checkpoint_sync_url_timeout: 60,
prepare_payload_lookahead: Duration::from_secs(4), prepare_payload_lookahead: Duration::from_secs(4),
// This value isn't actually read except in tests.
optimistic_finalized_sync: true, optimistic_finalized_sync: true,
} }
} }

View File

@ -17,8 +17,9 @@ use ssz_types::Error as SszTypesError;
use state_processing::{ use state_processing::{
block_signature_verifier::Error as BlockSignatureVerifierError, block_signature_verifier::Error as BlockSignatureVerifierError,
per_block_processing::errors::{ per_block_processing::errors::{
AttestationValidationError, AttesterSlashingValidationError, ExitValidationError, AttestationValidationError, AttesterSlashingValidationError,
ProposerSlashingValidationError, SyncCommitteeMessageValidationError, BlsExecutionChangeValidationError, ExitValidationError, ProposerSlashingValidationError,
SyncCommitteeMessageValidationError,
}, },
signature_sets::Error as SignatureSetError, signature_sets::Error as SignatureSetError,
state_advance::Error as StateAdvanceError, state_advance::Error as StateAdvanceError,
@ -69,6 +70,7 @@ pub enum BeaconChainError {
ExitValidationError(ExitValidationError), ExitValidationError(ExitValidationError),
ProposerSlashingValidationError(ProposerSlashingValidationError), ProposerSlashingValidationError(ProposerSlashingValidationError),
AttesterSlashingValidationError(AttesterSlashingValidationError), AttesterSlashingValidationError(AttesterSlashingValidationError),
BlsExecutionChangeValidationError(BlsExecutionChangeValidationError),
StateSkipTooLarge { StateSkipTooLarge {
start_slot: Slot, start_slot: Slot,
requested_slot: Slot, requested_slot: Slot,
@ -150,7 +152,7 @@ pub enum BeaconChainError {
}, },
AddPayloadLogicError, AddPayloadLogicError,
ExecutionForkChoiceUpdateFailed(execution_layer::Error), ExecutionForkChoiceUpdateFailed(execution_layer::Error),
PrepareProposerBlockingFailed(execution_layer::Error), PrepareProposerFailed(BlockProcessingError),
ExecutionForkChoiceUpdateInvalid { ExecutionForkChoiceUpdateInvalid {
status: PayloadStatus, status: PayloadStatus,
}, },
@ -204,6 +206,9 @@ pub enum BeaconChainError {
MissingPersistedForkChoice, MissingPersistedForkChoice,
CommitteePromiseFailed(oneshot_broadcast::Error), CommitteePromiseFailed(oneshot_broadcast::Error),
MaxCommitteePromises(usize), MaxCommitteePromises(usize),
BlsToExecutionPriorToCapella,
BlsToExecutionConflictsWithPool,
InconsistentFork(InconsistentFork),
ProposerHeadForkChoiceError(fork_choice::Error<proto_array::Error>), ProposerHeadForkChoiceError(fork_choice::Error<proto_array::Error>),
} }
@ -213,6 +218,7 @@ easy_from_to!(SyncCommitteeMessageValidationError, BeaconChainError);
easy_from_to!(ExitValidationError, BeaconChainError); easy_from_to!(ExitValidationError, BeaconChainError);
easy_from_to!(ProposerSlashingValidationError, BeaconChainError); easy_from_to!(ProposerSlashingValidationError, BeaconChainError);
easy_from_to!(AttesterSlashingValidationError, BeaconChainError); easy_from_to!(AttesterSlashingValidationError, BeaconChainError);
easy_from_to!(BlsExecutionChangeValidationError, BeaconChainError);
easy_from_to!(SszTypesError, BeaconChainError); easy_from_to!(SszTypesError, BeaconChainError);
easy_from_to!(OpPoolError, BeaconChainError); easy_from_to!(OpPoolError, BeaconChainError);
easy_from_to!(NaiveAggregationError, BeaconChainError); easy_from_to!(NaiveAggregationError, BeaconChainError);
@ -227,6 +233,7 @@ easy_from_to!(ForkChoiceStoreError, BeaconChainError);
easy_from_to!(HistoricalBlockError, BeaconChainError); easy_from_to!(HistoricalBlockError, BeaconChainError);
easy_from_to!(StateAdvanceError, BeaconChainError); easy_from_to!(StateAdvanceError, BeaconChainError);
easy_from_to!(BlockReplayError, BeaconChainError); easy_from_to!(BlockReplayError, BeaconChainError);
easy_from_to!(InconsistentFork, BeaconChainError);
#[derive(Debug)] #[derive(Debug)]
pub enum BlockProductionError { pub enum BlockProductionError {
@ -251,6 +258,11 @@ pub enum BlockProductionError {
BlockingFailed(execution_layer::Error), BlockingFailed(execution_layer::Error),
TerminalPoWBlockLookupFailed(execution_layer::Error), TerminalPoWBlockLookupFailed(execution_layer::Error),
GetPayloadFailed(execution_layer::Error), GetPayloadFailed(execution_layer::Error),
GetBlobsFailed(execution_layer::Error),
BlobPayloadMismatch {
blob_block_hash: ExecutionBlockHash,
payload_block_hash: ExecutionBlockHash,
},
FailedToReadFinalizedBlock(store::Error), FailedToReadFinalizedBlock(store::Error),
MissingFinalizedBlock(Hash256), MissingFinalizedBlock(Hash256),
BlockTooLarge(usize), BlockTooLarge(usize),
@ -259,6 +271,7 @@ pub enum BlockProductionError {
MissingExecutionPayload, MissingExecutionPayload,
TokioJoin(tokio::task::JoinError), TokioJoin(tokio::task::JoinError),
BeaconChain(BeaconChainError), BeaconChain(BeaconChainError),
InvalidPayloadFork,
} }
easy_from_to!(BlockProcessingError, BlockProductionError); easy_from_to!(BlockProcessingError, BlockProductionError);

View File

@ -12,22 +12,23 @@ use crate::{
BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProductionError, BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProductionError,
ExecutionPayloadError, ExecutionPayloadError,
}; };
use execution_layer::{BuilderParams, PayloadStatus}; use execution_layer::{BlockProposalContents, BuilderParams, PayloadAttributes, PayloadStatus};
use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use fork_choice::{InvalidationOperation, PayloadVerificationStatus};
use proto_array::{Block as ProtoBlock, ExecutionStatus}; use proto_array::{Block as ProtoBlock, ExecutionStatus};
use slog::{debug, warn}; use slog::{debug, warn};
use slot_clock::SlotClock; use slot_clock::SlotClock;
use state_processing::per_block_processing::{ use state_processing::per_block_processing::{
compute_timestamp_at_slot, is_execution_enabled, is_merge_transition_complete, compute_timestamp_at_slot, get_expected_withdrawals, is_execution_enabled,
partially_verify_execution_payload, is_merge_transition_complete, partially_verify_execution_payload,
}; };
use std::sync::Arc; use std::sync::Arc;
use tokio::task::JoinHandle; use tokio::task::JoinHandle;
use tree_hash::TreeHash; use tree_hash::TreeHash;
use types::*; use types::*;
pub type PreparePayloadResult<Payload> = Result<Payload, BlockProductionError>; pub type PreparePayloadResult<E, Payload> =
pub type PreparePayloadHandle<Payload> = JoinHandle<Option<PreparePayloadResult<Payload>>>; Result<BlockProposalContents<E, Payload>, BlockProductionError>;
pub type PreparePayloadHandle<E, Payload> = JoinHandle<Option<PreparePayloadResult<E, Payload>>>;
#[derive(PartialEq)] #[derive(PartialEq)]
pub enum AllowOptimisticImport { pub enum AllowOptimisticImport {
@ -68,8 +69,13 @@ impl<T: BeaconChainTypes> PayloadNotifier<T> {
// where we do not send the block to the EL at all. // where we do not send the block to the EL at all.
let block_message = block.message(); let block_message = block.message();
let payload = block_message.execution_payload()?; let payload = block_message.execution_payload()?;
partially_verify_execution_payload(state, block.slot(), payload, &chain.spec) partially_verify_execution_payload::<_, FullPayload<_>>(
.map_err(BlockError::PerBlockProcessingError)?; state,
block.slot(),
payload,
&chain.spec,
)
.map_err(BlockError::PerBlockProcessingError)?;
match notify_execution_layer { match notify_execution_layer {
NotifyExecutionLayer::No if chain.config.optimistic_finalized_sync => { NotifyExecutionLayer::No if chain.config.optimistic_finalized_sync => {
@ -81,7 +87,7 @@ impl<T: BeaconChainTypes> PayloadNotifier<T> {
.ok_or(ExecutionPayloadError::NoExecutionConnection)?; .ok_or(ExecutionPayloadError::NoExecutionConnection)?;
if let Err(e) = if let Err(e) =
execution_layer.verify_payload_block_hash(&payload.execution_payload) execution_layer.verify_payload_block_hash(payload.execution_payload_ref())
{ {
warn!( warn!(
chain.log, chain.log,
@ -140,7 +146,7 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>(
.ok_or(ExecutionPayloadError::NoExecutionConnection)?; .ok_or(ExecutionPayloadError::NoExecutionConnection)?;
let new_payload_response = execution_layer let new_payload_response = execution_layer
.notify_new_payload(&execution_payload.execution_payload) .notify_new_payload(&execution_payload.into())
.await; .await;
match new_payload_response { match new_payload_response {
@ -158,7 +164,7 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>(
"Invalid execution payload"; "Invalid execution payload";
"validation_error" => ?validation_error, "validation_error" => ?validation_error,
"latest_valid_hash" => ?latest_valid_hash, "latest_valid_hash" => ?latest_valid_hash,
"execution_block_hash" => ?execution_payload.execution_payload.block_hash, "execution_block_hash" => ?execution_payload.block_hash(),
"root" => ?block.tree_hash_root(), "root" => ?block.tree_hash_root(),
"graffiti" => block.body().graffiti().as_utf8_lossy(), "graffiti" => block.body().graffiti().as_utf8_lossy(),
"proposer_index" => block.proposer_index(), "proposer_index" => block.proposer_index(),
@ -191,7 +197,7 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>(
chain.log, chain.log,
"Invalid execution payload block hash"; "Invalid execution payload block hash";
"validation_error" => ?validation_error, "validation_error" => ?validation_error,
"execution_block_hash" => ?execution_payload.execution_payload.block_hash, "execution_block_hash" => ?execution_payload.block_hash(),
"root" => ?block.tree_hash_root(), "root" => ?block.tree_hash_root(),
"graffiti" => block.body().graffiti().as_utf8_lossy(), "graffiti" => block.body().graffiti().as_utf8_lossy(),
"proposer_index" => block.proposer_index(), "proposer_index" => block.proposer_index(),
@ -344,7 +350,7 @@ pub fn validate_execution_payload_for_gossip<T: BeaconChainTypes>(
} }
}; };
if is_merge_transition_complete || execution_payload != &<_>::default() { if is_merge_transition_complete || !execution_payload.is_default_with_empty_roots() {
let expected_timestamp = chain let expected_timestamp = chain
.slot_clock .slot_clock
.start_of(block.slot()) .start_of(block.slot())
@ -382,13 +388,13 @@ pub fn validate_execution_payload_for_gossip<T: BeaconChainTypes>(
/// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md#block-proposal /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md#block-proposal
pub fn get_execution_payload< pub fn get_execution_payload<
T: BeaconChainTypes, T: BeaconChainTypes,
Payload: ExecPayload<T::EthSpec> + Default + Send + 'static, Payload: AbstractExecPayload<T::EthSpec> + 'static,
>( >(
chain: Arc<BeaconChain<T>>, chain: Arc<BeaconChain<T>>,
state: &BeaconState<T::EthSpec>, state: &BeaconState<T::EthSpec>,
proposer_index: u64, proposer_index: u64,
builder_params: BuilderParams, builder_params: BuilderParams,
) -> Result<PreparePayloadHandle<Payload>, BlockProductionError> { ) -> Result<PreparePayloadHandle<T::EthSpec, Payload>, BlockProductionError> {
// Compute all required values from the `state` now to avoid needing to pass it into a spawned // Compute all required values from the `state` now to avoid needing to pass it into a spawned
// task. // task.
let spec = &chain.spec; let spec = &chain.spec;
@ -398,7 +404,15 @@ pub fn get_execution_payload<
compute_timestamp_at_slot(state, state.slot(), spec).map_err(BeaconStateError::from)?; compute_timestamp_at_slot(state, state.slot(), spec).map_err(BeaconStateError::from)?;
let random = *state.get_randao_mix(current_epoch)?; let random = *state.get_randao_mix(current_epoch)?;
let latest_execution_payload_header_block_hash = let latest_execution_payload_header_block_hash =
state.latest_execution_payload_header()?.block_hash; state.latest_execution_payload_header()?.block_hash();
let withdrawals = match state {
&BeaconState::Capella(_) | &BeaconState::Eip4844(_) => {
Some(get_expected_withdrawals(state, spec)?.into())
}
&BeaconState::Merge(_) => None,
// These shouldn't happen but they're here to make the pattern irrefutable
&BeaconState::Base(_) | &BeaconState::Altair(_) => None,
};
// Spawn a task to obtain the execution payload from the EL via a series of async calls. The // Spawn a task to obtain the execution payload from the EL via a series of async calls. The
// `join_handle` can be used to await the result of the function. // `join_handle` can be used to await the result of the function.
@ -415,6 +429,7 @@ pub fn get_execution_payload<
proposer_index, proposer_index,
latest_execution_payload_header_block_hash, latest_execution_payload_header_block_hash,
builder_params, builder_params,
withdrawals,
) )
.await .await
}, },
@ -448,13 +463,15 @@ pub async fn prepare_execution_payload<T, Payload>(
proposer_index: u64, proposer_index: u64,
latest_execution_payload_header_block_hash: ExecutionBlockHash, latest_execution_payload_header_block_hash: ExecutionBlockHash,
builder_params: BuilderParams, builder_params: BuilderParams,
) -> Result<Payload, BlockProductionError> withdrawals: Option<Vec<Withdrawal>>,
) -> Result<BlockProposalContents<T::EthSpec, Payload>, BlockProductionError>
where where
T: BeaconChainTypes, T: BeaconChainTypes,
Payload: ExecPayload<T::EthSpec> + Default, Payload: AbstractExecPayload<T::EthSpec>,
{ {
let current_epoch = builder_params.slot.epoch(T::EthSpec::slots_per_epoch()); let current_epoch = builder_params.slot.epoch(T::EthSpec::slots_per_epoch());
let spec = &chain.spec; let spec = &chain.spec;
let fork = spec.fork_name_at_slot::<T::EthSpec>(builder_params.slot);
let execution_layer = chain let execution_layer = chain
.execution_layer .execution_layer
.as_ref() .as_ref()
@ -468,7 +485,7 @@ where
if is_terminal_block_hash_set && !is_activation_epoch_reached { if is_terminal_block_hash_set && !is_activation_epoch_reached {
// Use the "empty" payload if there's a terminal block hash, but we haven't reached the // Use the "empty" payload if there's a terminal block hash, but we haven't reached the
// terminal block epoch yet. // terminal block epoch yet.
return Ok(<_>::default()); return BlockProposalContents::default_at_fork(fork).map_err(Into::into);
} }
let terminal_pow_block_hash = execution_layer let terminal_pow_block_hash = execution_layer
@ -481,7 +498,7 @@ where
} else { } else {
// If the merge transition hasn't occurred yet and the EL hasn't found the terminal // If the merge transition hasn't occurred yet and the EL hasn't found the terminal
// block, return an "empty" payload. // block, return an "empty" payload.
return Ok(<_>::default()); return BlockProposalContents::default_at_fork(fork).map_err(Into::into);
} }
} else { } else {
latest_execution_payload_header_block_hash latest_execution_payload_header_block_hash
@ -505,21 +522,26 @@ where
.await .await
.map_err(BlockProductionError::BeaconChain)?; .map_err(BlockProductionError::BeaconChain)?;
let suggested_fee_recipient = execution_layer
.get_suggested_fee_recipient(proposer_index)
.await;
let payload_attributes =
PayloadAttributes::new(timestamp, random, suggested_fee_recipient, withdrawals);
// Note: the suggested_fee_recipient is stored in the `execution_layer`, it will add this parameter. // Note: the suggested_fee_recipient is stored in the `execution_layer`, it will add this parameter.
// //
// This future is not executed here, it's up to the caller to await it. // This future is not executed here, it's up to the caller to await it.
let execution_payload = execution_layer let block_contents = execution_layer
.get_payload::<Payload>( .get_payload::<Payload>(
parent_hash, parent_hash,
timestamp, &payload_attributes,
random,
proposer_index,
forkchoice_update_params, forkchoice_update_params,
builder_params, builder_params,
fork,
&chain.spec, &chain.spec,
) )
.await .await
.map_err(BlockProductionError::GetPayloadFailed)?; .map_err(BlockProductionError::GetPayloadFailed)?;
Ok(execution_payload) Ok(block_contents)
} }

View File

@ -7,11 +7,13 @@ mod beacon_chain;
mod beacon_fork_choice_store; mod beacon_fork_choice_store;
pub mod beacon_proposer_cache; pub mod beacon_proposer_cache;
mod beacon_snapshot; mod beacon_snapshot;
pub mod blob_verification;
pub mod block_reward; pub mod block_reward;
mod block_times_cache; mod block_times_cache;
mod block_verification; mod block_verification;
pub mod builder; pub mod builder;
pub mod canonical_head; pub mod canonical_head;
pub mod capella_readiness;
pub mod chain_config; pub mod chain_config;
mod early_attester_cache; mod early_attester_cache;
mod errors; mod errors;

View File

@ -8,7 +8,7 @@ use std::fmt::Write;
use types::*; use types::*;
/// The time before the Bellatrix fork when we will start issuing warnings about preparation. /// The time before the Bellatrix fork when we will start issuing warnings about preparation.
const SECONDS_IN_A_WEEK: u64 = 604800; pub const SECONDS_IN_A_WEEK: u64 = 604800;
pub const MERGE_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2; pub const MERGE_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2;
#[derive(Default, Debug, Serialize, Deserialize)] #[derive(Default, Debug, Serialize, Deserialize)]

View File

@ -972,6 +972,22 @@ lazy_static! {
"beacon_pre_finalization_block_lookup_count", "beacon_pre_finalization_block_lookup_count",
"Number of block roots subject to single block lookups" "Number of block roots subject to single block lookups"
); );
/*
* Blob sidecar Verification
*/
pub static ref BLOBS_SIDECAR_PROCESSING_REQUESTS: Result<IntCounter> = try_create_int_counter(
"beacon_blobs_sidecar_processing_requests_total",
"Count of all blob sidecars submitted for processing"
);
pub static ref BLOBS_SIDECAR_PROCESSING_SUCCESSES: Result<IntCounter> = try_create_int_counter(
"beacon_blobs_sidecar_processing_successes_total",
"Number of blob sidecars verified for gossip"
);
pub static ref BLOBS_SIDECAR_GOSSIP_VERIFICATION_TIMES: Result<Histogram> = try_create_histogram(
"beacon_blobs_sidecar_gossip_verification_seconds",
"Full runtime of blob sidecars gossip verification"
);
} }
// Fifth lazy-static block is used to account for macro recursion limit. // Fifth lazy-static block is used to account for macro recursion limit.

View File

@ -1,12 +1,12 @@
use derivative::Derivative; use derivative::Derivative;
use smallvec::SmallVec; use smallvec::{smallvec, SmallVec};
use ssz::{Decode, Encode}; use ssz::{Decode, Encode};
use state_processing::{SigVerifiedOp, VerifyOperation}; use state_processing::{SigVerifiedOp, VerifyOperation};
use std::collections::HashSet; use std::collections::HashSet;
use std::marker::PhantomData; use std::marker::PhantomData;
use types::{ use types::{
AttesterSlashing, BeaconState, ChainSpec, EthSpec, ForkName, ProposerSlashing, AttesterSlashing, BeaconState, ChainSpec, EthSpec, ForkName, ProposerSlashing,
SignedVoluntaryExit, Slot, SignedBlsToExecutionChange, SignedVoluntaryExit, Slot,
}; };
/// Number of validator indices to store on the stack in `observed_validators`. /// Number of validator indices to store on the stack in `observed_validators`.
@ -39,7 +39,7 @@ pub enum ObservationOutcome<T: Encode + Decode, E: EthSpec> {
AlreadyKnown, AlreadyKnown,
} }
/// Trait for exits and slashings which can be observed using `ObservedOperations`. /// Trait for operations which can be observed using `ObservedOperations`.
pub trait ObservableOperation<E: EthSpec>: VerifyOperation<E> + Sized { pub trait ObservableOperation<E: EthSpec>: VerifyOperation<E> + Sized {
/// The set of validator indices involved in this operation. /// The set of validator indices involved in this operation.
/// ///
@ -49,13 +49,13 @@ pub trait ObservableOperation<E: EthSpec>: VerifyOperation<E> + Sized {
impl<E: EthSpec> ObservableOperation<E> for SignedVoluntaryExit { impl<E: EthSpec> ObservableOperation<E> for SignedVoluntaryExit {
fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> { fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> {
std::iter::once(self.message.validator_index).collect() smallvec![self.message.validator_index]
} }
} }
impl<E: EthSpec> ObservableOperation<E> for ProposerSlashing { impl<E: EthSpec> ObservableOperation<E> for ProposerSlashing {
fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> { fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> {
std::iter::once(self.signed_header_1.message.proposer_index).collect() smallvec![self.signed_header_1.message.proposer_index]
} }
} }
@ -80,6 +80,12 @@ impl<E: EthSpec> ObservableOperation<E> for AttesterSlashing<E> {
} }
} }
impl<E: EthSpec> ObservableOperation<E> for SignedBlsToExecutionChange {
fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> {
smallvec![self.message.validator_index]
}
}
impl<T: ObservableOperation<E>, E: EthSpec> ObservedOperations<T, E> { impl<T: ObservableOperation<E>, E: EthSpec> ObservedOperations<T, E> {
pub fn verify_and_observe( pub fn verify_and_observe(
&mut self, &mut self,

View File

@ -1,6 +1,8 @@
//! Utilities for managing database schema changes. //! Utilities for managing database schema changes.
mod migration_schema_v12; mod migration_schema_v12;
mod migration_schema_v13; mod migration_schema_v13;
mod migration_schema_v14;
mod migration_schema_v15;
use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY}; use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY};
use crate::eth1_chain::SszEth1; use crate::eth1_chain::SszEth1;
@ -114,6 +116,22 @@ pub fn migrate_schema<T: BeaconChainTypes>(
Ok(()) Ok(())
} }
(SchemaVersion(13), SchemaVersion(14)) => {
let ops = migration_schema_v14::upgrade_to_v14::<T>(db.clone(), log)?;
db.store_schema_version_atomically(to, ops)
}
(SchemaVersion(14), SchemaVersion(13)) => {
let ops = migration_schema_v14::downgrade_from_v14::<T>(db.clone(), log)?;
db.store_schema_version_atomically(to, ops)
}
(SchemaVersion(14), SchemaVersion(15)) => {
let ops = migration_schema_v15::upgrade_to_v15::<T>(db.clone(), log)?;
db.store_schema_version_atomically(to, ops)
}
(SchemaVersion(15), SchemaVersion(14)) => {
let ops = migration_schema_v15::downgrade_from_v15::<T>(db.clone(), log)?;
db.store_schema_version_atomically(to, ops)
}
// Anything else is an error. // Anything else is an error.
(_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion {
target_version: to, target_version: to,

View File

@ -168,16 +168,14 @@ pub fn downgrade_from_v12<T: BeaconChainTypes>(
log: Logger, log: Logger,
) -> Result<Vec<KeyValueStoreOp>, Error> { ) -> Result<Vec<KeyValueStoreOp>, Error> {
// Load a V12 op pool and transform it to V5. // Load a V12 op pool and transform it to V5.
let PersistedOperationPoolV12 { let PersistedOperationPoolV12::<T::EthSpec> {
attestations, attestations,
sync_contributions, sync_contributions,
attester_slashings, attester_slashings,
proposer_slashings, proposer_slashings,
voluntary_exits, voluntary_exits,
} = if let Some(PersistedOperationPool::<T::EthSpec>::V12(op_pool)) = } = if let Some(op_pool_v12) = db.get_item(&OP_POOL_DB_KEY)? {
db.get_item(&OP_POOL_DB_KEY)? op_pool_v12
{
op_pool
} else { } else {
debug!(log, "Nothing to do, no operation pool stored"); debug!(log, "Nothing to do, no operation pool stored");
return Ok(vec![]); return Ok(vec![]);

View File

@ -0,0 +1,125 @@
use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY};
use operation_pool::{
PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV14,
};
use slog::{debug, error, info, Logger};
use slot_clock::SlotClock;
use std::sync::Arc;
use std::time::Duration;
use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem};
use types::{EthSpec, Hash256, Slot};
/// Build a temporary slot clock from the genesis state stored in the database.
///
/// The real slot clock usually isn't available before the database is initialized,
/// so we reconstruct one here. The genesis block/state should always exist when the
/// database is at a prior schema version, but their absence is handled gracefully by
/// returning `Ok(None)` rather than an error.
fn get_slot_clock<T: BeaconChainTypes>(
    db: &HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>,
    log: &Logger,
) -> Result<Option<T::SlotClock>, Error> {
    let spec = db.get_chain_spec();
    // The genesis block is stored under the zero root; log and bail if it is missing.
    let genesis_block = match db.get_blinded_block(&Hash256::zero())? {
        Some(block) => block,
        None => {
            error!(log, "Missing genesis block");
            return Ok(None);
        }
    };
    // Resolve the genesis state via the genesis block's state root at slot 0.
    let genesis_state = match db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? {
        Some(state) => state,
        None => {
            error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root());
            return Ok(None);
        }
    };
    Ok(Some(T::SlotClock::new(
        spec.genesis_slot,
        Duration::from_secs(genesis_state.genesis_time()),
        Duration::from_secs(spec.seconds_per_slot),
    )))
}
pub fn upgrade_to_v14<T: BeaconChainTypes>(
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
log: Logger,
) -> Result<Vec<KeyValueStoreOp>, Error> {
// Load a V12 op pool and transform it to V14.
let PersistedOperationPoolV12::<T::EthSpec> {
attestations,
sync_contributions,
attester_slashings,
proposer_slashings,
voluntary_exits,
} = if let Some(op_pool_v12) = db.get_item(&OP_POOL_DB_KEY)? {
op_pool_v12
} else {
debug!(log, "Nothing to do, no operation pool stored");
return Ok(vec![]);
};
// initialize with empty vector
let bls_to_execution_changes = vec![];
let v14 = PersistedOperationPool::V14(PersistedOperationPoolV14 {
attestations,
sync_contributions,
attester_slashings,
proposer_slashings,
voluntary_exits,
bls_to_execution_changes,
});
Ok(vec![v14.as_kv_store_op(OP_POOL_DB_KEY)])
}
pub fn downgrade_from_v14<T: BeaconChainTypes>(
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
log: Logger,
) -> Result<Vec<KeyValueStoreOp>, Error> {
// We cannot downgrade from V14 once the Capella fork has been reached because there will
// be HistoricalSummaries stored in the database instead of HistoricalRoots and prior versions
// of Lighthouse can't handle that.
if let Some(capella_fork_epoch) = db.get_chain_spec().capella_fork_epoch {
let current_epoch = get_slot_clock::<T>(&db, &log)?
.and_then(|clock| clock.now())
.map(|slot| slot.epoch(T::EthSpec::slots_per_epoch()))
.ok_or(Error::SlotClockUnavailableForMigration)?;
if current_epoch >= capella_fork_epoch {
error!(
log,
"Capella already active: v14+ is mandatory";
"current_epoch" => current_epoch,
"capella_fork_epoch" => capella_fork_epoch,
);
return Err(Error::UnableToDowngrade);
}
}
// Load a V14 op pool and transform it to V12.
let PersistedOperationPoolV14::<T::EthSpec> {
attestations,
sync_contributions,
attester_slashings,
proposer_slashings,
voluntary_exits,
bls_to_execution_changes,
} = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? {
op_pool
} else {
debug!(log, "Nothing to do, no operation pool stored");
return Ok(vec![]);
};
info!(
log,
"Dropping bls_to_execution_changes from pool";
"count" => bls_to_execution_changes.len(),
);
let v12 = PersistedOperationPoolV12 {
attestations,
sync_contributions,
attester_slashings,
proposer_slashings,
voluntary_exits,
};
Ok(vec![v12.as_kv_store_op(OP_POOL_DB_KEY)])
}

View File

@ -0,0 +1,76 @@
use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY};
use operation_pool::{
PersistedOperationPool, PersistedOperationPoolV14, PersistedOperationPoolV15,
};
use slog::{debug, info, Logger};
use std::sync::Arc;
use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem};
pub fn upgrade_to_v15<T: BeaconChainTypes>(
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
log: Logger,
) -> Result<Vec<KeyValueStoreOp>, Error> {
// Load a V14 op pool and transform it to V15.
let PersistedOperationPoolV14::<T::EthSpec> {
attestations,
sync_contributions,
attester_slashings,
proposer_slashings,
voluntary_exits,
bls_to_execution_changes,
} = if let Some(op_pool_v14) = db.get_item(&OP_POOL_DB_KEY)? {
op_pool_v14
} else {
debug!(log, "Nothing to do, no operation pool stored");
return Ok(vec![]);
};
let v15 = PersistedOperationPool::V15(PersistedOperationPoolV15 {
attestations,
sync_contributions,
attester_slashings,
proposer_slashings,
voluntary_exits,
bls_to_execution_changes,
// Initialize with empty set
capella_bls_change_broadcast_indices: <_>::default(),
});
Ok(vec![v15.as_kv_store_op(OP_POOL_DB_KEY)])
}
pub fn downgrade_from_v15<T: BeaconChainTypes>(
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
log: Logger,
) -> Result<Vec<KeyValueStoreOp>, Error> {
// Load a V15 op pool and transform it to V14.
let PersistedOperationPoolV15::<T::EthSpec> {
attestations,
sync_contributions,
attester_slashings,
proposer_slashings,
voluntary_exits,
bls_to_execution_changes,
capella_bls_change_broadcast_indices,
} = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? {
op_pool
} else {
debug!(log, "Nothing to do, no operation pool stored");
return Ok(vec![]);
};
info!(
log,
"Forgetting address changes for Capella broadcast";
"count" => capella_bls_change_broadcast_indices.len(),
);
let v14 = PersistedOperationPoolV14 {
attestations,
sync_contributions,
attester_slashings,
proposer_slashings,
voluntary_exits,
bls_to_execution_changes,
};
Ok(vec![v14.as_kv_store_op(OP_POOL_DB_KEY)])
}

View File

@ -6,10 +6,10 @@ use slog::error;
use state_processing::per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards; use state_processing::per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards;
use std::collections::HashMap; use std::collections::HashMap;
use store::RelativeEpoch; use store::RelativeEpoch;
use types::{BeaconBlockRef, BeaconState, ExecPayload}; use types::{AbstractExecPayload, BeaconBlockRef, BeaconState};
impl<T: BeaconChainTypes> BeaconChain<T> { impl<T: BeaconChainTypes> BeaconChain<T> {
pub fn compute_sync_committee_rewards<Payload: ExecPayload<T::EthSpec>>( pub fn compute_sync_committee_rewards<Payload: AbstractExecPayload<T::EthSpec>>(
&self, &self,
block: BeaconBlockRef<'_, T::EthSpec, Payload>, block: BeaconBlockRef<'_, T::EthSpec, Payload>,
state: &mut BeaconState<T::EthSpec>, state: &mut BeaconState<T::EthSpec>,

View File

@ -13,17 +13,17 @@ use crate::{
StateSkipConfig, StateSkipConfig,
}; };
use bls::get_withdrawal_credentials; use bls::get_withdrawal_credentials;
use execution_layer::test_utils::DEFAULT_JWT_SECRET;
use execution_layer::{ use execution_layer::{
auth::JwtKey, auth::JwtKey,
test_utils::{ test_utils::{
ExecutionBlockGenerator, MockExecutionLayer, TestingBuilder, DEFAULT_TERMINAL_BLOCK, ExecutionBlockGenerator, MockExecutionLayer, TestingBuilder, DEFAULT_JWT_SECRET,
DEFAULT_TERMINAL_BLOCK,
}, },
ExecutionLayer, ExecutionLayer,
}; };
use fork_choice::CountUnrealized; use fork_choice::CountUnrealized;
use futures::channel::mpsc::Receiver; use futures::channel::mpsc::Receiver;
pub use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; pub use genesis::{interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH};
use int_to_bytes::int_to_bytes32; use int_to_bytes::int_to_bytes32;
use merkle_proof::MerkleTree; use merkle_proof::MerkleTree;
use parking_lot::Mutex; use parking_lot::Mutex;
@ -149,6 +149,7 @@ pub struct Builder<T: BeaconChainTypes> {
eth_spec_instance: T::EthSpec, eth_spec_instance: T::EthSpec,
spec: Option<ChainSpec>, spec: Option<ChainSpec>,
validator_keypairs: Option<Vec<Keypair>>, validator_keypairs: Option<Vec<Keypair>>,
withdrawal_keypairs: Vec<Option<Keypair>>,
chain_config: Option<ChainConfig>, chain_config: Option<ChainConfig>,
store_config: Option<StoreConfig>, store_config: Option<StoreConfig>,
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
@ -180,7 +181,7 @@ impl<E: EthSpec> Builder<EphemeralHarnessType<E>> {
.unwrap(), .unwrap(),
); );
let mutator = move |builder: BeaconChainBuilder<_>| { let mutator = move |builder: BeaconChainBuilder<_>| {
let genesis_state = interop_genesis_state::<E>( let genesis_state = interop_genesis_state_with_eth1::<E>(
&validator_keypairs, &validator_keypairs,
HARNESS_GENESIS_TIME, HARNESS_GENESIS_TIME,
Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH),
@ -241,7 +242,7 @@ impl<E: EthSpec> Builder<DiskHarnessType<E>> {
.expect("cannot build without validator keypairs"); .expect("cannot build without validator keypairs");
let mutator = move |builder: BeaconChainBuilder<_>| { let mutator = move |builder: BeaconChainBuilder<_>| {
let genesis_state = interop_genesis_state::<E>( let genesis_state = interop_genesis_state_with_eth1::<E>(
&validator_keypairs, &validator_keypairs,
HARNESS_GENESIS_TIME, HARNESS_GENESIS_TIME,
Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH),
@ -283,6 +284,7 @@ where
eth_spec_instance, eth_spec_instance,
spec: None, spec: None,
validator_keypairs: None, validator_keypairs: None,
withdrawal_keypairs: vec![],
chain_config: None, chain_config: None,
store_config: None, store_config: None,
store: None, store: None,
@ -308,6 +310,26 @@ where
self self
} }
/// Initializes the BLS withdrawal keypairs for `num_keypairs` validators to
/// the "determistic" values, regardless of wether or not the validator has
/// a BLS or execution address in the genesis deposits.
///
/// This aligns with the withdrawal commitments used in the "interop"
/// genesis states.
pub fn deterministic_withdrawal_keypairs(self, num_keypairs: usize) -> Self {
self.withdrawal_keypairs(
types::test_utils::generate_deterministic_keypairs(num_keypairs)
.into_iter()
.map(Option::Some)
.collect(),
)
}
pub fn withdrawal_keypairs(mut self, withdrawal_keypairs: Vec<Option<Keypair>>) -> Self {
self.withdrawal_keypairs = withdrawal_keypairs;
self
}
pub fn default_spec(self) -> Self { pub fn default_spec(self) -> Self {
self.spec_or_default(None) self.spec_or_default(None)
} }
@ -385,15 +407,43 @@ where
self self
} }
pub fn recalculate_fork_times_with_genesis(mut self, genesis_time: u64) -> Self {
let mock = self
.mock_execution_layer
.as_mut()
.expect("must have mock execution layer to recalculate fork times");
let spec = self
.spec
.clone()
.expect("cannot recalculate fork times without spec");
mock.server.execution_block_generator().shanghai_time =
spec.capella_fork_epoch.map(|epoch| {
genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
});
mock.server.execution_block_generator().eip4844_time =
spec.eip4844_fork_epoch.map(|epoch| {
genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
});
self
}
pub fn mock_execution_layer(mut self) -> Self { pub fn mock_execution_layer(mut self) -> Self {
let spec = self.spec.clone().expect("cannot build without spec"); let spec = self.spec.clone().expect("cannot build without spec");
let shanghai_time = spec.capella_fork_epoch.map(|epoch| {
HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
});
let eip4844_time = spec.eip4844_fork_epoch.map(|epoch| {
HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
});
let mock = MockExecutionLayer::new( let mock = MockExecutionLayer::new(
self.runtime.task_executor.clone(), self.runtime.task_executor.clone(),
spec.terminal_total_difficulty,
DEFAULT_TERMINAL_BLOCK, DEFAULT_TERMINAL_BLOCK,
spec.terminal_block_hash, shanghai_time,
spec.terminal_block_hash_activation_epoch, eip4844_time,
None,
Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()),
spec,
None, None,
); );
self.execution_layer = Some(mock.el.clone()); self.execution_layer = Some(mock.el.clone());
@ -401,19 +451,30 @@ where
self self
} }
pub fn mock_execution_layer_with_builder(mut self, beacon_url: SensitiveUrl) -> Self { pub fn mock_execution_layer_with_builder(
mut self,
beacon_url: SensitiveUrl,
builder_threshold: Option<u128>,
) -> Self {
// Get a random unused port // Get a random unused port
let port = unused_port::unused_tcp_port().unwrap(); let port = unused_port::unused_tcp_port().unwrap();
let builder_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(); let builder_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap();
let spec = self.spec.clone().expect("cannot build without spec"); let spec = self.spec.clone().expect("cannot build without spec");
let shanghai_time = spec.capella_fork_epoch.map(|epoch| {
HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
});
let eip4844_time = spec.eip4844_fork_epoch.map(|epoch| {
HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
});
let mock_el = MockExecutionLayer::new( let mock_el = MockExecutionLayer::new(
self.runtime.task_executor.clone(), self.runtime.task_executor.clone(),
spec.terminal_total_difficulty,
DEFAULT_TERMINAL_BLOCK, DEFAULT_TERMINAL_BLOCK,
spec.terminal_block_hash, shanghai_time,
spec.terminal_block_hash_activation_epoch, eip4844_time,
builder_threshold,
Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()),
spec.clone(),
Some(builder_url.clone()), Some(builder_url.clone()),
) )
.move_to_terminal_block(); .move_to_terminal_block();
@ -505,6 +566,7 @@ where
spec: chain.spec.clone(), spec: chain.spec.clone(),
chain: Arc::new(chain), chain: Arc::new(chain),
validator_keypairs, validator_keypairs,
withdrawal_keypairs: self.withdrawal_keypairs,
shutdown_receiver: Arc::new(Mutex::new(shutdown_receiver)), shutdown_receiver: Arc::new(Mutex::new(shutdown_receiver)),
runtime: self.runtime, runtime: self.runtime,
mock_execution_layer: self.mock_execution_layer, mock_execution_layer: self.mock_execution_layer,
@ -520,6 +582,12 @@ where
/// Used for testing. /// Used for testing.
pub struct BeaconChainHarness<T: BeaconChainTypes> { pub struct BeaconChainHarness<T: BeaconChainTypes> {
pub validator_keypairs: Vec<Keypair>, pub validator_keypairs: Vec<Keypair>,
/// Optional BLS withdrawal keys for each validator.
///
/// If a validator index is missing from this vec or their entry is `None` then either
/// no BLS withdrawal key was set for them (they had an address from genesis) or the test
/// initializer neglected to set this field.
pub withdrawal_keypairs: Vec<Option<Keypair>>,
pub chain: Arc<BeaconChain<T>>, pub chain: Arc<BeaconChain<T>>,
pub spec: ChainSpec, pub spec: ChainSpec,
@ -1431,6 +1499,44 @@ where
.sign(sk, &fork, genesis_validators_root, &self.chain.spec) .sign(sk, &fork, genesis_validators_root, &self.chain.spec)
} }
pub fn make_bls_to_execution_change(
&self,
validator_index: u64,
address: Address,
) -> SignedBlsToExecutionChange {
let keypair = self.get_withdrawal_keypair(validator_index);
self.make_bls_to_execution_change_with_keys(
validator_index,
address,
&keypair.pk,
&keypair.sk,
)
}
pub fn make_bls_to_execution_change_with_keys(
&self,
validator_index: u64,
address: Address,
pubkey: &PublicKey,
secret_key: &SecretKey,
) -> SignedBlsToExecutionChange {
let genesis_validators_root = self.chain.genesis_validators_root;
BlsToExecutionChange {
validator_index,
from_bls_pubkey: pubkey.compress(),
to_execution_address: address,
}
.sign(secret_key, genesis_validators_root, &self.chain.spec)
}
pub fn get_withdrawal_keypair(&self, validator_index: u64) -> &Keypair {
self.withdrawal_keypairs
.get(validator_index as usize)
.expect("BLS withdrawal key missing from harness")
.as_ref()
.expect("no withdrawal key for validator")
}
pub fn add_voluntary_exit( pub fn add_voluntary_exit(
&self, &self,
block: &mut BeaconBlock<E>, block: &mut BeaconBlock<E>,

View File

@ -0,0 +1,167 @@
#![cfg(not(debug_assertions))] // Tests run too slow in debug.
use beacon_chain::test_utils::BeaconChainHarness;
use execution_layer::test_utils::Block;
use types::*;
const VALIDATOR_COUNT: usize = 32;
type E = MainnetEthSpec;
/// Assert that a sequence of execution payloads forms a well-linked chain:
/// each payload is non-default, has a non-zero block hash, points at the previous
/// payload's hash, increments the block number by one, and advances the timestamp.
fn verify_execution_payload_chain<T: EthSpec>(chain: &[FullPayload<T>]) {
    let mut previous: Option<FullPayload<T>> = None;
    for payload in chain {
        assert!(!payload.is_default_with_empty_roots());
        assert!(payload.block_hash() != ExecutionBlockHash::zero());

        // Link checks against the previous payload (skipped for the first element).
        if let Some(prev) = &previous {
            assert_eq!(prev.block_hash(), payload.parent_hash());
            assert_eq!(prev.block_number() + 1, payload.block_number());
            assert!(payload.timestamp() > prev.timestamp());
        }
        previous = Some(payload.clone());
    }
}
/// End-to-end fork transition test: start on the base (phase0) fork and walk
/// the chain through the Altair, Bellatrix (merge) and Capella forks,
/// asserting the head block's fork variant, slot, and execution payload
/// properties at each stage.
#[tokio::test]
async fn base_altair_merge_capella() {
    // Fork schedule for this test: Altair at epoch 4, Bellatrix at 8,
    // Capella at 12.
    let altair_fork_epoch = Epoch::new(4);
    let altair_fork_slot = altair_fork_epoch.start_slot(E::slots_per_epoch());
    let bellatrix_fork_epoch = Epoch::new(8);
    let merge_fork_slot = bellatrix_fork_epoch.start_slot(E::slots_per_epoch());
    let capella_fork_epoch = Epoch::new(12);
    let capella_fork_slot = capella_fork_epoch.start_slot(E::slots_per_epoch());
    let mut spec = E::default_spec();
    spec.altair_fork_epoch = Some(altair_fork_epoch);
    spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch);
    spec.capella_fork_epoch = Some(capella_fork_epoch);
    let harness = BeaconChainHarness::builder(E::default())
        .spec(spec)
        .logger(logging::test_logger())
        .deterministic_keypairs(VALIDATOR_COUNT)
        .fresh_ephemeral_store()
        .mock_execution_layer()
        .build();
    /*
     * Start with the base fork.
     */
    assert!(harness.chain.head_snapshot().beacon_block.as_base().is_ok());
    /*
     * Do the Altair fork.
     */
    harness.extend_to_slot(altair_fork_slot).await;
    let altair_head = &harness.chain.head_snapshot().beacon_block;
    assert!(altair_head.as_altair().is_ok());
    assert_eq!(altair_head.slot(), altair_fork_slot);
    /*
     * Do the merge fork, without a terminal PoW block.
     */
    harness.extend_to_slot(merge_fork_slot).await;
    let merge_head = &harness.chain.head_snapshot().beacon_block;
    assert!(merge_head.as_merge().is_ok());
    assert_eq!(merge_head.slot(), merge_fork_slot);
    // No terminal PoW block yet, so the payload must still be the default.
    assert!(
        merge_head
            .message()
            .body()
            .execution_payload()
            .unwrap()
            .is_default_with_empty_roots(),
        "Merge head is default payload"
    );
    /*
     * Next merge block shouldn't include an exec payload.
     */
    harness.extend_slots(1).await;
    let one_after_merge_head = &harness.chain.head_snapshot().beacon_block;
    assert!(
        one_after_merge_head
            .message()
            .body()
            .execution_payload()
            .unwrap()
            .is_default_with_empty_roots(),
        "One after merge head is default payload"
    );
    assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 1);
    /*
     * Trigger the terminal PoW block.
     */
    harness
        .execution_block_generator()
        .move_to_terminal_block()
        .unwrap();
    // Add a slot duration to get to the next slot
    let timestamp = harness.get_timestamp_at_slot() + harness.spec.seconds_per_slot;
    // Bump the terminal PoW block's timestamp so it falls in the upcoming slot.
    harness
        .execution_block_generator()
        .modify_last_block(|block| {
            if let Block::PoW(terminal_block) = block {
                terminal_block.timestamp = timestamp;
            }
        });
    harness.extend_slots(1).await;
    let two_after_merge_head = &harness.chain.head_snapshot().beacon_block;
    // NOTE(review): the payload is asserted default here — presumably the block
    // for this slot was produced before the terminal block took effect; confirm
    // against the harness's block-production timing.
    assert!(
        two_after_merge_head
            .message()
            .body()
            .execution_payload()
            .unwrap()
            .is_default_with_empty_roots(),
        "Two after merge head is default payload"
    );
    assert_eq!(two_after_merge_head.slot(), merge_fork_slot + 2);
    /*
     * Next merge block should include an exec payload.
     */
    // Extend up to (but not including) the Capella fork slot, collecting the
    // now non-default payloads as we go.
    let mut execution_payloads = vec![];
    for _ in (merge_fork_slot.as_u64() + 3)..capella_fork_slot.as_u64() {
        harness.extend_slots(1).await;
        let block = &harness.chain.head_snapshot().beacon_block;
        let full_payload: FullPayload<E> = block
            .message()
            .body()
            .execution_payload()
            .unwrap()
            .clone()
            .into();
        // pre-capella shouldn't have withdrawals
        assert!(full_payload.withdrawals_root().is_err());
        execution_payloads.push(full_payload);
    }
    /*
     * Should enter capella fork now.
     */
    for _ in 0..16 {
        harness.extend_slots(1).await;
        let block = &harness.chain.head_snapshot().beacon_block;
        let full_payload: FullPayload<E> = block
            .message()
            .body()
            .execution_payload()
            .unwrap()
            .clone()
            .into();
        // post-capella should have withdrawals
        assert!(full_payload.withdrawals_root().is_ok());
        execution_payloads.push(full_payload);
    }
    // Finally, check the collected payloads form one contiguous chain across
    // the merge and Capella eras.
    verify_execution_payload_chain(execution_payloads.as_slice());
}

View File

@ -1,6 +1,7 @@
mod attestation_production; mod attestation_production;
mod attestation_verification; mod attestation_verification;
mod block_verification; mod block_verification;
mod capella;
mod merge; mod merge;
mod op_verification; mod op_verification;
mod payload_invalidation; mod payload_invalidation;

View File

@ -12,17 +12,14 @@ fn verify_execution_payload_chain<T: EthSpec>(chain: &[FullPayload<T>]) {
let mut prev_ep: Option<FullPayload<T>> = None; let mut prev_ep: Option<FullPayload<T>> = None;
for ep in chain { for ep in chain {
assert!(*ep != FullPayload::default()); assert!(!ep.is_default_with_empty_roots());
assert!(ep.block_hash() != ExecutionBlockHash::zero()); assert!(ep.block_hash() != ExecutionBlockHash::zero());
// Check against previous `ExecutionPayload`. // Check against previous `ExecutionPayload`.
if let Some(prev_ep) = prev_ep { if let Some(prev_ep) = prev_ep {
assert_eq!(prev_ep.block_hash(), ep.execution_payload.parent_hash); assert_eq!(prev_ep.block_hash(), ep.parent_hash());
assert_eq!( assert_eq!(prev_ep.block_number() + 1, ep.block_number());
prev_ep.execution_payload.block_number + 1, assert!(ep.timestamp() > prev_ep.timestamp());
ep.execution_payload.block_number
);
assert!(ep.execution_payload.timestamp > prev_ep.execution_payload.timestamp);
} }
prev_ep = Some(ep.clone()); prev_ep = Some(ep.clone());
} }
@ -89,7 +86,7 @@ async fn merge_with_terminal_block_hash_override() {
if i == 0 { if i == 0 {
assert_eq!(execution_payload.block_hash(), genesis_pow_block_hash); assert_eq!(execution_payload.block_hash(), genesis_pow_block_hash);
} }
execution_payloads.push(execution_payload); execution_payloads.push(execution_payload.into());
} }
verify_execution_payload_chain(execution_payloads.as_slice()); verify_execution_payload_chain(execution_payloads.as_slice());
@ -141,9 +138,14 @@ async fn base_altair_merge_with_terminal_block_after_fork() {
let merge_head = &harness.chain.head_snapshot().beacon_block; let merge_head = &harness.chain.head_snapshot().beacon_block;
assert!(merge_head.as_merge().is_ok()); assert!(merge_head.as_merge().is_ok());
assert_eq!(merge_head.slot(), merge_fork_slot); assert_eq!(merge_head.slot(), merge_fork_slot);
assert_eq!( assert!(
*merge_head.message().body().execution_payload().unwrap(), merge_head
FullPayload::default() .message()
.body()
.execution_payload()
.unwrap()
.is_default_with_empty_roots(),
"Merge head is default payload"
); );
/* /*
@ -153,13 +155,14 @@ async fn base_altair_merge_with_terminal_block_after_fork() {
harness.extend_slots(1).await; harness.extend_slots(1).await;
let one_after_merge_head = &harness.chain.head_snapshot().beacon_block; let one_after_merge_head = &harness.chain.head_snapshot().beacon_block;
assert_eq!( assert!(
*one_after_merge_head one_after_merge_head
.message() .message()
.body() .body()
.execution_payload() .execution_payload()
.unwrap(), .unwrap()
FullPayload::default() .is_default_with_empty_roots(),
"One after merge head is default payload"
); );
assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 1); assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 1);
@ -185,26 +188,34 @@ async fn base_altair_merge_with_terminal_block_after_fork() {
harness.extend_slots(1).await; harness.extend_slots(1).await;
let one_after_merge_head = &harness.chain.head_snapshot().beacon_block; let two_after_merge_head = &harness.chain.head_snapshot().beacon_block;
assert_eq!( assert!(
*one_after_merge_head two_after_merge_head
.message() .message()
.body() .body()
.execution_payload() .execution_payload()
.unwrap(), .unwrap()
FullPayload::default() .is_default_with_empty_roots(),
"Two after merge head is default payload"
); );
assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 2); assert_eq!(two_after_merge_head.slot(), merge_fork_slot + 2);
/* /*
* Next merge block should include an exec payload. * Next merge block should include an exec payload.
*/ */
for _ in 0..4 { for _ in 0..4 {
harness.extend_slots(1).await; harness.extend_slots(1).await;
let block = &harness.chain.head_snapshot().beacon_block; let block = &harness.chain.head_snapshot().beacon_block;
execution_payloads.push(block.message().body().execution_payload().unwrap().clone()); execution_payloads.push(
block
.message()
.body()
.execution_payload()
.unwrap()
.clone()
.into(),
);
} }
verify_execution_payload_chain(execution_payloads.as_slice()); verify_execution_payload_chain(execution_payloads.as_slice());

View File

@ -13,9 +13,9 @@ use beacon_chain::{
INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON,
}; };
use execution_layer::{ use execution_layer::{
json_structures::{JsonForkChoiceStateV1, JsonPayloadAttributesV1}, json_structures::{JsonForkchoiceStateV1, JsonPayloadAttributes, JsonPayloadAttributesV1},
test_utils::ExecutionBlockGenerator, test_utils::ExecutionBlockGenerator,
ExecutionLayer, ForkChoiceState, PayloadAttributes, ExecutionLayer, ForkchoiceState, PayloadAttributes,
}; };
use fork_choice::{ use fork_choice::{
CountUnrealized, Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus, CountUnrealized, Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus,
@ -120,7 +120,7 @@ impl InvalidPayloadRig {
&self.harness.chain.canonical_head &self.harness.chain.canonical_head
} }
fn previous_forkchoice_update_params(&self) -> (ForkChoiceState, PayloadAttributes) { fn previous_forkchoice_update_params(&self) -> (ForkchoiceState, PayloadAttributes) {
let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap(); let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap();
let json = mock_execution_layer let json = mock_execution_layer
.server .server
@ -129,14 +129,17 @@ impl InvalidPayloadRig {
let params = json.get("params").expect("no params"); let params = json.get("params").expect("no params");
let fork_choice_state_json = params.get(0).expect("no payload param"); let fork_choice_state_json = params.get(0).expect("no payload param");
let fork_choice_state: JsonForkChoiceStateV1 = let fork_choice_state: JsonForkchoiceStateV1 =
serde_json::from_value(fork_choice_state_json.clone()).unwrap(); serde_json::from_value(fork_choice_state_json.clone()).unwrap();
let payload_param_json = params.get(1).expect("no payload param"); let payload_param_json = params.get(1).expect("no payload param");
let attributes: JsonPayloadAttributesV1 = let attributes: JsonPayloadAttributesV1 =
serde_json::from_value(payload_param_json.clone()).unwrap(); serde_json::from_value(payload_param_json.clone()).unwrap();
(fork_choice_state.into(), attributes.into()) (
fork_choice_state.into(),
JsonPayloadAttributes::V1(attributes).into(),
)
} }
fn previous_payload_attributes(&self) -> PayloadAttributes { fn previous_payload_attributes(&self) -> PayloadAttributes {
@ -991,20 +994,20 @@ async fn payload_preparation() {
.await .await
.unwrap(); .unwrap();
let payload_attributes = PayloadAttributes { let payload_attributes = PayloadAttributes::new(
timestamp: rig rig.harness
.harness
.chain .chain
.slot_clock .slot_clock
.start_of(next_slot) .start_of(next_slot)
.unwrap() .unwrap()
.as_secs(), .as_secs(),
prev_randao: *head *head
.beacon_state .beacon_state
.get_randao_mix(head.beacon_state.current_epoch()) .get_randao_mix(head.beacon_state.current_epoch())
.unwrap(), .unwrap(),
suggested_fee_recipient: fee_recipient, fee_recipient,
}; None,
);
assert_eq!(rig.previous_payload_attributes(), payload_attributes); assert_eq!(rig.previous_payload_attributes(), payload_attributes);
} }
@ -1138,7 +1141,7 @@ async fn payload_preparation_before_transition_block() {
let (fork_choice_state, payload_attributes) = rig.previous_forkchoice_update_params(); let (fork_choice_state, payload_attributes) = rig.previous_forkchoice_update_params();
let latest_block_hash = rig.latest_execution_block_hash(); let latest_block_hash = rig.latest_execution_block_hash();
assert_eq!(payload_attributes.suggested_fee_recipient, fee_recipient); assert_eq!(payload_attributes.suggested_fee_recipient(), fee_recipient);
assert_eq!(fork_choice_state.head_block_hash, latest_block_hash); assert_eq!(fork_choice_state.head_block_hash, latest_block_hash);
} }
@ -1385,18 +1388,16 @@ async fn build_optimistic_chain(
.body() .body()
.execution_payload() .execution_payload()
.unwrap() .unwrap()
.execution_payload .is_default_with_empty_roots(),
== <_>::default(),
"the block *has not* undergone the merge transition" "the block *has not* undergone the merge transition"
); );
assert!( assert!(
post_transition_block !post_transition_block
.message() .message()
.body() .body()
.execution_payload() .execution_payload()
.unwrap() .unwrap()
.execution_payload .is_default_with_empty_roots(),
!= <_>::default(),
"the block *has* undergone the merge transition" "the block *has* undergone the merge transition"
); );

View File

@ -2,6 +2,7 @@
use beacon_chain::attestation_verification::Error as AttnError; use beacon_chain::attestation_verification::Error as AttnError;
use beacon_chain::builder::BeaconChainBuilder; use beacon_chain::builder::BeaconChainBuilder;
use beacon_chain::schema_change::migrate_schema;
use beacon_chain::test_utils::{ use beacon_chain::test_utils::{
test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType,
}; };
@ -22,6 +23,7 @@ use std::collections::HashSet;
use std::convert::TryInto; use std::convert::TryInto;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION};
use store::{ use store::{
iter::{BlockRootsIterator, StateRootsIterator}, iter::{BlockRootsIterator, StateRootsIterator},
HotColdDB, LevelDB, StoreConfig, HotColdDB, LevelDB, StoreConfig,
@ -68,6 +70,7 @@ fn get_harness(
let harness = BeaconChainHarness::builder(MinimalEthSpec) let harness = BeaconChainHarness::builder(MinimalEthSpec)
.default_spec() .default_spec()
.keypairs(KEYPAIRS[0..validator_count].to_vec()) .keypairs(KEYPAIRS[0..validator_count].to_vec())
.logger(store.logger().clone())
.fresh_disk_store(store) .fresh_disk_store(store)
.mock_execution_layer() .mock_execution_layer()
.build(); .build();
@ -1013,8 +1016,8 @@ fn check_shuffling_compatible(
// Ensure blocks from abandoned forks are pruned from the Hot DB // Ensure blocks from abandoned forks are pruned from the Hot DB
#[tokio::test] #[tokio::test]
async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { async fn prunes_abandoned_fork_between_two_finalized_checkpoints() {
const HONEST_VALIDATOR_COUNT: usize = 16 + 0; const HONEST_VALIDATOR_COUNT: usize = 32 + 0;
const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0;
const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT;
let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect(); let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
@ -1123,8 +1126,8 @@ async fn prunes_abandoned_fork_between_two_finalized_checkpoints() {
#[tokio::test] #[tokio::test]
async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() {
const HONEST_VALIDATOR_COUNT: usize = 16 + 0; const HONEST_VALIDATOR_COUNT: usize = 32 + 0;
const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0;
const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT;
let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect(); let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
@ -1255,8 +1258,8 @@ async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() {
#[tokio::test] #[tokio::test]
async fn pruning_does_not_touch_blocks_prior_to_finalization() { async fn pruning_does_not_touch_blocks_prior_to_finalization() {
const HONEST_VALIDATOR_COUNT: usize = 16; const HONEST_VALIDATOR_COUNT: usize = 32;
const ADVERSARIAL_VALIDATOR_COUNT: usize = 8; const ADVERSARIAL_VALIDATOR_COUNT: usize = 16;
const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT;
let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect(); let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
@ -1350,8 +1353,8 @@ async fn pruning_does_not_touch_blocks_prior_to_finalization() {
#[tokio::test] #[tokio::test]
async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { async fn prunes_fork_growing_past_youngest_finalized_checkpoint() {
const HONEST_VALIDATOR_COUNT: usize = 16 + 0; const HONEST_VALIDATOR_COUNT: usize = 32 + 0;
const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0;
const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT;
let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect(); let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
@ -1495,8 +1498,8 @@ async fn prunes_fork_growing_past_youngest_finalized_checkpoint() {
// This is to check if state outside of normal block processing are pruned correctly. // This is to check if state outside of normal block processing are pruned correctly.
#[tokio::test] #[tokio::test]
async fn prunes_skipped_slots_states() { async fn prunes_skipped_slots_states() {
const HONEST_VALIDATOR_COUNT: usize = 16 + 0; const HONEST_VALIDATOR_COUNT: usize = 32 + 0;
const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0;
const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT;
let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect(); let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
@ -1624,8 +1627,8 @@ async fn prunes_skipped_slots_states() {
// This is to check if state outside of normal block processing are pruned correctly. // This is to check if state outside of normal block processing are pruned correctly.
#[tokio::test] #[tokio::test]
async fn finalizes_non_epoch_start_slot() { async fn finalizes_non_epoch_start_slot() {
const HONEST_VALIDATOR_COUNT: usize = 16 + 0; const HONEST_VALIDATOR_COUNT: usize = 32 + 0;
const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0;
const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT;
let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect(); let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
@ -2529,6 +2532,91 @@ async fn revert_minority_fork_on_resume() {
assert_eq!(heads.len(), 1); assert_eq!(heads.len(), 1);
} }
// This test checks whether the schema downgrade from the latest version to some minimum supported
// version is correct. This is the easiest schema test to write without historic versions of
// Lighthouse on-hand, but has the disadvantage that the min version needs to be adjusted manually
// as old downgrades are deprecated.
#[tokio::test]
async fn schema_downgrade_to_min_version() {
    let num_blocks_produced = E::slots_per_epoch() * 4;
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);
    let spec = &harness.chain.spec.clone();

    // Build a few epochs of chain so there is meaningful state to migrate.
    harness
        .extend_chain(
            num_blocks_produced as usize,
            BlockStrategy::OnCanonicalHead,
            AttestationStrategy::AllValidators,
        )
        .await;

    let min_version = if harness.spec.capella_fork_epoch.is_some() {
        // Can't downgrade beyond V14 once Capella is reached, for simplicity don't test that
        // at all if Capella is enabled.
        SchemaVersion(14)
    } else {
        SchemaVersion(11)
    };

    // Close the database to ensure everything is written to disk.
    drop(store);
    drop(harness);

    // Re-open the store.
    let store = get_store(&db_path);

    // Downgrade.
    let deposit_contract_deploy_block = 0;
    migrate_schema::<DiskHarnessType<E>>(
        store.clone(),
        deposit_contract_deploy_block,
        CURRENT_SCHEMA_VERSION,
        min_version,
        store.logger().clone(),
        spec,
    )
    .expect("schema downgrade to minimum version should work");

    // Upgrade back.
    migrate_schema::<DiskHarnessType<E>>(
        store.clone(),
        deposit_contract_deploy_block,
        min_version,
        CURRENT_SCHEMA_VERSION,
        store.logger().clone(),
        spec,
    )
    .expect("schema upgrade from minimum version should work");

    // Recreate the harness on the round-tripped store.
    let harness = BeaconChainHarness::builder(MinimalEthSpec)
        .default_spec()
        .keypairs(KEYPAIRS[0..LOW_VALIDATOR_COUNT].to_vec())
        .logger(store.logger().clone())
        .resumed_disk_store(store.clone())
        .mock_execution_layer()
        .build();

    // Sanity-check that the migrated database still yields a coherent chain.
    check_finalization(&harness, num_blocks_produced);
    check_split_slot(&harness, store.clone());
    check_chain_dump(&harness, num_blocks_produced + 1);
    check_iterators(&harness);

    // Check that downgrading beyond the minimum version fails (bound is *tight*).
    let min_version_sub_1 = SchemaVersion(min_version.as_u64().checked_sub(1).unwrap());
    migrate_schema::<DiskHarnessType<E>>(
        store.clone(),
        deposit_contract_deploy_block,
        CURRENT_SCHEMA_VERSION,
        min_version_sub_1,
        harness.logger().clone(),
        spec,
    )
    .expect_err("should not downgrade below minimum version");
}
/// Checks that two chains are the same, for the purpose of these tests. /// Checks that two chains are the same, for the purpose of these tests.
/// ///
/// Several fields that are hard/impossible to check are ignored (e.g., the store). /// Several fields that are hard/impossible to check are ignored (e.g., the store).

View File

@ -45,6 +45,7 @@ fn get_valid_sync_committee_message(
harness: &BeaconChainHarness<EphemeralHarnessType<E>>, harness: &BeaconChainHarness<EphemeralHarnessType<E>>,
slot: Slot, slot: Slot,
relative_sync_committee: RelativeSyncCommittee, relative_sync_committee: RelativeSyncCommittee,
message_index: usize,
) -> (SyncCommitteeMessage, usize, SecretKey, SyncSubnetId) { ) -> (SyncCommitteeMessage, usize, SecretKey, SyncSubnetId) {
let head_state = harness.chain.head_beacon_state_cloned(); let head_state = harness.chain.head_beacon_state_cloned();
let head_block_root = harness.chain.head_snapshot().beacon_block_root; let head_block_root = harness.chain.head_snapshot().beacon_block_root;
@ -52,7 +53,7 @@ fn get_valid_sync_committee_message(
.make_sync_committee_messages(&head_state, head_block_root, slot, relative_sync_committee) .make_sync_committee_messages(&head_state, head_block_root, slot, relative_sync_committee)
.get(0) .get(0)
.expect("sync messages should exist") .expect("sync messages should exist")
.get(0) .get(message_index)
.expect("first sync message should exist") .expect("first sync message should exist")
.clone(); .clone();
@ -494,7 +495,7 @@ async fn unaggregated_gossip_verification() {
let current_slot = harness.chain.slot().expect("should get slot"); let current_slot = harness.chain.slot().expect("should get slot");
let (valid_sync_committee_message, expected_validator_index, validator_sk, subnet_id) = let (valid_sync_committee_message, expected_validator_index, validator_sk, subnet_id) =
get_valid_sync_committee_message(&harness, current_slot, RelativeSyncCommittee::Current); get_valid_sync_committee_message(&harness, current_slot, RelativeSyncCommittee::Current, 0);
macro_rules! assert_invalid { macro_rules! assert_invalid {
($desc: tt, $attn_getter: expr, $subnet_getter: expr, $($error: pat_param) |+ $( if $guard: expr )?) => { ($desc: tt, $attn_getter: expr, $subnet_getter: expr, $($error: pat_param) |+ $( if $guard: expr )?) => {
@ -644,7 +645,7 @@ async fn unaggregated_gossip_verification() {
// **Incorrectly** create a sync message using the current sync committee // **Incorrectly** create a sync message using the current sync committee
let (next_valid_sync_committee_message, _, _, next_subnet_id) = let (next_valid_sync_committee_message, _, _, next_subnet_id) =
get_valid_sync_committee_message(&harness, target_slot, RelativeSyncCommittee::Current); get_valid_sync_committee_message(&harness, target_slot, RelativeSyncCommittee::Current, 1);
assert_invalid!( assert_invalid!(
"sync message on incorrect subnet", "sync message on incorrect subnet",

View File

@ -19,7 +19,7 @@ use types::{
}; };
// Should ideally be divisible by 3. // Should ideally be divisible by 3.
pub const VALIDATOR_COUNT: usize = 24; pub const VALIDATOR_COUNT: usize = 48;
lazy_static! { lazy_static! {
/// A cached set of keys. /// A cached set of keys.

View File

@ -1,6 +1,6 @@
use eth2::types::builder_bid::SignedBuilderBid; use eth2::types::builder_bid::SignedBuilderBid;
use eth2::types::{ use eth2::types::{
BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, ExecutionPayload, AbstractExecPayload, BlindedPayload, EthSpec, ExecutionBlockHash, ExecutionPayload,
ForkVersionedResponse, PublicKeyBytes, SignedBeaconBlock, SignedValidatorRegistrationData, ForkVersionedResponse, PublicKeyBytes, SignedBeaconBlock, SignedValidatorRegistrationData,
Slot, Slot,
}; };
@ -160,7 +160,7 @@ impl BuilderHttpClient {
} }
/// `GET /eth/v1/builder/header` /// `GET /eth/v1/builder/header`
pub async fn get_builder_header<E: EthSpec, Payload: ExecPayload<E>>( pub async fn get_builder_header<E: EthSpec, Payload: AbstractExecPayload<E>>(
&self, &self,
slot: Slot, slot: Slot,
parent_hash: ExecutionBlockHash, parent_hash: ExecutionBlockHash,

View File

@ -6,6 +6,10 @@ edition = "2021"
[dev-dependencies] [dev-dependencies]
serde_yaml = "0.8.13" serde_yaml = "0.8.13"
logging = { path = "../../common/logging" }
state_processing = { path = "../../consensus/state_processing" }
operation_pool = { path = "../operation_pool" }
tokio = "1.14.0"
[dependencies] [dependencies]
beacon_chain = { path = "../beacon_chain" } beacon_chain = { path = "../beacon_chain" }

View File

@ -0,0 +1,322 @@
use crate::*;
use lighthouse_network::PubsubMessage;
use network::NetworkMessage;
use slog::{debug, info, warn, Logger};
use slot_clock::SlotClock;
use std::cmp;
use std::collections::HashSet;
use std::mem;
use std::time::Duration;
use tokio::sync::mpsc::UnboundedSender;
use tokio::time::sleep;
use types::EthSpec;
/// The size of each chunk of addresses changes to be broadcast at the Capella
/// fork.
const BROADCAST_CHUNK_SIZE: usize = 128;
/// The delay between broadcasting each chunk.
const BROADCAST_CHUNK_DELAY: Duration = Duration::from_millis(500);
/// If the Capella fork has already been reached, `broadcast_address_changes` is
/// called immediately.
///
/// If the Capella fork has not been reached, waits until the start of the fork
/// epoch and then calls `broadcast_address_changes`.
///
/// Returns immediately without broadcasting anything if the Capella fork epoch
/// is not scheduled in `chain.spec`.
pub async fn broadcast_address_changes_at_capella<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
    network_send: UnboundedSender<NetworkMessage<T::EthSpec>>,
    log: &Logger,
) {
    let spec = &chain.spec;
    let slot_clock = &chain.slot_clock;

    let capella_fork_slot = if let Some(epoch) = spec.capella_fork_epoch {
        epoch.start_slot(T::EthSpec::slots_per_epoch())
    } else {
        // Exit now if Capella is not defined.
        return;
    };

    // Wait until the Capella fork epoch.
    while chain.slot().map_or(true, |slot| slot < capella_fork_slot) {
        match slot_clock.duration_to_slot(capella_fork_slot) {
            Some(duration) => {
                // Sleep until the Capella fork.
                sleep(duration).await;
                break;
            }
            None => {
                // We were unable to read the slot clock; wait another slot
                // and then try again.
                sleep(slot_clock.slot_duration()).await;
            }
        }
    }

    // The following function will be called in two scenarios:
    //
    // 1. The node has been running for some time and the Capella fork has just
    //    been reached.
    // 2. The node has just started and it is *after* the Capella fork.
    broadcast_address_changes(chain, network_send, log).await
}
/// Broadcasts any address changes that are flagged for broadcasting at the
/// Capella fork epoch.
///
/// Address changes are published in chunks, with a delay between each chunk.
/// This helps reduce the load on the P2P network and also helps prevent us from
/// clogging our `network_send` channel and being late to publish
/// blocks, attestations, etc.
pub async fn broadcast_address_changes<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
    network_send: UnboundedSender<NetworkMessage<T::EthSpec>>,
    log: &Logger,
) {
    let head = chain.head_snapshot();
    // All changes received before Capella that still need broadcasting.
    let mut changes = chain
        .op_pool
        .get_bls_to_execution_changes_received_pre_capella(&head.beacon_state, &chain.spec);

    while !changes.is_empty() {
        // This `split_off` approach is to allow us to have owned chunks of the
        // `changes` vec. The `std::slice::Chunks` method uses references and
        // the `itertools` iterator that achieves this isn't `Send` so it doesn't
        // work well with the `sleep` at the end of the loop.
        let tail = changes.split_off(cmp::min(BROADCAST_CHUNK_SIZE, changes.len()));
        let chunk = mem::replace(&mut changes, tail);

        let mut published_indices = HashSet::with_capacity(BROADCAST_CHUNK_SIZE);
        let mut num_ok = 0;
        let mut num_err = 0;

        // Publish each individual address change.
        for address_change in chunk {
            let validator_index = address_change.message.validator_index;
            let pubsub_message = PubsubMessage::BlsToExecutionChange(Box::new(address_change));
            let message = NetworkMessage::Publish {
                messages: vec![pubsub_message],
            };
            // It seems highly unlikely that this unbounded send will fail, but
            // we handle the result nonetheless.
            if let Err(e) = network_send.send(message) {
                debug!(
                    log,
                    "Failed to publish change message";
                    "error" => ?e,
                    "validator_index" => validator_index
                );
                num_err += 1;
            } else {
                debug!(
                    log,
                    "Published address change message";
                    "validator_index" => validator_index
                );
                num_ok += 1;
                published_indices.insert(validator_index);
            }
        }

        // Remove any published indices from the list of indices that need to be
        // published.
        chain
            .op_pool
            .register_indices_broadcasted_at_capella(&published_indices);

        info!(
            log,
            "Published address change messages";
            "num_published" => num_ok,
        );

        if num_err > 0 {
            warn!(
                log,
                "Failed to publish address changes";
                "info" => "failed messages will be retried",
                "num_unable_to_publish" => num_err,
            );
        }

        // Pause between chunks to avoid flooding the network and the
        // `network_send` channel.
        sleep(BROADCAST_CHUNK_DELAY).await;
    }

    debug!(
        log,
        "Address change routine complete";
    );
}
#[cfg(not(debug_assertions))] // Tests run too slow in debug.
#[cfg(test)]
mod tests {
    use super::*;
    use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType};
    use operation_pool::ReceivedPreCapella;
    use state_processing::{SigVerifiedOp, VerifyOperation};
    use std::collections::HashSet;
    use tokio::sync::mpsc;
    use types::*;

    type E = MainnetEthSpec;

    /// Enough validators that broadcasting requires multiple chunks.
    pub const VALIDATOR_COUNT: usize = BROADCAST_CHUNK_SIZE * 3;
    /// Withdrawal address used for every generated address change.
    pub const EXECUTION_ADDRESS: Address = Address::repeat_byte(42);

    /// Test fixture: a beacon chain harness plus the address changes we
    /// expect (and don't expect) to be published at the Capella fork.
    struct Tester {
        harness: BeaconChainHarness<EphemeralHarnessType<E>>,
        /// Changes which should be broadcast at the Capella fork.
        received_pre_capella_changes: Vec<SigVerifiedOp<SignedBlsToExecutionChange, E>>,
        /// Changes which should *not* be broadcast at the Capella fork.
        not_received_pre_capella_changes: Vec<SigVerifiedOp<SignedBlsToExecutionChange, E>>,
    }

    impl Tester {
        /// Build a harness with Altair and Bellatrix enabled from genesis and
        /// the Capella fork scheduled at epoch 2.
        fn new() -> Self {
            let mut spec = E::default_spec();
            spec.altair_fork_epoch = Some(Epoch::new(0));
            spec.bellatrix_fork_epoch = Some(Epoch::new(0));
            spec.capella_fork_epoch = Some(Epoch::new(2));

            let harness = BeaconChainHarness::builder(E::default())
                .spec(spec)
                .logger(logging::test_logger())
                .deterministic_keypairs(VALIDATOR_COUNT)
                .deterministic_withdrawal_keypairs(VALIDATOR_COUNT)
                .fresh_ephemeral_store()
                .mock_execution_layer()
                .build();

            Self {
                harness,
                received_pre_capella_changes: Vec::new(),
                not_received_pre_capella_changes: Vec::new(),
            }
        }

        /// Create a BLS-to-execution change for `validator_index` and verify
        /// its signature against the current head state.
        fn produce_verified_address_change(
            &self,
            validator_index: u64,
        ) -> SigVerifiedOp<SignedBlsToExecutionChange, E> {
            let unverified = self
                .harness
                .make_bls_to_execution_change(validator_index, EXECUTION_ADDRESS);
            let head = self.harness.chain.head_snapshot();
            unverified
                .validate(&head.beacon_state, &self.harness.spec)
                .unwrap()
        }

        /// Queue changes that will be inserted as received pre-Capella
        /// (i.e. they should be broadcast at the fork).
        fn produce_received_pre_capella_changes(mut self, indices: Vec<u64>) -> Self {
            let verified: Vec<_> = indices
                .into_iter()
                .map(|index| self.produce_verified_address_change(index))
                .collect();
            self.received_pre_capella_changes.extend(verified);
            self
        }

        /// Queue changes that will be inserted as NOT received pre-Capella
        /// (i.e. they must stay unpublished).
        fn produce_not_received_pre_capella_changes(mut self, indices: Vec<u64>) -> Self {
            let verified: Vec<_> = indices
                .into_iter()
                .map(|index| self.produce_verified_address_change(index))
                .collect();
            self.not_received_pre_capella_changes.extend(verified);
            self
        }

        /// Insert the queued changes into the op pool, advance to the Capella
        /// fork slot, run the broadcast routine, and assert that exactly the
        /// flagged changes were published.
        async fn run(self) {
            let harness = self.harness;
            let chain = harness.chain.clone();

            // Pool the changes flagged for broadcast, remembering their indices.
            let mut broadcast_indices = HashSet::new();
            for verified in self.received_pre_capella_changes {
                broadcast_indices.insert(verified.as_inner().message.validator_index);
                chain
                    .op_pool
                    .insert_bls_to_execution_change(verified, ReceivedPreCapella::Yes);
            }

            // Pool the changes that must not be broadcast.
            let mut non_broadcast_indices = HashSet::new();
            for verified in self.not_received_pre_capella_changes {
                non_broadcast_indices.insert(verified.as_inner().message.validator_index);
                chain
                    .op_pool
                    .insert_bls_to_execution_change(verified, ReceivedPreCapella::No);
            }

            // Move the clock to the first slot of the Capella fork epoch.
            let capella_slot = chain
                .spec
                .capella_fork_epoch
                .unwrap()
                .start_slot(E::slots_per_epoch());
            harness.set_current_slot(capella_slot);

            let (sender, mut receiver) = mpsc::unbounded_channel();
            broadcast_address_changes_at_capella(&chain, sender, &logging::test_logger()).await;

            // Drain every publish message from the network channel; stop as
            // soon as the channel closes or yields a non-publish message.
            let mut broadcasted_changes = vec![];
            loop {
                match receiver.recv().await {
                    Some(NetworkMessage::Publish { mut messages }) => {
                        match messages.pop().unwrap() {
                            PubsubMessage::BlsToExecutionChange(change) => {
                                broadcasted_changes.push(change)
                            }
                            _ => panic!("unexpected message"),
                        }
                    }
                    _ => break,
                }
            }

            assert_eq!(
                broadcasted_changes.len(),
                broadcast_indices.len(),
                "all expected changes should have been broadcast"
            );
            for broadcasted in &broadcasted_changes {
                assert!(
                    !non_broadcast_indices.contains(&broadcasted.message.validator_index),
                    "messages not flagged for broadcast should not have been broadcast"
                );
            }

            // Everything published should have been marked as broadcast,
            // leaving the pre-Capella set in the op pool empty.
            let head = chain.head_snapshot();
            assert!(
                chain
                    .op_pool
                    .get_bls_to_execution_changes_received_pre_capella(
                        &head.beacon_state,
                        &chain.spec,
                    )
                    .is_empty(),
                "there shouldn't be any capella broadcast changes left in the op pool"
            );
        }
    }

    // Useful for generating even-numbered indices. Required since only even
    // numbered genesis validators have BLS credentials.
    fn even_indices(start: u64, count: usize) -> Vec<u64> {
        (start..)
            .filter(|candidate| candidate % 2 == 0)
            .take(count)
            .collect()
    }

    /// All flagged changes fit into a single broadcast chunk.
    #[tokio::test]
    async fn one_chunk() {
        Tester::new()
            .produce_received_pre_capella_changes(even_indices(0, 4))
            .produce_not_received_pre_capella_changes(even_indices(10, 4))
            .run()
            .await;
    }

    /// The flagged changes span more than one broadcast chunk.
    #[tokio::test]
    async fn multiple_chunks() {
        Tester::new()
            .produce_received_pre_capella_changes(even_indices(0, BROADCAST_CHUNK_SIZE * 3 / 2))
            .run()
            .await;
    }
}

View File

@ -1,3 +1,4 @@
use crate::address_change_broadcast::broadcast_address_changes_at_capella;
use crate::config::{ClientGenesis, Config as ClientConfig}; use crate::config::{ClientGenesis, Config as ClientConfig};
use crate::notifier::spawn_notifier; use crate::notifier::spawn_notifier;
use crate::Client; use crate::Client;
@ -802,6 +803,25 @@ where
// Spawns a routine that polls the `exchange_transition_configuration` endpoint. // Spawns a routine that polls the `exchange_transition_configuration` endpoint.
execution_layer.spawn_transition_configuration_poll(beacon_chain.spec.clone()); execution_layer.spawn_transition_configuration_poll(beacon_chain.spec.clone());
} }
// Spawn a service to publish BLS to execution changes at the Capella fork.
if let Some(network_senders) = self.network_senders {
let inner_chain = beacon_chain.clone();
let broadcast_context =
runtime_context.service_context("addr_bcast".to_string());
let log = broadcast_context.log().clone();
broadcast_context.executor.spawn(
async move {
broadcast_address_changes_at_capella(
&inner_chain,
network_senders.network_send(),
&log,
)
.await
},
"addr_broadcast",
);
}
} }
start_proposer_prep_service(runtime_context.executor.clone(), beacon_chain.clone()); start_proposer_prep_service(runtime_context.executor.clone(), beacon_chain.clone());

View File

@ -1,5 +1,6 @@
extern crate slog; extern crate slog;
mod address_change_broadcast;
pub mod config; pub mod config;
mod metrics; mod metrics;
mod notifier; mod notifier;

View File

@ -1,5 +1,6 @@
use crate::metrics; use crate::metrics;
use beacon_chain::{ use beacon_chain::{
capella_readiness::CapellaReadiness,
merge_readiness::{MergeConfig, MergeReadiness}, merge_readiness::{MergeConfig, MergeReadiness},
BeaconChain, BeaconChainTypes, ExecutionStatus, BeaconChain, BeaconChainTypes, ExecutionStatus,
}; };
@ -313,6 +314,7 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
eth1_logging(&beacon_chain, &log); eth1_logging(&beacon_chain, &log);
merge_readiness_logging(current_slot, &beacon_chain, &log).await; merge_readiness_logging(current_slot, &beacon_chain, &log).await;
capella_readiness_logging(current_slot, &beacon_chain, &log).await;
} }
}; };
@ -350,12 +352,15 @@ async fn merge_readiness_logging<T: BeaconChainTypes>(
} }
if merge_completed && !has_execution_layer { if merge_completed && !has_execution_layer {
error!( if !beacon_chain.is_time_to_prepare_for_capella(current_slot) {
log, // logging of the EE being offline is handled in `capella_readiness_logging()`
"Execution endpoint required"; error!(
"info" => "you need an execution engine to validate blocks, see: \ log,
https://lighthouse-book.sigmaprime.io/merge-migration.html" "Execution endpoint required";
); "info" => "you need an execution engine to validate blocks, see: \
https://lighthouse-book.sigmaprime.io/merge-migration.html"
);
}
return; return;
} }
@ -419,6 +424,61 @@ async fn merge_readiness_logging<T: BeaconChainTypes>(
} }
} }
/// Provides some helpful logging to users to indicate if their node is ready for Capella
async fn capella_readiness_logging<T: BeaconChainTypes>(
current_slot: Slot,
beacon_chain: &BeaconChain<T>,
log: &Logger,
) {
let capella_completed = beacon_chain
.canonical_head
.cached_head()
.snapshot
.beacon_block
.message()
.body()
.execution_payload()
.map_or(false, |payload| payload.withdrawals_root().is_ok());
let has_execution_layer = beacon_chain.execution_layer.is_some();
if capella_completed && has_execution_layer
|| !beacon_chain.is_time_to_prepare_for_capella(current_slot)
{
return;
}
if capella_completed && !has_execution_layer {
error!(
log,
"Execution endpoint required";
"info" => "you need a Capella enabled execution engine to validate blocks, see: \
https://lighthouse-book.sigmaprime.io/merge-migration.html"
);
return;
}
match beacon_chain.check_capella_readiness().await {
CapellaReadiness::Ready => {
info!(log, "Ready for Capella")
}
readiness @ CapellaReadiness::ExchangeCapabilitiesFailed { error: _ } => {
error!(
log,
"Not ready for Capella";
"hint" => "the execution endpoint may be offline",
"info" => %readiness,
)
}
readiness => warn!(
log,
"Not ready for Capella";
"hint" => "try updating the execution endpoint",
"info" => %readiness,
),
}
}
fn eth1_logging<T: BeaconChainTypes>(beacon_chain: &BeaconChain<T>, log: &Logger) { fn eth1_logging<T: BeaconChainTypes>(beacon_chain: &BeaconChain<T>, log: &Logger) {
let current_slot_opt = beacon_chain.slot().ok(); let current_slot_opt = beacon_chain.slot().ok();

View File

@ -21,7 +21,7 @@ hex = "0.4.2"
types = { path = "../../consensus/types"} types = { path = "../../consensus/types"}
merkle_proof = { path = "../../consensus/merkle_proof"} merkle_proof = { path = "../../consensus/merkle_proof"}
eth2_ssz = "0.4.1" eth2_ssz = "0.4.1"
eth2_ssz_derive = "0.3.0" eth2_ssz_derive = "0.3.1"
tree_hash = "0.4.1" tree_hash = "0.4.1"
parking_lot = "0.12.0" parking_lot = "0.12.0"
slog = "2.5.2" slog = "2.5.2"

View File

@ -697,6 +697,7 @@ mod fast {
let web3 = eth1.web3(); let web3 = eth1.web3();
let now = get_block_number(&web3).await; let now = get_block_number(&web3).await;
let spec = MainnetEthSpec::default_spec();
let service = Service::new( let service = Service::new(
Config { Config {
endpoint: Eth1Endpoint::NoAuth( endpoint: Eth1Endpoint::NoAuth(
@ -710,7 +711,7 @@ mod fast {
..Config::default() ..Config::default()
}, },
log, log,
MainnetEthSpec::default_spec(), spec.clone(),
) )
.unwrap(); .unwrap();
let client = let client =

View File

@ -26,6 +26,7 @@ eth2_ssz = "0.4.1"
eth2_ssz_types = "0.2.2" eth2_ssz_types = "0.2.2"
eth2 = { path = "../../common/eth2" } eth2 = { path = "../../common/eth2" }
state_processing = { path = "../../consensus/state_processing" } state_processing = { path = "../../consensus/state_processing" }
superstruct = "0.6.0"
lru = "0.7.1" lru = "0.7.1"
exit-future = "0.2.0" exit-future = "0.2.0"
tree_hash = "0.4.1" tree_hash = "0.4.1"
@ -40,9 +41,9 @@ lazy_static = "1.4.0"
ethers-core = "1.0.2" ethers-core = "1.0.2"
builder_client = { path = "../builder_client" } builder_client = { path = "../builder_client" }
fork_choice = { path = "../../consensus/fork_choice" } fork_choice = { path = "../../consensus/fork_choice" }
mev-build-rs = { git = "https://github.com/ralexstokes/mev-rs", rev = "6c99b0fbdc0427b1625469d2e575303ce08de5b8" } mev-rs = { git = "https://github.com/ralexstokes/mev-rs" }
ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus", rev = "a8110af76d97bf2bf27fb987a671808fcbdf1834" } ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus" }
ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs", rev = "cb08f1" } ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs" }
tokio-stream = { version = "0.1.9", features = [ "sync" ] } tokio-stream = { version = "0.1.9", features = [ "sync" ] }
strum = "0.24.0" strum = "0.24.0"
keccak-hash = "0.10.0" keccak-hash = "0.10.0"

View File

@ -1,4 +1,5 @@
use crate::{ use crate::{
json_structures::JsonWithdrawal,
keccak::{keccak256, KeccakHasher}, keccak::{keccak256, KeccakHasher},
metrics, Error, ExecutionLayer, metrics, Error, ExecutionLayer,
}; };
@ -6,39 +7,51 @@ use ethers_core::utils::rlp::RlpStream;
use keccak_hash::KECCAK_EMPTY_LIST_RLP; use keccak_hash::KECCAK_EMPTY_LIST_RLP;
use triehash::ordered_trie_root; use triehash::ordered_trie_root;
use types::{ use types::{
map_execution_block_header_fields, Address, EthSpec, ExecutionBlockHash, ExecutionBlockHeader, map_execution_block_header_fields_except_withdrawals, Address, EthSpec, ExecutionBlockHash,
ExecutionPayload, Hash256, Hash64, Uint256, ExecutionBlockHeader, ExecutionPayloadRef, Hash256, Hash64, Uint256,
}; };
impl<T: EthSpec> ExecutionLayer<T> { impl<T: EthSpec> ExecutionLayer<T> {
/// Verify `payload.block_hash` locally within Lighthouse. /// Verify `payload.block_hash` locally within Lighthouse.
/// ///
/// No remote calls to the execution client will be made, so this is quite a cheap check. /// No remote calls to the execution client will be made, so this is quite a cheap check.
pub fn verify_payload_block_hash(&self, payload: &ExecutionPayload<T>) -> Result<(), Error> { pub fn verify_payload_block_hash(&self, payload: ExecutionPayloadRef<T>) -> Result<(), Error> {
let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH); let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH);
// Calculate the transactions root. // Calculate the transactions root.
// We're currently using a deprecated Parity library for this. We should move to a // We're currently using a deprecated Parity library for this. We should move to a
// better alternative when one appears, possibly following Reth. // better alternative when one appears, possibly following Reth.
let rlp_transactions_root = ordered_trie_root::<KeccakHasher, _>( let rlp_transactions_root = ordered_trie_root::<KeccakHasher, _>(
payload.transactions.iter().map(|txn_bytes| &**txn_bytes), payload.transactions().iter().map(|txn_bytes| &**txn_bytes),
); );
// Calculate withdrawals root (post-Capella).
let rlp_withdrawals_root = if let Ok(withdrawals) = payload.withdrawals() {
Some(ordered_trie_root::<KeccakHasher, _>(
withdrawals.iter().map(|withdrawal| {
rlp_encode_withdrawal(&JsonWithdrawal::from(withdrawal.clone()))
}),
))
} else {
None
};
// Construct the block header. // Construct the block header.
let exec_block_header = ExecutionBlockHeader::from_payload( let exec_block_header = ExecutionBlockHeader::from_payload(
payload, payload,
KECCAK_EMPTY_LIST_RLP.as_fixed_bytes().into(), KECCAK_EMPTY_LIST_RLP.as_fixed_bytes().into(),
rlp_transactions_root, rlp_transactions_root,
rlp_withdrawals_root,
); );
// Hash the RLP encoding of the block header. // Hash the RLP encoding of the block header.
let rlp_block_header = rlp_encode_block_header(&exec_block_header); let rlp_block_header = rlp_encode_block_header(&exec_block_header);
let header_hash = ExecutionBlockHash::from_root(keccak256(&rlp_block_header)); let header_hash = ExecutionBlockHash::from_root(keccak256(&rlp_block_header));
if header_hash != payload.block_hash { if header_hash != payload.block_hash() {
return Err(Error::BlockHashMismatch { return Err(Error::BlockHashMismatch {
computed: header_hash, computed: header_hash,
payload: payload.block_hash, payload: payload.block_hash(),
transactions_root: rlp_transactions_root, transactions_root: rlp_transactions_root,
}); });
} }
@ -47,13 +60,27 @@ impl<T: EthSpec> ExecutionLayer<T> {
} }
} }
/// RLP encode a withdrawal.
pub fn rlp_encode_withdrawal(withdrawal: &JsonWithdrawal) -> Vec<u8> {
let mut rlp_stream = RlpStream::new();
rlp_stream.begin_list(4);
rlp_stream.append(&withdrawal.index);
rlp_stream.append(&withdrawal.validator_index);
rlp_stream.append(&withdrawal.address);
rlp_stream.append(&withdrawal.amount);
rlp_stream.out().into()
}
/// RLP encode an execution block header. /// RLP encode an execution block header.
pub fn rlp_encode_block_header(header: &ExecutionBlockHeader) -> Vec<u8> { pub fn rlp_encode_block_header(header: &ExecutionBlockHeader) -> Vec<u8> {
let mut rlp_header_stream = RlpStream::new(); let mut rlp_header_stream = RlpStream::new();
rlp_header_stream.begin_unbounded_list(); rlp_header_stream.begin_unbounded_list();
map_execution_block_header_fields!(&header, |_, field| { map_execution_block_header_fields_except_withdrawals!(&header, |_, field| {
rlp_header_stream.append(field); rlp_header_stream.append(field);
}); });
if let Some(withdrawals_root) = &header.withdrawals_root {
rlp_header_stream.append(withdrawals_root);
}
rlp_header_stream.finalize_unbounded_list(); rlp_header_stream.finalize_unbounded_list();
rlp_header_stream.out().into() rlp_header_stream.out().into()
} }
@ -99,6 +126,7 @@ mod test {
mix_hash: Hash256::from_str("0000000000000000000000000000000000000000000000000000000000000000").unwrap(), mix_hash: Hash256::from_str("0000000000000000000000000000000000000000000000000000000000000000").unwrap(),
nonce: Hash64::zero(), nonce: Hash64::zero(),
base_fee_per_gas: 0x036b_u64.into(), base_fee_per_gas: 0x036b_u64.into(),
withdrawals_root: None,
}; };
let expected_rlp = "f90200a0e0a94a7a3c9617401586b1a27025d2d9671332d22d540e0af72b069170380f2aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0ec3c94b18b8a1cff7d60f8d258ec723312932928626b4c9355eb4ab3568ec7f7a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000000000088000000000000000082036b"; let expected_rlp = 
"f90200a0e0a94a7a3c9617401586b1a27025d2d9671332d22d540e0af72b069170380f2aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0ec3c94b18b8a1cff7d60f8d258ec723312932928626b4c9355eb4ab3568ec7f7a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000000000088000000000000000082036b";
let expected_hash = let expected_hash =
@ -126,6 +154,7 @@ mod test {
mix_hash: Hash256::from_str("0000000000000000000000000000000000000000000000000000000000020000").unwrap(), mix_hash: Hash256::from_str("0000000000000000000000000000000000000000000000000000000000020000").unwrap(),
nonce: Hash64::zero(), nonce: Hash64::zero(),
base_fee_per_gas: 0x036b_u64.into(), base_fee_per_gas: 0x036b_u64.into(),
withdrawals_root: None,
}; };
let expected_rlp = "f901fda0927ca537f06c783a3a2635b8805eef1c8c2124f7444ad4a3389898dd832f2dbea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0e97859b065bd8dbbb4519c7cb935024de2484c2b7f881181b4360492f0b06b82a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000002000088000000000000000082036b"; let expected_rlp = 
"f901fda0927ca537f06c783a3a2635b8805eef1c8c2124f7444ad4a3389898dd832f2dbea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0e97859b065bd8dbbb4519c7cb935024de2484c2b7f881181b4360492f0b06b82a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000002000088000000000000000082036b";
let expected_hash = let expected_hash =
@ -154,6 +183,7 @@ mod test {
mix_hash: Hash256::from_str("bf5289894b2ceab3549f92f063febbac896b280ddb18129a57cff13113c11b13").unwrap(), mix_hash: Hash256::from_str("bf5289894b2ceab3549f92f063febbac896b280ddb18129a57cff13113c11b13").unwrap(),
nonce: Hash64::zero(), nonce: Hash64::zero(),
base_fee_per_gas: 0x34187b238_u64.into(), base_fee_per_gas: 0x34187b238_u64.into(),
withdrawals_root: None,
}; };
let expected_hash = let expected_hash =
Hash256::from_str("6da69709cd5a34079b6604d29cd78fc01dacd7c6268980057ad92a2bede87351") Hash256::from_str("6da69709cd5a34079b6604d29cd78fc01dacd7c6268980057ad92a2bede87351")

View File

@ -1,14 +1,23 @@
use crate::engines::ForkChoiceState; use crate::engines::ForkchoiceState;
use crate::http::{
ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, ENGINE_FORKCHOICE_UPDATED_V1,
ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2,
ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2,
};
pub use ethers_core::types::Transaction; pub use ethers_core::types::Transaction;
use ethers_core::utils::rlp::{self, Decodable, Rlp};
use http::deposit_methods::RpcError; use http::deposit_methods::RpcError;
pub use json_structures::TransitionConfigurationV1; pub use json_structures::{JsonWithdrawal, TransitionConfigurationV1};
use reqwest::StatusCode; use reqwest::StatusCode;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::convert::TryFrom;
use strum::IntoStaticStr; use strum::IntoStaticStr;
use superstruct::superstruct;
pub use types::{ pub use types::{
Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, FixedVector, Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader,
Hash256, Uint256, VariableList, ExecutionPayloadRef, FixedVector, ForkName, Hash256, Uint256, VariableList, Withdrawal,
}; };
use types::{ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge};
pub mod auth; pub mod auth;
pub mod http; pub mod http;
@ -38,7 +47,13 @@ pub enum Error {
PayloadConversionLogicFlaw, PayloadConversionLogicFlaw,
DeserializeTransaction(ssz_types::Error), DeserializeTransaction(ssz_types::Error),
DeserializeTransactions(ssz_types::Error), DeserializeTransactions(ssz_types::Error),
DeserializeWithdrawals(ssz_types::Error),
BuilderApi(builder_client::Error), BuilderApi(builder_client::Error),
IncorrectStateVariant,
RequiredMethodUnsupported(&'static str),
UnsupportedForkVariant(String),
BadConversion(String),
RlpDecoderError(rlp::DecoderError),
} }
impl From<reqwest::Error> for Error { impl From<reqwest::Error> for Error {
@ -72,6 +87,12 @@ impl From<builder_client::Error> for Error {
} }
} }
impl From<rlp::DecoderError> for Error {
fn from(e: rlp::DecoderError) -> Self {
Error::RlpDecoderError(e)
}
}
#[derive(Clone, Copy, Debug, PartialEq, IntoStaticStr)] #[derive(Clone, Copy, Debug, PartialEq, IntoStaticStr)]
#[strum(serialize_all = "snake_case")] #[strum(serialize_all = "snake_case")]
pub enum PayloadStatusV1Status { pub enum PayloadStatusV1Status {
@ -111,9 +132,18 @@ pub struct ExecutionBlock {
pub timestamp: u64, pub timestamp: u64,
} }
/// Representation of an exection block with enough detail to reconstruct a payload. /// Representation of an execution block with enough detail to reconstruct a payload.
#[superstruct(
variants(Merge, Capella, Eip4844),
variant_attributes(
derive(Clone, Debug, PartialEq, Serialize, Deserialize,),
serde(bound = "T: EthSpec", rename_all = "camelCase"),
),
cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"),
partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant")
)]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(bound = "T: EthSpec", rename_all = "camelCase", untagged)]
pub struct ExecutionBlockWithTransactions<T: EthSpec> { pub struct ExecutionBlockWithTransactions<T: EthSpec> {
pub parent_hash: ExecutionBlockHash, pub parent_hash: ExecutionBlockHash,
#[serde(alias = "miner")] #[serde(alias = "miner")]
@ -135,16 +165,138 @@ pub struct ExecutionBlockWithTransactions<T: EthSpec> {
#[serde(with = "ssz_types::serde_utils::hex_var_list")] #[serde(with = "ssz_types::serde_utils::hex_var_list")]
pub extra_data: VariableList<u8, T::MaxExtraDataBytes>, pub extra_data: VariableList<u8, T::MaxExtraDataBytes>,
pub base_fee_per_gas: Uint256, pub base_fee_per_gas: Uint256,
#[superstruct(only(Eip4844))]
#[serde(with = "eth2_serde_utils::u256_hex_be")]
pub excess_data_gas: Uint256,
#[serde(rename = "hash")] #[serde(rename = "hash")]
pub block_hash: ExecutionBlockHash, pub block_hash: ExecutionBlockHash,
pub transactions: Vec<Transaction>, pub transactions: Vec<Transaction>,
#[superstruct(only(Capella, Eip4844))]
pub withdrawals: Vec<JsonWithdrawal>,
} }
#[derive(Clone, Copy, Debug, PartialEq)] impl<T: EthSpec> TryFrom<ExecutionPayload<T>> for ExecutionBlockWithTransactions<T> {
type Error = Error;
fn try_from(payload: ExecutionPayload<T>) -> Result<Self, Error> {
let json_payload = match payload {
ExecutionPayload::Merge(block) => Self::Merge(ExecutionBlockWithTransactionsMerge {
parent_hash: block.parent_hash,
fee_recipient: block.fee_recipient,
state_root: block.state_root,
receipts_root: block.receipts_root,
logs_bloom: block.logs_bloom,
prev_randao: block.prev_randao,
block_number: block.block_number,
gas_limit: block.gas_limit,
gas_used: block.gas_used,
timestamp: block.timestamp,
extra_data: block.extra_data,
base_fee_per_gas: block.base_fee_per_gas,
block_hash: block.block_hash,
transactions: block
.transactions
.iter()
.map(|tx| Transaction::decode(&Rlp::new(tx)))
.collect::<Result<Vec<_>, _>>()?,
}),
ExecutionPayload::Capella(block) => {
Self::Capella(ExecutionBlockWithTransactionsCapella {
parent_hash: block.parent_hash,
fee_recipient: block.fee_recipient,
state_root: block.state_root,
receipts_root: block.receipts_root,
logs_bloom: block.logs_bloom,
prev_randao: block.prev_randao,
block_number: block.block_number,
gas_limit: block.gas_limit,
gas_used: block.gas_used,
timestamp: block.timestamp,
extra_data: block.extra_data,
base_fee_per_gas: block.base_fee_per_gas,
block_hash: block.block_hash,
transactions: block
.transactions
.iter()
.map(|tx| Transaction::decode(&Rlp::new(tx)))
.collect::<Result<Vec<_>, _>>()?,
withdrawals: Vec::from(block.withdrawals)
.into_iter()
.map(|withdrawal| withdrawal.into())
.collect(),
})
}
ExecutionPayload::Eip4844(block) => {
Self::Eip4844(ExecutionBlockWithTransactionsEip4844 {
parent_hash: block.parent_hash,
fee_recipient: block.fee_recipient,
state_root: block.state_root,
receipts_root: block.receipts_root,
logs_bloom: block.logs_bloom,
prev_randao: block.prev_randao,
block_number: block.block_number,
gas_limit: block.gas_limit,
gas_used: block.gas_used,
timestamp: block.timestamp,
extra_data: block.extra_data,
base_fee_per_gas: block.base_fee_per_gas,
excess_data_gas: block.excess_data_gas,
block_hash: block.block_hash,
transactions: block
.transactions
.iter()
.map(|tx| Transaction::decode(&Rlp::new(tx)))
.collect::<Result<Vec<_>, _>>()?,
withdrawals: Vec::from(block.withdrawals)
.into_iter()
.map(|withdrawal| withdrawal.into())
.collect(),
})
}
};
Ok(json_payload)
}
}
#[superstruct(
variants(V1, V2),
variant_attributes(derive(Clone, Debug, Eq, Hash, PartialEq),),
cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"),
partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant")
)]
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct PayloadAttributes { pub struct PayloadAttributes {
#[superstruct(getter(copy))]
pub timestamp: u64, pub timestamp: u64,
#[superstruct(getter(copy))]
pub prev_randao: Hash256, pub prev_randao: Hash256,
#[superstruct(getter(copy))]
pub suggested_fee_recipient: Address, pub suggested_fee_recipient: Address,
#[superstruct(only(V2))]
pub withdrawals: Vec<Withdrawal>,
}
impl PayloadAttributes {
pub fn new(
timestamp: u64,
prev_randao: Hash256,
suggested_fee_recipient: Address,
withdrawals: Option<Vec<Withdrawal>>,
) -> Self {
match withdrawals {
Some(withdrawals) => PayloadAttributes::V2(PayloadAttributesV2 {
timestamp,
prev_randao,
suggested_fee_recipient,
withdrawals,
}),
None => PayloadAttributes::V1(PayloadAttributesV1 {
timestamp,
prev_randao,
suggested_fee_recipient,
}),
}
}
} }
#[derive(Clone, Debug, PartialEq)] #[derive(Clone, Debug, PartialEq)]
@ -166,3 +318,103 @@ pub struct ProposeBlindedBlockResponse {
pub latest_valid_hash: Option<Hash256>, pub latest_valid_hash: Option<Hash256>,
pub validation_error: Option<String>, pub validation_error: Option<String>,
} }
#[superstruct(
variants(Merge, Capella, Eip4844),
variant_attributes(derive(Clone, Debug, PartialEq),),
map_into(ExecutionPayload),
map_ref_into(ExecutionPayloadRef),
cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"),
partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant")
)]
#[derive(Clone, Debug, PartialEq)]
pub struct GetPayloadResponse<T: EthSpec> {
#[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))]
pub execution_payload: ExecutionPayloadMerge<T>,
#[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))]
pub execution_payload: ExecutionPayloadCapella<T>,
#[superstruct(only(Eip4844), partial_getter(rename = "execution_payload_eip4844"))]
pub execution_payload: ExecutionPayloadEip4844<T>,
pub block_value: Uint256,
}
impl<'a, T: EthSpec> From<GetPayloadResponseRef<'a, T>> for ExecutionPayloadRef<'a, T> {
fn from(response: GetPayloadResponseRef<'a, T>) -> Self {
map_get_payload_response_ref_into_execution_payload_ref!(&'a _, response, |inner, cons| {
cons(&inner.execution_payload)
})
}
}
impl<T: EthSpec> From<GetPayloadResponse<T>> for ExecutionPayload<T> {
fn from(response: GetPayloadResponse<T>) -> Self {
map_get_payload_response_into_execution_payload!(response, |inner, cons| {
cons(inner.execution_payload)
})
}
}
impl<T: EthSpec> From<GetPayloadResponse<T>> for (ExecutionPayload<T>, Uint256) {
fn from(response: GetPayloadResponse<T>) -> Self {
match response {
GetPayloadResponse::Merge(inner) => (
ExecutionPayload::Merge(inner.execution_payload),
inner.block_value,
),
GetPayloadResponse::Capella(inner) => (
ExecutionPayload::Capella(inner.execution_payload),
inner.block_value,
),
GetPayloadResponse::Eip4844(inner) => (
ExecutionPayload::Eip4844(inner.execution_payload),
inner.block_value,
),
}
}
}
impl<T: EthSpec> GetPayloadResponse<T> {
pub fn execution_payload_ref(&self) -> ExecutionPayloadRef<T> {
self.to_ref().into()
}
}
#[derive(Clone, Copy, Debug)]
pub struct EngineCapabilities {
pub new_payload_v1: bool,
pub new_payload_v2: bool,
pub forkchoice_updated_v1: bool,
pub forkchoice_updated_v2: bool,
pub get_payload_v1: bool,
pub get_payload_v2: bool,
pub exchange_transition_configuration_v1: bool,
}
impl EngineCapabilities {
pub fn to_response(&self) -> Vec<&str> {
let mut response = Vec::new();
if self.new_payload_v1 {
response.push(ENGINE_NEW_PAYLOAD_V1);
}
if self.new_payload_v2 {
response.push(ENGINE_NEW_PAYLOAD_V2);
}
if self.forkchoice_updated_v1 {
response.push(ENGINE_FORKCHOICE_UPDATED_V1);
}
if self.forkchoice_updated_v2 {
response.push(ENGINE_FORKCHOICE_UPDATED_V2);
}
if self.get_payload_v1 {
response.push(ENGINE_GET_PAYLOAD_V1);
}
if self.get_payload_v2 {
response.push(ENGINE_GET_PAYLOAD_V2);
}
if self.exchange_transition_configuration_v1 {
response.push(ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1);
}
response
}
}

View File

@ -7,8 +7,10 @@ use reqwest::header::CONTENT_TYPE;
use sensitive_url::SensitiveUrl; use sensitive_url::SensitiveUrl;
use serde::de::DeserializeOwned; use serde::de::DeserializeOwned;
use serde_json::json; use serde_json::json;
use std::collections::HashSet;
use tokio::sync::Mutex;
use std::time::Duration; use std::time::{Duration, Instant};
use types::EthSpec; use types::EthSpec;
pub use deposit_log::{DepositLog, Log}; pub use deposit_log::{DepositLog, Log};
@ -29,22 +31,57 @@ pub const ETH_SYNCING: &str = "eth_syncing";
pub const ETH_SYNCING_TIMEOUT: Duration = Duration::from_secs(1); pub const ETH_SYNCING_TIMEOUT: Duration = Duration::from_secs(1);
pub const ENGINE_NEW_PAYLOAD_V1: &str = "engine_newPayloadV1"; pub const ENGINE_NEW_PAYLOAD_V1: &str = "engine_newPayloadV1";
pub const ENGINE_NEW_PAYLOAD_V2: &str = "engine_newPayloadV2";
pub const ENGINE_NEW_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_NEW_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(8);
pub const ENGINE_GET_PAYLOAD_V1: &str = "engine_getPayloadV1"; pub const ENGINE_GET_PAYLOAD_V1: &str = "engine_getPayloadV1";
pub const ENGINE_GET_PAYLOAD_V2: &str = "engine_getPayloadV2";
pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2);
pub const ENGINE_GET_BLOBS_BUNDLE_V1: &str = "engine_getBlobsBundleV1";
pub const ENGINE_GET_BLOBS_BUNDLE_TIMEOUT: Duration = Duration::from_secs(2);
pub const ENGINE_FORKCHOICE_UPDATED_V1: &str = "engine_forkchoiceUpdatedV1"; pub const ENGINE_FORKCHOICE_UPDATED_V1: &str = "engine_forkchoiceUpdatedV1";
pub const ENGINE_FORKCHOICE_UPDATED_V2: &str = "engine_forkchoiceUpdatedV2";
pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_secs(8);
pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1: &str = pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1: &str =
"engine_exchangeTransitionConfigurationV1"; "engine_exchangeTransitionConfigurationV1";
pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT: Duration = Duration::from_secs(1); pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT: Duration = Duration::from_secs(1);
pub const ENGINE_EXCHANGE_CAPABILITIES: &str = "engine_exchangeCapabilities";
pub const ENGINE_EXCHANGE_CAPABILITIES_TIMEOUT: Duration = Duration::from_secs(1);
/// This error is returned during a `chainId` call by Geth. /// This error is returned during a `chainId` call by Geth.
pub const EIP155_ERROR_STR: &str = "chain not synced beyond EIP-155 replay-protection fork block"; pub const EIP155_ERROR_STR: &str = "chain not synced beyond EIP-155 replay-protection fork block";
/// This code is returned by all clients when a method is not supported
/// (verified geth, nethermind, erigon, besu)
pub const METHOD_NOT_FOUND_CODE: i64 = -32601;
/// Contains methods to convert arbitary bytes to an ETH2 deposit contract object. pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[
ENGINE_NEW_PAYLOAD_V1,
ENGINE_NEW_PAYLOAD_V2,
ENGINE_GET_PAYLOAD_V1,
ENGINE_GET_PAYLOAD_V2,
ENGINE_FORKCHOICE_UPDATED_V1,
ENGINE_FORKCHOICE_UPDATED_V2,
ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1,
];
/// This is necessary because a user might run a capella-enabled version of
/// lighthouse before they update to a capella-enabled execution engine.
// TODO (mark): rip this out once we are post-capella on mainnet
pub static PRE_CAPELLA_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities {
new_payload_v1: true,
new_payload_v2: false,
forkchoice_updated_v1: true,
forkchoice_updated_v2: false,
get_payload_v1: true,
get_payload_v2: false,
exchange_transition_configuration_v1: true,
};
/// Contains methods to convert arbitrary bytes to an ETH2 deposit contract object.
pub mod deposit_log { pub mod deposit_log {
use ssz::Decode; use ssz::Decode;
use state_processing::per_block_processing::signature_sets::deposit_pubkey_signature_message; use state_processing::per_block_processing::signature_sets::deposit_pubkey_signature_message;
@ -519,10 +556,39 @@ pub mod deposit_methods {
} }
} }
/// A cached result of `engine_exchangeCapabilities`, timestamped so callers
/// can decide whether it is still fresh enough to reuse.
#[derive(Clone, Debug)]
pub struct CapabilitiesCacheEntry {
engine_capabilities: EngineCapabilities,
// Instant at which the capabilities were fetched from the EE.
fetch_time: Instant,
}
impl CapabilitiesCacheEntry {
/// Wrap a freshly-fetched capability set, stamping it with the current time.
pub fn new(engine_capabilities: EngineCapabilities) -> Self {
Self {
engine_capabilities,
fetch_time: Instant::now(),
}
}
/// The cached capability set (cheap: `EngineCapabilities` is `Copy`).
pub fn engine_capabilities(&self) -> EngineCapabilities {
self.engine_capabilities
}
/// Time elapsed since this entry was fetched.
pub fn age(&self) -> Duration {
Instant::now().duration_since(self.fetch_time)
}
/// returns `true` if the entry's age is >= age_limit
/// (`None` means no limit: the entry is never considered stale).
pub fn older_than(&self, age_limit: Option<Duration>) -> bool {
age_limit.map_or(false, |limit| self.age() >= limit)
}
}
pub struct HttpJsonRpc { pub struct HttpJsonRpc {
pub client: Client, pub client: Client,
pub url: SensitiveUrl, pub url: SensitiveUrl,
pub execution_timeout_multiplier: u32, pub execution_timeout_multiplier: u32,
pub engine_capabilities_cache: Mutex<Option<CapabilitiesCacheEntry>>,
auth: Option<Auth>, auth: Option<Auth>,
} }
@ -535,6 +601,7 @@ impl HttpJsonRpc {
client: Client::builder().build()?, client: Client::builder().build()?,
url, url,
execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1), execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1),
engine_capabilities_cache: Mutex::new(None),
auth: None, auth: None,
}) })
} }
@ -548,6 +615,7 @@ impl HttpJsonRpc {
client: Client::builder().build()?, client: Client::builder().build()?,
url, url,
execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1), execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1),
engine_capabilities_cache: Mutex::new(None),
auth: Some(auth), auth: Some(auth),
}) })
} }
@ -654,21 +722,48 @@ impl HttpJsonRpc {
pub async fn get_block_by_hash_with_txns<T: EthSpec>( pub async fn get_block_by_hash_with_txns<T: EthSpec>(
&self, &self,
block_hash: ExecutionBlockHash, block_hash: ExecutionBlockHash,
fork: ForkName,
) -> Result<Option<ExecutionBlockWithTransactions<T>>, Error> { ) -> Result<Option<ExecutionBlockWithTransactions<T>>, Error> {
let params = json!([block_hash, true]); let params = json!([block_hash, true]);
self.rpc_request( Ok(Some(match fork {
ETH_GET_BLOCK_BY_HASH, ForkName::Merge => ExecutionBlockWithTransactions::Merge(
params, self.rpc_request(
ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier, ETH_GET_BLOCK_BY_HASH,
) params,
.await ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier,
)
.await?,
),
ForkName::Capella => ExecutionBlockWithTransactions::Capella(
self.rpc_request(
ETH_GET_BLOCK_BY_HASH,
params,
ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier,
)
.await?,
),
ForkName::Eip4844 => ExecutionBlockWithTransactions::Eip4844(
self.rpc_request(
ETH_GET_BLOCK_BY_HASH,
params,
ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier,
)
.await?,
),
ForkName::Base | ForkName::Altair => {
return Err(Error::UnsupportedForkVariant(format!(
"called get_block_by_hash_with_txns with fork {:?}",
fork
)))
}
}))
} }
pub async fn new_payload_v1<T: EthSpec>( pub async fn new_payload_v1<T: EthSpec>(
&self, &self,
execution_payload: ExecutionPayload<T>, execution_payload: ExecutionPayload<T>,
) -> Result<PayloadStatusV1, Error> { ) -> Result<PayloadStatusV1, Error> {
let params = json!([JsonExecutionPayloadV1::from(execution_payload)]); let params = json!([JsonExecutionPayload::from(execution_payload)]);
let response: JsonPayloadStatusV1 = self let response: JsonPayloadStatusV1 = self
.rpc_request( .rpc_request(
@ -681,13 +776,30 @@ impl HttpJsonRpc {
Ok(response.into()) Ok(response.into())
} }
/// Call `engine_newPayloadV2` with the given execution payload and return
/// the engine's payload status. Timeout is scaled by
/// `execution_timeout_multiplier`, as for the other engine methods.
pub async fn new_payload_v2<T: EthSpec>(
&self,
execution_payload: ExecutionPayload<T>,
) -> Result<PayloadStatusV1, Error> {
let params = json!([JsonExecutionPayload::from(execution_payload)]);
let response: JsonPayloadStatusV1 = self
.rpc_request(
ENGINE_NEW_PAYLOAD_V2,
params,
ENGINE_NEW_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier,
)
.await?;
Ok(response.into())
}
pub async fn get_payload_v1<T: EthSpec>( pub async fn get_payload_v1<T: EthSpec>(
&self, &self,
payload_id: PayloadId, payload_id: PayloadId,
) -> Result<ExecutionPayload<T>, Error> { ) -> Result<GetPayloadResponse<T>, Error> {
let params = json!([JsonPayloadIdRequest::from(payload_id)]); let params = json!([JsonPayloadIdRequest::from(payload_id)]);
let response: JsonExecutionPayloadV1<T> = self let payload_v1: JsonExecutionPayloadV1<T> = self
.rpc_request( .rpc_request(
ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V1,
params, params,
@ -695,17 +807,74 @@ impl HttpJsonRpc {
) )
.await?; .await?;
Ok(response.into()) Ok(GetPayloadResponse::Merge(GetPayloadResponseMerge {
execution_payload: payload_v1.into(),
// Set the V1 payload values from the EE to be zero. This simulates
// the pre-block-value functionality of always choosing the builder
// block.
block_value: Uint256::zero(),
}))
}
/// Call `engine_getPayloadV2`, deserialising the response as the JSON
/// shape appropriate to `fork_name` (V1 payload for Merge, V2 for Capella).
///
/// Returns `Error::UnsupportedForkVariant` for forks this endpoint cannot
/// serve (pre-Merge forks, and Eip4844 which needs a different version).
pub async fn get_payload_v2<T: EthSpec>(
&self,
fork_name: ForkName,
payload_id: PayloadId,
) -> Result<GetPayloadResponse<T>, Error> {
let params = json!([JsonPayloadIdRequest::from(payload_id)]);
match fork_name {
ForkName::Merge => {
let response: JsonGetPayloadResponseV1<T> = self
.rpc_request(
ENGINE_GET_PAYLOAD_V2,
params,
ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier,
)
.await?;
Ok(JsonGetPayloadResponse::V1(response).into())
}
ForkName::Capella => {
let response: JsonGetPayloadResponseV2<T> = self
.rpc_request(
ENGINE_GET_PAYLOAD_V2,
params,
ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier,
)
.await?;
Ok(JsonGetPayloadResponse::V2(response).into())
}
ForkName::Base | ForkName::Altair | ForkName::Eip4844 => Err(
Error::UnsupportedForkVariant(format!("called get_payload_v2 with {}", fork_name)),
),
}
}
/// Call `engine_getBlobsBundleV1` (EIP-4844) to fetch the blobs bundle for
/// a previously-initiated payload build identified by `payload_id`.
// NOTE(review): unlike the other engine calls in this impl, this timeout is
// NOT multiplied by `self.execution_timeout_multiplier` — confirm intended.
pub async fn get_blobs_bundle_v1<T: EthSpec>(
&self,
payload_id: PayloadId,
) -> Result<JsonBlobBundles<T>, Error> {
let params = json!([JsonPayloadIdRequest::from(payload_id)]);
let response: JsonBlobBundles<T> = self
.rpc_request(
ENGINE_GET_BLOBS_BUNDLE_V1,
params,
ENGINE_GET_BLOBS_BUNDLE_TIMEOUT,
)
.await?;
Ok(response)
} }
pub async fn forkchoice_updated_v1( pub async fn forkchoice_updated_v1(
&self, &self,
forkchoice_state: ForkChoiceState, forkchoice_state: ForkchoiceState,
payload_attributes: Option<PayloadAttributes>, payload_attributes: Option<PayloadAttributes>,
) -> Result<ForkchoiceUpdatedResponse, Error> { ) -> Result<ForkchoiceUpdatedResponse, Error> {
let params = json!([ let params = json!([
JsonForkChoiceStateV1::from(forkchoice_state), JsonForkchoiceStateV1::from(forkchoice_state),
payload_attributes.map(JsonPayloadAttributesV1::from) payload_attributes.map(JsonPayloadAttributes::from)
]); ]);
let response: JsonForkchoiceUpdatedV1Response = self let response: JsonForkchoiceUpdatedV1Response = self
@ -719,6 +888,27 @@ impl HttpJsonRpc {
Ok(response.into()) Ok(response.into())
} }
/// Call `engine_forkchoiceUpdatedV2` with the given forkchoice state and
/// optional payload attributes; returns the engine's forkchoice response.
pub async fn forkchoice_updated_v2(
&self,
forkchoice_state: ForkchoiceState,
payload_attributes: Option<PayloadAttributes>,
) -> Result<ForkchoiceUpdatedResponse, Error> {
let params = json!([
JsonForkchoiceStateV1::from(forkchoice_state),
payload_attributes.map(JsonPayloadAttributes::from)
]);
let response: JsonForkchoiceUpdatedV1Response = self
.rpc_request(
ENGINE_FORKCHOICE_UPDATED_V2,
params,
ENGINE_FORKCHOICE_UPDATED_TIMEOUT * self.execution_timeout_multiplier,
)
.await?;
Ok(response.into())
}
pub async fn exchange_transition_configuration_v1( pub async fn exchange_transition_configuration_v1(
&self, &self,
transition_configuration: TransitionConfigurationV1, transition_configuration: TransitionConfigurationV1,
@ -736,6 +926,118 @@ impl HttpJsonRpc {
Ok(response) Ok(response)
} }
/// Call `engine_exchangeCapabilities`, advertising `LIGHTHOUSE_CAPABILITIES`
/// and parsing the engine's supported-method set into `EngineCapabilities`.
///
/// If the engine responds with "method not found" (`METHOD_NOT_FOUND_CODE`),
/// it predates Capella support, so the hard-coded
/// `PRE_CAPELLA_ENGINE_CAPABILITIES` set is returned instead of an error.
pub async fn exchange_capabilities(&self) -> Result<EngineCapabilities, Error> {
let params = json!([LIGHTHOUSE_CAPABILITIES]);
let response: Result<HashSet<String>, _> = self
.rpc_request(
ENGINE_EXCHANGE_CAPABILITIES,
params,
ENGINE_EXCHANGE_CAPABILITIES_TIMEOUT * self.execution_timeout_multiplier,
)
.await;
match response {
// TODO (mark): rip this out once we are post capella on mainnet
Err(error) => match error {
Error::ServerMessage { code, message: _ } if code == METHOD_NOT_FOUND_CODE => {
Ok(PRE_CAPELLA_ENGINE_CAPABILITIES)
}
_ => Err(error),
},
Ok(capabilities) => Ok(EngineCapabilities {
new_payload_v1: capabilities.contains(ENGINE_NEW_PAYLOAD_V1),
new_payload_v2: capabilities.contains(ENGINE_NEW_PAYLOAD_V2),
forkchoice_updated_v1: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V1),
forkchoice_updated_v2: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V2),
get_payload_v1: capabilities.contains(ENGINE_GET_PAYLOAD_V1),
get_payload_v2: capabilities.contains(ENGINE_GET_PAYLOAD_V2),
exchange_transition_configuration_v1: capabilities
.contains(ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1),
}),
}
}
/// Drop any cached `engine_exchangeCapabilities` result so the next call to
/// `get_engine_capabilities` re-queries the execution engine.
// NOTE(review): method name misspells "capabilities" ("capabilties") —
// renaming would break external callers; confirm before fixing.
pub async fn clear_exchange_capabilties_cache(&self) {
*self.engine_capabilities_cache.lock().await = None;
}
/// Returns the execution engine capabilities resulting from a call to
/// engine_exchangeCapabilities. If the capabilities cache is not populated,
/// or if it is populated with a cached result of age >= `age_limit`, this
/// method will fetch the result from the execution engine and populate the
/// cache before returning it. Otherwise it will return a cached result from
/// a previous call.
///
/// Set `age_limit` to `None` to always return the cached result
/// Set `age_limit` to `Some(Duration::ZERO)` to force fetching from EE
pub async fn get_engine_capabilities(
&self,
age_limit: Option<Duration>,
) -> Result<EngineCapabilities, Error> {
let mut lock = self.engine_capabilities_cache.lock().await;
// Shadowed `lock`: inside the `if let` it is the cache *entry*, not the
// mutex guard. The guard stays held across the fetch so concurrent
// callers cannot race the cache refresh.
if let Some(lock) = lock.as_ref().filter(|entry| !entry.older_than(age_limit)) {
Ok(lock.engine_capabilities())
} else {
let engine_capabilities = self.exchange_capabilities().await?;
*lock = Some(CapabilitiesCacheEntry::new(engine_capabilities));
Ok(engine_capabilities)
}
}
// automatically selects the latest version of
// new_payload that the execution engine supports
//
// Prefers `engine_newPayloadV2`, falls back to V1, and errors with
// `RequiredMethodUnsupported` if the engine advertises neither.
pub async fn new_payload<T: EthSpec>(
&self,
execution_payload: ExecutionPayload<T>,
) -> Result<PayloadStatusV1, Error> {
let engine_capabilities = self.get_engine_capabilities(None).await?;
if engine_capabilities.new_payload_v2 {
self.new_payload_v2(execution_payload).await
} else if engine_capabilities.new_payload_v1 {
self.new_payload_v1(execution_payload).await
} else {
Err(Error::RequiredMethodUnsupported("engine_newPayload"))
}
}
// automatically selects the latest version of
// get_payload that the execution engine supports
//
// Prefers `engine_getPayloadV2`, falls back to V1, and errors with
// `RequiredMethodUnsupported` if the engine advertises neither.
pub async fn get_payload<T: EthSpec>(
    &self,
    fork_name: ForkName,
    payload_id: PayloadId,
) -> Result<GetPayloadResponse<T>, Error> {
    let engine_capabilities = self.get_engine_capabilities(None).await?;
    if engine_capabilities.get_payload_v2 {
        self.get_payload_v2(fork_name, payload_id).await
    } else if engine_capabilities.get_payload_v1 {
        // Bug fix: this branch previously tested `new_payload_v1`, so an
        // engine supporting engine_getPayloadV1 but not engine_newPayloadV1
        // would incorrectly hit the `RequiredMethodUnsupported` arm below.
        self.get_payload_v1(payload_id).await
    } else {
        Err(Error::RequiredMethodUnsupported("engine_getPayload"))
    }
}
// automatically selects the latest version of
// forkchoice_updated that the execution engine supports
//
// Prefers `engine_forkchoiceUpdatedV2`, falls back to V1, and errors with
// `RequiredMethodUnsupported` if the engine advertises neither.
pub async fn forkchoice_updated(
&self,
forkchoice_state: ForkchoiceState,
payload_attributes: Option<PayloadAttributes>,
) -> Result<ForkchoiceUpdatedResponse, Error> {
let engine_capabilities = self.get_engine_capabilities(None).await?;
if engine_capabilities.forkchoice_updated_v2 {
self.forkchoice_updated_v2(forkchoice_state, payload_attributes)
.await
} else if engine_capabilities.forkchoice_updated_v1 {
self.forkchoice_updated_v1(forkchoice_state, payload_attributes)
.await
} else {
Err(Error::RequiredMethodUnsupported("engine_forkchoiceUpdated"))
}
}
} }
#[cfg(test)] #[cfg(test)]
@ -746,7 +1048,7 @@ mod test {
use std::future::Future; use std::future::Future;
use std::str::FromStr; use std::str::FromStr;
use std::sync::Arc; use std::sync::Arc;
use types::{MainnetEthSpec, Transactions, Unsigned, VariableList}; use types::{ExecutionPayloadMerge, MainnetEthSpec, Transactions, Unsigned, VariableList};
struct Tester { struct Tester {
server: MockServer<MainnetEthSpec>, server: MockServer<MainnetEthSpec>,
@ -852,10 +1154,10 @@ mod test {
fn encode_transactions<E: EthSpec>( fn encode_transactions<E: EthSpec>(
transactions: Transactions<E>, transactions: Transactions<E>,
) -> Result<serde_json::Value, serde_json::Error> { ) -> Result<serde_json::Value, serde_json::Error> {
let ep: JsonExecutionPayloadV1<E> = JsonExecutionPayloadV1 { let ep: JsonExecutionPayload<E> = JsonExecutionPayload::V1(JsonExecutionPayloadV1 {
transactions, transactions,
..<_>::default() ..<_>::default()
}; });
let json = serde_json::to_value(&ep)?; let json = serde_json::to_value(&ep)?;
Ok(json.get("transactions").unwrap().clone()) Ok(json.get("transactions").unwrap().clone())
} }
@ -882,8 +1184,8 @@ mod test {
json.as_object_mut() json.as_object_mut()
.unwrap() .unwrap()
.insert("transactions".into(), transactions); .insert("transactions".into(), transactions);
let ep: JsonExecutionPayloadV1<E> = serde_json::from_value(json)?; let ep: JsonExecutionPayload<E> = serde_json::from_value(json)?;
Ok(ep.transactions) Ok(ep.transactions().clone())
} }
fn assert_transactions_serde<E: EthSpec>( fn assert_transactions_serde<E: EthSpec>(
@ -1029,16 +1331,16 @@ mod test {
|client| async move { |client| async move {
let _ = client let _ = client
.forkchoice_updated_v1( .forkchoice_updated_v1(
ForkChoiceState { ForkchoiceState {
head_block_hash: ExecutionBlockHash::repeat_byte(1), head_block_hash: ExecutionBlockHash::repeat_byte(1),
safe_block_hash: ExecutionBlockHash::repeat_byte(1), safe_block_hash: ExecutionBlockHash::repeat_byte(1),
finalized_block_hash: ExecutionBlockHash::zero(), finalized_block_hash: ExecutionBlockHash::zero(),
}, },
Some(PayloadAttributes { Some(PayloadAttributes::V1(PayloadAttributesV1 {
timestamp: 5, timestamp: 5,
prev_randao: Hash256::zero(), prev_randao: Hash256::zero(),
suggested_fee_recipient: Address::repeat_byte(0), suggested_fee_recipient: Address::repeat_byte(0),
}), })),
) )
.await; .await;
}, },
@ -1064,16 +1366,16 @@ mod test {
.assert_auth_failure(|client| async move { .assert_auth_failure(|client| async move {
client client
.forkchoice_updated_v1( .forkchoice_updated_v1(
ForkChoiceState { ForkchoiceState {
head_block_hash: ExecutionBlockHash::repeat_byte(1), head_block_hash: ExecutionBlockHash::repeat_byte(1),
safe_block_hash: ExecutionBlockHash::repeat_byte(1), safe_block_hash: ExecutionBlockHash::repeat_byte(1),
finalized_block_hash: ExecutionBlockHash::zero(), finalized_block_hash: ExecutionBlockHash::zero(),
}, },
Some(PayloadAttributes { Some(PayloadAttributes::V1(PayloadAttributesV1 {
timestamp: 5, timestamp: 5,
prev_randao: Hash256::zero(), prev_randao: Hash256::zero(),
suggested_fee_recipient: Address::repeat_byte(0), suggested_fee_recipient: Address::repeat_byte(0),
}), })),
) )
.await .await
}) })
@ -1109,22 +1411,24 @@ mod test {
.assert_request_equals( .assert_request_equals(
|client| async move { |client| async move {
let _ = client let _ = client
.new_payload_v1::<MainnetEthSpec>(ExecutionPayload { .new_payload_v1::<MainnetEthSpec>(ExecutionPayload::Merge(
parent_hash: ExecutionBlockHash::repeat_byte(0), ExecutionPayloadMerge {
fee_recipient: Address::repeat_byte(1), parent_hash: ExecutionBlockHash::repeat_byte(0),
state_root: Hash256::repeat_byte(1), fee_recipient: Address::repeat_byte(1),
receipts_root: Hash256::repeat_byte(0), state_root: Hash256::repeat_byte(1),
logs_bloom: vec![1; 256].into(), receipts_root: Hash256::repeat_byte(0),
prev_randao: Hash256::repeat_byte(1), logs_bloom: vec![1; 256].into(),
block_number: 0, prev_randao: Hash256::repeat_byte(1),
gas_limit: 1, block_number: 0,
gas_used: 2, gas_limit: 1,
timestamp: 42, gas_used: 2,
extra_data: vec![].into(), timestamp: 42,
base_fee_per_gas: Uint256::from(1), extra_data: vec![].into(),
block_hash: ExecutionBlockHash::repeat_byte(1), base_fee_per_gas: Uint256::from(1),
transactions: vec![].into(), block_hash: ExecutionBlockHash::repeat_byte(1),
}) transactions: vec![].into(),
},
))
.await; .await;
}, },
json!({ json!({
@ -1154,22 +1458,24 @@ mod test {
Tester::new(false) Tester::new(false)
.assert_auth_failure(|client| async move { .assert_auth_failure(|client| async move {
client client
.new_payload_v1::<MainnetEthSpec>(ExecutionPayload { .new_payload_v1::<MainnetEthSpec>(ExecutionPayload::Merge(
parent_hash: ExecutionBlockHash::repeat_byte(0), ExecutionPayloadMerge {
fee_recipient: Address::repeat_byte(1), parent_hash: ExecutionBlockHash::repeat_byte(0),
state_root: Hash256::repeat_byte(1), fee_recipient: Address::repeat_byte(1),
receipts_root: Hash256::repeat_byte(0), state_root: Hash256::repeat_byte(1),
logs_bloom: vec![1; 256].into(), receipts_root: Hash256::repeat_byte(0),
prev_randao: Hash256::repeat_byte(1), logs_bloom: vec![1; 256].into(),
block_number: 0, prev_randao: Hash256::repeat_byte(1),
gas_limit: 1, block_number: 0,
gas_used: 2, gas_limit: 1,
timestamp: 42, gas_used: 2,
extra_data: vec![].into(), timestamp: 42,
base_fee_per_gas: Uint256::from(1), extra_data: vec![].into(),
block_hash: ExecutionBlockHash::repeat_byte(1), base_fee_per_gas: Uint256::from(1),
transactions: vec![].into(), block_hash: ExecutionBlockHash::repeat_byte(1),
}) transactions: vec![].into(),
},
))
.await .await
}) })
.await; .await;
@ -1182,7 +1488,7 @@ mod test {
|client| async move { |client| async move {
let _ = client let _ = client
.forkchoice_updated_v1( .forkchoice_updated_v1(
ForkChoiceState { ForkchoiceState {
head_block_hash: ExecutionBlockHash::repeat_byte(0), head_block_hash: ExecutionBlockHash::repeat_byte(0),
safe_block_hash: ExecutionBlockHash::repeat_byte(0), safe_block_hash: ExecutionBlockHash::repeat_byte(0),
finalized_block_hash: ExecutionBlockHash::repeat_byte(1), finalized_block_hash: ExecutionBlockHash::repeat_byte(1),
@ -1208,7 +1514,7 @@ mod test {
.assert_auth_failure(|client| async move { .assert_auth_failure(|client| async move {
client client
.forkchoice_updated_v1( .forkchoice_updated_v1(
ForkChoiceState { ForkchoiceState {
head_block_hash: ExecutionBlockHash::repeat_byte(0), head_block_hash: ExecutionBlockHash::repeat_byte(0),
safe_block_hash: ExecutionBlockHash::repeat_byte(0), safe_block_hash: ExecutionBlockHash::repeat_byte(0),
finalized_block_hash: ExecutionBlockHash::repeat_byte(1), finalized_block_hash: ExecutionBlockHash::repeat_byte(1),
@ -1247,16 +1553,16 @@ mod test {
|client| async move { |client| async move {
let _ = client let _ = client
.forkchoice_updated_v1( .forkchoice_updated_v1(
ForkChoiceState { ForkchoiceState {
head_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), head_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(),
safe_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), safe_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(),
finalized_block_hash: ExecutionBlockHash::zero(), finalized_block_hash: ExecutionBlockHash::zero(),
}, },
Some(PayloadAttributes { Some(PayloadAttributes::V1(PayloadAttributesV1 {
timestamp: 5, timestamp: 5,
prev_randao: Hash256::zero(), prev_randao: Hash256::zero(),
suggested_fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), suggested_fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(),
}) }))
) )
.await; .await;
}, },
@ -1294,16 +1600,16 @@ mod test {
|client| async move { |client| async move {
let response = client let response = client
.forkchoice_updated_v1( .forkchoice_updated_v1(
ForkChoiceState { ForkchoiceState {
head_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), head_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(),
safe_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), safe_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(),
finalized_block_hash: ExecutionBlockHash::zero(), finalized_block_hash: ExecutionBlockHash::zero(),
}, },
Some(PayloadAttributes { Some(PayloadAttributes::V1(PayloadAttributesV1 {
timestamp: 5, timestamp: 5,
prev_randao: Hash256::zero(), prev_randao: Hash256::zero(),
suggested_fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), suggested_fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(),
}) }))
) )
.await .await
.unwrap(); .unwrap();
@ -1357,12 +1663,13 @@ mod test {
} }
})], })],
|client| async move { |client| async move {
let payload = client let payload: ExecutionPayload<_> = client
.get_payload_v1::<MainnetEthSpec>(str_to_payload_id("0xa247243752eb10b4")) .get_payload_v1::<MainnetEthSpec>(str_to_payload_id("0xa247243752eb10b4"))
.await .await
.unwrap(); .unwrap()
.into();
let expected = ExecutionPayload { let expected = ExecutionPayload::Merge(ExecutionPayloadMerge {
parent_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), parent_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(),
fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(),
state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(), state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(),
@ -1377,7 +1684,7 @@ mod test {
base_fee_per_gas: Uint256::from(7), base_fee_per_gas: Uint256::from(7),
block_hash: ExecutionBlockHash::from_str("0x6359b8381a370e2f54072a5784ddd78b6ed024991558c511d4452eb4f6ac898c").unwrap(), block_hash: ExecutionBlockHash::from_str("0x6359b8381a370e2f54072a5784ddd78b6ed024991558c511d4452eb4f6ac898c").unwrap(),
transactions: vec![].into(), transactions: vec![].into(),
}; });
assert_eq!(payload, expected); assert_eq!(payload, expected);
}, },
@ -1387,7 +1694,7 @@ mod test {
// engine_newPayloadV1 REQUEST validation // engine_newPayloadV1 REQUEST validation
|client| async move { |client| async move {
let _ = client let _ = client
.new_payload_v1::<MainnetEthSpec>(ExecutionPayload { .new_payload_v1::<MainnetEthSpec>(ExecutionPayload::Merge(ExecutionPayloadMerge{
parent_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), parent_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(),
fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(),
state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(), state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(),
@ -1402,7 +1709,7 @@ mod test {
base_fee_per_gas: Uint256::from(7), base_fee_per_gas: Uint256::from(7),
block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(),
transactions: vec![].into(), transactions: vec![].into(),
}) }))
.await; .await;
}, },
json!({ json!({
@ -1441,7 +1748,7 @@ mod test {
})], })],
|client| async move { |client| async move {
let response = client let response = client
.new_payload_v1::<MainnetEthSpec>(ExecutionPayload::default()) .new_payload_v1::<MainnetEthSpec>(ExecutionPayload::Merge(ExecutionPayloadMerge::default()))
.await .await
.unwrap(); .unwrap();
@ -1460,7 +1767,7 @@ mod test {
|client| async move { |client| async move {
let _ = client let _ = client
.forkchoice_updated_v1( .forkchoice_updated_v1(
ForkChoiceState { ForkchoiceState {
head_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), head_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(),
safe_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), safe_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(),
finalized_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), finalized_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(),
@ -1499,7 +1806,7 @@ mod test {
|client| async move { |client| async move {
let response = client let response = client
.forkchoice_updated_v1( .forkchoice_updated_v1(
ForkChoiceState { ForkchoiceState {
head_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), head_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(),
safe_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), safe_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(),
finalized_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), finalized_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(),

View File

@ -1,7 +1,14 @@
use super::*; use super::*;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use strum::EnumString; use strum::EnumString;
use types::{EthSpec, ExecutionBlockHash, FixedVector, Transaction, Unsigned, VariableList}; use superstruct::superstruct;
use types::{
Blob, EthSpec, ExecutionBlockHash, FixedVector, KzgCommitment, Transaction, Unsigned,
VariableList, Withdrawal,
};
use types::{
ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge,
};
#[derive(Debug, PartialEq, Serialize, Deserialize)] #[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
@ -56,9 +63,18 @@ pub struct JsonPayloadIdResponse {
pub payload_id: PayloadId, pub payload_id: PayloadId,
} }
#[derive(Debug, PartialEq, Default, Serialize, Deserialize)] #[superstruct(
#[serde(bound = "T: EthSpec", rename_all = "camelCase")] variants(V1, V2, V3),
pub struct JsonExecutionPayloadHeaderV1<T: EthSpec> { variant_attributes(
derive(Debug, PartialEq, Default, Serialize, Deserialize,),
serde(bound = "T: EthSpec", rename_all = "camelCase"),
),
cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"),
partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant")
)]
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(bound = "T: EthSpec", rename_all = "camelCase", untagged)]
pub struct JsonExecutionPayload<T: EthSpec> {
pub parent_hash: ExecutionBlockHash, pub parent_hash: ExecutionBlockHash,
pub fee_recipient: Address, pub fee_recipient: Address,
pub state_root: Hash256, pub state_root: Hash256,
@ -78,210 +94,342 @@ pub struct JsonExecutionPayloadHeaderV1<T: EthSpec> {
pub extra_data: VariableList<u8, T::MaxExtraDataBytes>, pub extra_data: VariableList<u8, T::MaxExtraDataBytes>,
#[serde(with = "eth2_serde_utils::u256_hex_be")] #[serde(with = "eth2_serde_utils::u256_hex_be")]
pub base_fee_per_gas: Uint256, pub base_fee_per_gas: Uint256,
pub block_hash: ExecutionBlockHash, #[superstruct(only(V3))]
pub transactions_root: Hash256,
}
impl<T: EthSpec> From<JsonExecutionPayloadHeaderV1<T>> for ExecutionPayloadHeader<T> {
fn from(e: JsonExecutionPayloadHeaderV1<T>) -> Self {
// Use this verbose deconstruction pattern to ensure no field is left unused.
let JsonExecutionPayloadHeaderV1 {
parent_hash,
fee_recipient,
state_root,
receipts_root,
logs_bloom,
prev_randao,
block_number,
gas_limit,
gas_used,
timestamp,
extra_data,
base_fee_per_gas,
block_hash,
transactions_root,
} = e;
Self {
parent_hash,
fee_recipient,
state_root,
receipts_root,
logs_bloom,
prev_randao,
block_number,
gas_limit,
gas_used,
timestamp,
extra_data,
base_fee_per_gas,
block_hash,
transactions_root,
}
}
}
#[derive(Debug, PartialEq, Default, Serialize, Deserialize)]
#[serde(bound = "T: EthSpec", rename_all = "camelCase")]
pub struct JsonExecutionPayloadV1<T: EthSpec> {
pub parent_hash: ExecutionBlockHash,
pub fee_recipient: Address,
pub state_root: Hash256,
pub receipts_root: Hash256,
#[serde(with = "serde_logs_bloom")]
pub logs_bloom: FixedVector<u8, T::BytesPerLogsBloom>,
pub prev_randao: Hash256,
#[serde(with = "eth2_serde_utils::u64_hex_be")]
pub block_number: u64,
#[serde(with = "eth2_serde_utils::u64_hex_be")]
pub gas_limit: u64,
#[serde(with = "eth2_serde_utils::u64_hex_be")]
pub gas_used: u64,
#[serde(with = "eth2_serde_utils::u64_hex_be")]
pub timestamp: u64,
#[serde(with = "ssz_types::serde_utils::hex_var_list")]
pub extra_data: VariableList<u8, T::MaxExtraDataBytes>,
#[serde(with = "eth2_serde_utils::u256_hex_be")] #[serde(with = "eth2_serde_utils::u256_hex_be")]
pub base_fee_per_gas: Uint256, pub excess_data_gas: Uint256,
pub block_hash: ExecutionBlockHash, pub block_hash: ExecutionBlockHash,
#[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")]
pub transactions: pub transactions:
VariableList<Transaction<T::MaxBytesPerTransaction>, T::MaxTransactionsPerPayload>, VariableList<Transaction<T::MaxBytesPerTransaction>, T::MaxTransactionsPerPayload>,
#[superstruct(only(V2, V3))]
pub withdrawals: VariableList<JsonWithdrawal, T::MaxWithdrawalsPerPayload>,
} }
impl<T: EthSpec> From<ExecutionPayload<T>> for JsonExecutionPayloadV1<T> { impl<T: EthSpec> From<ExecutionPayloadMerge<T>> for JsonExecutionPayloadV1<T> {
fn from(e: ExecutionPayload<T>) -> Self { fn from(payload: ExecutionPayloadMerge<T>) -> Self {
// Use this verbose deconstruction pattern to ensure no field is left unused. JsonExecutionPayloadV1 {
let ExecutionPayload { parent_hash: payload.parent_hash,
parent_hash, fee_recipient: payload.fee_recipient,
fee_recipient, state_root: payload.state_root,
state_root, receipts_root: payload.receipts_root,
receipts_root, logs_bloom: payload.logs_bloom,
logs_bloom, prev_randao: payload.prev_randao,
prev_randao, block_number: payload.block_number,
block_number, gas_limit: payload.gas_limit,
gas_limit, gas_used: payload.gas_used,
gas_used, timestamp: payload.timestamp,
timestamp, extra_data: payload.extra_data,
extra_data, base_fee_per_gas: payload.base_fee_per_gas,
base_fee_per_gas, block_hash: payload.block_hash,
block_hash, transactions: payload.transactions,
transactions, }
} = e; }
}
Self { impl<T: EthSpec> From<ExecutionPayloadCapella<T>> for JsonExecutionPayloadV2<T> {
parent_hash, fn from(payload: ExecutionPayloadCapella<T>) -> Self {
fee_recipient, JsonExecutionPayloadV2 {
state_root, parent_hash: payload.parent_hash,
receipts_root, fee_recipient: payload.fee_recipient,
logs_bloom, state_root: payload.state_root,
prev_randao, receipts_root: payload.receipts_root,
block_number, logs_bloom: payload.logs_bloom,
gas_limit, prev_randao: payload.prev_randao,
gas_used, block_number: payload.block_number,
timestamp, gas_limit: payload.gas_limit,
extra_data, gas_used: payload.gas_used,
base_fee_per_gas, timestamp: payload.timestamp,
block_hash, extra_data: payload.extra_data,
transactions, base_fee_per_gas: payload.base_fee_per_gas,
block_hash: payload.block_hash,
transactions: payload.transactions,
withdrawals: payload
.withdrawals
.into_iter()
.map(Into::into)
.collect::<Vec<_>>()
.into(),
}
}
}
impl<T: EthSpec> From<ExecutionPayloadEip4844<T>> for JsonExecutionPayloadV3<T> {
fn from(payload: ExecutionPayloadEip4844<T>) -> Self {
JsonExecutionPayloadV3 {
parent_hash: payload.parent_hash,
fee_recipient: payload.fee_recipient,
state_root: payload.state_root,
receipts_root: payload.receipts_root,
logs_bloom: payload.logs_bloom,
prev_randao: payload.prev_randao,
block_number: payload.block_number,
gas_limit: payload.gas_limit,
gas_used: payload.gas_used,
timestamp: payload.timestamp,
extra_data: payload.extra_data,
base_fee_per_gas: payload.base_fee_per_gas,
excess_data_gas: payload.excess_data_gas,
block_hash: payload.block_hash,
transactions: payload.transactions,
withdrawals: payload
.withdrawals
.into_iter()
.map(Into::into)
.collect::<Vec<_>>()
.into(),
} }
} }
} }
impl<T: EthSpec> From<JsonExecutionPayloadV1<T>> for ExecutionPayload<T> { impl<T: EthSpec> From<ExecutionPayload<T>> for JsonExecutionPayload<T> {
fn from(e: JsonExecutionPayloadV1<T>) -> Self { fn from(execution_payload: ExecutionPayload<T>) -> Self {
// Use this verbose deconstruction pattern to ensure no field is left unused. match execution_payload {
let JsonExecutionPayloadV1 { ExecutionPayload::Merge(payload) => JsonExecutionPayload::V1(payload.into()),
parent_hash, ExecutionPayload::Capella(payload) => JsonExecutionPayload::V2(payload.into()),
fee_recipient, ExecutionPayload::Eip4844(payload) => JsonExecutionPayload::V3(payload.into()),
state_root, }
receipts_root, }
logs_bloom, }
prev_randao,
block_number,
gas_limit,
gas_used,
timestamp,
extra_data,
base_fee_per_gas,
block_hash,
transactions,
} = e;
Self { impl<T: EthSpec> From<JsonExecutionPayloadV1<T>> for ExecutionPayloadMerge<T> {
parent_hash, fn from(payload: JsonExecutionPayloadV1<T>) -> Self {
fee_recipient, ExecutionPayloadMerge {
state_root, parent_hash: payload.parent_hash,
receipts_root, fee_recipient: payload.fee_recipient,
logs_bloom, state_root: payload.state_root,
prev_randao, receipts_root: payload.receipts_root,
block_number, logs_bloom: payload.logs_bloom,
gas_limit, prev_randao: payload.prev_randao,
gas_used, block_number: payload.block_number,
timestamp, gas_limit: payload.gas_limit,
extra_data, gas_used: payload.gas_used,
base_fee_per_gas, timestamp: payload.timestamp,
block_hash, extra_data: payload.extra_data,
transactions, base_fee_per_gas: payload.base_fee_per_gas,
block_hash: payload.block_hash,
transactions: payload.transactions,
}
}
}
impl<T: EthSpec> From<JsonExecutionPayloadV2<T>> for ExecutionPayloadCapella<T> {
fn from(payload: JsonExecutionPayloadV2<T>) -> Self {
ExecutionPayloadCapella {
parent_hash: payload.parent_hash,
fee_recipient: payload.fee_recipient,
state_root: payload.state_root,
receipts_root: payload.receipts_root,
logs_bloom: payload.logs_bloom,
prev_randao: payload.prev_randao,
block_number: payload.block_number,
gas_limit: payload.gas_limit,
gas_used: payload.gas_used,
timestamp: payload.timestamp,
extra_data: payload.extra_data,
base_fee_per_gas: payload.base_fee_per_gas,
block_hash: payload.block_hash,
transactions: payload.transactions,
withdrawals: payload
.withdrawals
.into_iter()
.map(Into::into)
.collect::<Vec<_>>()
.into(),
}
}
}
impl<T: EthSpec> From<JsonExecutionPayloadV3<T>> for ExecutionPayloadEip4844<T> {
fn from(payload: JsonExecutionPayloadV3<T>) -> Self {
ExecutionPayloadEip4844 {
parent_hash: payload.parent_hash,
fee_recipient: payload.fee_recipient,
state_root: payload.state_root,
receipts_root: payload.receipts_root,
logs_bloom: payload.logs_bloom,
prev_randao: payload.prev_randao,
block_number: payload.block_number,
gas_limit: payload.gas_limit,
gas_used: payload.gas_used,
timestamp: payload.timestamp,
extra_data: payload.extra_data,
base_fee_per_gas: payload.base_fee_per_gas,
excess_data_gas: payload.excess_data_gas,
block_hash: payload.block_hash,
transactions: payload.transactions,
withdrawals: payload
.withdrawals
.into_iter()
.map(Into::into)
.collect::<Vec<_>>()
.into(),
}
}
}
impl<T: EthSpec> From<JsonExecutionPayload<T>> for ExecutionPayload<T> {
fn from(json_execution_payload: JsonExecutionPayload<T>) -> Self {
match json_execution_payload {
JsonExecutionPayload::V1(payload) => ExecutionPayload::Merge(payload.into()),
JsonExecutionPayload::V2(payload) => ExecutionPayload::Capella(payload.into()),
JsonExecutionPayload::V3(payload) => ExecutionPayload::Eip4844(payload.into()),
}
}
}
#[superstruct(
variants(V1, V2, V3),
variant_attributes(
derive(Debug, PartialEq, Serialize, Deserialize),
serde(bound = "T: EthSpec", rename_all = "camelCase")
),
cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"),
partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant")
)]
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(untagged)]
pub struct JsonGetPayloadResponse<T: EthSpec> {
#[superstruct(only(V1), partial_getter(rename = "execution_payload_v1"))]
pub execution_payload: JsonExecutionPayloadV1<T>,
#[superstruct(only(V2), partial_getter(rename = "execution_payload_v2"))]
pub execution_payload: JsonExecutionPayloadV2<T>,
#[superstruct(only(V3), partial_getter(rename = "execution_payload_v3"))]
pub execution_payload: JsonExecutionPayloadV3<T>,
#[serde(with = "eth2_serde_utils::u256_hex_be")]
pub block_value: Uint256,
}
impl<T: EthSpec> From<JsonGetPayloadResponse<T>> for GetPayloadResponse<T> {
fn from(json_get_payload_response: JsonGetPayloadResponse<T>) -> Self {
match json_get_payload_response {
JsonGetPayloadResponse::V1(response) => {
GetPayloadResponse::Merge(GetPayloadResponseMerge {
execution_payload: response.execution_payload.into(),
block_value: response.block_value,
})
}
JsonGetPayloadResponse::V2(response) => {
GetPayloadResponse::Capella(GetPayloadResponseCapella {
execution_payload: response.execution_payload.into(),
block_value: response.block_value,
})
}
JsonGetPayloadResponse::V3(response) => {
GetPayloadResponse::Eip4844(GetPayloadResponseEip4844 {
execution_payload: response.execution_payload.into(),
block_value: response.block_value,
})
}
} }
} }
} }
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub struct JsonPayloadAttributesV1 { pub struct JsonWithdrawal {
#[serde(with = "eth2_serde_utils::u64_hex_be")]
pub index: u64,
#[serde(with = "eth2_serde_utils::u64_hex_be")]
pub validator_index: u64,
pub address: Address,
#[serde(with = "eth2_serde_utils::u64_hex_be")]
pub amount: u64,
}
impl From<Withdrawal> for JsonWithdrawal {
fn from(withdrawal: Withdrawal) -> Self {
Self {
index: withdrawal.index,
validator_index: withdrawal.validator_index,
address: withdrawal.address,
amount: withdrawal.amount,
}
}
}
impl From<JsonWithdrawal> for Withdrawal {
fn from(jw: JsonWithdrawal) -> Self {
Self {
index: jw.index,
validator_index: jw.validator_index,
address: jw.address,
amount: jw.amount,
}
}
}
#[superstruct(
variants(V1, V2),
variant_attributes(
derive(Debug, Clone, PartialEq, Serialize, Deserialize),
serde(rename_all = "camelCase")
),
cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"),
partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant")
)]
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(untagged)]
pub struct JsonPayloadAttributes {
#[serde(with = "eth2_serde_utils::u64_hex_be")] #[serde(with = "eth2_serde_utils::u64_hex_be")]
pub timestamp: u64, pub timestamp: u64,
pub prev_randao: Hash256, pub prev_randao: Hash256,
pub suggested_fee_recipient: Address, pub suggested_fee_recipient: Address,
#[superstruct(only(V2))]
pub withdrawals: Vec<JsonWithdrawal>,
} }
impl From<PayloadAttributes> for JsonPayloadAttributesV1 { impl From<PayloadAttributes> for JsonPayloadAttributes {
fn from(p: PayloadAttributes) -> Self { fn from(payload_atributes: PayloadAttributes) -> Self {
// Use this verbose deconstruction pattern to ensure no field is left unused. match payload_atributes {
let PayloadAttributes { PayloadAttributes::V1(pa) => Self::V1(JsonPayloadAttributesV1 {
timestamp, timestamp: pa.timestamp,
prev_randao, prev_randao: pa.prev_randao,
suggested_fee_recipient, suggested_fee_recipient: pa.suggested_fee_recipient,
} = p; }),
PayloadAttributes::V2(pa) => Self::V2(JsonPayloadAttributesV2 {
Self { timestamp: pa.timestamp,
timestamp, prev_randao: pa.prev_randao,
prev_randao, suggested_fee_recipient: pa.suggested_fee_recipient,
suggested_fee_recipient, withdrawals: pa.withdrawals.into_iter().map(Into::into).collect(),
}),
} }
} }
} }
impl From<JsonPayloadAttributesV1> for PayloadAttributes { impl From<JsonPayloadAttributes> for PayloadAttributes {
fn from(j: JsonPayloadAttributesV1) -> Self { fn from(json_payload_attributes: JsonPayloadAttributes) -> Self {
// Use this verbose deconstruction pattern to ensure no field is left unused. match json_payload_attributes {
let JsonPayloadAttributesV1 { JsonPayloadAttributes::V1(jpa) => Self::V1(PayloadAttributesV1 {
timestamp, timestamp: jpa.timestamp,
prev_randao, prev_randao: jpa.prev_randao,
suggested_fee_recipient, suggested_fee_recipient: jpa.suggested_fee_recipient,
} = j; }),
JsonPayloadAttributes::V2(jpa) => Self::V2(PayloadAttributesV2 {
Self { timestamp: jpa.timestamp,
timestamp, prev_randao: jpa.prev_randao,
prev_randao, suggested_fee_recipient: jpa.suggested_fee_recipient,
suggested_fee_recipient, withdrawals: jpa.withdrawals.into_iter().map(Into::into).collect(),
}),
} }
} }
} }
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(bound = "T: EthSpec", rename_all = "camelCase")]
pub struct JsonBlobBundles<T: EthSpec> {
pub block_hash: ExecutionBlockHash,
pub kzgs: Vec<KzgCommitment>,
pub blobs: Vec<Blob<T>>,
}
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub struct JsonForkChoiceStateV1 { pub struct JsonForkchoiceStateV1 {
pub head_block_hash: ExecutionBlockHash, pub head_block_hash: ExecutionBlockHash,
pub safe_block_hash: ExecutionBlockHash, pub safe_block_hash: ExecutionBlockHash,
pub finalized_block_hash: ExecutionBlockHash, pub finalized_block_hash: ExecutionBlockHash,
} }
impl From<ForkChoiceState> for JsonForkChoiceStateV1 { impl From<ForkchoiceState> for JsonForkchoiceStateV1 {
fn from(f: ForkChoiceState) -> Self { fn from(f: ForkchoiceState) -> Self {
// Use this verbose deconstruction pattern to ensure no field is left unused. // Use this verbose deconstruction pattern to ensure no field is left unused.
let ForkChoiceState { let ForkchoiceState {
head_block_hash, head_block_hash,
safe_block_hash, safe_block_hash,
finalized_block_hash, finalized_block_hash,
@ -295,10 +443,10 @@ impl From<ForkChoiceState> for JsonForkChoiceStateV1 {
} }
} }
impl From<JsonForkChoiceStateV1> for ForkChoiceState { impl From<JsonForkchoiceStateV1> for ForkchoiceState {
fn from(j: JsonForkChoiceStateV1) -> Self { fn from(j: JsonForkchoiceStateV1) -> Self {
// Use this verbose deconstruction pattern to ensure no field is left unused. // Use this verbose deconstruction pattern to ensure no field is left unused.
let JsonForkChoiceStateV1 { let JsonForkchoiceStateV1 {
head_block_hash, head_block_hash,
safe_block_hash, safe_block_hash,
finalized_block_hash, finalized_block_hash,

View File

@ -1,22 +1,25 @@
//! Provides generic behaviour for multiple execution engines, specifically fallback behaviour. //! Provides generic behaviour for multiple execution engines, specifically fallback behaviour.
use crate::engine_api::{ use crate::engine_api::{
Error as EngineApiError, ForkchoiceUpdatedResponse, PayloadAttributes, PayloadId, EngineCapabilities, Error as EngineApiError, ForkchoiceUpdatedResponse, PayloadAttributes,
PayloadId,
}; };
use crate::HttpJsonRpc; use crate::HttpJsonRpc;
use lru::LruCache; use lru::LruCache;
use slog::{debug, error, info, warn, Logger}; use slog::{debug, error, info, warn, Logger};
use std::future::Future; use std::future::Future;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration;
use task_executor::TaskExecutor; use task_executor::TaskExecutor;
use tokio::sync::{watch, Mutex, RwLock}; use tokio::sync::{watch, Mutex, RwLock};
use tokio_stream::wrappers::WatchStream; use tokio_stream::wrappers::WatchStream;
use types::{Address, ExecutionBlockHash, Hash256}; use types::ExecutionBlockHash;
/// The number of payload IDs that will be stored for each `Engine`. /// The number of payload IDs that will be stored for each `Engine`.
/// ///
/// Since the size of each value is small (~100 bytes) a large number is used for safety. /// Since the size of each value is small (~800 bytes) a large number is used for safety.
const PAYLOAD_ID_LRU_CACHE_SIZE: usize = 512; const PAYLOAD_ID_LRU_CACHE_SIZE: usize = 512;
const CACHED_ENGINE_CAPABILITIES_AGE_LIMIT: Duration = Duration::from_secs(900); // 15 minutes
/// Stores the remembered state of a engine. /// Stores the remembered state of a engine.
#[derive(Copy, Clone, PartialEq, Debug, Eq, Default)] #[derive(Copy, Clone, PartialEq, Debug, Eq, Default)]
@ -28,6 +31,14 @@ enum EngineStateInternal {
AuthFailed, AuthFailed,
} }
#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
enum CapabilitiesCacheAction {
#[default]
None,
Update,
Clear,
}
/// A subset of the engine state to inform other services if the engine is online or offline. /// A subset of the engine state to inform other services if the engine is online or offline.
#[derive(Debug, Clone, PartialEq, Eq, Copy)] #[derive(Debug, Clone, PartialEq, Eq, Copy)]
pub enum EngineState { pub enum EngineState {
@ -88,7 +99,7 @@ impl State {
} }
#[derive(Copy, Clone, PartialEq, Debug)] #[derive(Copy, Clone, PartialEq, Debug)]
pub struct ForkChoiceState { pub struct ForkchoiceState {
pub head_block_hash: ExecutionBlockHash, pub head_block_hash: ExecutionBlockHash,
pub safe_block_hash: ExecutionBlockHash, pub safe_block_hash: ExecutionBlockHash,
pub finalized_block_hash: ExecutionBlockHash, pub finalized_block_hash: ExecutionBlockHash,
@ -97,9 +108,7 @@ pub struct ForkChoiceState {
#[derive(Hash, PartialEq, std::cmp::Eq)] #[derive(Hash, PartialEq, std::cmp::Eq)]
struct PayloadIdCacheKey { struct PayloadIdCacheKey {
pub head_block_hash: ExecutionBlockHash, pub head_block_hash: ExecutionBlockHash,
pub timestamp: u64, pub payload_attributes: PayloadAttributes,
pub prev_randao: Hash256,
pub suggested_fee_recipient: Address,
} }
#[derive(Debug)] #[derive(Debug)]
@ -115,7 +124,7 @@ pub struct Engine {
pub api: HttpJsonRpc, pub api: HttpJsonRpc,
payload_id_cache: Mutex<LruCache<PayloadIdCacheKey, PayloadId>>, payload_id_cache: Mutex<LruCache<PayloadIdCacheKey, PayloadId>>,
state: RwLock<State>, state: RwLock<State>,
latest_forkchoice_state: RwLock<Option<ForkChoiceState>>, latest_forkchoice_state: RwLock<Option<ForkchoiceState>>,
executor: TaskExecutor, executor: TaskExecutor,
log: Logger, log: Logger,
} }
@ -142,37 +151,30 @@ impl Engine {
pub async fn get_payload_id( pub async fn get_payload_id(
&self, &self,
head_block_hash: ExecutionBlockHash, head_block_hash: &ExecutionBlockHash,
timestamp: u64, payload_attributes: &PayloadAttributes,
prev_randao: Hash256,
suggested_fee_recipient: Address,
) -> Option<PayloadId> { ) -> Option<PayloadId> {
self.payload_id_cache self.payload_id_cache
.lock() .lock()
.await .await
.get(&PayloadIdCacheKey { .get(&PayloadIdCacheKey::new(head_block_hash, payload_attributes))
head_block_hash,
timestamp,
prev_randao,
suggested_fee_recipient,
})
.cloned() .cloned()
} }
pub async fn notify_forkchoice_updated( pub async fn notify_forkchoice_updated(
&self, &self,
forkchoice_state: ForkChoiceState, forkchoice_state: ForkchoiceState,
payload_attributes: Option<PayloadAttributes>, payload_attributes: Option<PayloadAttributes>,
log: &Logger, log: &Logger,
) -> Result<ForkchoiceUpdatedResponse, EngineApiError> { ) -> Result<ForkchoiceUpdatedResponse, EngineApiError> {
let response = self let response = self
.api .api
.forkchoice_updated_v1(forkchoice_state, payload_attributes) .forkchoice_updated(forkchoice_state, payload_attributes.clone())
.await?; .await?;
if let Some(payload_id) = response.payload_id { if let Some(payload_id) = response.payload_id {
if let Some(key) = if let Some(key) = payload_attributes
payload_attributes.map(|pa| PayloadIdCacheKey::new(&forkchoice_state, &pa)) .map(|pa| PayloadIdCacheKey::new(&forkchoice_state.head_block_hash, &pa))
{ {
self.payload_id_cache.lock().await.put(key, payload_id); self.payload_id_cache.lock().await.put(key, payload_id);
} else { } else {
@ -187,11 +189,11 @@ impl Engine {
Ok(response) Ok(response)
} }
async fn get_latest_forkchoice_state(&self) -> Option<ForkChoiceState> { async fn get_latest_forkchoice_state(&self) -> Option<ForkchoiceState> {
*self.latest_forkchoice_state.read().await *self.latest_forkchoice_state.read().await
} }
pub async fn set_latest_forkchoice_state(&self, state: ForkChoiceState) { pub async fn set_latest_forkchoice_state(&self, state: ForkchoiceState) {
*self.latest_forkchoice_state.write().await = Some(state); *self.latest_forkchoice_state.write().await = Some(state);
} }
@ -216,7 +218,7 @@ impl Engine {
// For simplicity, payload attributes are never included in this call. It may be // For simplicity, payload attributes are never included in this call. It may be
// reasonable to include them in the future. // reasonable to include them in the future.
if let Err(e) = self.api.forkchoice_updated_v1(forkchoice_state, None).await { if let Err(e) = self.api.forkchoice_updated(forkchoice_state, None).await {
debug!( debug!(
self.log, self.log,
"Failed to issue latest head to engine"; "Failed to issue latest head to engine";
@ -239,7 +241,7 @@ impl Engine {
/// Run the `EngineApi::upcheck` function if the node's last known state is not synced. This /// Run the `EngineApi::upcheck` function if the node's last known state is not synced. This
/// might be used to recover the node if offline. /// might be used to recover the node if offline.
pub async fn upcheck(&self) { pub async fn upcheck(&self) {
let state: EngineStateInternal = match self.api.upcheck().await { let (state, cache_action) = match self.api.upcheck().await {
Ok(()) => { Ok(()) => {
let mut state = self.state.write().await; let mut state = self.state.write().await;
if **state != EngineStateInternal::Synced { if **state != EngineStateInternal::Synced {
@ -257,12 +259,12 @@ impl Engine {
); );
} }
state.update(EngineStateInternal::Synced); state.update(EngineStateInternal::Synced);
**state (**state, CapabilitiesCacheAction::Update)
} }
Err(EngineApiError::IsSyncing) => { Err(EngineApiError::IsSyncing) => {
let mut state = self.state.write().await; let mut state = self.state.write().await;
state.update(EngineStateInternal::Syncing); state.update(EngineStateInternal::Syncing);
**state (**state, CapabilitiesCacheAction::Update)
} }
Err(EngineApiError::Auth(err)) => { Err(EngineApiError::Auth(err)) => {
error!( error!(
@ -273,7 +275,7 @@ impl Engine {
let mut state = self.state.write().await; let mut state = self.state.write().await;
state.update(EngineStateInternal::AuthFailed); state.update(EngineStateInternal::AuthFailed);
**state (**state, CapabilitiesCacheAction::Clear)
} }
Err(e) => { Err(e) => {
error!( error!(
@ -284,10 +286,30 @@ impl Engine {
let mut state = self.state.write().await; let mut state = self.state.write().await;
state.update(EngineStateInternal::Offline); state.update(EngineStateInternal::Offline);
**state // need to clear the engine capabilities cache if we detect the
// execution engine is offline as it is likely the engine is being
// updated to a newer version with new capabilities
(**state, CapabilitiesCacheAction::Clear)
} }
}; };
// do this after dropping state lock guard to avoid holding two locks at once
match cache_action {
CapabilitiesCacheAction::None => {}
CapabilitiesCacheAction::Update => {
if let Err(e) = self
.get_engine_capabilities(Some(CACHED_ENGINE_CAPABILITIES_AGE_LIMIT))
.await
{
warn!(self.log,
"Error during exchange capabilities";
"error" => ?e,
)
}
}
CapabilitiesCacheAction::Clear => self.api.clear_exchange_capabilties_cache().await,
}
debug!( debug!(
self.log, self.log,
"Execution engine upcheck complete"; "Execution engine upcheck complete";
@ -295,6 +317,22 @@ impl Engine {
); );
} }
/// Returns the execution engine capabilities resulting from a call to
/// engine_exchangeCapabilities. If the capabilities cache is not populated,
/// or if it is populated with a cached result of age >= `age_limit`, this
/// method will fetch the result from the execution engine and populate the
/// cache before returning it. Otherwise it will return a cached result from
/// a previous call.
///
/// Set `age_limit` to `None` to always return the cached result
/// Set `age_limit` to `Some(Duration::ZERO)` to force fetching from EE
pub async fn get_engine_capabilities(
&self,
age_limit: Option<Duration>,
) -> Result<EngineCapabilities, EngineApiError> {
self.api.get_engine_capabilities(age_limit).await
}
/// Run `func` on the node regardless of the node's current state. /// Run `func` on the node regardless of the node's current state.
/// ///
/// ## Note /// ## Note
@ -303,7 +341,7 @@ impl Engine {
/// deadlock. /// deadlock.
pub async fn request<'a, F, G, H>(self: &'a Arc<Self>, func: F) -> Result<H, EngineError> pub async fn request<'a, F, G, H>(self: &'a Arc<Self>, func: F) -> Result<H, EngineError>
where where
F: Fn(&'a Engine) -> G, F: FnOnce(&'a Engine) -> G,
G: Future<Output = Result<H, EngineApiError>>, G: Future<Output = Result<H, EngineApiError>>,
{ {
match func(self).await { match func(self).await {
@ -348,12 +386,10 @@ impl Engine {
} }
impl PayloadIdCacheKey { impl PayloadIdCacheKey {
fn new(state: &ForkChoiceState, attributes: &PayloadAttributes) -> Self { fn new(head_block_hash: &ExecutionBlockHash, attributes: &PayloadAttributes) -> Self {
Self { Self {
head_block_hash: state.head_block_hash, head_block_hash: *head_block_hash,
timestamp: attributes.timestamp, payload_attributes: attributes.clone(),
prev_randao: attributes.prev_randao,
suggested_fee_recipient: attributes.suggested_fee_recipient,
} }
} }
} }

View File

@ -7,12 +7,13 @@
use crate::payload_cache::PayloadCache; use crate::payload_cache::PayloadCache;
use auth::{strip_prefix, Auth, JwtKey}; use auth::{strip_prefix, Auth, JwtKey};
use builder_client::BuilderHttpClient; use builder_client::BuilderHttpClient;
pub use engine_api::EngineCapabilities;
use engine_api::Error as ApiError; use engine_api::Error as ApiError;
pub use engine_api::*; pub use engine_api::*;
pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc};
use engines::{Engine, EngineError}; use engines::{Engine, EngineError};
pub use engines::{EngineState, ForkChoiceState}; pub use engines::{EngineState, ForkchoiceState};
use eth2::types::{builder_bid::SignedBuilderBid, ForkVersionedResponse}; use eth2::types::builder_bid::SignedBuilderBid;
use fork_choice::ForkchoiceUpdateParameters; use fork_choice::ForkchoiceUpdateParameters;
use lru::LruCache; use lru::LruCache;
use payload_status::process_payload_status; use payload_status::process_payload_status;
@ -35,9 +36,13 @@ use tokio::{
time::sleep, time::sleep,
}; };
use tokio_stream::wrappers::WatchStream; use tokio_stream::wrappers::WatchStream;
use tree_hash::TreeHash;
use types::{AbstractExecPayload, BeaconStateError, Blob, ExecPayload, KzgCommitment, Withdrawals};
use types::{ use types::{
BlindedPayload, BlockType, ChainSpec, Epoch, ExecPayload, ExecutionBlockHash, ForkName, BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ExecutionPayload,
ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, Slot, Uint256, ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge, ForkName,
ForkVersionedResponse, ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock,
Slot, Uint256,
}; };
mod block_hash; mod block_hash;
@ -98,6 +103,13 @@ pub enum Error {
transactions_root: Hash256, transactions_root: Hash256,
}, },
InvalidJWTSecret(String), InvalidJWTSecret(String),
BeaconStateError(BeaconStateError),
}
impl From<BeaconStateError> for Error {
fn from(e: BeaconStateError) -> Self {
Error::BeaconStateError(e)
}
} }
impl From<ApiError> for Error { impl From<ApiError> for Error {
@ -106,6 +118,108 @@ impl From<ApiError> for Error {
} }
} }
pub enum BlockProposalContents<T: EthSpec, Payload: AbstractExecPayload<T>> {
Payload {
payload: Payload,
block_value: Uint256,
},
PayloadAndBlobs {
payload: Payload,
block_value: Uint256,
kzg_commitments: Vec<KzgCommitment>,
blobs: Vec<Blob<T>>,
},
}
impl<T: EthSpec, Payload: AbstractExecPayload<T>> BlockProposalContents<T, Payload> {
pub fn payload(&self) -> &Payload {
match self {
Self::Payload {
payload,
block_value: _,
} => payload,
Self::PayloadAndBlobs {
payload,
block_value: _,
kzg_commitments: _,
blobs: _,
} => payload,
}
}
pub fn to_payload(self) -> Payload {
match self {
Self::Payload {
payload,
block_value: _,
} => payload,
Self::PayloadAndBlobs {
payload,
block_value: _,
kzg_commitments: _,
blobs: _,
} => payload,
}
}
pub fn kzg_commitments(&self) -> Option<&[KzgCommitment]> {
match self {
Self::Payload {
payload: _,
block_value: _,
} => None,
Self::PayloadAndBlobs {
payload: _,
block_value: _,
kzg_commitments,
blobs: _,
} => Some(kzg_commitments),
}
}
pub fn blobs(&self) -> Option<&[Blob<T>]> {
match self {
Self::Payload {
payload: _,
block_value: _,
} => None,
Self::PayloadAndBlobs {
payload: _,
block_value: _,
kzg_commitments: _,
blobs,
} => Some(blobs),
}
}
pub fn block_value(&self) -> &Uint256 {
match self {
Self::Payload {
payload: _,
block_value,
} => block_value,
Self::PayloadAndBlobs {
payload: _,
block_value,
kzg_commitments: _,
blobs: _,
} => block_value,
}
}
pub fn default_at_fork(fork_name: ForkName) -> Result<Self, BeaconStateError> {
Ok(match fork_name {
ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => {
BlockProposalContents::Payload {
payload: Payload::default_at_fork(fork_name)?,
block_value: Uint256::zero(),
}
}
ForkName::Eip4844 => BlockProposalContents::PayloadAndBlobs {
payload: Payload::default_at_fork(fork_name)?,
block_value: Uint256::zero(),
blobs: vec![],
kzg_commitments: vec![],
},
})
}
}
#[derive(Clone, PartialEq)] #[derive(Clone, PartialEq)]
pub struct ProposerPreparationDataEntry { pub struct ProposerPreparationDataEntry {
update_epoch: Epoch, update_epoch: Epoch,
@ -290,12 +404,12 @@ impl<T: EthSpec> ExecutionLayer<T> {
&self.inner.builder &self.inner.builder
} }
/// Cache a full payload, keyed on the `tree_hash_root` of its `transactions` field. /// Cache a full payload, keyed on the `tree_hash_root` of the payload
fn cache_payload(&self, payload: &ExecutionPayload<T>) -> Option<ExecutionPayload<T>> { fn cache_payload(&self, payload: ExecutionPayloadRef<T>) -> Option<ExecutionPayload<T>> {
self.inner.payload_cache.put(payload.clone()) self.inner.payload_cache.put(payload.clone_from_ref())
} }
/// Attempt to retrieve a full payload from the payload cache by the `transactions_root`. /// Attempt to retrieve a full payload from the payload cache by the payload root
pub fn get_payload_by_root(&self, root: &Hash256) -> Option<ExecutionPayload<T>> { pub fn get_payload_by_root(&self, root: &Hash256) -> Option<ExecutionPayload<T>> {
self.inner.payload_cache.pop(root) self.inner.payload_cache.pop(root)
} }
@ -566,19 +680,15 @@ impl<T: EthSpec> ExecutionLayer<T> {
/// ///
/// The result will be returned from the first node that returns successfully. No more nodes /// The result will be returned from the first node that returns successfully. No more nodes
/// will be contacted. /// will be contacted.
#[allow(clippy::too_many_arguments)] pub async fn get_payload<Payload: AbstractExecPayload<T>>(
pub async fn get_payload<Payload: ExecPayload<T>>(
&self, &self,
parent_hash: ExecutionBlockHash, parent_hash: ExecutionBlockHash,
timestamp: u64, payload_attributes: &PayloadAttributes,
prev_randao: Hash256,
proposer_index: u64,
forkchoice_update_params: ForkchoiceUpdateParameters, forkchoice_update_params: ForkchoiceUpdateParameters,
builder_params: BuilderParams, builder_params: BuilderParams,
current_fork: ForkName,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<Payload, Error> { ) -> Result<BlockProposalContents<T, Payload>, Error> {
let suggested_fee_recipient = self.get_suggested_fee_recipient(proposer_index).await;
let payload_result = match Payload::block_type() { let payload_result = match Payload::block_type() {
BlockType::Blinded => { BlockType::Blinded => {
let _timer = metrics::start_timer_vec( let _timer = metrics::start_timer_vec(
@ -587,11 +697,10 @@ impl<T: EthSpec> ExecutionLayer<T> {
); );
self.get_blinded_payload( self.get_blinded_payload(
parent_hash, parent_hash,
timestamp, payload_attributes,
prev_randao,
suggested_fee_recipient,
forkchoice_update_params, forkchoice_update_params,
builder_params, builder_params,
current_fork,
spec, spec,
) )
.await .await
@ -603,10 +712,9 @@ impl<T: EthSpec> ExecutionLayer<T> {
); );
self.get_full_payload( self.get_full_payload(
parent_hash, parent_hash,
timestamp, payload_attributes,
prev_randao,
suggested_fee_recipient,
forkchoice_update_params, forkchoice_update_params,
current_fork,
) )
.await .await
.map(ProvenancedPayload::Local) .map(ProvenancedPayload::Local)
@ -615,7 +723,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
// Track some metrics and return the result. // Track some metrics and return the result.
match payload_result { match payload_result {
Ok(ProvenancedPayload::Local(payload)) => { Ok(ProvenancedPayload::Local(block_proposal_contents)) => {
metrics::inc_counter_vec( metrics::inc_counter_vec(
&metrics::EXECUTION_LAYER_GET_PAYLOAD_OUTCOME, &metrics::EXECUTION_LAYER_GET_PAYLOAD_OUTCOME,
&[metrics::SUCCESS], &[metrics::SUCCESS],
@ -624,9 +732,9 @@ impl<T: EthSpec> ExecutionLayer<T> {
&metrics::EXECUTION_LAYER_GET_PAYLOAD_SOURCE, &metrics::EXECUTION_LAYER_GET_PAYLOAD_SOURCE,
&[metrics::LOCAL], &[metrics::LOCAL],
); );
Ok(payload) Ok(block_proposal_contents)
} }
Ok(ProvenancedPayload::Builder(payload)) => { Ok(ProvenancedPayload::Builder(block_proposal_contents)) => {
metrics::inc_counter_vec( metrics::inc_counter_vec(
&metrics::EXECUTION_LAYER_GET_PAYLOAD_OUTCOME, &metrics::EXECUTION_LAYER_GET_PAYLOAD_OUTCOME,
&[metrics::SUCCESS], &[metrics::SUCCESS],
@ -635,7 +743,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
&metrics::EXECUTION_LAYER_GET_PAYLOAD_SOURCE, &metrics::EXECUTION_LAYER_GET_PAYLOAD_SOURCE,
&[metrics::BUILDER], &[metrics::BUILDER],
); );
Ok(payload) Ok(block_proposal_contents)
} }
Err(e) => { Err(e) => {
metrics::inc_counter_vec( metrics::inc_counter_vec(
@ -647,17 +755,15 @@ impl<T: EthSpec> ExecutionLayer<T> {
} }
} }
#[allow(clippy::too_many_arguments)] async fn get_blinded_payload<Payload: AbstractExecPayload<T>>(
async fn get_blinded_payload<Payload: ExecPayload<T>>(
&self, &self,
parent_hash: ExecutionBlockHash, parent_hash: ExecutionBlockHash,
timestamp: u64, payload_attributes: &PayloadAttributes,
prev_randao: Hash256,
suggested_fee_recipient: Address,
forkchoice_update_params: ForkchoiceUpdateParameters, forkchoice_update_params: ForkchoiceUpdateParameters,
builder_params: BuilderParams, builder_params: BuilderParams,
current_fork: ForkName,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<ProvenancedPayload<Payload>, Error> { ) -> Result<ProvenancedPayload<BlockProposalContents<T, Payload>>, Error> {
if let Some(builder) = self.builder() { if let Some(builder) = self.builder() {
let slot = builder_params.slot; let slot = builder_params.slot;
let pubkey = builder_params.pubkey; let pubkey = builder_params.pubkey;
@ -682,10 +788,9 @@ impl<T: EthSpec> ExecutionLayer<T> {
timed_future(metrics::GET_BLINDED_PAYLOAD_LOCAL, async { timed_future(metrics::GET_BLINDED_PAYLOAD_LOCAL, async {
self.get_full_payload_caching::<Payload>( self.get_full_payload_caching::<Payload>(
parent_hash, parent_hash,
timestamp, payload_attributes,
prev_randao,
suggested_fee_recipient,
forkchoice_update_params, forkchoice_update_params,
current_fork,
) )
.await .await
}) })
@ -701,7 +806,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
}, },
"relay_response_ms" => relay_duration.as_millis(), "relay_response_ms" => relay_duration.as_millis(),
"local_fee_recipient" => match &local_result { "local_fee_recipient" => match &local_result {
Ok(header) => format!("{:?}", header.fee_recipient()), Ok(proposal_contents) => format!("{:?}", proposal_contents.payload().fee_recipient()),
Err(_) => "request failed".to_string() Err(_) => "request failed".to_string()
}, },
"local_response_ms" => local_duration.as_millis(), "local_response_ms" => local_duration.as_millis(),
@ -715,7 +820,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
"Builder error when requesting payload"; "Builder error when requesting payload";
"info" => "falling back to local execution client", "info" => "falling back to local execution client",
"relay_error" => ?e, "relay_error" => ?e,
"local_block_hash" => ?local.block_hash(), "local_block_hash" => ?local.payload().block_hash(),
"parent_hash" => ?parent_hash, "parent_hash" => ?parent_hash,
); );
Ok(ProvenancedPayload::Local(local)) Ok(ProvenancedPayload::Local(local))
@ -725,7 +830,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
self.log(), self.log(),
"Builder did not return a payload"; "Builder did not return a payload";
"info" => "falling back to local execution client", "info" => "falling back to local execution client",
"local_block_hash" => ?local.block_hash(), "local_block_hash" => ?local.payload().block_hash(),
"parent_hash" => ?parent_hash, "parent_hash" => ?parent_hash,
); );
Ok(ProvenancedPayload::Local(local)) Ok(ProvenancedPayload::Local(local))
@ -737,22 +842,37 @@ impl<T: EthSpec> ExecutionLayer<T> {
self.log(), self.log(),
"Received local and builder payloads"; "Received local and builder payloads";
"relay_block_hash" => ?header.block_hash(), "relay_block_hash" => ?header.block_hash(),
"local_block_hash" => ?local.block_hash(), "local_block_hash" => ?local.payload().block_hash(),
"parent_hash" => ?parent_hash, "parent_hash" => ?parent_hash,
); );
let relay_value = relay.data.message.value;
let local_value = *local.block_value();
if local_value >= relay_value {
info!(
self.log(),
"Local block is more profitable than relay block";
"local_block_value" => %local_value,
"relay_value" => %relay_value
);
return Ok(ProvenancedPayload::Local(local));
}
match verify_builder_bid( match verify_builder_bid(
&relay, &relay,
parent_hash, parent_hash,
prev_randao, payload_attributes,
timestamp, Some(local.payload().block_number()),
Some(local.block_number()),
self.inner.builder_profit_threshold, self.inner.builder_profit_threshold,
current_fork,
spec, spec,
) { ) {
Ok(()) => { Ok(()) => Ok(ProvenancedPayload::Builder(
Ok(ProvenancedPayload::Builder(relay.data.message.header)) BlockProposalContents::Payload {
} payload: relay.data.message.header,
block_value: relay.data.message.value,
},
)),
Err(reason) if !reason.payload_invalid() => { Err(reason) if !reason.payload_invalid() => {
info!( info!(
self.log(), self.log(),
@ -795,20 +915,26 @@ impl<T: EthSpec> ExecutionLayer<T> {
match verify_builder_bid( match verify_builder_bid(
&relay, &relay,
parent_hash, parent_hash,
prev_randao, payload_attributes,
timestamp,
None, None,
self.inner.builder_profit_threshold, self.inner.builder_profit_threshold,
current_fork,
spec, spec,
) { ) {
Ok(()) => { Ok(()) => Ok(ProvenancedPayload::Builder(
Ok(ProvenancedPayload::Builder(relay.data.message.header)) BlockProposalContents::Payload {
} payload: relay.data.message.header,
block_value: relay.data.message.value,
},
)),
// If the payload is valid then use it. The local EE failed // If the payload is valid then use it. The local EE failed
// to produce a payload so we have no alternative. // to produce a payload so we have no alternative.
Err(e) if !e.payload_invalid() => { Err(e) if !e.payload_invalid() => Ok(ProvenancedPayload::Builder(
Ok(ProvenancedPayload::Builder(relay.data.message.header)) BlockProposalContents::Payload {
} payload: relay.data.message.header,
block_value: relay.data.message.value,
},
)),
Err(reason) => { Err(reason) => {
metrics::inc_counter_vec( metrics::inc_counter_vec(
&metrics::EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS, &metrics::EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS,
@ -871,76 +997,62 @@ impl<T: EthSpec> ExecutionLayer<T> {
} }
self.get_full_payload_caching( self.get_full_payload_caching(
parent_hash, parent_hash,
timestamp, payload_attributes,
prev_randao,
suggested_fee_recipient,
forkchoice_update_params, forkchoice_update_params,
current_fork,
) )
.await .await
.map(ProvenancedPayload::Local) .map(ProvenancedPayload::Local)
} }
/// Get a full payload without caching its result in the execution layer's payload cache. /// Get a full payload without caching its result in the execution layer's payload cache.
async fn get_full_payload<Payload: ExecPayload<T>>( async fn get_full_payload<Payload: AbstractExecPayload<T>>(
&self, &self,
parent_hash: ExecutionBlockHash, parent_hash: ExecutionBlockHash,
timestamp: u64, payload_attributes: &PayloadAttributes,
prev_randao: Hash256,
suggested_fee_recipient: Address,
forkchoice_update_params: ForkchoiceUpdateParameters, forkchoice_update_params: ForkchoiceUpdateParameters,
) -> Result<Payload, Error> { current_fork: ForkName,
) -> Result<BlockProposalContents<T, Payload>, Error> {
self.get_full_payload_with( self.get_full_payload_with(
parent_hash, parent_hash,
timestamp, payload_attributes,
prev_randao,
suggested_fee_recipient,
forkchoice_update_params, forkchoice_update_params,
current_fork,
noop, noop,
) )
.await .await
} }
/// Get a full payload and cache its result in the execution layer's payload cache. /// Get a full payload and cache its result in the execution layer's payload cache.
async fn get_full_payload_caching<Payload: ExecPayload<T>>( async fn get_full_payload_caching<Payload: AbstractExecPayload<T>>(
&self, &self,
parent_hash: ExecutionBlockHash, parent_hash: ExecutionBlockHash,
timestamp: u64, payload_attributes: &PayloadAttributes,
prev_randao: Hash256,
suggested_fee_recipient: Address,
forkchoice_update_params: ForkchoiceUpdateParameters, forkchoice_update_params: ForkchoiceUpdateParameters,
) -> Result<Payload, Error> { current_fork: ForkName,
) -> Result<BlockProposalContents<T, Payload>, Error> {
self.get_full_payload_with( self.get_full_payload_with(
parent_hash, parent_hash,
timestamp, payload_attributes,
prev_randao,
suggested_fee_recipient,
forkchoice_update_params, forkchoice_update_params,
current_fork,
Self::cache_payload, Self::cache_payload,
) )
.await .await
} }
async fn get_full_payload_with<Payload: ExecPayload<T>>( async fn get_full_payload_with<Payload: AbstractExecPayload<T>>(
&self, &self,
parent_hash: ExecutionBlockHash, parent_hash: ExecutionBlockHash,
timestamp: u64, payload_attributes: &PayloadAttributes,
prev_randao: Hash256,
suggested_fee_recipient: Address,
forkchoice_update_params: ForkchoiceUpdateParameters, forkchoice_update_params: ForkchoiceUpdateParameters,
f: fn(&ExecutionLayer<T>, &ExecutionPayload<T>) -> Option<ExecutionPayload<T>>, current_fork: ForkName,
) -> Result<Payload, Error> { f: fn(&ExecutionLayer<T>, ExecutionPayloadRef<T>) -> Option<ExecutionPayload<T>>,
debug!( ) -> Result<BlockProposalContents<T, Payload>, Error> {
self.log(),
"Issuing engine_getPayload";
"suggested_fee_recipient" => ?suggested_fee_recipient,
"prev_randao" => ?prev_randao,
"timestamp" => timestamp,
"parent_hash" => ?parent_hash,
);
self.engine() self.engine()
.request(|engine| async move { .request(move |engine| async move {
let payload_id = if let Some(id) = engine let payload_id = if let Some(id) = engine
.get_payload_id(parent_hash, timestamp, prev_randao, suggested_fee_recipient) .get_payload_id(&parent_hash, payload_attributes)
.await .await
{ {
// The payload id has been cached for this engine. // The payload id has been cached for this engine.
@ -956,7 +1068,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
&metrics::EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID, &metrics::EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID,
&[metrics::MISS], &[metrics::MISS],
); );
let fork_choice_state = ForkChoiceState { let fork_choice_state = ForkchoiceState {
head_block_hash: parent_hash, head_block_hash: parent_hash,
safe_block_hash: forkchoice_update_params safe_block_hash: forkchoice_update_params
.justified_hash .justified_hash
@ -965,16 +1077,11 @@ impl<T: EthSpec> ExecutionLayer<T> {
.finalized_hash .finalized_hash
.unwrap_or_else(ExecutionBlockHash::zero), .unwrap_or_else(ExecutionBlockHash::zero),
}; };
let payload_attributes = PayloadAttributes {
timestamp,
prev_randao,
suggested_fee_recipient,
};
let response = engine let response = engine
.notify_forkchoice_updated( .notify_forkchoice_updated(
fork_choice_state, fork_choice_state,
Some(payload_attributes), Some(payload_attributes.clone()),
self.log(), self.log(),
) )
.await?; .await?;
@ -994,33 +1101,73 @@ impl<T: EthSpec> ExecutionLayer<T> {
} }
}; };
engine let blob_fut = async {
.api match current_fork {
.get_payload_v1::<T>(payload_id) ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => {
.await None
.map(|full_payload| {
if full_payload.fee_recipient != suggested_fee_recipient {
error!(
self.log(),
"Inconsistent fee recipient";
"msg" => "The fee recipient returned from the Execution Engine differs \
from the suggested_fee_recipient set on the beacon node. This could \
indicate that fees are being diverted to another address. Please \
ensure that the value of suggested_fee_recipient is set correctly and \
that the Execution Engine is trusted.",
"fee_recipient" => ?full_payload.fee_recipient,
"suggested_fee_recipient" => ?suggested_fee_recipient,
);
} }
if f(self, &full_payload).is_some() { ForkName::Eip4844 => {
warn!( debug!(
self.log(), self.log(),
"Duplicate payload cached, this might indicate redundant proposal \ "Issuing engine_getBlobsBundle";
"suggested_fee_recipient" => ?payload_attributes.suggested_fee_recipient(),
"prev_randao" => ?payload_attributes.prev_randao(),
"timestamp" => payload_attributes.timestamp(),
"parent_hash" => ?parent_hash,
);
Some(engine.api.get_blobs_bundle_v1::<T>(payload_id).await)
}
}
};
let payload_fut = async {
debug!(
self.log(),
"Issuing engine_getPayload";
"suggested_fee_recipient" => ?payload_attributes.suggested_fee_recipient(),
"prev_randao" => ?payload_attributes.prev_randao(),
"timestamp" => payload_attributes.timestamp(),
"parent_hash" => ?parent_hash,
);
engine.api.get_payload::<T>(current_fork, payload_id).await
};
let (blob, payload_response) = tokio::join!(blob_fut, payload_fut);
let (execution_payload, block_value) = payload_response.map(|payload_response| {
if payload_response.execution_payload_ref().fee_recipient() != payload_attributes.suggested_fee_recipient() {
error!(
self.log(),
"Inconsistent fee recipient";
"msg" => "The fee recipient returned from the Execution Engine differs \
from the suggested_fee_recipient set on the beacon node. This could \
indicate that fees are being diverted to another address. Please \
ensure that the value of suggested_fee_recipient is set correctly and \
that the Execution Engine is trusted.",
"fee_recipient" => ?payload_response.execution_payload_ref().fee_recipient(),
"suggested_fee_recipient" => ?payload_attributes.suggested_fee_recipient(),
);
}
if f(self, payload_response.execution_payload_ref()).is_some() {
warn!(
self.log(),
"Duplicate payload cached, this might indicate redundant proposal \
attempts." attempts."
); );
} }
full_payload.into() payload_response.into()
})?;
if let Some(blob) = blob.transpose()? {
// FIXME(sean) cache blobs
Ok(BlockProposalContents::PayloadAndBlobs {
payload: execution_payload.into(),
block_value,
blobs: blob.blobs,
kzg_commitments: blob.kzgs,
}) })
} else {
Ok(BlockProposalContents::Payload {
payload: execution_payload.into(),
block_value,
})
}
}) })
.await .await
.map_err(Box::new) .map_err(Box::new)
@ -1052,14 +1199,14 @@ impl<T: EthSpec> ExecutionLayer<T> {
trace!( trace!(
self.log(), self.log(),
"Issuing engine_newPayload"; "Issuing engine_newPayload";
"parent_hash" => ?execution_payload.parent_hash, "parent_hash" => ?execution_payload.parent_hash(),
"block_hash" => ?execution_payload.block_hash, "block_hash" => ?execution_payload.block_hash(),
"block_number" => execution_payload.block_number, "block_number" => execution_payload.block_number(),
); );
let result = self let result = self
.engine() .engine()
.request(|engine| engine.api.new_payload_v1(execution_payload.clone())) .request(|engine| engine.api.new_payload(execution_payload.clone()))
.await; .await;
if let Ok(status) = &result { if let Ok(status) = &result {
@ -1069,7 +1216,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
); );
} }
process_payload_status(execution_payload.block_hash, result, self.log()) process_payload_status(execution_payload.block_hash(), result, self.log())
.map_err(Box::new) .map_err(Box::new)
.map_err(Error::EngineError) .map_err(Error::EngineError)
} }
@ -1172,9 +1319,9 @@ impl<T: EthSpec> ExecutionLayer<T> {
let payload_attributes = self.payload_attributes(next_slot, head_block_root).await; let payload_attributes = self.payload_attributes(next_slot, head_block_root).await;
// Compute the "lookahead", the time between when the payload will be produced and now. // Compute the "lookahead", the time between when the payload will be produced and now.
if let Some(payload_attributes) = payload_attributes { if let Some(ref payload_attributes) = payload_attributes {
if let Ok(now) = SystemTime::now().duration_since(UNIX_EPOCH) { if let Ok(now) = SystemTime::now().duration_since(UNIX_EPOCH) {
let timestamp = Duration::from_secs(payload_attributes.timestamp); let timestamp = Duration::from_secs(payload_attributes.timestamp());
if let Some(lookahead) = timestamp.checked_sub(now) { if let Some(lookahead) = timestamp.checked_sub(now) {
metrics::observe_duration( metrics::observe_duration(
&metrics::EXECUTION_LAYER_PAYLOAD_ATTRIBUTES_LOOKAHEAD, &metrics::EXECUTION_LAYER_PAYLOAD_ATTRIBUTES_LOOKAHEAD,
@ -1191,7 +1338,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
} }
} }
let forkchoice_state = ForkChoiceState { let forkchoice_state = ForkchoiceState {
head_block_hash, head_block_hash,
safe_block_hash: justified_block_hash, safe_block_hash: justified_block_hash,
finalized_block_hash, finalized_block_hash,
@ -1273,6 +1420,26 @@ impl<T: EthSpec> ExecutionLayer<T> {
} }
} }
/// Returns the execution engine capabilities resulting from a call to
/// engine_exchangeCapabilities. If the capabilities cache is not populated,
/// or if it is populated with a cached result of age >= `age_limit`, this
/// method will fetch the result from the execution engine and populate the
/// cache before returning it. Otherwise it will return a cached result from
/// a previous call.
///
/// Set `age_limit` to `None` to always return the cached result
/// Set `age_limit` to `Some(Duration::ZERO)` to force fetching from EE
pub async fn get_engine_capabilities(
&self,
age_limit: Option<Duration>,
) -> Result<EngineCapabilities, Error> {
self.engine()
.request(|engine| engine.get_engine_capabilities(age_limit))
.await
.map_err(Box::new)
.map_err(Error::EngineError)
}
/// Used during block production to determine if the merge has been triggered. /// Used during block production to determine if the merge has been triggered.
/// ///
/// ## Specification /// ## Specification
@ -1476,10 +1643,11 @@ impl<T: EthSpec> ExecutionLayer<T> {
pub async fn get_payload_by_block_hash( pub async fn get_payload_by_block_hash(
&self, &self,
hash: ExecutionBlockHash, hash: ExecutionBlockHash,
fork: ForkName,
) -> Result<Option<ExecutionPayload<T>>, Error> { ) -> Result<Option<ExecutionPayload<T>>, Error> {
self.engine() self.engine()
.request(|engine| async move { .request(|engine| async move {
self.get_payload_by_block_hash_from_engine(engine, hash) self.get_payload_by_block_hash_from_engine(engine, hash, fork)
.await .await
}) })
.await .await
@ -1491,14 +1659,26 @@ impl<T: EthSpec> ExecutionLayer<T> {
&self, &self,
engine: &Engine, engine: &Engine,
hash: ExecutionBlockHash, hash: ExecutionBlockHash,
fork: ForkName,
) -> Result<Option<ExecutionPayload<T>>, ApiError> { ) -> Result<Option<ExecutionPayload<T>>, ApiError> {
let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_GET_PAYLOAD_BY_BLOCK_HASH); let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_GET_PAYLOAD_BY_BLOCK_HASH);
if hash == ExecutionBlockHash::zero() { if hash == ExecutionBlockHash::zero() {
return Ok(Some(ExecutionPayload::default())); return match fork {
ForkName::Merge => Ok(Some(ExecutionPayloadMerge::default().into())),
ForkName::Capella => Ok(Some(ExecutionPayloadCapella::default().into())),
ForkName::Eip4844 => Ok(Some(ExecutionPayloadEip4844::default().into())),
ForkName::Base | ForkName::Altair => Err(ApiError::UnsupportedForkVariant(
format!("called get_payload_by_block_hash_from_engine with {}", fork),
)),
};
} }
let block = if let Some(block) = engine.api.get_block_by_hash_with_txns::<T>(hash).await? { let block = if let Some(block) = engine
.api
.get_block_by_hash_with_txns::<T>(hash, fork)
.await?
{
block block
} else { } else {
return Ok(None); return Ok(None);
@ -1506,30 +1686,91 @@ impl<T: EthSpec> ExecutionLayer<T> {
let transactions = VariableList::new( let transactions = VariableList::new(
block block
.transactions .transactions()
.into_iter() .iter()
.map(|transaction| VariableList::new(transaction.rlp().to_vec())) .map(|transaction| VariableList::new(transaction.rlp().to_vec()))
.collect::<Result<_, _>>() .collect::<Result<_, _>>()
.map_err(ApiError::DeserializeTransaction)?, .map_err(ApiError::DeserializeTransaction)?,
) )
.map_err(ApiError::DeserializeTransactions)?; .map_err(ApiError::DeserializeTransactions)?;
Ok(Some(ExecutionPayload { let payload = match block {
parent_hash: block.parent_hash, ExecutionBlockWithTransactions::Merge(merge_block) => {
fee_recipient: block.fee_recipient, ExecutionPayload::Merge(ExecutionPayloadMerge {
state_root: block.state_root, parent_hash: merge_block.parent_hash,
receipts_root: block.receipts_root, fee_recipient: merge_block.fee_recipient,
logs_bloom: block.logs_bloom, state_root: merge_block.state_root,
prev_randao: block.prev_randao, receipts_root: merge_block.receipts_root,
block_number: block.block_number, logs_bloom: merge_block.logs_bloom,
gas_limit: block.gas_limit, prev_randao: merge_block.prev_randao,
gas_used: block.gas_used, block_number: merge_block.block_number,
timestamp: block.timestamp, gas_limit: merge_block.gas_limit,
extra_data: block.extra_data, gas_used: merge_block.gas_used,
base_fee_per_gas: block.base_fee_per_gas, timestamp: merge_block.timestamp,
block_hash: block.block_hash, extra_data: merge_block.extra_data,
transactions, base_fee_per_gas: merge_block.base_fee_per_gas,
})) block_hash: merge_block.block_hash,
transactions,
})
}
ExecutionBlockWithTransactions::Capella(capella_block) => {
let withdrawals = VariableList::new(
capella_block
.withdrawals
.into_iter()
.map(Into::into)
.collect(),
)
.map_err(ApiError::DeserializeWithdrawals)?;
ExecutionPayload::Capella(ExecutionPayloadCapella {
parent_hash: capella_block.parent_hash,
fee_recipient: capella_block.fee_recipient,
state_root: capella_block.state_root,
receipts_root: capella_block.receipts_root,
logs_bloom: capella_block.logs_bloom,
prev_randao: capella_block.prev_randao,
block_number: capella_block.block_number,
gas_limit: capella_block.gas_limit,
gas_used: capella_block.gas_used,
timestamp: capella_block.timestamp,
extra_data: capella_block.extra_data,
base_fee_per_gas: capella_block.base_fee_per_gas,
block_hash: capella_block.block_hash,
transactions,
withdrawals,
})
}
ExecutionBlockWithTransactions::Eip4844(eip4844_block) => {
let withdrawals = VariableList::new(
eip4844_block
.withdrawals
.into_iter()
.map(Into::into)
.collect(),
)
.map_err(ApiError::DeserializeWithdrawals)?;
ExecutionPayload::Eip4844(ExecutionPayloadEip4844 {
parent_hash: eip4844_block.parent_hash,
fee_recipient: eip4844_block.fee_recipient,
state_root: eip4844_block.state_root,
receipts_root: eip4844_block.receipts_root,
logs_bloom: eip4844_block.logs_bloom,
prev_randao: eip4844_block.prev_randao,
block_number: eip4844_block.block_number,
gas_limit: eip4844_block.gas_limit,
gas_used: eip4844_block.gas_used,
timestamp: eip4844_block.timestamp,
extra_data: eip4844_block.extra_data,
base_fee_per_gas: eip4844_block.base_fee_per_gas,
excess_data_gas: eip4844_block.excess_data_gas,
block_hash: eip4844_block.block_hash,
transactions,
withdrawals,
})
}
};
Ok(Some(payload))
} }
pub async fn propose_blinded_beacon_block( pub async fn propose_blinded_beacon_block(
@ -1565,9 +1806,9 @@ impl<T: EthSpec> ExecutionLayer<T> {
"Builder successfully revealed payload"; "Builder successfully revealed payload";
"relay_response_ms" => duration.as_millis(), "relay_response_ms" => duration.as_millis(),
"block_root" => ?block_root, "block_root" => ?block_root,
"fee_recipient" => ?payload.fee_recipient, "fee_recipient" => ?payload.fee_recipient(),
"block_hash" => ?payload.block_hash, "block_hash" => ?payload.block_hash(),
"parent_hash" => ?payload.parent_hash "parent_hash" => ?payload.parent_hash()
) )
} }
Err(e) => { Err(e) => {
@ -1629,6 +1870,10 @@ enum InvalidBuilderPayload {
signature: Signature, signature: Signature,
pubkey: PublicKeyBytes, pubkey: PublicKeyBytes,
}, },
WithdrawalsRoot {
payload: Option<Hash256>,
expected: Option<Hash256>,
},
} }
impl InvalidBuilderPayload { impl InvalidBuilderPayload {
@ -1643,6 +1888,7 @@ impl InvalidBuilderPayload {
InvalidBuilderPayload::BlockNumber { .. } => true, InvalidBuilderPayload::BlockNumber { .. } => true,
InvalidBuilderPayload::Fork { .. } => true, InvalidBuilderPayload::Fork { .. } => true,
InvalidBuilderPayload::Signature { .. } => true, InvalidBuilderPayload::Signature { .. } => true,
InvalidBuilderPayload::WithdrawalsRoot { .. } => true,
} }
} }
} }
@ -1678,18 +1924,31 @@ impl fmt::Display for InvalidBuilderPayload {
"invalid payload signature {} for pubkey {}", "invalid payload signature {} for pubkey {}",
signature, pubkey signature, pubkey
), ),
InvalidBuilderPayload::WithdrawalsRoot { payload, expected } => {
let opt_string = |opt_hash: &Option<Hash256>| {
opt_hash
.map(|hash| hash.to_string())
.unwrap_or_else(|| "None".to_string())
};
write!(
f,
"payload withdrawals root was {} not {}",
opt_string(payload),
opt_string(expected)
)
}
} }
} }
} }
/// Perform some cursory, non-exhaustive validation of the bid returned from the builder. /// Perform some cursory, non-exhaustive validation of the bid returned from the builder.
fn verify_builder_bid<T: EthSpec, Payload: ExecPayload<T>>( fn verify_builder_bid<T: EthSpec, Payload: AbstractExecPayload<T>>(
bid: &ForkVersionedResponse<SignedBuilderBid<T, Payload>>, bid: &ForkVersionedResponse<SignedBuilderBid<T, Payload>>,
parent_hash: ExecutionBlockHash, parent_hash: ExecutionBlockHash,
prev_randao: Hash256, payload_attributes: &PayloadAttributes,
timestamp: u64,
block_number: Option<u64>, block_number: Option<u64>,
profit_threshold: Uint256, profit_threshold: Uint256,
current_fork: ForkName,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<(), Box<InvalidBuilderPayload>> { ) -> Result<(), Box<InvalidBuilderPayload>> {
let is_signature_valid = bid.data.verify_signature(spec); let is_signature_valid = bid.data.verify_signature(spec);
@ -1706,6 +1965,13 @@ fn verify_builder_bid<T: EthSpec, Payload: ExecPayload<T>>(
); );
} }
let expected_withdrawals_root = payload_attributes
.withdrawals()
.ok()
.cloned()
.map(|withdrawals| Withdrawals::<T>::from(withdrawals).tree_hash_root());
let payload_withdrawals_root = header.withdrawals_root().ok();
if payload_value < profit_threshold { if payload_value < profit_threshold {
Err(Box::new(InvalidBuilderPayload::LowValue { Err(Box::new(InvalidBuilderPayload::LowValue {
profit_threshold, profit_threshold,
@ -1716,35 +1982,36 @@ fn verify_builder_bid<T: EthSpec, Payload: ExecPayload<T>>(
payload: header.parent_hash(), payload: header.parent_hash(),
expected: parent_hash, expected: parent_hash,
})) }))
} else if header.prev_randao() != prev_randao { } else if header.prev_randao() != payload_attributes.prev_randao() {
Err(Box::new(InvalidBuilderPayload::PrevRandao { Err(Box::new(InvalidBuilderPayload::PrevRandao {
payload: header.prev_randao(), payload: header.prev_randao(),
expected: prev_randao, expected: payload_attributes.prev_randao(),
})) }))
} else if header.timestamp() != timestamp { } else if header.timestamp() != payload_attributes.timestamp() {
Err(Box::new(InvalidBuilderPayload::Timestamp { Err(Box::new(InvalidBuilderPayload::Timestamp {
payload: header.timestamp(), payload: header.timestamp(),
expected: timestamp, expected: payload_attributes.timestamp(),
})) }))
} else if block_number.map_or(false, |n| n != header.block_number()) { } else if block_number.map_or(false, |n| n != header.block_number()) {
Err(Box::new(InvalidBuilderPayload::BlockNumber { Err(Box::new(InvalidBuilderPayload::BlockNumber {
payload: header.block_number(), payload: header.block_number(),
expected: block_number, expected: block_number,
})) }))
} else if !matches!(bid.version, Some(ForkName::Merge)) { } else if bid.version != Some(current_fork) {
// Once fork information is added to the payload, we will need to
// check that the local and relay payloads match. At this point, if
// we are requesting a payload at all, we have to assume this is
// the Bellatrix fork.
Err(Box::new(InvalidBuilderPayload::Fork { Err(Box::new(InvalidBuilderPayload::Fork {
payload: bid.version, payload: bid.version,
expected: ForkName::Merge, expected: current_fork,
})) }))
} else if !is_signature_valid { } else if !is_signature_valid {
Err(Box::new(InvalidBuilderPayload::Signature { Err(Box::new(InvalidBuilderPayload::Signature {
signature: bid.data.signature.clone(), signature: bid.data.signature.clone(),
pubkey: bid.data.message.pubkey, pubkey: bid.data.message.pubkey,
})) }))
} else if payload_withdrawals_root != expected_withdrawals_root {
Err(Box::new(InvalidBuilderPayload::WithdrawalsRoot {
payload: payload_withdrawals_root,
expected: expected_withdrawals_root,
}))
} else { } else {
Ok(()) Ok(())
} }
@ -1906,7 +2173,10 @@ mod test {
} }
} }
fn noop<T: EthSpec>(_: &ExecutionLayer<T>, _: &ExecutionPayload<T>) -> Option<ExecutionPayload<T>> { fn noop<T: EthSpec>(
_: &ExecutionLayer<T>,
_: ExecutionPayloadRef<T>,
) -> Option<ExecutionPayload<T>> {
None None
} }

View File

@ -1,4 +1,4 @@
use crate::engines::ForkChoiceState; use crate::engines::ForkchoiceState;
use crate::{ use crate::{
engine_api::{ engine_api::{
json_structures::{ json_structures::{
@ -12,7 +12,10 @@ use serde::{Deserialize, Serialize};
use std::collections::HashMap; use std::collections::HashMap;
use tree_hash::TreeHash; use tree_hash::TreeHash;
use tree_hash_derive::TreeHash; use tree_hash_derive::TreeHash;
use types::{EthSpec, ExecutionBlockHash, ExecutionPayload, Hash256, Uint256}; use types::{
EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella,
ExecutionPayloadEip4844, ExecutionPayloadMerge, ForkName, Hash256, Uint256,
};
const GAS_LIMIT: u64 = 16384; const GAS_LIMIT: u64 = 16384;
const GAS_USED: u64 = GAS_LIMIT - 1; const GAS_USED: u64 = GAS_LIMIT - 1;
@ -28,21 +31,21 @@ impl<T: EthSpec> Block<T> {
pub fn block_number(&self) -> u64 { pub fn block_number(&self) -> u64 {
match self { match self {
Block::PoW(block) => block.block_number, Block::PoW(block) => block.block_number,
Block::PoS(payload) => payload.block_number, Block::PoS(payload) => payload.block_number(),
} }
} }
pub fn parent_hash(&self) -> ExecutionBlockHash { pub fn parent_hash(&self) -> ExecutionBlockHash {
match self { match self {
Block::PoW(block) => block.parent_hash, Block::PoW(block) => block.parent_hash,
Block::PoS(payload) => payload.parent_hash, Block::PoS(payload) => payload.parent_hash(),
} }
} }
pub fn block_hash(&self) -> ExecutionBlockHash { pub fn block_hash(&self) -> ExecutionBlockHash {
match self { match self {
Block::PoW(block) => block.block_hash, Block::PoW(block) => block.block_hash,
Block::PoS(payload) => payload.block_hash, Block::PoS(payload) => payload.block_hash(),
} }
} }
@ -63,33 +66,18 @@ impl<T: EthSpec> Block<T> {
timestamp: block.timestamp, timestamp: block.timestamp,
}, },
Block::PoS(payload) => ExecutionBlock { Block::PoS(payload) => ExecutionBlock {
block_hash: payload.block_hash, block_hash: payload.block_hash(),
block_number: payload.block_number, block_number: payload.block_number(),
parent_hash: payload.parent_hash, parent_hash: payload.parent_hash(),
total_difficulty, total_difficulty,
timestamp: payload.timestamp, timestamp: payload.timestamp(),
}, },
} }
} }
pub fn as_execution_block_with_tx(&self) -> Option<ExecutionBlockWithTransactions<T>> { pub fn as_execution_block_with_tx(&self) -> Option<ExecutionBlockWithTransactions<T>> {
match self { match self {
Block::PoS(payload) => Some(ExecutionBlockWithTransactions { Block::PoS(payload) => Some(payload.clone().try_into().unwrap()),
parent_hash: payload.parent_hash,
fee_recipient: payload.fee_recipient,
state_root: payload.state_root,
receipts_root: payload.receipts_root,
logs_bloom: payload.logs_bloom.clone(),
prev_randao: payload.prev_randao,
block_number: payload.block_number,
gas_limit: payload.gas_limit,
gas_used: payload.gas_used,
timestamp: payload.timestamp,
extra_data: payload.extra_data.clone(),
base_fee_per_gas: payload.base_fee_per_gas,
block_hash: payload.block_hash,
transactions: vec![],
}),
Block::PoW(_) => None, Block::PoW(_) => None,
} }
} }
@ -126,6 +114,11 @@ pub struct ExecutionBlockGenerator<T: EthSpec> {
pub pending_payloads: HashMap<ExecutionBlockHash, ExecutionPayload<T>>, pub pending_payloads: HashMap<ExecutionBlockHash, ExecutionPayload<T>>,
pub next_payload_id: u64, pub next_payload_id: u64,
pub payload_ids: HashMap<PayloadId, ExecutionPayload<T>>, pub payload_ids: HashMap<PayloadId, ExecutionPayload<T>>,
/*
* Post-merge fork triggers
*/
pub shanghai_time: Option<u64>, // withdrawals
pub eip4844_time: Option<u64>, // 4844
} }
impl<T: EthSpec> ExecutionBlockGenerator<T> { impl<T: EthSpec> ExecutionBlockGenerator<T> {
@ -133,6 +126,8 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
terminal_total_difficulty: Uint256, terminal_total_difficulty: Uint256,
terminal_block_number: u64, terminal_block_number: u64,
terminal_block_hash: ExecutionBlockHash, terminal_block_hash: ExecutionBlockHash,
shanghai_time: Option<u64>,
eip4844_time: Option<u64>,
) -> Self { ) -> Self {
let mut gen = Self { let mut gen = Self {
head_block: <_>::default(), head_block: <_>::default(),
@ -145,6 +140,8 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
pending_payloads: <_>::default(), pending_payloads: <_>::default(),
next_payload_id: 0, next_payload_id: 0,
payload_ids: <_>::default(), payload_ids: <_>::default(),
shanghai_time,
eip4844_time,
}; };
gen.insert_pow_block(0).unwrap(); gen.insert_pow_block(0).unwrap();
@ -176,6 +173,16 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
} }
} }
pub fn get_fork_at_timestamp(&self, timestamp: u64) -> ForkName {
match self.eip4844_time {
Some(fork_time) if timestamp >= fork_time => ForkName::Eip4844,
_ => match self.shanghai_time {
Some(fork_time) if timestamp >= fork_time => ForkName::Capella,
_ => ForkName::Merge,
},
}
}
pub fn execution_block_by_number(&self, number: u64) -> Option<ExecutionBlock> { pub fn execution_block_by_number(&self, number: u64) -> Option<ExecutionBlock> {
self.block_by_number(number) self.block_by_number(number)
.map(|block| block.as_execution_block(self.terminal_total_difficulty)) .map(|block| block.as_execution_block(self.terminal_total_difficulty))
@ -357,7 +364,9 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
// Update the block hash after modifying the block // Update the block hash after modifying the block
match &mut block { match &mut block {
Block::PoW(b) => b.block_hash = ExecutionBlockHash::from_root(b.tree_hash_root()), Block::PoW(b) => b.block_hash = ExecutionBlockHash::from_root(b.tree_hash_root()),
Block::PoS(b) => b.block_hash = ExecutionBlockHash::from_root(b.tree_hash_root()), Block::PoS(b) => {
*b.block_hash_mut() = ExecutionBlockHash::from_root(b.tree_hash_root())
}
} }
// Update head. // Update head.
@ -378,7 +387,7 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
} }
pub fn new_payload(&mut self, payload: ExecutionPayload<T>) -> PayloadStatusV1 { pub fn new_payload(&mut self, payload: ExecutionPayload<T>) -> PayloadStatusV1 {
let parent = if let Some(parent) = self.blocks.get(&payload.parent_hash) { let parent = if let Some(parent) = self.blocks.get(&payload.parent_hash()) {
parent parent
} else { } else {
return PayloadStatusV1 { return PayloadStatusV1 {
@ -388,7 +397,7 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
}; };
}; };
if payload.block_number != parent.block_number() + 1 { if payload.block_number() != parent.block_number() + 1 {
return PayloadStatusV1 { return PayloadStatusV1 {
status: PayloadStatusV1Status::Invalid, status: PayloadStatusV1Status::Invalid,
latest_valid_hash: Some(parent.block_hash()), latest_valid_hash: Some(parent.block_hash()),
@ -396,8 +405,8 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
}; };
} }
let valid_hash = payload.block_hash; let valid_hash = payload.block_hash();
self.pending_payloads.insert(payload.block_hash, payload); self.pending_payloads.insert(payload.block_hash(), payload);
PayloadStatusV1 { PayloadStatusV1 {
status: PayloadStatusV1Status::Valid, status: PayloadStatusV1Status::Valid,
@ -406,9 +415,11 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
} }
} }
pub fn forkchoice_updated_v1( // This function expects payload_attributes to already be validated with respect to
// the current fork [obtained by self.get_fork_at_timestamp(payload_attributes.timestamp)]
pub fn forkchoice_updated(
&mut self, &mut self,
forkchoice_state: ForkChoiceState, forkchoice_state: ForkchoiceState,
payload_attributes: Option<PayloadAttributes>, payload_attributes: Option<PayloadAttributes>,
) -> Result<JsonForkchoiceUpdatedV1Response, String> { ) -> Result<JsonForkchoiceUpdatedV1Response, String> {
if let Some(payload) = self if let Some(payload) = self
@ -462,24 +473,87 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
let id = payload_id_from_u64(self.next_payload_id); let id = payload_id_from_u64(self.next_payload_id);
self.next_payload_id += 1; self.next_payload_id += 1;
let mut execution_payload = ExecutionPayload { let mut execution_payload = match &attributes {
parent_hash: forkchoice_state.head_block_hash, PayloadAttributes::V1(pa) => ExecutionPayload::Merge(ExecutionPayloadMerge {
fee_recipient: attributes.suggested_fee_recipient, parent_hash: forkchoice_state.head_block_hash,
receipts_root: Hash256::repeat_byte(42), fee_recipient: pa.suggested_fee_recipient,
state_root: Hash256::repeat_byte(43), receipts_root: Hash256::repeat_byte(42),
logs_bloom: vec![0; 256].into(), state_root: Hash256::repeat_byte(43),
prev_randao: attributes.prev_randao, logs_bloom: vec![0; 256].into(),
block_number: parent.block_number() + 1, prev_randao: pa.prev_randao,
gas_limit: GAS_LIMIT, block_number: parent.block_number() + 1,
gas_used: GAS_USED, gas_limit: GAS_LIMIT,
timestamp: attributes.timestamp, gas_used: GAS_USED,
extra_data: "block gen was here".as_bytes().to_vec().into(), timestamp: pa.timestamp,
base_fee_per_gas: Uint256::one(), extra_data: "block gen was here".as_bytes().to_vec().into(),
block_hash: ExecutionBlockHash::zero(), base_fee_per_gas: Uint256::one(),
transactions: vec![].into(), block_hash: ExecutionBlockHash::zero(),
transactions: vec![].into(),
}),
PayloadAttributes::V2(pa) => {
match self.get_fork_at_timestamp(pa.timestamp) {
ForkName::Merge => ExecutionPayload::Merge(ExecutionPayloadMerge {
parent_hash: forkchoice_state.head_block_hash,
fee_recipient: pa.suggested_fee_recipient,
receipts_root: Hash256::repeat_byte(42),
state_root: Hash256::repeat_byte(43),
logs_bloom: vec![0; 256].into(),
prev_randao: pa.prev_randao,
block_number: parent.block_number() + 1,
gas_limit: GAS_LIMIT,
gas_used: GAS_USED,
timestamp: pa.timestamp,
extra_data: "block gen was here".as_bytes().to_vec().into(),
base_fee_per_gas: Uint256::one(),
block_hash: ExecutionBlockHash::zero(),
transactions: vec![].into(),
}),
ForkName::Capella => {
ExecutionPayload::Capella(ExecutionPayloadCapella {
parent_hash: forkchoice_state.head_block_hash,
fee_recipient: pa.suggested_fee_recipient,
receipts_root: Hash256::repeat_byte(42),
state_root: Hash256::repeat_byte(43),
logs_bloom: vec![0; 256].into(),
prev_randao: pa.prev_randao,
block_number: parent.block_number() + 1,
gas_limit: GAS_LIMIT,
gas_used: GAS_USED,
timestamp: pa.timestamp,
extra_data: "block gen was here".as_bytes().to_vec().into(),
base_fee_per_gas: Uint256::one(),
block_hash: ExecutionBlockHash::zero(),
transactions: vec![].into(),
withdrawals: pa.withdrawals.clone().into(),
})
}
ForkName::Eip4844 => {
ExecutionPayload::Eip4844(ExecutionPayloadEip4844 {
parent_hash: forkchoice_state.head_block_hash,
fee_recipient: pa.suggested_fee_recipient,
receipts_root: Hash256::repeat_byte(42),
state_root: Hash256::repeat_byte(43),
logs_bloom: vec![0; 256].into(),
prev_randao: pa.prev_randao,
block_number: parent.block_number() + 1,
gas_limit: GAS_LIMIT,
gas_used: GAS_USED,
timestamp: pa.timestamp,
extra_data: "block gen was here".as_bytes().to_vec().into(),
base_fee_per_gas: Uint256::one(),
// FIXME(4844): maybe this should be set to something?
excess_data_gas: Uint256::one(),
block_hash: ExecutionBlockHash::zero(),
transactions: vec![].into(),
withdrawals: pa.withdrawals.clone().into(),
})
}
_ => unreachable!(),
}
}
}; };
execution_payload.block_hash = *execution_payload.block_hash_mut() =
ExecutionBlockHash::from_root(execution_payload.tree_hash_root()); ExecutionBlockHash::from_root(execution_payload.tree_hash_root());
self.payload_ids.insert(id, execution_payload); self.payload_ids.insert(id, execution_payload);
@ -566,6 +640,8 @@ mod test {
TERMINAL_DIFFICULTY.into(), TERMINAL_DIFFICULTY.into(),
TERMINAL_BLOCK, TERMINAL_BLOCK,
ExecutionBlockHash::zero(), ExecutionBlockHash::zero(),
None,
None,
); );
for i in 0..=TERMINAL_BLOCK { for i in 0..=TERMINAL_BLOCK {

View File

@ -1,25 +1,33 @@
use super::Context; use super::Context;
use crate::engine_api::{http::*, *}; use crate::engine_api::{http::*, *};
use crate::json_structures::*; use crate::json_structures::*;
use crate::test_utils::DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI;
use serde::de::DeserializeOwned; use serde::de::DeserializeOwned;
use serde_json::Value as JsonValue; use serde_json::Value as JsonValue;
use std::sync::Arc; use std::sync::Arc;
use types::EthSpec; use types::{EthSpec, ForkName};
pub const GENERIC_ERROR_CODE: i64 = -1234;
pub const BAD_PARAMS_ERROR_CODE: i64 = -32602;
pub const UNKNOWN_PAYLOAD_ERROR_CODE: i64 = -38001;
pub const FORK_REQUEST_MISMATCH_ERROR_CODE: i64 = -32000;
pub async fn handle_rpc<T: EthSpec>( pub async fn handle_rpc<T: EthSpec>(
body: JsonValue, body: JsonValue,
ctx: Arc<Context<T>>, ctx: Arc<Context<T>>,
) -> Result<JsonValue, String> { ) -> Result<JsonValue, (String, i64)> {
*ctx.previous_request.lock() = Some(body.clone()); *ctx.previous_request.lock() = Some(body.clone());
let method = body let method = body
.get("method") .get("method")
.and_then(JsonValue::as_str) .and_then(JsonValue::as_str)
.ok_or_else(|| "missing/invalid method field".to_string())?; .ok_or_else(|| "missing/invalid method field".to_string())
.map_err(|s| (s, GENERIC_ERROR_CODE))?;
let params = body let params = body
.get("params") .get("params")
.ok_or_else(|| "missing/invalid params field".to_string())?; .ok_or_else(|| "missing/invalid params field".to_string())
.map_err(|s| (s, GENERIC_ERROR_CODE))?;
match method { match method {
ETH_SYNCING => Ok(JsonValue::Bool(false)), ETH_SYNCING => Ok(JsonValue::Bool(false)),
@ -27,7 +35,8 @@ pub async fn handle_rpc<T: EthSpec>(
let tag = params let tag = params
.get(0) .get(0)
.and_then(JsonValue::as_str) .and_then(JsonValue::as_str)
.ok_or_else(|| "missing/invalid params[0] value".to_string())?; .ok_or_else(|| "missing/invalid params[0] value".to_string())
.map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?;
match tag { match tag {
"latest" => Ok(serde_json::to_value( "latest" => Ok(serde_json::to_value(
@ -36,7 +45,10 @@ pub async fn handle_rpc<T: EthSpec>(
.latest_execution_block(), .latest_execution_block(),
) )
.unwrap()), .unwrap()),
other => Err(format!("The tag {} is not supported", other)), other => Err((
format!("The tag {} is not supported", other),
BAD_PARAMS_ERROR_CODE,
)),
} }
} }
ETH_GET_BLOCK_BY_HASH => { ETH_GET_BLOCK_BY_HASH => {
@ -47,7 +59,8 @@ pub async fn handle_rpc<T: EthSpec>(
.and_then(|s| { .and_then(|s| {
s.parse() s.parse()
.map_err(|e| format!("unable to parse hash: {:?}", e)) .map_err(|e| format!("unable to parse hash: {:?}", e))
})?; })
.map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?;
// If we have a static response set, just return that. // If we have a static response set, just return that.
if let Some(response) = *ctx.static_get_block_by_hash_response.lock() { if let Some(response) = *ctx.static_get_block_by_hash_response.lock() {
@ -57,7 +70,8 @@ pub async fn handle_rpc<T: EthSpec>(
let full_tx = params let full_tx = params
.get(1) .get(1)
.and_then(JsonValue::as_bool) .and_then(JsonValue::as_bool)
.ok_or_else(|| "missing/invalid params[1] value".to_string())?; .ok_or_else(|| "missing/invalid params[1] value".to_string())
.map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?;
if full_tx { if full_tx {
Ok(serde_json::to_value( Ok(serde_json::to_value(
ctx.execution_block_generator ctx.execution_block_generator
@ -74,18 +88,70 @@ pub async fn handle_rpc<T: EthSpec>(
.unwrap()) .unwrap())
} }
} }
ENGINE_NEW_PAYLOAD_V1 => { ENGINE_NEW_PAYLOAD_V1 | ENGINE_NEW_PAYLOAD_V2 => {
let request: JsonExecutionPayloadV1<T> = get_param(params, 0)?; let request = match method {
ENGINE_NEW_PAYLOAD_V1 => JsonExecutionPayload::V1(
get_param::<JsonExecutionPayloadV1<T>>(params, 0)
.map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?,
),
ENGINE_NEW_PAYLOAD_V2 => get_param::<JsonExecutionPayloadV2<T>>(params, 0)
.map(|jep| JsonExecutionPayload::V2(jep))
.or_else(|_| {
get_param::<JsonExecutionPayloadV1<T>>(params, 0)
.map(|jep| JsonExecutionPayload::V1(jep))
})
.map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?,
// TODO(4844) add that here..
_ => unreachable!(),
};
let fork = ctx
.execution_block_generator
.read()
.get_fork_at_timestamp(*request.timestamp());
// validate method called correctly according to shanghai fork time
match fork {
ForkName::Merge => {
if matches!(request, JsonExecutionPayload::V2(_)) {
return Err((
format!(
"{} called with `ExecutionPayloadV2` before Capella fork!",
method
),
GENERIC_ERROR_CODE,
));
}
}
ForkName::Capella => {
if method == ENGINE_NEW_PAYLOAD_V1 {
return Err((
format!("{} called after Capella fork!", method),
GENERIC_ERROR_CODE,
));
}
if matches!(request, JsonExecutionPayload::V1(_)) {
return Err((
format!(
"{} called with `ExecutionPayloadV1` after Capella fork!",
method
),
GENERIC_ERROR_CODE,
));
}
}
// TODO(4844) add 4844 error checking here
_ => unreachable!(),
};
// Canned responses set by block hash take priority. // Canned responses set by block hash take priority.
if let Some(status) = ctx.get_new_payload_status(&request.block_hash) { if let Some(status) = ctx.get_new_payload_status(request.block_hash()) {
return Ok(serde_json::to_value(JsonPayloadStatusV1::from(status)).unwrap()); return Ok(serde_json::to_value(JsonPayloadStatusV1::from(status)).unwrap());
} }
let (static_response, should_import) = let (static_response, should_import) =
if let Some(mut response) = ctx.static_new_payload_response.lock().clone() { if let Some(mut response) = ctx.static_new_payload_response.lock().clone() {
if response.status.status == PayloadStatusV1Status::Valid { if response.status.status == PayloadStatusV1Status::Valid {
response.status.latest_valid_hash = Some(request.block_hash) response.status.latest_valid_hash = Some(*request.block_hash())
} }
(Some(response.status), response.should_import) (Some(response.status), response.should_import)
@ -107,21 +173,141 @@ pub async fn handle_rpc<T: EthSpec>(
Ok(serde_json::to_value(JsonPayloadStatusV1::from(response)).unwrap()) Ok(serde_json::to_value(JsonPayloadStatusV1::from(response)).unwrap())
} }
ENGINE_GET_PAYLOAD_V1 => { ENGINE_GET_PAYLOAD_V1 | ENGINE_GET_PAYLOAD_V2 => {
let request: JsonPayloadIdRequest = get_param(params, 0)?; let request: JsonPayloadIdRequest =
get_param(params, 0).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?;
let id = request.into(); let id = request.into();
let response = ctx let response = ctx
.execution_block_generator .execution_block_generator
.write() .write()
.get_payload(&id) .get_payload(&id)
.ok_or_else(|| format!("no payload for id {:?}", id))?; .ok_or_else(|| {
(
format!("no payload for id {:?}", id),
UNKNOWN_PAYLOAD_ERROR_CODE,
)
})?;
Ok(serde_json::to_value(JsonExecutionPayloadV1::from(response)).unwrap()) // validate method called correctly according to shanghai fork time
if ctx
.execution_block_generator
.read()
.get_fork_at_timestamp(response.timestamp())
== ForkName::Capella
&& method == ENGINE_GET_PAYLOAD_V1
{
return Err((
format!("{} called after Capella fork!", method),
FORK_REQUEST_MISMATCH_ERROR_CODE,
));
}
// TODO(4844) add 4844 error checking here
match method {
ENGINE_GET_PAYLOAD_V1 => {
Ok(serde_json::to_value(JsonExecutionPayload::from(response)).unwrap())
}
ENGINE_GET_PAYLOAD_V2 => Ok(match JsonExecutionPayload::from(response) {
JsonExecutionPayload::V1(execution_payload) => {
serde_json::to_value(JsonGetPayloadResponseV1 {
execution_payload,
block_value: DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI.into(),
})
.unwrap()
}
JsonExecutionPayload::V2(execution_payload) => {
serde_json::to_value(JsonGetPayloadResponseV2 {
execution_payload,
block_value: DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI.into(),
})
.unwrap()
}
_ => unreachable!(),
}),
_ => unreachable!(),
}
} }
ENGINE_FORKCHOICE_UPDATED_V1 => { ENGINE_FORKCHOICE_UPDATED_V1 | ENGINE_FORKCHOICE_UPDATED_V2 => {
let forkchoice_state: JsonForkChoiceStateV1 = get_param(params, 0)?; let forkchoice_state: JsonForkchoiceStateV1 =
let payload_attributes: Option<JsonPayloadAttributesV1> = get_param(params, 1)?; get_param(params, 0).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?;
let payload_attributes = match method {
ENGINE_FORKCHOICE_UPDATED_V1 => {
let jpa1: Option<JsonPayloadAttributesV1> =
get_param(params, 1).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?;
jpa1.map(JsonPayloadAttributes::V1)
}
ENGINE_FORKCHOICE_UPDATED_V2 => {
// we can't use `deny_unknown_fields` without breaking compatibility with some
// clients that haven't updated to the latest engine_api spec. So instead we'll
// need to deserialize based on timestamp
get_param::<Option<JsonPayloadAttributes>>(params, 1)
.and_then(|pa| {
pa.and_then(|pa| {
match ctx
.execution_block_generator
.read()
.get_fork_at_timestamp(*pa.timestamp())
{
ForkName::Merge => {
get_param::<Option<JsonPayloadAttributesV1>>(params, 1)
.map(|opt| opt.map(JsonPayloadAttributes::V1))
.transpose()
}
ForkName::Capella => {
get_param::<Option<JsonPayloadAttributesV2>>(params, 1)
.map(|opt| opt.map(JsonPayloadAttributes::V2))
.transpose()
}
_ => unreachable!(),
}
})
.transpose()
})
.map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?
}
_ => unreachable!(),
};
// validate method called correctly according to shanghai fork time
if let Some(pa) = payload_attributes.as_ref() {
match ctx
.execution_block_generator
.read()
.get_fork_at_timestamp(*pa.timestamp())
{
ForkName::Merge => {
if matches!(pa, JsonPayloadAttributes::V2(_)) {
return Err((
format!(
"{} called with `JsonPayloadAttributesV2` before Capella fork!",
method
),
GENERIC_ERROR_CODE,
));
}
}
ForkName::Capella => {
if method == ENGINE_FORKCHOICE_UPDATED_V1 {
return Err((
format!("{} called after Capella fork!", method),
FORK_REQUEST_MISMATCH_ERROR_CODE,
));
}
if matches!(pa, JsonPayloadAttributes::V1(_)) {
return Err((
format!(
"{} called with `JsonPayloadAttributesV1` after Capella fork!",
method
),
FORK_REQUEST_MISMATCH_ERROR_CODE,
));
}
}
// TODO(4844) add 4844 error checking here
_ => unreachable!(),
};
}
if let Some(hook_response) = ctx if let Some(hook_response) = ctx
.hook .hook
@ -145,10 +331,11 @@ pub async fn handle_rpc<T: EthSpec>(
let mut response = ctx let mut response = ctx
.execution_block_generator .execution_block_generator
.write() .write()
.forkchoice_updated_v1( .forkchoice_updated(
forkchoice_state.into(), forkchoice_state.into(),
payload_attributes.map(|json| json.into()), payload_attributes.map(|json| json.into()),
)?; )
.map_err(|s| (s, GENERIC_ERROR_CODE))?;
if let Some(mut status) = ctx.static_forkchoice_updated_response.lock().clone() { if let Some(mut status) = ctx.static_forkchoice_updated_response.lock().clone() {
if status.status == PayloadStatusV1Status::Valid { if status.status == PayloadStatusV1Status::Valid {
@ -169,9 +356,13 @@ pub async fn handle_rpc<T: EthSpec>(
}; };
Ok(serde_json::to_value(transition_config).unwrap()) Ok(serde_json::to_value(transition_config).unwrap())
} }
other => Err(format!( ENGINE_EXCHANGE_CAPABILITIES => {
"The method {} does not exist/is not available", let engine_capabilities = ctx.engine_capabilities.read();
other Ok(serde_json::to_value(engine_capabilities.to_response()).unwrap())
}
other => Err((
format!("The method {} does not exist/is not available", other),
METHOD_NOT_FOUND_CODE,
)), )),
} }
} }

View File

@ -1,8 +1,8 @@
use crate::json_structures::*; use crate::json_structures::*;
type ForkChoiceUpdatedHook = dyn Fn( type ForkChoiceUpdatedHook = dyn Fn(
JsonForkChoiceStateV1, JsonForkchoiceStateV1,
Option<JsonPayloadAttributesV1>, Option<JsonPayloadAttributes>,
) -> Option<JsonForkchoiceUpdatedV1Response> ) -> Option<JsonForkchoiceUpdatedV1Response>
+ Send + Send
+ Sync; + Sync;
@ -15,8 +15,8 @@ pub struct Hook {
impl Hook { impl Hook {
pub fn on_forkchoice_updated( pub fn on_forkchoice_updated(
&self, &self,
state: JsonForkChoiceStateV1, state: JsonForkchoiceStateV1,
payload_attributes: Option<JsonPayloadAttributesV1>, payload_attributes: Option<JsonPayloadAttributes>,
) -> Option<JsonForkchoiceUpdatedV1Response> { ) -> Option<JsonForkchoiceUpdatedV1Response> {
(self.forkchoice_updated.as_ref()?)(state, payload_attributes) (self.forkchoice_updated.as_ref()?)(state, payload_attributes)
} }

View File

@ -1,17 +1,21 @@
use crate::test_utils::DEFAULT_JWT_SECRET; use crate::test_utils::{DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_JWT_SECRET};
use crate::{Config, ExecutionLayer, PayloadAttributes}; use crate::{Config, ExecutionLayer, PayloadAttributes};
use async_trait::async_trait; use async_trait::async_trait;
use eth2::types::{BlockId, StateId, ValidatorId}; use eth2::types::{BlockId, StateId, ValidatorId};
use eth2::{BeaconNodeHttpClient, Timeouts}; use eth2::{BeaconNodeHttpClient, Timeouts};
use ethereum_consensus::crypto::{SecretKey, Signature};
use ethereum_consensus::primitives::BlsPublicKey;
pub use ethereum_consensus::state_transition::Context; pub use ethereum_consensus::state_transition::Context;
use ethereum_consensus::{
crypto::{SecretKey, Signature},
primitives::{BlsPublicKey, BlsSignature, ExecutionAddress, Hash32, Root, U256},
state_transition::Error,
};
use fork_choice::ForkchoiceUpdateParameters; use fork_choice::ForkchoiceUpdateParameters;
use mev_build_rs::{ use mev_rs::{
bellatrix::{BuilderBid as BuilderBidBellatrix, SignedBuilderBid as SignedBuilderBidBellatrix},
capella::{BuilderBid as BuilderBidCapella, SignedBuilderBid as SignedBuilderBidCapella},
sign_builder_message, verify_signed_builder_message, BidRequest, BlindedBlockProviderError, sign_builder_message, verify_signed_builder_message, BidRequest, BlindedBlockProviderError,
BlindedBlockProviderServer, BuilderBid, ExecutionPayload as ServerPayload, BlindedBlockProviderServer, BuilderBid, ExecutionPayload as ServerPayload,
ExecutionPayloadHeader as ServerPayloadHeader, SignedBlindedBeaconBlock, SignedBuilderBid, SignedBlindedBeaconBlock, SignedBuilderBid, SignedValidatorRegistration,
SignedValidatorRegistration,
}; };
use parking_lot::RwLock; use parking_lot::RwLock;
use sensitive_url::SensitiveUrl; use sensitive_url::SensitiveUrl;
@ -26,7 +30,8 @@ use task_executor::TaskExecutor;
use tempfile::NamedTempFile; use tempfile::NamedTempFile;
use tree_hash::TreeHash; use tree_hash::TreeHash;
use types::{ use types::{
Address, BeaconState, BlindedPayload, ChainSpec, EthSpec, ExecPayload, Hash256, Slot, Uint256, Address, BeaconState, BlindedPayload, ChainSpec, EthSpec, ExecPayload, ForkName, Hash256, Slot,
Uint256,
}; };
#[derive(Clone)] #[derive(Clone)]
@ -38,25 +43,129 @@ pub enum Operation {
PrevRandao(Hash256), PrevRandao(Hash256),
BlockNumber(usize), BlockNumber(usize),
Timestamp(usize), Timestamp(usize),
WithdrawalsRoot(Hash256),
} }
impl Operation { impl Operation {
fn apply(self, bid: &mut BuilderBid) -> Result<(), BlindedBlockProviderError> { fn apply<B: BidStuff>(self, bid: &mut B) -> Result<(), BlindedBlockProviderError> {
match self { match self {
Operation::FeeRecipient(fee_recipient) => { Operation::FeeRecipient(fee_recipient) => {
bid.header.fee_recipient = to_ssz_rs(&fee_recipient)? *bid.fee_recipient_mut() = to_ssz_rs(&fee_recipient)?
} }
Operation::GasLimit(gas_limit) => bid.header.gas_limit = gas_limit as u64, Operation::GasLimit(gas_limit) => *bid.gas_limit_mut() = gas_limit as u64,
Operation::Value(value) => bid.value = to_ssz_rs(&value)?, Operation::Value(value) => *bid.value_mut() = to_ssz_rs(&value)?,
Operation::ParentHash(parent_hash) => bid.header.parent_hash = to_ssz_rs(&parent_hash)?, Operation::ParentHash(parent_hash) => *bid.parent_hash_mut() = to_ssz_rs(&parent_hash)?,
Operation::PrevRandao(prev_randao) => bid.header.prev_randao = to_ssz_rs(&prev_randao)?, Operation::PrevRandao(prev_randao) => *bid.prev_randao_mut() = to_ssz_rs(&prev_randao)?,
Operation::BlockNumber(block_number) => bid.header.block_number = block_number as u64, Operation::BlockNumber(block_number) => *bid.block_number_mut() = block_number as u64,
Operation::Timestamp(timestamp) => bid.header.timestamp = timestamp as u64, Operation::Timestamp(timestamp) => *bid.timestamp_mut() = timestamp as u64,
Operation::WithdrawalsRoot(root) => *bid.withdrawals_root_mut()? = to_ssz_rs(&root)?,
} }
Ok(()) Ok(())
} }
} }
// contains functions we need for BuilderBids.. not sure what to call this
pub trait BidStuff {
fn fee_recipient_mut(&mut self) -> &mut ExecutionAddress;
fn gas_limit_mut(&mut self) -> &mut u64;
fn value_mut(&mut self) -> &mut U256;
fn parent_hash_mut(&mut self) -> &mut Hash32;
fn prev_randao_mut(&mut self) -> &mut Hash32;
fn block_number_mut(&mut self) -> &mut u64;
fn timestamp_mut(&mut self) -> &mut u64;
fn withdrawals_root_mut(&mut self) -> Result<&mut Root, BlindedBlockProviderError>;
fn sign_builder_message(
&mut self,
signing_key: &SecretKey,
context: &Context,
) -> Result<BlsSignature, Error>;
fn to_signed_bid(self, signature: BlsSignature) -> SignedBuilderBid;
}
impl BidStuff for BuilderBid {
fn fee_recipient_mut(&mut self) -> &mut ExecutionAddress {
match self {
Self::Bellatrix(bid) => &mut bid.header.fee_recipient,
Self::Capella(bid) => &mut bid.header.fee_recipient,
}
}
fn gas_limit_mut(&mut self) -> &mut u64 {
match self {
Self::Bellatrix(bid) => &mut bid.header.gas_limit,
Self::Capella(bid) => &mut bid.header.gas_limit,
}
}
fn value_mut(&mut self) -> &mut U256 {
match self {
Self::Bellatrix(bid) => &mut bid.value,
Self::Capella(bid) => &mut bid.value,
}
}
fn parent_hash_mut(&mut self) -> &mut Hash32 {
match self {
Self::Bellatrix(bid) => &mut bid.header.parent_hash,
Self::Capella(bid) => &mut bid.header.parent_hash,
}
}
fn prev_randao_mut(&mut self) -> &mut Hash32 {
match self {
Self::Bellatrix(bid) => &mut bid.header.prev_randao,
Self::Capella(bid) => &mut bid.header.prev_randao,
}
}
fn block_number_mut(&mut self) -> &mut u64 {
match self {
Self::Bellatrix(bid) => &mut bid.header.block_number,
Self::Capella(bid) => &mut bid.header.block_number,
}
}
fn timestamp_mut(&mut self) -> &mut u64 {
match self {
Self::Bellatrix(bid) => &mut bid.header.timestamp,
Self::Capella(bid) => &mut bid.header.timestamp,
}
}
fn withdrawals_root_mut(&mut self) -> Result<&mut Root, BlindedBlockProviderError> {
match self {
Self::Bellatrix(_) => Err(BlindedBlockProviderError::Custom(
"withdrawals_root called on bellatrix bid".to_string(),
)),
Self::Capella(bid) => Ok(&mut bid.header.withdrawals_root),
}
}
fn sign_builder_message(
&mut self,
signing_key: &SecretKey,
context: &Context,
) -> Result<Signature, Error> {
match self {
Self::Bellatrix(message) => sign_builder_message(message, signing_key, context),
Self::Capella(message) => sign_builder_message(message, signing_key, context),
}
}
fn to_signed_bid(self, signature: Signature) -> SignedBuilderBid {
match self {
Self::Bellatrix(message) => {
SignedBuilderBid::Bellatrix(SignedBuilderBidBellatrix { message, signature })
}
Self::Capella(message) => {
SignedBuilderBid::Capella(SignedBuilderBidCapella { message, signature })
}
}
}
}
pub struct TestingBuilder<E: EthSpec> { pub struct TestingBuilder<E: EthSpec> {
server: BlindedBlockProviderServer<MockBuilder<E>>, server: BlindedBlockProviderServer<MockBuilder<E>>,
pub builder: MockBuilder<E>, pub builder: MockBuilder<E>,
@ -111,7 +220,10 @@ impl<E: EthSpec> TestingBuilder<E> {
} }
pub async fn run(&self) { pub async fn run(&self) {
self.server.run().await let server = self.server.serve();
if let Err(err) = server.await {
println!("error while listening for incoming: {err}")
}
} }
} }
@ -162,7 +274,7 @@ impl<E: EthSpec> MockBuilder<E> {
*self.invalidate_signatures.write() = false; *self.invalidate_signatures.write() = false;
} }
fn apply_operations(&self, bid: &mut BuilderBid) -> Result<(), BlindedBlockProviderError> { fn apply_operations<B: BidStuff>(&self, bid: &mut B) -> Result<(), BlindedBlockProviderError> {
let mut guard = self.operations.write(); let mut guard = self.operations.write();
while let Some(op) = guard.pop() { while let Some(op) = guard.pop() {
op.apply(bid)?; op.apply(bid)?;
@ -172,7 +284,7 @@ impl<E: EthSpec> MockBuilder<E> {
} }
#[async_trait] #[async_trait]
impl<E: EthSpec> mev_build_rs::BlindedBlockProvider for MockBuilder<E> { impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> {
async fn register_validators( async fn register_validators(
&self, &self,
registrations: &mut [SignedValidatorRegistration], registrations: &mut [SignedValidatorRegistration],
@ -200,6 +312,7 @@ impl<E: EthSpec> mev_build_rs::BlindedBlockProvider for MockBuilder<E> {
bid_request: &BidRequest, bid_request: &BidRequest,
) -> Result<SignedBuilderBid, BlindedBlockProviderError> { ) -> Result<SignedBuilderBid, BlindedBlockProviderError> {
let slot = Slot::new(bid_request.slot); let slot = Slot::new(bid_request.slot);
let fork = self.spec.fork_name_at_slot::<E>(slot);
let signed_cached_data = self let signed_cached_data = self
.val_registration_cache .val_registration_cache
.read() .read()
@ -215,9 +328,13 @@ impl<E: EthSpec> mev_build_rs::BlindedBlockProvider for MockBuilder<E> {
.map_err(convert_err)? .map_err(convert_err)?
.ok_or_else(|| convert_err("missing head block"))?; .ok_or_else(|| convert_err("missing head block"))?;
let block = head.data.message_merge().map_err(convert_err)?; let block = head.data.message();
let head_block_root = block.tree_hash_root(); let head_block_root = block.tree_hash_root();
let head_execution_hash = block.body.execution_payload.execution_payload.block_hash; let head_execution_hash = block
.body()
.execution_payload()
.map_err(convert_err)?
.block_hash();
if head_execution_hash != from_ssz_rs(&bid_request.parent_hash)? { if head_execution_hash != from_ssz_rs(&bid_request.parent_hash)? {
return Err(BlindedBlockProviderError::Custom(format!( return Err(BlindedBlockProviderError::Custom(format!(
"head mismatch: {} {}", "head mismatch: {} {}",
@ -232,12 +349,11 @@ impl<E: EthSpec> mev_build_rs::BlindedBlockProvider for MockBuilder<E> {
.map_err(convert_err)? .map_err(convert_err)?
.ok_or_else(|| convert_err("missing finalized block"))? .ok_or_else(|| convert_err("missing finalized block"))?
.data .data
.message_merge() .message()
.body()
.execution_payload()
.map_err(convert_err)? .map_err(convert_err)?
.body .block_hash();
.execution_payload
.execution_payload
.block_hash;
let justified_execution_hash = self let justified_execution_hash = self
.beacon_client .beacon_client
@ -246,12 +362,11 @@ impl<E: EthSpec> mev_build_rs::BlindedBlockProvider for MockBuilder<E> {
.map_err(convert_err)? .map_err(convert_err)?
.ok_or_else(|| convert_err("missing finalized block"))? .ok_or_else(|| convert_err("missing finalized block"))?
.data .data
.message_merge() .message()
.body()
.execution_payload()
.map_err(convert_err)? .map_err(convert_err)?
.body .block_hash();
.execution_payload
.execution_payload
.block_hash;
let val_index = self let val_index = self
.beacon_client .beacon_client
@ -287,14 +402,22 @@ impl<E: EthSpec> mev_build_rs::BlindedBlockProvider for MockBuilder<E> {
.get_randao_mix(head_state.current_epoch()) .get_randao_mix(head_state.current_epoch())
.map_err(convert_err)?; .map_err(convert_err)?;
let payload_attributes = PayloadAttributes { let payload_attributes = match fork {
timestamp, ForkName::Merge => PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, None),
prev_randao: *prev_randao, // the withdrawals root is filled in by operations
suggested_fee_recipient: fee_recipient, ForkName::Capella | ForkName::Eip4844 => {
PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, Some(vec![]))
}
ForkName::Base | ForkName::Altair => {
return Err(BlindedBlockProviderError::Custom(format!(
"Unsupported fork: {}",
fork
)));
}
}; };
self.el self.el
.insert_proposer(slot, head_block_root, val_index, payload_attributes) .insert_proposer(slot, head_block_root, val_index, payload_attributes.clone())
.await; .await;
let forkchoice_update_params = ForkchoiceUpdateParameters { let forkchoice_update_params = ForkchoiceUpdateParameters {
@ -308,54 +431,64 @@ impl<E: EthSpec> mev_build_rs::BlindedBlockProvider for MockBuilder<E> {
.el .el
.get_full_payload_caching::<BlindedPayload<E>>( .get_full_payload_caching::<BlindedPayload<E>>(
head_execution_hash, head_execution_hash,
timestamp, &payload_attributes,
*prev_randao,
fee_recipient,
forkchoice_update_params, forkchoice_update_params,
fork,
) )
.await .await
.map_err(convert_err)? .map_err(convert_err)?
.to_payload()
.to_execution_payload_header(); .to_execution_payload_header();
let json_payload = serde_json::to_string(&payload).map_err(convert_err)?; let json_payload = serde_json::to_string(&payload).map_err(convert_err)?;
let mut header: ServerPayloadHeader = let mut message = match fork {
serde_json::from_str(json_payload.as_str()).map_err(convert_err)?; ForkName::Capella => BuilderBid::Capella(BuilderBidCapella {
header: serde_json::from_str(json_payload.as_str()).map_err(convert_err)?,
header.gas_limit = cached_data.gas_limit; value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?,
public_key: self.builder_sk.public_key(),
let mut message = BuilderBid { }),
header, ForkName::Merge => BuilderBid::Bellatrix(BuilderBidBellatrix {
value: ssz_rs::U256::default(), header: serde_json::from_str(json_payload.as_str()).map_err(convert_err)?,
public_key: self.builder_sk.public_key(), value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?,
public_key: self.builder_sk.public_key(),
}),
ForkName::Base | ForkName::Altair | ForkName::Eip4844 => {
return Err(BlindedBlockProviderError::Custom(format!(
"Unsupported fork: {}",
fork
)))
}
}; };
*message.gas_limit_mut() = cached_data.gas_limit;
self.apply_operations(&mut message)?; self.apply_operations(&mut message)?;
let mut signature = let mut signature =
sign_builder_message(&mut message, &self.builder_sk, self.context.as_ref())?; message.sign_builder_message(&self.builder_sk, self.context.as_ref())?;
if *self.invalidate_signatures.read() { if *self.invalidate_signatures.read() {
signature = Signature::default(); signature = Signature::default();
} }
let signed_bid = SignedBuilderBid { message, signature }; Ok(message.to_signed_bid(signature))
Ok(signed_bid)
} }
async fn open_bid( async fn open_bid(
&self, &self,
signed_block: &mut SignedBlindedBeaconBlock, signed_block: &mut SignedBlindedBeaconBlock,
) -> Result<ServerPayload, BlindedBlockProviderError> { ) -> Result<ServerPayload, BlindedBlockProviderError> {
let node = match signed_block {
SignedBlindedBeaconBlock::Bellatrix(block) => {
block.message.body.execution_payload_header.hash_tree_root()
}
SignedBlindedBeaconBlock::Capella(block) => {
block.message.body.execution_payload_header.hash_tree_root()
}
}
.map_err(convert_err)?;
let payload = self let payload = self
.el .el
.get_payload_by_root(&from_ssz_rs( .get_payload_by_root(&from_ssz_rs(&node)?)
&signed_block
.message
.body
.execution_payload_header
.hash_tree_root()
.map_err(convert_err)?,
)?)
.ok_or_else(|| convert_err("missing payload for tx root"))?; .ok_or_else(|| convert_err("missing payload for tx root"))?;
let json_payload = serde_json::to_string(&payload).map_err(convert_err)?; let json_payload = serde_json::to_string(&payload).map_err(convert_err)?;

View File

@ -9,7 +9,7 @@ use sensitive_url::SensitiveUrl;
use task_executor::TaskExecutor; use task_executor::TaskExecutor;
use tempfile::NamedTempFile; use tempfile::NamedTempFile;
use tree_hash::TreeHash; use tree_hash::TreeHash;
use types::{Address, ChainSpec, Epoch, EthSpec, FullPayload, Hash256, Uint256}; use types::{Address, ChainSpec, Epoch, EthSpec, FullPayload, Hash256, MainnetEthSpec};
pub struct MockExecutionLayer<T: EthSpec> { pub struct MockExecutionLayer<T: EthSpec> {
pub server: MockServer<T>, pub server: MockServer<T>,
@ -20,40 +20,44 @@ pub struct MockExecutionLayer<T: EthSpec> {
impl<T: EthSpec> MockExecutionLayer<T> { impl<T: EthSpec> MockExecutionLayer<T> {
pub fn default_params(executor: TaskExecutor) -> Self { pub fn default_params(executor: TaskExecutor) -> Self {
let mut spec = MainnetEthSpec::default_spec();
spec.terminal_total_difficulty = DEFAULT_TERMINAL_DIFFICULTY.into();
spec.terminal_block_hash = ExecutionBlockHash::zero();
spec.terminal_block_hash_activation_epoch = Epoch::new(0);
Self::new( Self::new(
executor, executor,
DEFAULT_TERMINAL_DIFFICULTY.into(),
DEFAULT_TERMINAL_BLOCK, DEFAULT_TERMINAL_BLOCK,
ExecutionBlockHash::zero(), None,
Epoch::new(0), None,
None,
Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()),
spec,
None, None,
) )
} }
#[allow(clippy::too_many_arguments)]
pub fn new( pub fn new(
executor: TaskExecutor, executor: TaskExecutor,
terminal_total_difficulty: Uint256,
terminal_block: u64, terminal_block: u64,
terminal_block_hash: ExecutionBlockHash, shanghai_time: Option<u64>,
terminal_block_hash_activation_epoch: Epoch, eip4844_time: Option<u64>,
builder_threshold: Option<u128>,
jwt_key: Option<JwtKey>, jwt_key: Option<JwtKey>,
spec: ChainSpec,
builder_url: Option<SensitiveUrl>, builder_url: Option<SensitiveUrl>,
) -> Self { ) -> Self {
let handle = executor.handle().unwrap(); let handle = executor.handle().unwrap();
let mut spec = T::default_spec();
spec.terminal_total_difficulty = terminal_total_difficulty;
spec.terminal_block_hash = terminal_block_hash;
spec.terminal_block_hash_activation_epoch = terminal_block_hash_activation_epoch;
let jwt_key = jwt_key.unwrap_or_else(JwtKey::random); let jwt_key = jwt_key.unwrap_or_else(JwtKey::random);
let server = MockServer::new( let server = MockServer::new(
&handle, &handle,
jwt_key, jwt_key,
terminal_total_difficulty, spec.terminal_total_difficulty,
terminal_block, terminal_block,
terminal_block_hash, spec.terminal_block_hash,
shanghai_time,
eip4844_time,
); );
let url = SensitiveUrl::parse(&server.url()).unwrap(); let url = SensitiveUrl::parse(&server.url()).unwrap();
@ -67,7 +71,7 @@ impl<T: EthSpec> MockExecutionLayer<T> {
builder_url, builder_url,
secret_files: vec![path], secret_files: vec![path],
suggested_fee_recipient: Some(Address::repeat_byte(42)), suggested_fee_recipient: Some(Address::repeat_byte(42)),
builder_profit_threshold: DEFAULT_BUILDER_THRESHOLD_WEI, builder_profit_threshold: builder_threshold.unwrap_or(DEFAULT_BUILDER_THRESHOLD_WEI),
..Default::default() ..Default::default()
}; };
let el = let el =
@ -98,21 +102,19 @@ impl<T: EthSpec> MockExecutionLayer<T> {
justified_hash: None, justified_hash: None,
finalized_hash: None, finalized_hash: None,
}; };
let payload_attributes = PayloadAttributes::new(
timestamp,
prev_randao,
Address::repeat_byte(42),
// FIXME: think about how to handle different forks / withdrawals here..
None,
);
// Insert a proposer to ensure the fork choice updated command works. // Insert a proposer to ensure the fork choice updated command works.
let slot = Slot::new(0); let slot = Slot::new(0);
let validator_index = 0; let validator_index = 0;
self.el self.el
.insert_proposer( .insert_proposer(slot, head_block_root, validator_index, payload_attributes)
slot,
head_block_root,
validator_index,
PayloadAttributes {
timestamp,
prev_randao,
suggested_fee_recipient: Address::repeat_byte(42),
},
)
.await; .await;
self.el self.el
@ -132,25 +134,30 @@ impl<T: EthSpec> MockExecutionLayer<T> {
slot, slot,
chain_health: ChainHealth::Healthy, chain_health: ChainHealth::Healthy,
}; };
let payload = self let suggested_fee_recipient = self.el.get_suggested_fee_recipient(validator_index).await;
let payload_attributes =
PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None);
let payload: ExecutionPayload<T> = self
.el .el
.get_payload::<FullPayload<T>>( .get_payload::<FullPayload<T>>(
parent_hash, parent_hash,
timestamp, &payload_attributes,
prev_randao,
validator_index,
forkchoice_update_params, forkchoice_update_params,
builder_params, builder_params,
// FIXME: do we need to consider other forks somehow? What about withdrawals?
ForkName::Merge,
&self.spec, &self.spec,
) )
.await .await
.unwrap() .unwrap()
.execution_payload; .to_payload()
let block_hash = payload.block_hash; .into();
assert_eq!(payload.parent_hash, parent_hash);
assert_eq!(payload.block_number, block_number); let block_hash = payload.block_hash();
assert_eq!(payload.timestamp, timestamp); assert_eq!(payload.parent_hash(), parent_hash);
assert_eq!(payload.prev_randao, prev_randao); assert_eq!(payload.block_number(), block_number);
assert_eq!(payload.timestamp(), timestamp);
assert_eq!(payload.prev_randao(), prev_randao);
// Ensure the payload cache is empty. // Ensure the payload cache is empty.
assert!(self assert!(self
@ -162,25 +169,29 @@ impl<T: EthSpec> MockExecutionLayer<T> {
slot, slot,
chain_health: ChainHealth::Healthy, chain_health: ChainHealth::Healthy,
}; };
let suggested_fee_recipient = self.el.get_suggested_fee_recipient(validator_index).await;
let payload_attributes =
PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None);
let payload_header = self let payload_header = self
.el .el
.get_payload::<BlindedPayload<T>>( .get_payload::<BlindedPayload<T>>(
parent_hash, parent_hash,
timestamp, &payload_attributes,
prev_randao,
validator_index,
forkchoice_update_params, forkchoice_update_params,
builder_params, builder_params,
// FIXME: do we need to consider other forks somehow? What about withdrawals?
ForkName::Merge,
&self.spec, &self.spec,
) )
.await .await
.unwrap() .unwrap()
.execution_payload_header; .to_payload();
assert_eq!(payload_header.block_hash, block_hash);
assert_eq!(payload_header.parent_hash, parent_hash); assert_eq!(payload_header.block_hash(), block_hash);
assert_eq!(payload_header.block_number, block_number); assert_eq!(payload_header.parent_hash(), parent_hash);
assert_eq!(payload_header.timestamp, timestamp); assert_eq!(payload_header.block_number(), block_number);
assert_eq!(payload_header.prev_randao, prev_randao); assert_eq!(payload_header.timestamp(), timestamp);
assert_eq!(payload_header.prev_randao(), prev_randao);
// Ensure the payload cache has the correct payload. // Ensure the payload cache has the correct payload.
assert_eq!( assert_eq!(

View File

@ -22,6 +22,7 @@ use tokio::{runtime, sync::oneshot};
use types::{EthSpec, ExecutionBlockHash, Uint256}; use types::{EthSpec, ExecutionBlockHash, Uint256};
use warp::{http::StatusCode, Filter, Rejection}; use warp::{http::StatusCode, Filter, Rejection};
use crate::EngineCapabilities;
pub use execution_block_generator::{generate_pow_block, Block, ExecutionBlockGenerator}; pub use execution_block_generator::{generate_pow_block, Block, ExecutionBlockGenerator};
pub use hook::Hook; pub use hook::Hook;
pub use mock_builder::{Context as MockBuilderContext, MockBuilder, Operation, TestingBuilder}; pub use mock_builder::{Context as MockBuilderContext, MockBuilder, Operation, TestingBuilder};
@ -31,6 +32,17 @@ pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400;
pub const DEFAULT_TERMINAL_BLOCK: u64 = 64; pub const DEFAULT_TERMINAL_BLOCK: u64 = 64;
pub const DEFAULT_JWT_SECRET: [u8; 32] = [42; 32]; pub const DEFAULT_JWT_SECRET: [u8; 32] = [42; 32];
pub const DEFAULT_BUILDER_THRESHOLD_WEI: u128 = 1_000_000_000_000_000_000; pub const DEFAULT_BUILDER_THRESHOLD_WEI: u128 = 1_000_000_000_000_000_000;
pub const DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI: u128 = 10_000_000_000_000_000;
pub const DEFAULT_BUILDER_PAYLOAD_VALUE_WEI: u128 = 20_000_000_000_000_000;
pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities {
new_payload_v1: true,
new_payload_v2: true,
forkchoice_updated_v1: true,
forkchoice_updated_v2: true,
get_payload_v1: true,
get_payload_v2: true,
exchange_transition_configuration_v1: true,
};
mod execution_block_generator; mod execution_block_generator;
mod handle_rpc; mod handle_rpc;
@ -45,6 +57,8 @@ pub struct MockExecutionConfig {
pub terminal_difficulty: Uint256, pub terminal_difficulty: Uint256,
pub terminal_block: u64, pub terminal_block: u64,
pub terminal_block_hash: ExecutionBlockHash, pub terminal_block_hash: ExecutionBlockHash,
pub shanghai_time: Option<u64>,
pub eip4844_time: Option<u64>,
} }
impl Default for MockExecutionConfig { impl Default for MockExecutionConfig {
@ -55,6 +69,8 @@ impl Default for MockExecutionConfig {
terminal_block: DEFAULT_TERMINAL_BLOCK, terminal_block: DEFAULT_TERMINAL_BLOCK,
terminal_block_hash: ExecutionBlockHash::zero(), terminal_block_hash: ExecutionBlockHash::zero(),
server_config: Config::default(), server_config: Config::default(),
shanghai_time: None,
eip4844_time: None,
} }
} }
} }
@ -74,6 +90,8 @@ impl<T: EthSpec> MockServer<T> {
DEFAULT_TERMINAL_DIFFICULTY.into(), DEFAULT_TERMINAL_DIFFICULTY.into(),
DEFAULT_TERMINAL_BLOCK, DEFAULT_TERMINAL_BLOCK,
ExecutionBlockHash::zero(), ExecutionBlockHash::zero(),
None, // FIXME(capella): should this be the default?
None, // FIXME(eip4844): should this be the default?
) )
} }
@ -84,11 +102,18 @@ impl<T: EthSpec> MockServer<T> {
terminal_block, terminal_block,
terminal_block_hash, terminal_block_hash,
server_config, server_config,
shanghai_time,
eip4844_time,
} = config; } = config;
let last_echo_request = Arc::new(RwLock::new(None)); let last_echo_request = Arc::new(RwLock::new(None));
let preloaded_responses = Arc::new(Mutex::new(vec![])); let preloaded_responses = Arc::new(Mutex::new(vec![]));
let execution_block_generator = let execution_block_generator = ExecutionBlockGenerator::new(
ExecutionBlockGenerator::new(terminal_difficulty, terminal_block, terminal_block_hash); terminal_difficulty,
terminal_block,
terminal_block_hash,
shanghai_time,
eip4844_time,
);
let ctx: Arc<Context<T>> = Arc::new(Context { let ctx: Arc<Context<T>> = Arc::new(Context {
config: server_config, config: server_config,
@ -104,6 +129,7 @@ impl<T: EthSpec> MockServer<T> {
hook: <_>::default(), hook: <_>::default(),
new_payload_statuses: <_>::default(), new_payload_statuses: <_>::default(),
fcu_payload_statuses: <_>::default(), fcu_payload_statuses: <_>::default(),
engine_capabilities: Arc::new(RwLock::new(DEFAULT_ENGINE_CAPABILITIES)),
_phantom: PhantomData, _phantom: PhantomData,
}); });
@ -134,12 +160,18 @@ impl<T: EthSpec> MockServer<T> {
} }
} }
pub fn set_engine_capabilities(&self, engine_capabilities: EngineCapabilities) {
*self.ctx.engine_capabilities.write() = engine_capabilities;
}
pub fn new( pub fn new(
handle: &runtime::Handle, handle: &runtime::Handle,
jwt_key: JwtKey, jwt_key: JwtKey,
terminal_difficulty: Uint256, terminal_difficulty: Uint256,
terminal_block: u64, terminal_block: u64,
terminal_block_hash: ExecutionBlockHash, terminal_block_hash: ExecutionBlockHash,
shanghai_time: Option<u64>,
eip4844_time: Option<u64>,
) -> Self { ) -> Self {
Self::new_with_config( Self::new_with_config(
handle, handle,
@ -149,6 +181,8 @@ impl<T: EthSpec> MockServer<T> {
terminal_difficulty, terminal_difficulty,
terminal_block, terminal_block,
terminal_block_hash, terminal_block_hash,
shanghai_time,
eip4844_time,
}, },
) )
} }
@ -452,6 +486,7 @@ pub struct Context<T: EthSpec> {
pub new_payload_statuses: Arc<Mutex<HashMap<ExecutionBlockHash, PayloadStatusV1>>>, pub new_payload_statuses: Arc<Mutex<HashMap<ExecutionBlockHash, PayloadStatusV1>>>,
pub fcu_payload_statuses: Arc<Mutex<HashMap<ExecutionBlockHash, PayloadStatusV1>>>, pub fcu_payload_statuses: Arc<Mutex<HashMap<ExecutionBlockHash, PayloadStatusV1>>>,
pub engine_capabilities: Arc<RwLock<EngineCapabilities>>,
pub _phantom: PhantomData<T>, pub _phantom: PhantomData<T>,
} }
@ -603,11 +638,11 @@ pub fn serve<T: EthSpec>(
"jsonrpc": JSONRPC_VERSION, "jsonrpc": JSONRPC_VERSION,
"result": result "result": result
}), }),
Err(message) => json!({ Err((message, code)) => json!({
"id": id, "id": id,
"jsonrpc": JSONRPC_VERSION, "jsonrpc": JSONRPC_VERSION,
"error": { "error": {
"code": -1234, // Junk error code. "code": code,
"message": message "message": message
} }
}), }),

View File

@ -10,6 +10,20 @@ use types::{
pub const DEFAULT_ETH1_BLOCK_HASH: &[u8] = &[0x42; 32]; pub const DEFAULT_ETH1_BLOCK_HASH: &[u8] = &[0x42; 32];
pub fn bls_withdrawal_credentials(pubkey: &PublicKey, spec: &ChainSpec) -> Hash256 {
let mut credentials = hash(&pubkey.as_ssz_bytes());
credentials[0] = spec.bls_withdrawal_prefix_byte;
Hash256::from_slice(&credentials)
}
fn eth1_withdrawal_credentials(pubkey: &PublicKey, spec: &ChainSpec) -> Hash256 {
let fake_execution_address = &hash(&pubkey.as_ssz_bytes())[0..20];
let mut credentials = [0u8; 32];
credentials[0] = spec.eth1_address_withdrawal_prefix_byte;
credentials[12..].copy_from_slice(fake_execution_address);
Hash256::from_slice(&credentials)
}
/// Builds a genesis state as defined by the Eth2 interop procedure (see below). /// Builds a genesis state as defined by the Eth2 interop procedure (see below).
/// ///
/// Reference: /// Reference:
@ -21,20 +35,75 @@ pub fn interop_genesis_state<T: EthSpec>(
execution_payload_header: Option<ExecutionPayloadHeader<T>>, execution_payload_header: Option<ExecutionPayloadHeader<T>>,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<BeaconState<T>, String> { ) -> Result<BeaconState<T>, String> {
let withdrawal_credentials = keypairs
.iter()
.map(|keypair| bls_withdrawal_credentials(&keypair.pk, spec))
.collect::<Vec<_>>();
interop_genesis_state_with_withdrawal_credentials::<T>(
keypairs,
&withdrawal_credentials,
genesis_time,
eth1_block_hash,
execution_payload_header,
spec,
)
}
// returns an interop genesis state except every other
// validator has eth1 withdrawal credentials
pub fn interop_genesis_state_with_eth1<T: EthSpec>(
keypairs: &[Keypair],
genesis_time: u64,
eth1_block_hash: Hash256,
execution_payload_header: Option<ExecutionPayloadHeader<T>>,
spec: &ChainSpec,
) -> Result<BeaconState<T>, String> {
let withdrawal_credentials = keypairs
.iter()
.enumerate()
.map(|(index, keypair)| {
if index % 2 == 0 {
bls_withdrawal_credentials(&keypair.pk, spec)
} else {
eth1_withdrawal_credentials(&keypair.pk, spec)
}
})
.collect::<Vec<_>>();
interop_genesis_state_with_withdrawal_credentials::<T>(
keypairs,
&withdrawal_credentials,
genesis_time,
eth1_block_hash,
execution_payload_header,
spec,
)
}
pub fn interop_genesis_state_with_withdrawal_credentials<T: EthSpec>(
keypairs: &[Keypair],
withdrawal_credentials: &[Hash256],
genesis_time: u64,
eth1_block_hash: Hash256,
execution_payload_header: Option<ExecutionPayloadHeader<T>>,
spec: &ChainSpec,
) -> Result<BeaconState<T>, String> {
if keypairs.len() != withdrawal_credentials.len() {
return Err(format!(
"wrong number of withdrawal credentials, expected: {}, got: {}",
keypairs.len(),
withdrawal_credentials.len()
));
}
let eth1_timestamp = 2_u64.pow(40); let eth1_timestamp = 2_u64.pow(40);
let amount = spec.max_effective_balance; let amount = spec.max_effective_balance;
let withdrawal_credentials = |pubkey: &PublicKey| {
let mut credentials = hash(&pubkey.as_ssz_bytes());
credentials[0] = spec.bls_withdrawal_prefix_byte;
Hash256::from_slice(&credentials)
};
let datas = keypairs let datas = keypairs
.into_par_iter() .into_par_iter()
.map(|keypair| { .zip(withdrawal_credentials.into_par_iter())
.map(|(keypair, &withdrawal_credentials)| {
let mut data = DepositData { let mut data = DepositData {
withdrawal_credentials: withdrawal_credentials(&keypair.pk), withdrawal_credentials,
pubkey: keypair.pk.clone().into(), pubkey: keypair.pk.clone().into(),
amount, amount,
signature: Signature::empty().into(), signature: Signature::empty().into(),
@ -133,4 +202,83 @@ mod test {
"validator count should be correct" "validator count should be correct"
); );
} }
#[test]
fn interop_state_with_eth1() {
let validator_count = 16;
let genesis_time = 42;
let spec = &TestEthSpec::default_spec();
let keypairs = generate_deterministic_keypairs(validator_count);
let state = interop_genesis_state_with_eth1::<TestEthSpec>(
&keypairs,
genesis_time,
Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH),
None,
spec,
)
.expect("should build state");
assert_eq!(
state.eth1_data().block_hash,
Hash256::from_slice(&[0x42; 32]),
"eth1 block hash should be co-ordinated junk"
);
assert_eq!(
state.genesis_time(),
genesis_time,
"genesis time should be as specified"
);
for b in state.balances() {
assert_eq!(
*b, spec.max_effective_balance,
"validator balances should be max effective balance"
);
}
for (index, v) in state.validators().iter().enumerate() {
let creds = v.withdrawal_credentials.as_bytes();
if index % 2 == 0 {
assert_eq!(
creds[0], spec.bls_withdrawal_prefix_byte,
"first byte of withdrawal creds should be bls prefix"
);
assert_eq!(
&creds[1..],
&hash(&v.pubkey.as_ssz_bytes())[1..],
"rest of withdrawal creds should be pubkey hash"
);
} else {
assert_eq!(
creds[0], spec.eth1_address_withdrawal_prefix_byte,
"first byte of withdrawal creds should be eth1 prefix"
);
assert_eq!(
creds[1..12],
[0u8; 11],
"bytes [1:12] of withdrawal creds must be zero"
);
assert_eq!(
&creds[12..],
&hash(&v.pubkey.as_ssz_bytes())[0..20],
"rest of withdrawal creds should be first 20 bytes of pubkey hash"
)
}
}
assert_eq!(
state.balances().len(),
validator_count,
"validator balances len should be correct"
);
assert_eq!(
state.validators().len(),
validator_count,
"validator count should be correct"
);
}
} }

View File

@ -5,5 +5,8 @@ mod interop;
pub use eth1::Config as Eth1Config; pub use eth1::Config as Eth1Config;
pub use eth1::Eth1Endpoint; pub use eth1::Eth1Endpoint;
pub use eth1_genesis_service::{Eth1GenesisService, Statistics}; pub use eth1_genesis_service::{Eth1GenesisService, Statistics};
pub use interop::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; pub use interop::{
bls_withdrawal_credentials, interop_genesis_state, interop_genesis_state_with_eth1,
interop_genesis_state_with_withdrawal_credentials, DEFAULT_ETH1_BLOCK_HASH,
};
pub use types::test_utils::generate_deterministic_keypairs; pub use types::test_utils::generate_deterministic_keypairs;

View File

@ -37,6 +37,7 @@ sysinfo = "0.26.5"
system_health = { path = "../../common/system_health" } system_health = { path = "../../common/system_health" }
directory = { path = "../../common/directory" } directory = { path = "../../common/directory" }
eth2_serde_utils = "0.1.1" eth2_serde_utils = "0.1.1"
operation_pool = { path = "../operation_pool" }
[dev-dependencies] [dev-dependencies]
store = { path = "../store" } store = { path = "../store" }
@ -46,6 +47,7 @@ logging = { path = "../../common/logging" }
serde_json = "1.0.58" serde_json = "1.0.58"
proto_array = { path = "../../consensus/proto_array" } proto_array = { path = "../../consensus/proto_array" }
unused_port = {path = "../../common/unused_port"} unused_port = {path = "../../common/unused_port"}
genesis = { path = "../genesis" }
[[test]] [[test]]
name = "bn_http_api_tests" name = "bn_http_api_tests"

View File

@ -4,7 +4,7 @@ use lru::LruCache;
use slog::{debug, warn, Logger}; use slog::{debug, warn, Logger};
use state_processing::BlockReplayer; use state_processing::BlockReplayer;
use std::sync::Arc; use std::sync::Arc;
use types::BlindedBeaconBlock; use types::beacon_block::BlindedBeaconBlock;
use warp_utils::reject::{ use warp_utils::reject::{
beacon_chain_error, beacon_state_error, custom_bad_request, custom_server_error, beacon_chain_error, beacon_state_error, custom_bad_request, custom_server_error,
}; };

View File

@ -36,6 +36,7 @@ use eth2::types::{
use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage};
use lighthouse_version::version_with_platform; use lighthouse_version::version_with_platform;
use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage}; use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage};
use operation_pool::ReceivedPreCapella;
use parking_lot::RwLock; use parking_lot::RwLock;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use slog::{crit, debug, error, info, warn, Logger}; use slog::{crit, debug, error, info, warn, Logger};
@ -56,9 +57,9 @@ use types::{
Attestation, AttestationData, AttesterSlashing, BeaconStateError, BlindedPayload, Attestation, AttestationData, AttesterSlashing, BeaconStateError, BlindedPayload,
CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload,
ProposerPreparationData, ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, ProposerPreparationData, ProposerSlashing, RelativeEpoch, SignedAggregateAndProof,
SignedBeaconBlock, SignedBlindedBeaconBlock, SignedContributionAndProof, SignedBeaconBlock, SignedBlindedBeaconBlock, SignedBlsToExecutionChange,
SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncCommitteeMessage, SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot,
SyncContributionData, SyncCommitteeMessage, SyncContributionData,
}; };
use version::{ use version::{
add_consensus_version_header, execution_optimistic_fork_versioned_response, add_consensus_version_header, execution_optimistic_fork_versioned_response,
@ -1122,7 +1123,9 @@ pub fn serve<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>, chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>, network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| async move { log: Logger| async move {
publish_blocks::publish_block(None, block, chain, &network_tx, log) // need to have cached the blob sidecar somewhere in the beacon chain
// to publish
publish_blocks::publish_block(None, block, None, chain, &network_tx, log)
.await .await
.map(|()| warp::reply()) .map(|()| warp::reply())
}, },
@ -1654,6 +1657,109 @@ pub fn serve<T: BeaconChainTypes>(
}, },
); );
// GET beacon/pool/bls_to_execution_changes
let get_beacon_pool_bls_to_execution_changes = beacon_pool_path
.clone()
.and(warp::path("bls_to_execution_changes"))
.and(warp::path::end())
.and_then(|chain: Arc<BeaconChain<T>>| {
blocking_json_task(move || {
let address_changes = chain.op_pool.get_all_bls_to_execution_changes();
Ok(api_types::GenericResponse::from(address_changes))
})
});
// POST beacon/pool/bls_to_execution_changes
let post_beacon_pool_bls_to_execution_changes = beacon_pool_path
.clone()
.and(warp::path("bls_to_execution_changes"))
.and(warp::path::end())
.and(warp::body::json())
.and(network_tx_filter.clone())
.and(log_filter.clone())
.and_then(
|chain: Arc<BeaconChain<T>>,
address_changes: Vec<SignedBlsToExecutionChange>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
blocking_json_task(move || {
let mut failures = vec![];
for (index, address_change) in address_changes.into_iter().enumerate() {
let validator_index = address_change.message.validator_index;
match chain.verify_bls_to_execution_change_for_http_api(address_change) {
Ok(ObservationOutcome::New(verified_address_change)) => {
let validator_index =
verified_address_change.as_inner().message.validator_index;
let address = verified_address_change
.as_inner()
.message
.to_execution_address;
// New to P2P *and* op pool, gossip immediately if post-Capella.
let received_pre_capella = if chain.current_slot_is_post_capella().unwrap_or(false) {
ReceivedPreCapella::No
} else {
ReceivedPreCapella::Yes
};
if matches!(received_pre_capella, ReceivedPreCapella::No) {
publish_pubsub_message(
&network_tx,
PubsubMessage::BlsToExecutionChange(Box::new(
verified_address_change.as_inner().clone(),
)),
)?;
}
// Import to op pool (may return `false` if there's a race).
let imported =
chain.import_bls_to_execution_change(verified_address_change, received_pre_capella);
info!(
log,
"Processed BLS to execution change";
"validator_index" => validator_index,
"address" => ?address,
"published" => matches!(received_pre_capella, ReceivedPreCapella::No),
"imported" => imported,
);
}
Ok(ObservationOutcome::AlreadyKnown) => {
debug!(
log,
"BLS to execution change already known";
"validator_index" => validator_index,
);
}
Err(e) => {
warn!(
log,
"Invalid BLS to execution change";
"validator_index" => validator_index,
"reason" => ?e,
"source" => "HTTP",
);
failures.push(api_types::Failure::new(
index,
format!("invalid: {e:?}"),
));
}
}
}
if failures.is_empty() {
Ok(())
} else {
Err(warp_utils::reject::indexed_bad_request(
"some BLS to execution changes failed to verify".into(),
failures,
))
}
})
},
);
// GET beacon/deposit_snapshot // GET beacon/deposit_snapshot
let get_beacon_deposit_snapshot = eth_v1 let get_beacon_deposit_snapshot = eth_v1
.and(warp::path("beacon")) .and(warp::path("beacon"))
@ -3470,6 +3576,7 @@ pub fn serve<T: BeaconChainTypes>(
.or(get_beacon_pool_attester_slashings.boxed()) .or(get_beacon_pool_attester_slashings.boxed())
.or(get_beacon_pool_proposer_slashings.boxed()) .or(get_beacon_pool_proposer_slashings.boxed())
.or(get_beacon_pool_voluntary_exits.boxed()) .or(get_beacon_pool_voluntary_exits.boxed())
.or(get_beacon_pool_bls_to_execution_changes.boxed())
.or(get_beacon_deposit_snapshot.boxed()) .or(get_beacon_deposit_snapshot.boxed())
.or(get_beacon_rewards_blocks.boxed()) .or(get_beacon_rewards_blocks.boxed())
.or(get_config_fork_schedule.boxed()) .or(get_config_fork_schedule.boxed())
@ -3523,6 +3630,7 @@ pub fn serve<T: BeaconChainTypes>(
.or(post_beacon_pool_proposer_slashings.boxed()) .or(post_beacon_pool_proposer_slashings.boxed())
.or(post_beacon_pool_voluntary_exits.boxed()) .or(post_beacon_pool_voluntary_exits.boxed())
.or(post_beacon_pool_sync_committees.boxed()) .or(post_beacon_pool_sync_committees.boxed())
.or(post_beacon_pool_bls_to_execution_changes.boxed())
.or(post_beacon_rewards_attestations.boxed()) .or(post_beacon_rewards_attestations.boxed())
.or(post_beacon_rewards_sync_committee.boxed()) .or(post_beacon_rewards_sync_committee.boxed())
.or(post_validator_duties_attester.boxed()) .or(post_validator_duties_attester.boxed())

View File

@ -41,4 +41,16 @@ lazy_static::lazy_static! {
"http_api_block_published_very_late_total", "http_api_block_published_very_late_total",
"The count of times a block was published beyond the attestation deadline" "The count of times a block was published beyond the attestation deadline"
); );
pub static ref HTTP_API_BLOB_BROADCAST_DELAY_TIMES: Result<Histogram> = try_create_histogram(
"http_api_blob_broadcast_delay_times",
"Time between start of the slot and when the blob was broadcast"
);
pub static ref HTTP_API_BLOB_PUBLISHED_LATE_TOTAL: Result<IntCounter> = try_create_int_counter(
"http_api_blob_published_late_total",
"The count of times a blob was published beyond more than half way to the attestation deadline"
);
pub static ref HTTP_API_BLOB_PUBLISHED_VERY_LATE_TOTAL: Result<IntCounter> = try_create_int_counter(
"http_api_blob_published_very_late_total",
"The count of times a blob was published beyond the attestation deadline"
);
} }

View File

@ -3,7 +3,7 @@ use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now};
use beacon_chain::{ use beacon_chain::{
BeaconChain, BeaconChainTypes, BlockError, CountUnrealized, NotifyExecutionLayer, BeaconChain, BeaconChainTypes, BlockError, CountUnrealized, NotifyExecutionLayer,
}; };
use lighthouse_network::PubsubMessage; use lighthouse_network::{PubsubMessage, SignedBeaconBlockAndBlobsSidecar};
use network::NetworkMessage; use network::NetworkMessage;
use slog::{error, info, warn, Logger}; use slog::{error, info, warn, Logger};
use slot_clock::SlotClock; use slot_clock::SlotClock;
@ -11,8 +11,8 @@ use std::sync::Arc;
use tokio::sync::mpsc::UnboundedSender; use tokio::sync::mpsc::UnboundedSender;
use tree_hash::TreeHash; use tree_hash::TreeHash;
use types::{ use types::{
BlindedPayload, ExecPayload, ExecutionBlockHash, ExecutionPayload, FullPayload, Hash256, AbstractExecPayload, BlindedPayload, BlobsSidecar, EthSpec, ExecPayload, ExecutionBlockHash,
SignedBeaconBlock, FullPayload, Hash256, SignedBeaconBlock,
}; };
use warp::Rejection; use warp::Rejection;
@ -20,6 +20,7 @@ use warp::Rejection;
pub async fn publish_block<T: BeaconChainTypes>( pub async fn publish_block<T: BeaconChainTypes>(
block_root: Option<Hash256>, block_root: Option<Hash256>,
block: Arc<SignedBeaconBlock<T::EthSpec>>, block: Arc<SignedBeaconBlock<T::EthSpec>>,
blobs_sidecar: Option<Arc<BlobsSidecar<T::EthSpec>>>,
chain: Arc<BeaconChain<T>>, chain: Arc<BeaconChain<T>>,
network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>, network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger, log: Logger,
@ -28,7 +29,24 @@ pub async fn publish_block<T: BeaconChainTypes>(
// Send the block, regardless of whether or not it is valid. The API // Send the block, regardless of whether or not it is valid. The API
// specification is very clear that this is the desired behaviour. // specification is very clear that this is the desired behaviour.
crate::publish_pubsub_message(network_tx, PubsubMessage::BeaconBlock(block.clone()))?;
let message = match &*block {
SignedBeaconBlock::Eip4844(block) => {
if let Some(sidecar) = blobs_sidecar {
PubsubMessage::BeaconBlockAndBlobsSidecars(Arc::new(
SignedBeaconBlockAndBlobsSidecar {
beacon_block: block.clone(),
blobs_sidecar: (*sidecar).clone(),
},
))
} else {
//TODO(pawan): return an empty sidecar instead
return Err(warp_utils::reject::broadcast_without_import(String::new()));
}
}
_ => PubsubMessage::BeaconBlock(block.clone()),
};
crate::publish_pubsub_message(network_tx, message)?;
// Determine the delay after the start of the slot, register it with metrics. // Determine the delay after the start of the slot, register it with metrics.
let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock);
@ -142,6 +160,7 @@ pub async fn publish_blinded_block<T: BeaconChainTypes>(
publish_block::<T>( publish_block::<T>(
Some(block_root), Some(block_root),
Arc::new(full_block), Arc::new(full_block),
None,
chain, chain,
network_tx, network_tx,
log, log,
@ -165,12 +184,22 @@ async fn reconstruct_block<T: BeaconChainTypes>(
// If the execution block hash is zero, use an empty payload. // If the execution block hash is zero, use an empty payload.
let full_payload = if payload_header.block_hash() == ExecutionBlockHash::zero() { let full_payload = if payload_header.block_hash() == ExecutionBlockHash::zero() {
ExecutionPayload::default() FullPayload::default_at_fork(
chain
.spec
.fork_name_at_epoch(block.slot().epoch(T::EthSpec::slots_per_epoch())),
)
.map_err(|e| {
warp_utils::reject::custom_server_error(format!(
"Default payload construction error: {e:?}"
))
})?
.into()
// If we already have an execution payload with this transactions root cached, use it. // If we already have an execution payload with this transactions root cached, use it.
} else if let Some(cached_payload) = } else if let Some(cached_payload) =
el.get_payload_by_root(&payload_header.tree_hash_root()) el.get_payload_by_root(&payload_header.tree_hash_root())
{ {
info!(log, "Reconstructing a full block using a local payload"; "block_hash" => ?cached_payload.block_hash); info!(log, "Reconstructing a full block using a local payload"; "block_hash" => ?cached_payload.block_hash());
cached_payload cached_payload
// Otherwise, this means we are attempting a blind block proposal. // Otherwise, this means we are attempting a blind block proposal.
} else { } else {
@ -183,7 +212,7 @@ async fn reconstruct_block<T: BeaconChainTypes>(
e e
)) ))
})?; })?;
info!(log, "Successfully published a block to the builder network"; "block_hash" => ?full_payload.block_hash); info!(log, "Successfully published a block to the builder network"; "block_hash" => ?full_payload.block_hash());
full_payload full_payload
}; };

View File

@ -1,9 +1,9 @@
use crate::api_types::{ use crate::api_types::EndpointVersion;
EndpointVersion, ExecutionOptimisticForkVersionedResponse, ForkVersionedResponse,
};
use eth2::CONSENSUS_VERSION_HEADER; use eth2::CONSENSUS_VERSION_HEADER;
use serde::Serialize; use serde::Serialize;
use types::{ForkName, InconsistentFork}; use types::{
ExecutionOptimisticForkVersionedResponse, ForkName, ForkVersionedResponse, InconsistentFork,
};
use warp::reply::{self, Reply, WithHeader}; use warp::reply::{self, Reply, WithHeader};
pub const V1: EndpointVersion = EndpointVersion(1); pub const V1: EndpointVersion = EndpointVersion(1);

View File

@ -1,5 +1,7 @@
use beacon_chain::{ use beacon_chain::{
test_utils::{BeaconChainHarness, BoxedMutator, EphemeralHarnessType}, test_utils::{
BeaconChainHarness, BoxedMutator, Builder as HarnessBuilder, EphemeralHarnessType,
},
BeaconChain, BeaconChainTypes, BeaconChain, BeaconChainTypes,
}; };
use directory::DEFAULT_ROOT_DIR; use directory::DEFAULT_ROOT_DIR;
@ -55,25 +57,39 @@ pub struct ApiServer<E: EthSpec, SFut: Future<Output = ()>> {
pub external_peer_id: PeerId, pub external_peer_id: PeerId,
} }
type Initializer<E> = Box<
dyn FnOnce(HarnessBuilder<EphemeralHarnessType<E>>) -> HarnessBuilder<EphemeralHarnessType<E>>,
>;
type Mutator<E> = BoxedMutator<E, MemoryStore<E>, MemoryStore<E>>; type Mutator<E> = BoxedMutator<E, MemoryStore<E>, MemoryStore<E>>;
impl<E: EthSpec> InteractiveTester<E> { impl<E: EthSpec> InteractiveTester<E> {
pub async fn new(spec: Option<ChainSpec>, validator_count: usize) -> Self { pub async fn new(spec: Option<ChainSpec>, validator_count: usize) -> Self {
Self::new_with_mutator(spec, validator_count, None).await Self::new_with_initializer_and_mutator(spec, validator_count, None, None).await
} }
pub async fn new_with_mutator( pub async fn new_with_initializer_and_mutator(
spec: Option<ChainSpec>, spec: Option<ChainSpec>,
validator_count: usize, validator_count: usize,
initializer: Option<Initializer<E>>,
mutator: Option<Mutator<E>>, mutator: Option<Mutator<E>>,
) -> Self { ) -> Self {
let mut harness_builder = BeaconChainHarness::builder(E::default()) let mut harness_builder = BeaconChainHarness::builder(E::default())
.spec_or_default(spec) .spec_or_default(spec)
.deterministic_keypairs(validator_count)
.logger(test_logger()) .logger(test_logger())
.mock_execution_layer() .mock_execution_layer();
.fresh_ephemeral_store();
harness_builder = if let Some(initializer) = initializer {
// Apply custom initialization provided by the caller.
initializer(harness_builder)
} else {
// Apply default initial configuration.
harness_builder
.deterministic_keypairs(validator_count)
.fresh_ephemeral_store()
};
// Add a mutator for the beacon chain builder which will be called in
// `HarnessBuilder::build`.
if let Some(mutator) = mutator { if let Some(mutator) = mutator {
harness_builder = harness_builder.initial_mutator(mutator); harness_builder = harness_builder.initial_mutator(mutator);
} }

View File

@ -1,8 +1,16 @@
//! Tests for API behaviour across fork boundaries. //! Tests for API behaviour across fork boundaries.
use crate::common::*; use crate::common::*;
use beacon_chain::{test_utils::RelativeSyncCommittee, StateSkipConfig}; use beacon_chain::{
use eth2::types::{StateId, SyncSubcommittee}; test_utils::{RelativeSyncCommittee, DEFAULT_ETH1_BLOCK_HASH, HARNESS_GENESIS_TIME},
use types::{ChainSpec, Epoch, EthSpec, MinimalEthSpec, Slot}; StateSkipConfig,
};
use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee};
use genesis::{bls_withdrawal_credentials, interop_genesis_state_with_withdrawal_credentials};
use std::collections::HashSet;
use types::{
test_utils::{generate_deterministic_keypair, generate_deterministic_keypairs},
Address, ChainSpec, Epoch, EthSpec, Hash256, MinimalEthSpec, Slot,
};
type E = MinimalEthSpec; type E = MinimalEthSpec;
@ -12,6 +20,14 @@ fn altair_spec(altair_fork_epoch: Epoch) -> ChainSpec {
spec spec
} }
fn capella_spec(capella_fork_epoch: Epoch) -> ChainSpec {
let mut spec = E::default_spec();
spec.altair_fork_epoch = Some(Epoch::new(0));
spec.bellatrix_fork_epoch = Some(Epoch::new(0));
spec.capella_fork_epoch = Some(capella_fork_epoch);
spec
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn sync_committee_duties_across_fork() { async fn sync_committee_duties_across_fork() {
let validator_count = E::sync_committee_size(); let validator_count = E::sync_committee_size();
@ -307,3 +323,219 @@ async fn sync_committee_indices_across_fork() {
); );
} }
} }
/// Assert that an HTTP API error has the given status code and indexed errors for the given indices.
fn assert_server_indexed_error(error: eth2::Error, status_code: u16, indices: Vec<usize>) {
let eth2::Error::ServerIndexedMessage(IndexedErrorMessage {
code,
failures,
..
}) = error else {
panic!("wrong error, expected ServerIndexedMessage, got: {error:?}")
};
assert_eq!(code, status_code);
assert_eq!(failures.len(), indices.len());
for (index, failure) in indices.into_iter().zip(failures) {
assert_eq!(failure.index, index as u64);
}
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn bls_to_execution_changes_update_all_around_capella_fork() {
let validator_count = 128;
let fork_epoch = Epoch::new(2);
let spec = capella_spec(fork_epoch);
let max_bls_to_execution_changes = E::max_bls_to_execution_changes();
// Use a genesis state with entirely BLS withdrawal credentials.
// Offset keypairs by `validator_count` to create keys distinct from the signing keys.
let validator_keypairs = generate_deterministic_keypairs(validator_count);
let withdrawal_keypairs = (0..validator_count)
.map(|i| Some(generate_deterministic_keypair(i + validator_count)))
.collect::<Vec<_>>();
let withdrawal_credentials = withdrawal_keypairs
.iter()
.map(|keypair| bls_withdrawal_credentials(&keypair.as_ref().unwrap().pk, &spec))
.collect::<Vec<_>>();
let genesis_state = interop_genesis_state_with_withdrawal_credentials(
&validator_keypairs,
&withdrawal_credentials,
HARNESS_GENESIS_TIME,
Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH),
None,
&spec,
)
.unwrap();
let tester = InteractiveTester::<E>::new_with_initializer_and_mutator(
Some(spec.clone()),
validator_count,
Some(Box::new(|harness_builder| {
harness_builder
.keypairs(validator_keypairs)
.withdrawal_keypairs(withdrawal_keypairs)
.genesis_state_ephemeral_store(genesis_state)
})),
None,
)
.await;
let harness = &tester.harness;
let client = &tester.client;
let all_validators = harness.get_all_validators();
let all_validators_u64 = all_validators.iter().map(|x| *x as u64).collect::<Vec<_>>();
// Create a bunch of valid address changes.
let valid_address_changes = all_validators_u64
.iter()
.map(|&validator_index| {
harness.make_bls_to_execution_change(
validator_index,
Address::from_low_u64_be(validator_index),
)
})
.collect::<Vec<_>>();
// Address changes which conflict with `valid_address_changes` on the address chosen.
let conflicting_address_changes = all_validators_u64
.iter()
.map(|&validator_index| {
harness.make_bls_to_execution_change(
validator_index,
Address::from_low_u64_be(validator_index + 1),
)
})
.collect::<Vec<_>>();
// Address changes signed with the wrong key.
let wrong_key_address_changes = all_validators_u64
.iter()
.map(|&validator_index| {
// Use the correct pubkey.
let pubkey = &harness.get_withdrawal_keypair(validator_index).pk;
// And the wrong secret key.
let secret_key = &harness
.get_withdrawal_keypair((validator_index + 1) % validator_count as u64)
.sk;
harness.make_bls_to_execution_change_with_keys(
validator_index,
Address::from_low_u64_be(validator_index),
pubkey,
secret_key,
)
})
.collect::<Vec<_>>();
// Submit some changes before Capella. Just enough to fill two blocks.
let num_pre_capella = validator_count / 4;
let blocks_filled_pre_capella = 2;
assert_eq!(
num_pre_capella,
blocks_filled_pre_capella * max_bls_to_execution_changes
);
client
.post_beacon_pool_bls_to_execution_changes(&valid_address_changes[..num_pre_capella])
.await
.unwrap();
let expected_received_pre_capella_messages = valid_address_changes[..num_pre_capella].to_vec();
// Conflicting changes for the same validators should all fail.
let error = client
.post_beacon_pool_bls_to_execution_changes(&conflicting_address_changes[..num_pre_capella])
.await
.unwrap_err();
assert_server_indexed_error(error, 400, (0..num_pre_capella).collect());
// Re-submitting the same changes should be accepted.
client
.post_beacon_pool_bls_to_execution_changes(&valid_address_changes[..num_pre_capella])
.await
.unwrap();
// Invalid changes signed with the wrong keys should all be rejected without affecting the seen
// indices filters (apply ALL of them).
let error = client
.post_beacon_pool_bls_to_execution_changes(&wrong_key_address_changes)
.await
.unwrap_err();
assert_server_indexed_error(error, 400, all_validators.clone());
// Advance to right before Capella.
let capella_slot = fork_epoch.start_slot(E::slots_per_epoch());
harness.extend_to_slot(capella_slot - 1).await;
assert_eq!(harness.head_slot(), capella_slot - 1);
assert_eq!(
harness
.chain
.op_pool
.get_bls_to_execution_changes_received_pre_capella(
&harness.chain.head_snapshot().beacon_state,
&spec,
)
.into_iter()
.collect::<HashSet<_>>(),
HashSet::from_iter(expected_received_pre_capella_messages.into_iter()),
"all pre-capella messages should be queued for capella broadcast"
);
// Add Capella blocks which should be full of BLS to execution changes.
for i in 0..validator_count / max_bls_to_execution_changes {
let head_block_root = harness.extend_slots(1).await;
let head_block = harness
.chain
.get_block(&head_block_root)
.await
.unwrap()
.unwrap();
let bls_to_execution_changes = head_block
.message()
.body()
.bls_to_execution_changes()
.unwrap();
// Block should be full.
assert_eq!(
bls_to_execution_changes.len(),
max_bls_to_execution_changes,
"block not full on iteration {i}"
);
// Included changes should be the ones from `valid_address_changes` in any order.
for address_change in bls_to_execution_changes.iter() {
assert!(valid_address_changes.contains(address_change));
}
// After the initial 2 blocks, add the rest of the changes using a large
// request containing all the valid, all the conflicting and all the invalid.
// Despite the invalid and duplicate messages, the new ones should still get picked up by
// the pool.
if i == blocks_filled_pre_capella - 1 {
let all_address_changes: Vec<_> = [
valid_address_changes.clone(),
conflicting_address_changes.clone(),
wrong_key_address_changes.clone(),
]
.concat();
let error = client
.post_beacon_pool_bls_to_execution_changes(&all_address_changes)
.await
.unwrap_err();
assert_server_indexed_error(
error,
400,
(validator_count..3 * validator_count).collect(),
);
}
}
// Eventually all validators should have eth1 withdrawal credentials.
let head_state = harness.get_current_state();
for validator in head_state.validators() {
assert!(validator.has_eth1_withdrawal_credential(&spec));
}
}

View File

@ -5,7 +5,7 @@ use beacon_chain::{
test_utils::{AttestationStrategy, BlockStrategy}, test_utils::{AttestationStrategy, BlockStrategy},
}; };
use eth2::types::DepositContractData; use eth2::types::DepositContractData;
use execution_layer::{ForkChoiceState, PayloadAttributes}; use execution_layer::{ForkchoiceState, PayloadAttributes};
use parking_lot::Mutex; use parking_lot::Mutex;
use slot_clock::SlotClock; use slot_clock::SlotClock;
use state_processing::state_advance::complete_state_advance; use state_processing::state_advance::complete_state_advance;
@ -55,7 +55,7 @@ struct ForkChoiceUpdates {
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
struct ForkChoiceUpdateMetadata { struct ForkChoiceUpdateMetadata {
received_at: Duration, received_at: Duration,
state: ForkChoiceState, state: ForkchoiceState,
payload_attributes: Option<PayloadAttributes>, payload_attributes: Option<PayloadAttributes>,
} }
@ -86,7 +86,7 @@ impl ForkChoiceUpdates {
.payload_attributes .payload_attributes
.as_ref() .as_ref()
.map_or(false, |payload_attributes| { .map_or(false, |payload_attributes| {
payload_attributes.timestamp == proposal_timestamp payload_attributes.timestamp() == proposal_timestamp
}) })
}) })
.cloned() .cloned()
@ -278,9 +278,10 @@ pub async fn proposer_boost_re_org_test(
let num_empty_votes = Some(attesters_per_slot * percent_empty_votes / 100); let num_empty_votes = Some(attesters_per_slot * percent_empty_votes / 100);
let num_head_votes = Some(attesters_per_slot * percent_head_votes / 100); let num_head_votes = Some(attesters_per_slot * percent_head_votes / 100);
let tester = InteractiveTester::<E>::new_with_mutator( let tester = InteractiveTester::<E>::new_with_initializer_and_mutator(
Some(spec), Some(spec),
validator_count, validator_count,
None,
Some(Box::new(move |builder| { Some(Box::new(move |builder| {
builder builder
.proposer_re_org_threshold(Some(ReOrgThreshold(re_org_threshold))) .proposer_re_org_threshold(Some(ReOrgThreshold(re_org_threshold)))
@ -342,7 +343,7 @@ pub async fn proposer_boost_re_org_test(
.lock() .lock()
.set_forkchoice_updated_hook(Box::new(move |state, payload_attributes| { .set_forkchoice_updated_hook(Box::new(move |state, payload_attributes| {
let received_at = chain_inner.slot_clock.now_duration().unwrap(); let received_at = chain_inner.slot_clock.now_duration().unwrap();
let state = ForkChoiceState::from(state); let state = ForkchoiceState::from(state);
let payload_attributes = payload_attributes.map(Into::into); let payload_attributes = payload_attributes.map(Into::into);
let update = ForkChoiceUpdateMetadata { let update = ForkChoiceUpdateMetadata {
received_at, received_at,
@ -521,16 +522,20 @@ pub async fn proposer_boost_re_org_test(
if !misprediction { if !misprediction {
assert_eq!( assert_eq!(
lookahead, payload_lookahead, lookahead,
payload_lookahead,
"lookahead={lookahead:?}, timestamp={}, prev_randao={:?}", "lookahead={lookahead:?}, timestamp={}, prev_randao={:?}",
payload_attribs.timestamp, payload_attribs.prev_randao, payload_attribs.timestamp(),
payload_attribs.prev_randao(),
); );
} else { } else {
// On a misprediction we issue the first fcU 500ms before creating a block! // On a misprediction we issue the first fcU 500ms before creating a block!
assert_eq!( assert_eq!(
lookahead, fork_choice_lookahead, lookahead,
fork_choice_lookahead,
"timestamp={}, prev_randao={:?}", "timestamp={}, prev_randao={:?}",
payload_attribs.timestamp, payload_attribs.prev_randao, payload_attribs.timestamp(),
payload_attribs.prev_randao(),
); );
} }
} }
@ -540,7 +545,7 @@ pub async fn proposer_boost_re_org_test(
pub async fn fork_choice_before_proposal() { pub async fn fork_choice_before_proposal() {
// Validator count needs to be at least 32 or proposer boost gets set to 0 when computing // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing
// `validator_count // 32`. // `validator_count // 32`.
let validator_count = 32; let validator_count = 64;
let all_validators = (0..validator_count).collect::<Vec<_>>(); let all_validators = (0..validator_count).collect::<Vec<_>>();
let num_initial: u64 = 31; let num_initial: u64 = 31;

View File

@ -11,9 +11,11 @@ use eth2::{
types::{BlockId as CoreBlockId, StateId as CoreStateId, *}, types::{BlockId as CoreBlockId, StateId as CoreStateId, *},
BeaconNodeHttpClient, Error, StatusCode, Timeouts, BeaconNodeHttpClient, Error, StatusCode, Timeouts,
}; };
use execution_layer::test_utils::Operation;
use execution_layer::test_utils::TestingBuilder; use execution_layer::test_utils::TestingBuilder;
use execution_layer::test_utils::DEFAULT_BUILDER_THRESHOLD_WEI; use execution_layer::test_utils::DEFAULT_BUILDER_THRESHOLD_WEI;
use execution_layer::test_utils::{
Operation, DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI,
};
use futures::stream::{Stream, StreamExt}; use futures::stream::{Stream, StreamExt};
use futures::FutureExt; use futures::FutureExt;
use http_api::{BlockId, StateId}; use http_api::{BlockId, StateId};
@ -22,6 +24,7 @@ use network::NetworkReceivers;
use proto_array::ExecutionStatus; use proto_array::ExecutionStatus;
use sensitive_url::SensitiveUrl; use sensitive_url::SensitiveUrl;
use slot_clock::SlotClock; use slot_clock::SlotClock;
use state_processing::per_block_processing::get_expected_withdrawals;
use state_processing::per_slot_processing; use state_processing::per_slot_processing;
use std::convert::TryInto; use std::convert::TryInto;
use std::sync::Arc; use std::sync::Arc;
@ -72,38 +75,53 @@ struct ApiTester {
mock_builder: Option<Arc<TestingBuilder<E>>>, mock_builder: Option<Arc<TestingBuilder<E>>>,
} }
struct ApiTesterConfig {
spec: ChainSpec,
builder_threshold: Option<u128>,
}
impl Default for ApiTesterConfig {
fn default() -> Self {
let mut spec = E::default_spec();
spec.shard_committee_period = 2;
Self {
spec,
builder_threshold: None,
}
}
}
impl ApiTester { impl ApiTester {
pub async fn new() -> Self { pub async fn new() -> Self {
// This allows for testing voluntary exits without building out a massive chain. // This allows for testing voluntary exits without building out a massive chain.
let mut spec = E::default_spec(); Self::new_from_config(ApiTesterConfig::default()).await
spec.shard_committee_period = 2;
Self::new_from_spec(spec).await
} }
pub async fn new_with_hard_forks(altair: bool, bellatrix: bool) -> Self { pub async fn new_with_hard_forks(altair: bool, bellatrix: bool) -> Self {
let mut spec = E::default_spec(); let mut config = ApiTesterConfig::default();
spec.shard_committee_period = 2;
// Set whether the chain has undergone each hard fork. // Set whether the chain has undergone each hard fork.
if altair { if altair {
spec.altair_fork_epoch = Some(Epoch::new(0)); config.spec.altair_fork_epoch = Some(Epoch::new(0));
} }
if bellatrix { if bellatrix {
spec.bellatrix_fork_epoch = Some(Epoch::new(0)); config.spec.bellatrix_fork_epoch = Some(Epoch::new(0));
} }
Self::new_from_spec(spec).await Self::new_from_config(config).await
} }
pub async fn new_from_spec(spec: ChainSpec) -> Self { pub async fn new_from_config(config: ApiTesterConfig) -> Self {
// Get a random unused port // Get a random unused port
let spec = config.spec;
let port = unused_port::unused_tcp_port().unwrap(); let port = unused_port::unused_tcp_port().unwrap();
let beacon_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(); let beacon_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap();
let harness = Arc::new( let harness = Arc::new(
BeaconChainHarness::builder(MainnetEthSpec) BeaconChainHarness::builder(MainnetEthSpec)
.spec(spec.clone()) .spec(spec.clone())
.logger(logging::test_logger())
.deterministic_keypairs(VALIDATOR_COUNT) .deterministic_keypairs(VALIDATOR_COUNT)
.fresh_ephemeral_store() .fresh_ephemeral_store()
.mock_execution_layer_with_builder(beacon_url.clone()) .mock_execution_layer_with_builder(beacon_url.clone(), config.builder_threshold)
.build(), .build(),
); );
@ -358,6 +376,28 @@ impl ApiTester {
tester tester
} }
pub async fn new_mev_tester_no_builder_threshold() -> Self {
let mut config = ApiTesterConfig {
builder_threshold: Some(0),
spec: E::default_spec(),
};
config.spec.altair_fork_epoch = Some(Epoch::new(0));
config.spec.bellatrix_fork_epoch = Some(Epoch::new(0));
let tester = Self::new_from_config(config)
.await
.test_post_validator_register_validator()
.await;
tester
.mock_builder
.as_ref()
.unwrap()
.builder
.add_operation(Operation::Value(Uint256::from(
DEFAULT_BUILDER_PAYLOAD_VALUE_WEI,
)));
tester
}
fn skip_slots(self, count: u64) -> Self { fn skip_slots(self, count: u64) -> Self {
for _ in 0..count { for _ in 0..count {
self.chain self.chain
@ -1372,9 +1412,9 @@ impl ApiTester {
pub async fn test_get_config_spec(self) -> Self { pub async fn test_get_config_spec(self) -> Self {
let result = self let result = self
.client .client
.get_config_spec::<ConfigAndPresetBellatrix>() .get_config_spec::<ConfigAndPresetCapella>()
.await .await
.map(|res| ConfigAndPreset::Bellatrix(res.data)) .map(|res| ConfigAndPreset::Capella(res.data))
.unwrap(); .unwrap();
let expected = ConfigAndPreset::from_chain_spec::<E>(&self.chain.spec, None); let expected = ConfigAndPreset::from_chain_spec::<E>(&self.chain.spec, None);
@ -2122,7 +2162,7 @@ impl ApiTester {
self self
} }
pub async fn test_blinded_block_production<Payload: ExecPayload<E>>(&self) { pub async fn test_blinded_block_production<Payload: AbstractExecPayload<E>>(&self) {
let fork = self.chain.canonical_head.cached_head().head_fork(); let fork = self.chain.canonical_head.cached_head().head_fork();
let genesis_validators_root = self.chain.genesis_validators_root; let genesis_validators_root = self.chain.genesis_validators_root;
@ -2182,7 +2222,7 @@ impl ApiTester {
} }
} }
pub async fn test_blinded_block_production_no_verify_randao<Payload: ExecPayload<E>>( pub async fn test_blinded_block_production_no_verify_randao<Payload: AbstractExecPayload<E>>(
self, self,
) -> Self { ) -> Self {
for _ in 0..E::slots_per_epoch() { for _ in 0..E::slots_per_epoch() {
@ -2206,7 +2246,9 @@ impl ApiTester {
self self
} }
pub async fn test_blinded_block_production_verify_randao_invalid<Payload: ExecPayload<E>>( pub async fn test_blinded_block_production_verify_randao_invalid<
Payload: AbstractExecPayload<E>,
>(
self, self,
) -> Self { ) -> Self {
let fork = self.chain.canonical_head.cached_head().head_fork(); let fork = self.chain.canonical_head.cached_head().head_fork();
@ -2664,7 +2706,7 @@ impl ApiTester {
let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await;
let payload = self let payload: BlindedPayload<E> = self
.client .client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None) .get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.await .await
@ -2673,14 +2715,11 @@ impl ApiTester {
.body() .body()
.execution_payload() .execution_payload()
.unwrap() .unwrap()
.clone(); .into();
let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64);
assert_eq!( assert_eq!(payload.fee_recipient(), expected_fee_recipient);
payload.execution_payload_header.fee_recipient, assert_eq!(payload.gas_limit(), 11_111_111);
expected_fee_recipient
);
assert_eq!(payload.execution_payload_header.gas_limit, 11_111_111);
// If this cache is empty, it indicates fallback was not used, so the payload came from the // If this cache is empty, it indicates fallback was not used, so the payload came from the
// mock builder. // mock builder.
@ -2707,7 +2746,7 @@ impl ApiTester {
let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await;
let payload = self let payload: BlindedPayload<E> = self
.client .client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None) .get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.await .await
@ -2716,14 +2755,11 @@ impl ApiTester {
.body() .body()
.execution_payload() .execution_payload()
.unwrap() .unwrap()
.clone(); .into();
let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64);
assert_eq!( assert_eq!(payload.fee_recipient(), expected_fee_recipient);
payload.execution_payload_header.fee_recipient, assert_eq!(payload.gas_limit(), 30_000_000);
expected_fee_recipient
);
assert_eq!(payload.execution_payload_header.gas_limit, 30_000_000);
// This cache should not be populated because fallback should not have been used. // This cache should not be populated because fallback should not have been used.
assert!(self assert!(self
@ -2753,7 +2789,7 @@ impl ApiTester {
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
let payload = self let payload: BlindedPayload<E> = self
.client .client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None) .get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.await .await
@ -2762,12 +2798,9 @@ impl ApiTester {
.body() .body()
.execution_payload() .execution_payload()
.unwrap() .unwrap()
.clone(); .into();
assert_eq!( assert_eq!(payload.fee_recipient(), test_fee_recipient);
payload.execution_payload_header.fee_recipient,
test_fee_recipient
);
// This cache should not be populated because fallback should not have been used. // This cache should not be populated because fallback should not have been used.
assert!(self assert!(self
@ -2801,11 +2834,11 @@ impl ApiTester {
.beacon_state .beacon_state
.latest_execution_payload_header() .latest_execution_payload_header()
.unwrap() .unwrap()
.block_hash; .block_hash();
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
let payload = self let payload: BlindedPayload<E> = self
.client .client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None) .get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.await .await
@ -2814,12 +2847,9 @@ impl ApiTester {
.body() .body()
.execution_payload() .execution_payload()
.unwrap() .unwrap()
.clone(); .into();
assert_eq!( assert_eq!(payload.parent_hash(), expected_parent_hash);
payload.execution_payload_header.parent_hash,
expected_parent_hash
);
// If this cache is populated, it indicates fallback to the local EE was correctly used. // If this cache is populated, it indicates fallback to the local EE was correctly used.
assert!(self assert!(self
@ -2856,7 +2886,7 @@ impl ApiTester {
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
let payload = self let payload: BlindedPayload<E> = self
.client .client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None) .get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.await .await
@ -2865,12 +2895,9 @@ impl ApiTester {
.body() .body()
.execution_payload() .execution_payload()
.unwrap() .unwrap()
.clone(); .into();
assert_eq!( assert_eq!(payload.prev_randao(), expected_prev_randao);
payload.execution_payload_header.prev_randao,
expected_prev_randao
);
// If this cache is populated, it indicates fallback to the local EE was correctly used. // If this cache is populated, it indicates fallback to the local EE was correctly used.
assert!(self assert!(self
@ -2901,12 +2928,12 @@ impl ApiTester {
.beacon_state .beacon_state
.latest_execution_payload_header() .latest_execution_payload_header()
.unwrap() .unwrap()
.block_number .block_number()
+ 1; + 1;
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
let payload = self let payload: BlindedPayload<E> = self
.client .client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None) .get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.await .await
@ -2915,12 +2942,9 @@ impl ApiTester {
.body() .body()
.execution_payload() .execution_payload()
.unwrap() .unwrap()
.clone(); .into();
assert_eq!( assert_eq!(payload.block_number(), expected_block_number);
payload.execution_payload_header.block_number,
expected_block_number
);
// If this cache is populated, it indicates fallback to the local EE was correctly used. // If this cache is populated, it indicates fallback to the local EE was correctly used.
assert!(self assert!(self
@ -2951,11 +2975,11 @@ impl ApiTester {
.beacon_state .beacon_state
.latest_execution_payload_header() .latest_execution_payload_header()
.unwrap() .unwrap()
.timestamp; .timestamp();
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
let payload = self let payload: BlindedPayload<E> = self
.client .client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None) .get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.await .await
@ -2964,9 +2988,9 @@ impl ApiTester {
.body() .body()
.execution_payload() .execution_payload()
.unwrap() .unwrap()
.clone(); .into();
assert!(payload.execution_payload_header.timestamp > min_expected_timestamp); assert!(payload.timestamp() > min_expected_timestamp);
// If this cache is populated, it indicates fallback to the local EE was correctly used. // If this cache is populated, it indicates fallback to the local EE was correctly used.
assert!(self assert!(self
@ -2991,7 +3015,7 @@ impl ApiTester {
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
let payload = self let payload: BlindedPayload<E> = self
.client .client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None) .get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.await .await
@ -3000,7 +3024,7 @@ impl ApiTester {
.body() .body()
.execution_payload() .execution_payload()
.unwrap() .unwrap()
.clone(); .into();
// If this cache is populated, it indicates fallback to the local EE was correctly used. // If this cache is populated, it indicates fallback to the local EE was correctly used.
assert!(self assert!(self
@ -3028,7 +3052,7 @@ impl ApiTester {
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
let payload = self let payload: BlindedPayload<E> = self
.client .client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None) .get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.await .await
@ -3037,7 +3061,7 @@ impl ApiTester {
.body() .body()
.execution_payload() .execution_payload()
.unwrap() .unwrap()
.clone(); .into();
// If this cache is populated, it indicates fallback to the local EE was correctly used. // If this cache is populated, it indicates fallback to the local EE was correctly used.
assert!(self assert!(self
@ -3071,7 +3095,7 @@ impl ApiTester {
.get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch()))
.await; .await;
let payload = self let payload: BlindedPayload<E> = self
.client .client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(next_slot, &randao_reveal, None) .get_validator_blinded_blocks::<E, BlindedPayload<E>>(next_slot, &randao_reveal, None)
.await .await
@ -3080,7 +3104,7 @@ impl ApiTester {
.body() .body()
.execution_payload() .execution_payload()
.unwrap() .unwrap()
.clone(); .into();
// This cache should not be populated because fallback should not have been used. // This cache should not be populated because fallback should not have been used.
assert!(self assert!(self
@ -3100,7 +3124,7 @@ impl ApiTester {
.get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch()))
.await; .await;
let payload = self let payload: BlindedPayload<E> = self
.client .client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(next_slot, &randao_reveal, None) .get_validator_blinded_blocks::<E, BlindedPayload<E>>(next_slot, &randao_reveal, None)
.await .await
@ -3109,7 +3133,7 @@ impl ApiTester {
.body() .body()
.execution_payload() .execution_payload()
.unwrap() .unwrap()
.clone(); .into();
// If this cache is populated, it indicates fallback to the local EE was correctly used. // If this cache is populated, it indicates fallback to the local EE was correctly used.
assert!(self assert!(self
@ -3149,7 +3173,7 @@ impl ApiTester {
.get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch()))
.await; .await;
let payload = self let payload: BlindedPayload<E> = self
.client .client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(next_slot, &randao_reveal, None) .get_validator_blinded_blocks::<E, BlindedPayload<E>>(next_slot, &randao_reveal, None)
.await .await
@ -3158,7 +3182,7 @@ impl ApiTester {
.body() .body()
.execution_payload() .execution_payload()
.unwrap() .unwrap()
.clone(); .into();
// If this cache is populated, it indicates fallback to the local EE was correctly used. // If this cache is populated, it indicates fallback to the local EE was correctly used.
assert!(self assert!(self
@ -3188,7 +3212,7 @@ impl ApiTester {
.get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch()))
.await; .await;
let payload = self let payload: BlindedPayload<E> = self
.client .client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(next_slot, &randao_reveal, None) .get_validator_blinded_blocks::<E, BlindedPayload<E>>(next_slot, &randao_reveal, None)
.await .await
@ -3197,7 +3221,7 @@ impl ApiTester {
.body() .body()
.execution_payload() .execution_payload()
.unwrap() .unwrap()
.clone(); .into();
// This cache should not be populated because fallback should not have been used. // This cache should not be populated because fallback should not have been used.
assert!(self assert!(self
@ -3231,7 +3255,7 @@ impl ApiTester {
let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await;
let payload = self let payload: BlindedPayload<E> = self
.client .client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None) .get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.await .await
@ -3240,13 +3264,10 @@ impl ApiTester {
.body() .body()
.execution_payload() .execution_payload()
.unwrap() .unwrap()
.clone(); .into();
let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64);
assert_eq!( assert_eq!(payload.fee_recipient(), expected_fee_recipient);
payload.execution_payload_header.fee_recipient,
expected_fee_recipient
);
// If this cache is populated, it indicates fallback to the local EE was correctly used. // If this cache is populated, it indicates fallback to the local EE was correctly used.
assert!(self assert!(self
@ -3275,7 +3296,7 @@ impl ApiTester {
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
let payload = self let payload: BlindedPayload<E> = self
.client .client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None) .get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.await .await
@ -3284,7 +3305,7 @@ impl ApiTester {
.body() .body()
.execution_payload() .execution_payload()
.unwrap() .unwrap()
.clone(); .into();
// If this cache is populated, it indicates fallback to the local EE was correctly used. // If this cache is populated, it indicates fallback to the local EE was correctly used.
assert!(self assert!(self
@ -3297,6 +3318,209 @@ impl ApiTester {
self self
} }
pub async fn test_builder_payload_chosen_when_more_profitable(self) -> Self {
// Mutate value.
self.mock_builder
.as_ref()
.unwrap()
.builder
.add_operation(Operation::Value(Uint256::from(
DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1,
)));
let slot = self.chain.slot().unwrap();
let epoch = self.chain.epoch().unwrap();
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
let payload: BlindedPayload<E> = self
.client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.await
.unwrap()
.data
.body()
.execution_payload()
.unwrap()
.into();
// The builder's payload should've been chosen, so this cache should not be populated
assert!(self
.chain
.execution_layer
.as_ref()
.unwrap()
.get_payload_by_root(&payload.tree_hash_root())
.is_none());
self
}
pub async fn test_local_payload_chosen_when_equally_profitable(self) -> Self {
// Mutate value.
self.mock_builder
.as_ref()
.unwrap()
.builder
.add_operation(Operation::Value(Uint256::from(
DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI,
)));
let slot = self.chain.slot().unwrap();
let epoch = self.chain.epoch().unwrap();
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
let payload: BlindedPayload<E> = self
.client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.await
.unwrap()
.data
.body()
.execution_payload()
.unwrap()
.into();
// The local payload should've been chosen, so this cache should be populated
assert!(self
.chain
.execution_layer
.as_ref()
.unwrap()
.get_payload_by_root(&payload.tree_hash_root())
.is_some());
self
}
pub async fn test_local_payload_chosen_when_more_profitable(self) -> Self {
// Mutate value.
self.mock_builder
.as_ref()
.unwrap()
.builder
.add_operation(Operation::Value(Uint256::from(
DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI - 1,
)));
let slot = self.chain.slot().unwrap();
let epoch = self.chain.epoch().unwrap();
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
let payload: BlindedPayload<E> = self
.client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.await
.unwrap()
.data
.body()
.execution_payload()
.unwrap()
.into();
// The local payload should've been chosen, so this cache should be populated
assert!(self
.chain
.execution_layer
.as_ref()
.unwrap()
.get_payload_by_root(&payload.tree_hash_root())
.is_some());
self
}
pub async fn test_builder_works_post_capella(self) -> Self {
// Ensure builder payload is chosen
self.mock_builder
.as_ref()
.unwrap()
.builder
.add_operation(Operation::Value(Uint256::from(
DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1,
)));
let slot = self.chain.slot().unwrap();
let propose_state = self
.harness
.chain
.state_at_slot(slot, StateSkipConfig::WithoutStateRoots)
.unwrap();
let withdrawals = get_expected_withdrawals(&propose_state, &self.chain.spec).unwrap();
let withdrawals_root = withdrawals.tree_hash_root();
// Set withdrawals root for builder
self.mock_builder
.as_ref()
.unwrap()
.builder
.add_operation(Operation::WithdrawalsRoot(withdrawals_root));
let epoch = self.chain.epoch().unwrap();
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
let payload: BlindedPayload<E> = self
.client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.await
.unwrap()
.data
.body()
.execution_payload()
.unwrap()
.into();
// The builder's payload should've been chosen, so this cache should not be populated
assert!(self
.chain
.execution_layer
.as_ref()
.unwrap()
.get_payload_by_root(&payload.tree_hash_root())
.is_none());
self
}
pub async fn test_lighthouse_rejects_invalid_withdrawals_root(self) -> Self {
// Ensure builder payload *would be* chosen
self.mock_builder
.as_ref()
.unwrap()
.builder
.add_operation(Operation::Value(Uint256::from(
DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1,
)));
// Set withdrawals root to something invalid
self.mock_builder
.as_ref()
.unwrap()
.builder
.add_operation(Operation::WithdrawalsRoot(Hash256::repeat_byte(0x42)));
let slot = self.chain.slot().unwrap();
let epoch = self.chain.epoch().unwrap();
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
let payload: BlindedPayload<E> = self
.client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.await
.unwrap()
.data
.body()
.execution_payload()
.unwrap()
.into();
// The local payload should've been chosen because the builder's was invalid
assert!(self
.chain
.execution_layer
.as_ref()
.unwrap()
.get_payload_by_root(&payload.tree_hash_root())
.is_some());
self
}
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
pub async fn test_get_lighthouse_health(self) -> Self { pub async fn test_get_lighthouse_health(self) -> Self {
self.client.get_lighthouse_health().await.unwrap(); self.client.get_lighthouse_health().await.unwrap();
@ -3766,9 +3990,9 @@ async fn get_events() {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_events_altair() { async fn get_events_altair() {
let mut spec = E::default_spec(); let mut config = ApiTesterConfig::default();
spec.altair_fork_epoch = Some(Epoch::new(0)); config.spec.altair_fork_epoch = Some(Epoch::new(0));
ApiTester::new_from_spec(spec) ApiTester::new_from_config(config)
.await .await
.test_get_events_altair() .test_get_events_altair()
.await; .await;
@ -4281,6 +4505,38 @@ async fn builder_inadequate_builder_threshold() {
.await; .await;
} }
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn builder_payload_chosen_by_profit() {
ApiTester::new_mev_tester_no_builder_threshold()
.await
.test_builder_payload_chosen_when_more_profitable()
.await
.test_local_payload_chosen_when_equally_profitable()
.await
.test_local_payload_chosen_when_more_profitable()
.await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn builder_works_post_capella() {
let mut config = ApiTesterConfig {
builder_threshold: Some(0),
spec: E::default_spec(),
};
config.spec.altair_fork_epoch = Some(Epoch::new(0));
config.spec.bellatrix_fork_epoch = Some(Epoch::new(0));
config.spec.capella_fork_epoch = Some(Epoch::new(0));
ApiTester::new_from_config(config)
.await
.test_post_validator_register_validator()
.await
.test_builder_works_post_capella()
.await
.test_lighthouse_rejects_invalid_withdrawals_root()
.await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn lighthouse_endpoints() { async fn lighthouse_endpoints() {
ApiTester::new() ApiTester::new()

View File

@ -13,6 +13,8 @@ serde = { version = "1.0.116", features = ["derive"] }
serde_derive = "1.0.116" serde_derive = "1.0.116"
eth2_ssz = "0.4.1" eth2_ssz = "0.4.1"
eth2_ssz_derive = "0.3.0" eth2_ssz_derive = "0.3.0"
tree_hash = "0.4.1"
tree_hash_derive = "0.4.0"
slog = { version = "2.5.2", features = ["max_level_trace"] } slog = { version = "2.5.2", features = ["max_level_trace"] }
lighthouse_version = { path = "../../common/lighthouse_version" } lighthouse_version = { path = "../../common/lighthouse_version" }
tokio = { version = "1.14.0", features = ["time", "macros"] } tokio = { version = "1.14.0", features = ["time", "macros"] }

View File

@ -306,8 +306,8 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc<ForkContext>) -> Gos
let topic_bytes = message.topic.as_str().as_bytes(); let topic_bytes = message.topic.as_str().as_bytes();
match fork_context.current_fork() { match fork_context.current_fork() {
// according to: https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#the-gossip-domain-gossipsub // according to: https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#the-gossip-domain-gossipsub
// the derivation of the message-id remains the same in the merge // the derivation of the message-id remains the same in the merge and for eip 4844.
ForkName::Altair | ForkName::Merge => { ForkName::Altair | ForkName::Merge | ForkName::Capella | ForkName::Eip4844 => {
let topic_len_bytes = topic_bytes.len().to_le_bytes(); let topic_len_bytes = topic_bytes.len().to_le_bytes();
let mut vec = Vec::with_capacity( let mut vec = Vec::with_capacity(
prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(), prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(),

View File

@ -15,6 +15,7 @@ pub mod peer_manager;
pub mod rpc; pub mod rpc;
pub mod types; pub mod types;
pub use crate::types::SignedBeaconBlockAndBlobsSidecar;
pub use config::gossip_max_size; pub use config::gossip_max_size;
use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use serde::{de, Deserialize, Deserializer, Serialize, Serializer};

View File

@ -534,6 +534,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
Protocol::Ping => PeerAction::MidToleranceError, Protocol::Ping => PeerAction::MidToleranceError,
Protocol::BlocksByRange => PeerAction::MidToleranceError, Protocol::BlocksByRange => PeerAction::MidToleranceError,
Protocol::BlocksByRoot => PeerAction::MidToleranceError, Protocol::BlocksByRoot => PeerAction::MidToleranceError,
Protocol::BlobsByRange => PeerAction::MidToleranceError,
Protocol::LightClientBootstrap => PeerAction::LowToleranceError, Protocol::LightClientBootstrap => PeerAction::LowToleranceError,
Protocol::Goodbye => PeerAction::LowToleranceError, Protocol::Goodbye => PeerAction::LowToleranceError,
Protocol::MetaData => PeerAction::LowToleranceError, Protocol::MetaData => PeerAction::LowToleranceError,
@ -550,6 +551,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
Protocol::Ping => PeerAction::Fatal, Protocol::Ping => PeerAction::Fatal,
Protocol::BlocksByRange => return, Protocol::BlocksByRange => return,
Protocol::BlocksByRoot => return, Protocol::BlocksByRoot => return,
Protocol::BlobsByRange => return,
Protocol::Goodbye => return, Protocol::Goodbye => return,
Protocol::LightClientBootstrap => return, Protocol::LightClientBootstrap => return,
Protocol::MetaData => PeerAction::LowToleranceError, Protocol::MetaData => PeerAction::LowToleranceError,
@ -566,6 +568,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
Protocol::Ping => PeerAction::LowToleranceError, Protocol::Ping => PeerAction::LowToleranceError,
Protocol::BlocksByRange => PeerAction::MidToleranceError, Protocol::BlocksByRange => PeerAction::MidToleranceError,
Protocol::BlocksByRoot => PeerAction::MidToleranceError, Protocol::BlocksByRoot => PeerAction::MidToleranceError,
Protocol::BlobsByRange => PeerAction::MidToleranceError,
Protocol::LightClientBootstrap => return, Protocol::LightClientBootstrap => return,
Protocol::Goodbye => return, Protocol::Goodbye => return,
Protocol::MetaData => return, Protocol::MetaData => return,

View File

@ -193,14 +193,20 @@ mod tests {
let mut chain_spec = Spec::default_spec(); let mut chain_spec = Spec::default_spec();
let altair_fork_epoch = Epoch::new(1); let altair_fork_epoch = Epoch::new(1);
let merge_fork_epoch = Epoch::new(2); let merge_fork_epoch = Epoch::new(2);
let capella_fork_epoch = Epoch::new(3);
let eip4844_fork_epoch = Epoch::new(4);
chain_spec.altair_fork_epoch = Some(altair_fork_epoch); chain_spec.altair_fork_epoch = Some(altair_fork_epoch);
chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch); chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch);
chain_spec.capella_fork_epoch = Some(capella_fork_epoch);
chain_spec.eip4844_fork_epoch = Some(eip4844_fork_epoch);
let current_slot = match fork_name { let current_slot = match fork_name {
ForkName::Base => Slot::new(0), ForkName::Base => Slot::new(0),
ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()),
ForkName::Merge => merge_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Merge => merge_fork_epoch.start_slot(Spec::slots_per_epoch()),
ForkName::Capella => capella_fork_epoch.start_slot(Spec::slots_per_epoch()),
ForkName::Eip4844 => eip4844_fork_epoch.start_slot(Spec::slots_per_epoch()),
}; };
ForkContext::new::<Spec>(current_slot, Hash256::zero(), &chain_spec) ForkContext::new::<Spec>(current_slot, Hash256::zero(), &chain_spec)
} }

View File

@ -15,9 +15,11 @@ use std::io::{Read, Write};
use std::marker::PhantomData; use std::marker::PhantomData;
use std::sync::Arc; use std::sync::Arc;
use tokio_util::codec::{Decoder, Encoder}; use tokio_util::codec::{Decoder, Encoder};
use types::light_client_bootstrap::LightClientBootstrap;
use types::{ use types::{
light_client_bootstrap::LightClientBootstrap, EthSpec, ForkContext, ForkName, Hash256, BlobsSidecar, EthSpec, ForkContext, ForkName, Hash256, SignedBeaconBlock,
SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockMerge, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella,
SignedBeaconBlockEip4844, SignedBeaconBlockMerge,
}; };
use unsigned_varint::codec::Uvi; use unsigned_varint::codec::Uvi;
@ -70,6 +72,7 @@ impl<TSpec: EthSpec> Encoder<RPCCodedResponse<TSpec>> for SSZSnappyInboundCodec<
RPCResponse::Status(res) => res.as_ssz_bytes(), RPCResponse::Status(res) => res.as_ssz_bytes(),
RPCResponse::BlocksByRange(res) => res.as_ssz_bytes(), RPCResponse::BlocksByRange(res) => res.as_ssz_bytes(),
RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(), RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(),
RPCResponse::BlobsByRange(res) => res.as_ssz_bytes(),
RPCResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), RPCResponse::LightClientBootstrap(res) => res.as_ssz_bytes(),
RPCResponse::Pong(res) => res.data.as_ssz_bytes(), RPCResponse::Pong(res) => res.data.as_ssz_bytes(),
RPCResponse::MetaData(res) => RPCResponse::MetaData(res) =>
@ -229,6 +232,7 @@ impl<TSpec: EthSpec> Encoder<OutboundRequest<TSpec>> for SSZSnappyOutboundCodec<
OutboundRequest::Goodbye(req) => req.as_ssz_bytes(), OutboundRequest::Goodbye(req) => req.as_ssz_bytes(),
OutboundRequest::BlocksByRange(req) => req.as_ssz_bytes(), OutboundRequest::BlocksByRange(req) => req.as_ssz_bytes(),
OutboundRequest::BlocksByRoot(req) => req.block_roots.as_ssz_bytes(), OutboundRequest::BlocksByRoot(req) => req.block_roots.as_ssz_bytes(),
OutboundRequest::BlobsByRange(req) => req.as_ssz_bytes(),
OutboundRequest::Ping(req) => req.as_ssz_bytes(), OutboundRequest::Ping(req) => req.as_ssz_bytes(),
OutboundRequest::MetaData(_) => return Ok(()), // no metadata to encode OutboundRequest::MetaData(_) => return Ok(()), // no metadata to encode
OutboundRequest::LightClientBootstrap(req) => req.as_ssz_bytes(), OutboundRequest::LightClientBootstrap(req) => req.as_ssz_bytes(),
@ -409,6 +413,14 @@ fn context_bytes<T: EthSpec>(
return match **ref_box_block { return match **ref_box_block {
// NOTE: If you are adding another fork type here, be sure to modify the // NOTE: If you are adding another fork type here, be sure to modify the
// `fork_context.to_context_bytes()` function to support it as well! // `fork_context.to_context_bytes()` function to support it as well!
SignedBeaconBlock::Eip4844 { .. } => {
// Eip4844 context being `None` implies that "merge never happened".
fork_context.to_context_bytes(ForkName::Eip4844)
}
SignedBeaconBlock::Capella { .. } => {
// Capella context being `None` implies that "merge never happened".
fork_context.to_context_bytes(ForkName::Capella)
}
SignedBeaconBlock::Merge { .. } => { SignedBeaconBlock::Merge { .. } => {
// Merge context being `None` implies that "merge never happened". // Merge context being `None` implies that "merge never happened".
fork_context.to_context_bytes(ForkName::Merge) fork_context.to_context_bytes(ForkName::Merge)
@ -471,6 +483,9 @@ fn handle_v1_request<T: EthSpec>(
Protocol::BlocksByRoot => Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest { Protocol::BlocksByRoot => Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest {
block_roots: VariableList::from_ssz_bytes(decoded_buffer)?, block_roots: VariableList::from_ssz_bytes(decoded_buffer)?,
}))), }))),
Protocol::BlobsByRange => Ok(Some(InboundRequest::BlobsByRange(
BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?,
))),
Protocol::Ping => Ok(Some(InboundRequest::Ping(Ping { Protocol::Ping => Ok(Some(InboundRequest::Ping(Ping {
data: u64::from_ssz_bytes(decoded_buffer)?, data: u64::from_ssz_bytes(decoded_buffer)?,
}))), }))),
@ -507,6 +522,9 @@ fn handle_v2_request<T: EthSpec>(
Protocol::BlocksByRoot => Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest { Protocol::BlocksByRoot => Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest {
block_roots: VariableList::from_ssz_bytes(decoded_buffer)?, block_roots: VariableList::from_ssz_bytes(decoded_buffer)?,
}))), }))),
Protocol::BlobsByRange => Ok(Some(InboundRequest::BlobsByRange(
BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?,
))),
// MetaData requests return early from InboundUpgrade and do not reach the decoder. // MetaData requests return early from InboundUpgrade and do not reach the decoder.
// Handle this case just for completeness. // Handle this case just for completeness.
Protocol::MetaData => { Protocol::MetaData => {
@ -544,6 +562,7 @@ fn handle_v1_response<T: EthSpec>(
Protocol::BlocksByRoot => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( Protocol::BlocksByRoot => Ok(Some(RPCResponse::BlocksByRoot(Arc::new(
SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?),
)))), )))),
Protocol::BlobsByRange => Err(RPCError::InvalidData("blobs by range via v1".to_string())),
Protocol::Ping => Ok(Some(RPCResponse::Pong(Ping { Protocol::Ping => Ok(Some(RPCResponse::Pong(Ping {
data: u64::from_ssz_bytes(decoded_buffer)?, data: u64::from_ssz_bytes(decoded_buffer)?,
}))), }))),
@ -595,6 +614,16 @@ fn handle_v2_response<T: EthSpec>(
decoded_buffer, decoded_buffer,
)?), )?),
)))), )))),
ForkName::Capella => Ok(Some(RPCResponse::BlocksByRange(Arc::new(
SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes(
decoded_buffer,
)?),
)))),
ForkName::Eip4844 => Ok(Some(RPCResponse::BlocksByRange(Arc::new(
SignedBeaconBlock::Eip4844(SignedBeaconBlockEip4844::from_ssz_bytes(
decoded_buffer,
)?),
)))),
}, },
Protocol::BlocksByRoot => match fork_name { Protocol::BlocksByRoot => match fork_name {
ForkName::Altair => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( ForkName::Altair => Ok(Some(RPCResponse::BlocksByRoot(Arc::new(
@ -610,6 +639,25 @@ fn handle_v2_response<T: EthSpec>(
decoded_buffer, decoded_buffer,
)?), )?),
)))), )))),
ForkName::Capella => Ok(Some(RPCResponse::BlocksByRoot(Arc::new(
SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes(
decoded_buffer,
)?),
)))),
ForkName::Eip4844 => Ok(Some(RPCResponse::BlocksByRoot(Arc::new(
SignedBeaconBlock::Eip4844(SignedBeaconBlockEip4844::from_ssz_bytes(
decoded_buffer,
)?),
)))),
},
Protocol::BlobsByRange => match fork_name {
ForkName::Eip4844 => Ok(Some(RPCResponse::BlobsByRange(Arc::new(
BlobsSidecar::from_ssz_bytes(decoded_buffer)?,
)))),
_ => Err(RPCError::ErrorResponse(
RPCResponseErrorCode::InvalidRequest,
"Invalid forkname for blobsbyrange".to_string(),
)),
}, },
_ => Err(RPCError::ErrorResponse( _ => Err(RPCError::ErrorResponse(
RPCResponseErrorCode::InvalidRequest, RPCResponseErrorCode::InvalidRequest,
@ -645,8 +693,8 @@ mod tests {
}; };
use std::sync::Arc; use std::sync::Arc;
use types::{ use types::{
BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, Epoch, ForkContext, BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EmptyBlock, Epoch,
FullPayload, Hash256, Signature, SignedBeaconBlock, Slot, ForkContext, FullPayload, Hash256, Signature, SignedBeaconBlock, Slot,
}; };
use snap::write::FrameEncoder; use snap::write::FrameEncoder;
@ -659,14 +707,20 @@ mod tests {
let mut chain_spec = Spec::default_spec(); let mut chain_spec = Spec::default_spec();
let altair_fork_epoch = Epoch::new(1); let altair_fork_epoch = Epoch::new(1);
let merge_fork_epoch = Epoch::new(2); let merge_fork_epoch = Epoch::new(2);
let capella_fork_epoch = Epoch::new(3);
let eip4844_fork_epoch = Epoch::new(4);
chain_spec.altair_fork_epoch = Some(altair_fork_epoch); chain_spec.altair_fork_epoch = Some(altair_fork_epoch);
chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch); chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch);
chain_spec.capella_fork_epoch = Some(capella_fork_epoch);
chain_spec.eip4844_fork_epoch = Some(eip4844_fork_epoch);
let current_slot = match fork_name { let current_slot = match fork_name {
ForkName::Base => Slot::new(0), ForkName::Base => Slot::new(0),
ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()),
ForkName::Merge => merge_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Merge => merge_fork_epoch.start_slot(Spec::slots_per_epoch()),
ForkName::Capella => capella_fork_epoch.start_slot(Spec::slots_per_epoch()),
ForkName::Eip4844 => eip4844_fork_epoch.start_slot(Spec::slots_per_epoch()),
}; };
ForkContext::new::<Spec>(current_slot, Hash256::zero(), &chain_spec) ForkContext::new::<Spec>(current_slot, Hash256::zero(), &chain_spec)
} }
@ -870,6 +924,9 @@ mod tests {
OutboundRequest::BlocksByRoot(bbroot) => { OutboundRequest::BlocksByRoot(bbroot) => {
assert_eq!(decoded, InboundRequest::BlocksByRoot(bbroot)) assert_eq!(decoded, InboundRequest::BlocksByRoot(bbroot))
} }
OutboundRequest::BlobsByRange(blbrange) => {
assert_eq!(decoded, InboundRequest::BlobsByRange(blbrange))
}
OutboundRequest::Ping(ping) => { OutboundRequest::Ping(ping) => {
assert_eq!(decoded, InboundRequest::Ping(ping)) assert_eq!(decoded, InboundRequest::Ping(ping))
} }

View File

@ -67,6 +67,7 @@ pub struct OutboundRateLimiterConfig {
pub(super) goodbye_quota: Quota, pub(super) goodbye_quota: Quota,
pub(super) blocks_by_range_quota: Quota, pub(super) blocks_by_range_quota: Quota,
pub(super) blocks_by_root_quota: Quota, pub(super) blocks_by_root_quota: Quota,
pub(super) blobs_by_range_quota: Quota,
} }
impl OutboundRateLimiterConfig { impl OutboundRateLimiterConfig {
@ -77,6 +78,8 @@ impl OutboundRateLimiterConfig {
pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota =
Quota::n_every(methods::MAX_REQUEST_BLOCKS, 10); Quota::n_every(methods::MAX_REQUEST_BLOCKS, 10);
pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10); pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10);
pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota =
Quota::n_every(methods::MAX_REQUEST_BLOBS_SIDECARS, 10);
} }
impl Default for OutboundRateLimiterConfig { impl Default for OutboundRateLimiterConfig {
@ -88,6 +91,7 @@ impl Default for OutboundRateLimiterConfig {
goodbye_quota: Self::DEFAULT_GOODBYE_QUOTA, goodbye_quota: Self::DEFAULT_GOODBYE_QUOTA,
blocks_by_range_quota: Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA, blocks_by_range_quota: Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA,
blocks_by_root_quota: Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA, blocks_by_root_quota: Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA,
blobs_by_range_quota: Self::DEFAULT_BLOBS_BY_RANGE_QUOTA,
} }
} }
} }
@ -111,6 +115,7 @@ impl Debug for OutboundRateLimiterConfig {
.field("goodbye", fmt_q!(&self.goodbye_quota)) .field("goodbye", fmt_q!(&self.goodbye_quota))
.field("blocks_by_range", fmt_q!(&self.blocks_by_range_quota)) .field("blocks_by_range", fmt_q!(&self.blocks_by_range_quota))
.field("blocks_by_root", fmt_q!(&self.blocks_by_root_quota)) .field("blocks_by_root", fmt_q!(&self.blocks_by_root_quota))
.field("blobs_by_range", fmt_q!(&self.blobs_by_range_quota))
.finish() .finish()
} }
} }
@ -129,6 +134,7 @@ impl FromStr for OutboundRateLimiterConfig {
let mut goodbye_quota = None; let mut goodbye_quota = None;
let mut blocks_by_range_quota = None; let mut blocks_by_range_quota = None;
let mut blocks_by_root_quota = None; let mut blocks_by_root_quota = None;
let mut blobs_by_range_quota = None;
for proto_def in s.split(';') { for proto_def in s.split(';') {
let ProtocolQuota { protocol, quota } = proto_def.parse()?; let ProtocolQuota { protocol, quota } = proto_def.parse()?;
let quota = Some(quota); let quota = Some(quota);
@ -139,6 +145,7 @@ impl FromStr for OutboundRateLimiterConfig {
Protocol::BlocksByRoot => blocks_by_root_quota = blocks_by_root_quota.or(quota), Protocol::BlocksByRoot => blocks_by_root_quota = blocks_by_root_quota.or(quota),
Protocol::Ping => ping_quota = ping_quota.or(quota), Protocol::Ping => ping_quota = ping_quota.or(quota),
Protocol::MetaData => meta_data_quota = meta_data_quota.or(quota), Protocol::MetaData => meta_data_quota = meta_data_quota.or(quota),
Protocol::BlobsByRange => blobs_by_range_quota = blobs_by_range_quota.or(quota),
Protocol::LightClientBootstrap => return Err("Lighthouse does not send LightClientBootstrap requests. Quota should not be set."), Protocol::LightClientBootstrap => return Err("Lighthouse does not send LightClientBootstrap requests. Quota should not be set."),
} }
} }
@ -151,6 +158,8 @@ impl FromStr for OutboundRateLimiterConfig {
.unwrap_or(Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA), .unwrap_or(Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA),
blocks_by_root_quota: blocks_by_root_quota blocks_by_root_quota: blocks_by_root_quota
.unwrap_or(Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA), .unwrap_or(Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA),
blobs_by_range_quota: blobs_by_range_quota
.unwrap_or(Self::DEFAULT_BLOBS_BY_RANGE_QUOTA),
}) })
} }
} }

View File

@ -13,7 +13,8 @@ use std::sync::Arc;
use strum::IntoStaticStr; use strum::IntoStaticStr;
use superstruct::superstruct; use superstruct::superstruct;
use types::{ use types::{
light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot, blobs_sidecar::BlobsSidecar, light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec,
Hash256, SignedBeaconBlock, Slot,
}; };
/// Maximum number of blocks in a single request. /// Maximum number of blocks in a single request.
@ -24,6 +25,9 @@ pub const MAX_REQUEST_BLOCKS: u64 = 1024;
pub type MaxErrorLen = U256; pub type MaxErrorLen = U256;
pub const MAX_ERROR_LEN: u64 = 256; pub const MAX_ERROR_LEN: u64 = 256;
pub type MaxRequestBlobsSidecars = U1024;
pub const MAX_REQUEST_BLOBS_SIDECARS: u64 = 1024;
/// Wrapper over SSZ List to represent error message in rpc responses. /// Wrapper over SSZ List to represent error message in rpc responses.
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct ErrorType(pub VariableList<u8, MaxErrorLen>); pub struct ErrorType(pub VariableList<u8, MaxErrorLen>);
@ -206,6 +210,16 @@ pub struct BlocksByRangeRequest {
pub count: u64, pub count: u64,
} }
/// Request a number of beacon blobs from a peer.
#[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct BlobsByRangeRequest {
/// The starting slot to request blobs.
pub start_slot: u64,
/// The number of blobs from the start slot.
pub count: u64,
}
/// Request a number of beacon block roots from a peer. /// Request a number of beacon block roots from a peer.
#[derive(Encode, Decode, Clone, Debug, PartialEq)] #[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct OldBlocksByRangeRequest { pub struct OldBlocksByRangeRequest {
@ -245,6 +259,9 @@ pub enum RPCResponse<T: EthSpec> {
/// A response to a get BLOCKS_BY_ROOT request. /// A response to a get BLOCKS_BY_ROOT request.
BlocksByRoot(Arc<SignedBeaconBlock<T>>), BlocksByRoot(Arc<SignedBeaconBlock<T>>),
/// A response to a get BLOBS_BY_RANGE request
BlobsByRange(Arc<BlobsSidecar<T>>),
/// A response to a get LIGHTCLIENT_BOOTSTRAP request. /// A response to a get LIGHTCLIENT_BOOTSTRAP request.
LightClientBootstrap(LightClientBootstrap<T>), LightClientBootstrap(LightClientBootstrap<T>),
@ -263,6 +280,9 @@ pub enum ResponseTermination {
/// Blocks by root stream termination. /// Blocks by root stream termination.
BlocksByRoot, BlocksByRoot,
/// Blobs by range stream termination.
BlobsByRange,
} }
/// The structured response containing a result/code indicating success or failure /// The structured response containing a result/code indicating success or failure
@ -330,6 +350,7 @@ impl<T: EthSpec> RPCCodedResponse<T> {
RPCResponse::Status(_) => false, RPCResponse::Status(_) => false,
RPCResponse::BlocksByRange(_) => true, RPCResponse::BlocksByRange(_) => true,
RPCResponse::BlocksByRoot(_) => true, RPCResponse::BlocksByRoot(_) => true,
RPCResponse::BlobsByRange(_) => true,
RPCResponse::Pong(_) => false, RPCResponse::Pong(_) => false,
RPCResponse::MetaData(_) => false, RPCResponse::MetaData(_) => false,
RPCResponse::LightClientBootstrap(_) => false, RPCResponse::LightClientBootstrap(_) => false,
@ -365,6 +386,7 @@ impl<T: EthSpec> RPCResponse<T> {
RPCResponse::Status(_) => Protocol::Status, RPCResponse::Status(_) => Protocol::Status,
RPCResponse::BlocksByRange(_) => Protocol::BlocksByRange, RPCResponse::BlocksByRange(_) => Protocol::BlocksByRange,
RPCResponse::BlocksByRoot(_) => Protocol::BlocksByRoot, RPCResponse::BlocksByRoot(_) => Protocol::BlocksByRoot,
RPCResponse::BlobsByRange(_) => Protocol::BlobsByRange,
RPCResponse::Pong(_) => Protocol::Ping, RPCResponse::Pong(_) => Protocol::Ping,
RPCResponse::MetaData(_) => Protocol::MetaData, RPCResponse::MetaData(_) => Protocol::MetaData,
RPCResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap, RPCResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap,
@ -401,6 +423,9 @@ impl<T: EthSpec> std::fmt::Display for RPCResponse<T> {
RPCResponse::BlocksByRoot(block) => { RPCResponse::BlocksByRoot(block) => {
write!(f, "BlocksByRoot: Block slot: {}", block.slot()) write!(f, "BlocksByRoot: Block slot: {}", block.slot())
} }
RPCResponse::BlobsByRange(blob) => {
write!(f, "BlobsByRange: Blob slot: {}", blob.beacon_block_slot)
}
RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data),
RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()), RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()),
RPCResponse::LightClientBootstrap(bootstrap) => { RPCResponse::LightClientBootstrap(bootstrap) => {
@ -452,6 +477,12 @@ impl std::fmt::Display for OldBlocksByRangeRequest {
} }
} }
impl std::fmt::Display for BlobsByRangeRequest {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "Start Slot: {}, Count: {}", self.start_slot, self.count)
}
}
impl slog::KV for StatusMessage { impl slog::KV for StatusMessage {
fn serialize( fn serialize(
&self, &self,

View File

@ -24,6 +24,7 @@ pub(crate) use handler::HandlerErr;
pub(crate) use methods::{MetaData, MetaDataV1, MetaDataV2, Ping, RPCCodedResponse, RPCResponse}; pub(crate) use methods::{MetaData, MetaDataV1, MetaDataV2, Ping, RPCCodedResponse, RPCResponse};
pub(crate) use protocol::{InboundRequest, RPCProtocol}; pub(crate) use protocol::{InboundRequest, RPCProtocol};
use crate::rpc::methods::MAX_REQUEST_BLOBS_SIDECARS;
pub use handler::SubstreamId; pub use handler::SubstreamId;
pub use methods::{ pub use methods::{
BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, LightClientBootstrapRequest, BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, LightClientBootstrapRequest,
@ -144,6 +145,11 @@ impl<Id: ReqId, TSpec: EthSpec> RPC<Id, TSpec> {
Duration::from_secs(10), Duration::from_secs(10),
) )
.n_every(Protocol::BlocksByRoot, 128, Duration::from_secs(10)) .n_every(Protocol::BlocksByRoot, 128, Duration::from_secs(10))
.n_every(
Protocol::BlobsByRange,
MAX_REQUEST_BLOBS_SIDECARS,
Duration::from_secs(10),
)
.build() .build()
.expect("Configuration parameters are valid"); .expect("Configuration parameters are valid");
@ -339,6 +345,7 @@ where
match end { match end {
ResponseTermination::BlocksByRange => Protocol::BlocksByRange, ResponseTermination::BlocksByRange => Protocol::BlocksByRange,
ResponseTermination::BlocksByRoot => Protocol::BlocksByRoot, ResponseTermination::BlocksByRoot => Protocol::BlocksByRoot,
ResponseTermination::BlobsByRange => Protocol::BlobsByRange,
}, },
), ),
}, },

View File

@ -38,6 +38,7 @@ pub enum OutboundRequest<TSpec: EthSpec> {
Goodbye(GoodbyeReason), Goodbye(GoodbyeReason),
BlocksByRange(OldBlocksByRangeRequest), BlocksByRange(OldBlocksByRangeRequest),
BlocksByRoot(BlocksByRootRequest), BlocksByRoot(BlocksByRootRequest),
BlobsByRange(BlobsByRangeRequest),
LightClientBootstrap(LightClientBootstrapRequest), LightClientBootstrap(LightClientBootstrapRequest),
Ping(Ping), Ping(Ping),
MetaData(PhantomData<TSpec>), MetaData(PhantomData<TSpec>),
@ -76,6 +77,11 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy), ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy),
ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy),
], ],
OutboundRequest::BlobsByRange(_) => vec![ProtocolId::new(
Protocol::BlobsByRange,
Version::V1,
Encoding::SSZSnappy,
)],
OutboundRequest::Ping(_) => vec![ProtocolId::new( OutboundRequest::Ping(_) => vec![ProtocolId::new(
Protocol::Ping, Protocol::Ping,
Version::V1, Version::V1,
@ -100,6 +106,7 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
OutboundRequest::Goodbye(_) => 0, OutboundRequest::Goodbye(_) => 0,
OutboundRequest::BlocksByRange(req) => req.count, OutboundRequest::BlocksByRange(req) => req.count,
OutboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64, OutboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64,
OutboundRequest::BlobsByRange(req) => req.count,
OutboundRequest::Ping(_) => 1, OutboundRequest::Ping(_) => 1,
OutboundRequest::MetaData(_) => 1, OutboundRequest::MetaData(_) => 1,
OutboundRequest::LightClientBootstrap(_) => 1, OutboundRequest::LightClientBootstrap(_) => 1,
@ -113,6 +120,7 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
OutboundRequest::Goodbye(_) => Protocol::Goodbye, OutboundRequest::Goodbye(_) => Protocol::Goodbye,
OutboundRequest::BlocksByRange(_) => Protocol::BlocksByRange, OutboundRequest::BlocksByRange(_) => Protocol::BlocksByRange,
OutboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot, OutboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot,
OutboundRequest::BlobsByRange(_) => Protocol::BlobsByRange,
OutboundRequest::Ping(_) => Protocol::Ping, OutboundRequest::Ping(_) => Protocol::Ping,
OutboundRequest::MetaData(_) => Protocol::MetaData, OutboundRequest::MetaData(_) => Protocol::MetaData,
OutboundRequest::LightClientBootstrap(_) => Protocol::LightClientBootstrap, OutboundRequest::LightClientBootstrap(_) => Protocol::LightClientBootstrap,
@ -127,6 +135,7 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
// variants that have `multiple_responses()` can have values. // variants that have `multiple_responses()` can have values.
OutboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange, OutboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange,
OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot,
OutboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange,
OutboundRequest::LightClientBootstrap(_) => unreachable!(), OutboundRequest::LightClientBootstrap(_) => unreachable!(),
OutboundRequest::Status(_) => unreachable!(), OutboundRequest::Status(_) => unreachable!(),
OutboundRequest::Goodbye(_) => unreachable!(), OutboundRequest::Goodbye(_) => unreachable!(),
@ -183,6 +192,7 @@ impl<TSpec: EthSpec> std::fmt::Display for OutboundRequest<TSpec> {
OutboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason), OutboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason),
OutboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req), OutboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req),
OutboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), OutboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req),
OutboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req),
OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data),
OutboundRequest::MetaData(_) => write!(f, "MetaData request"), OutboundRequest::MetaData(_) => write!(f, "MetaData request"),
OutboundRequest::LightClientBootstrap(bootstrap) => { OutboundRequest::LightClientBootstrap(bootstrap) => {

View File

@ -20,9 +20,11 @@ use tokio_util::{
codec::Framed, codec::Framed,
compat::{Compat, FuturesAsyncReadCompatExt}, compat::{Compat, FuturesAsyncReadCompatExt},
}; };
use types::BlobsSidecar;
use types::{ use types::{
BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EthSpec, ForkContext, BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockMerge, Blob,
ForkName, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock, EmptyBlock, EthSpec, ForkContext, ForkName, Hash256, MainnetEthSpec, Signature,
SignedBeaconBlock,
}; };
lazy_static! { lazy_static! {
@ -61,6 +63,13 @@ lazy_static! {
.as_ssz_bytes() .as_ssz_bytes()
.len(); .len();
pub static ref SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD: usize = SignedBeaconBlock::<MainnetEthSpec>::from_block(
BeaconBlock::Capella(BeaconBlockCapella::full(&MainnetEthSpec::default_spec())),
Signature::empty(),
)
.as_ssz_bytes()
.len();
/// The `BeaconBlockMerge` block has an `ExecutionPayload` field which has a max size ~16 GiB for future proofing. /// The `BeaconBlockMerge` block has an `ExecutionPayload` field which has a max size ~16 GiB for future proofing.
/// We calculate the value from its fields instead of constructing the block and checking the length. /// We calculate the value from its fields instead of constructing the block and checking the length.
/// Note: This is only the theoretical upper bound. We further bound the max size we receive over the network /// Note: This is only the theoretical upper bound. We further bound the max size we receive over the network
@ -68,9 +77,19 @@ lazy_static! {
pub static ref SIGNED_BEACON_BLOCK_MERGE_MAX: usize = pub static ref SIGNED_BEACON_BLOCK_MERGE_MAX: usize =
// Size of a full altair block // Size of a full altair block
*SIGNED_BEACON_BLOCK_ALTAIR_MAX *SIGNED_BEACON_BLOCK_ALTAIR_MAX
+ types::ExecutionPayload::<MainnetEthSpec>::max_execution_payload_size() // adding max size of execution payload (~16gb) + types::ExecutionPayload::<MainnetEthSpec>::max_execution_payload_merge_size() // adding max size of execution payload (~16gb)
+ ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field + ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field
pub static ref SIGNED_BEACON_BLOCK_CAPELLA_MAX: usize = *SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD
+ types::ExecutionPayload::<MainnetEthSpec>::max_execution_payload_capella_size() // adding max size of execution payload (~16gb)
+ ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field
pub static ref SIGNED_BEACON_BLOCK_EIP4844_MAX: usize = *SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD
+ types::ExecutionPayload::<MainnetEthSpec>::max_execution_payload_eip4844_size() // adding max size of execution payload (~16gb)
+ ssz::BYTES_PER_LENGTH_OFFSET // Adding the additional offsets for the `ExecutionPayload`
+ (<types::KzgCommitment as Encode>::ssz_fixed_len() * <MainnetEthSpec>::max_blobs_per_block())
+ ssz::BYTES_PER_LENGTH_OFFSET; // Length offset for the blob commitments field.
pub static ref BLOCKS_BY_ROOT_REQUEST_MIN: usize = pub static ref BLOCKS_BY_ROOT_REQUEST_MIN: usize =
VariableList::<Hash256, MaxRequestBlocks>::from(Vec::<Hash256>::new()) VariableList::<Hash256, MaxRequestBlocks>::from(Vec::<Hash256>::new())
.as_ssz_bytes() .as_ssz_bytes()
@ -96,12 +115,21 @@ lazy_static! {
.as_ssz_bytes() .as_ssz_bytes()
.len(); .len();
pub static ref BLOBS_SIDECAR_MIN: usize = BlobsSidecar::<MainnetEthSpec>::empty()
.as_ssz_bytes()
.len();
pub static ref BLOBS_SIDECAR_MAX: usize = *BLOBS_SIDECAR_MIN // Max size of variable length `blobs` field
+ (MainnetEthSpec::max_blobs_per_block() * <Blob<MainnetEthSpec> as Encode>::ssz_fixed_len());
} }
/// The maximum bytes that can be sent across the RPC pre-merge. /// The maximum bytes that can be sent across the RPC pre-merge.
pub(crate) const MAX_RPC_SIZE: usize = 1_048_576; // 1M pub(crate) const MAX_RPC_SIZE: usize = 1_048_576; // 1M
/// The maximum bytes that can be sent across the RPC post-merge. /// The maximum bytes that can be sent across the RPC post-merge.
pub(crate) const MAX_RPC_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M pub(crate) const MAX_RPC_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M
pub(crate) const MAX_RPC_SIZE_POST_CAPELLA: usize = 10 * 1_048_576; // 10M
// FIXME(sean) should this be increased to account for blobs?
pub(crate) const MAX_RPC_SIZE_POST_EIP4844: usize = 10 * 1_048_576; // 10M
/// The protocol prefix the RPC protocol id. /// The protocol prefix the RPC protocol id.
const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req";
/// Time allowed for the first byte of a request to arrive before we time out (Time To First Byte). /// Time allowed for the first byte of a request to arrive before we time out (Time To First Byte).
@ -113,8 +141,10 @@ const REQUEST_TIMEOUT: u64 = 15;
/// Returns the maximum bytes that can be sent across the RPC. /// Returns the maximum bytes that can be sent across the RPC.
pub fn max_rpc_size(fork_context: &ForkContext) -> usize { pub fn max_rpc_size(fork_context: &ForkContext) -> usize {
match fork_context.current_fork() { match fork_context.current_fork() {
ForkName::Merge => MAX_RPC_SIZE_POST_MERGE,
ForkName::Altair | ForkName::Base => MAX_RPC_SIZE, ForkName::Altair | ForkName::Base => MAX_RPC_SIZE,
ForkName::Merge => MAX_RPC_SIZE_POST_MERGE,
ForkName::Capella => MAX_RPC_SIZE_POST_CAPELLA,
ForkName::Eip4844 => MAX_RPC_SIZE_POST_EIP4844,
} }
} }
@ -135,6 +165,14 @@ pub fn rpc_block_limits_by_fork(current_fork: ForkName) -> RpcLimits {
*SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks
*SIGNED_BEACON_BLOCK_MERGE_MAX, // Merge block is larger than base and altair blocks *SIGNED_BEACON_BLOCK_MERGE_MAX, // Merge block is larger than base and altair blocks
), ),
ForkName::Capella => RpcLimits::new(
*SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks
*SIGNED_BEACON_BLOCK_CAPELLA_MAX, // Capella block is larger than base, altair and merge blocks
),
ForkName::Eip4844 => RpcLimits::new(
*SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks
*SIGNED_BEACON_BLOCK_EIP4844_MAX, // EIP 4844 block is larger than all prior fork blocks
),
} }
} }
@ -152,6 +190,8 @@ pub enum Protocol {
/// The `BlocksByRoot` protocol name. /// The `BlocksByRoot` protocol name.
#[strum(serialize = "beacon_blocks_by_root")] #[strum(serialize = "beacon_blocks_by_root")]
BlocksByRoot, BlocksByRoot,
/// The `BlobsByRange` protocol name.
BlobsByRange,
/// The `Ping` protocol name. /// The `Ping` protocol name.
Ping, Ping,
/// The `MetaData` protocol name. /// The `MetaData` protocol name.
@ -287,6 +327,10 @@ impl ProtocolId {
Protocol::BlocksByRoot => { Protocol::BlocksByRoot => {
RpcLimits::new(*BLOCKS_BY_ROOT_REQUEST_MIN, *BLOCKS_BY_ROOT_REQUEST_MAX) RpcLimits::new(*BLOCKS_BY_ROOT_REQUEST_MIN, *BLOCKS_BY_ROOT_REQUEST_MAX)
} }
Protocol::BlobsByRange => RpcLimits::new(
<BlobsByRangeRequest as Encode>::ssz_fixed_len(),
<BlobsByRangeRequest as Encode>::ssz_fixed_len(),
),
Protocol::Ping => RpcLimits::new( Protocol::Ping => RpcLimits::new(
<Ping as Encode>::ssz_fixed_len(), <Ping as Encode>::ssz_fixed_len(),
<Ping as Encode>::ssz_fixed_len(), <Ping as Encode>::ssz_fixed_len(),
@ -309,7 +353,7 @@ impl ProtocolId {
Protocol::Goodbye => RpcLimits::new(0, 0), // Goodbye request has no response Protocol::Goodbye => RpcLimits::new(0, 0), // Goodbye request has no response
Protocol::BlocksByRange => rpc_block_limits_by_fork(fork_context.current_fork()), Protocol::BlocksByRange => rpc_block_limits_by_fork(fork_context.current_fork()),
Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork()), Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork()),
Protocol::BlobsByRange => RpcLimits::new(*BLOBS_SIDECAR_MIN, *BLOBS_SIDECAR_MAX),
Protocol::Ping => RpcLimits::new( Protocol::Ping => RpcLimits::new(
<Ping as Encode>::ssz_fixed_len(), <Ping as Encode>::ssz_fixed_len(),
<Ping as Encode>::ssz_fixed_len(), <Ping as Encode>::ssz_fixed_len(),
@ -427,6 +471,7 @@ pub enum InboundRequest<TSpec: EthSpec> {
Goodbye(GoodbyeReason), Goodbye(GoodbyeReason),
BlocksByRange(OldBlocksByRangeRequest), BlocksByRange(OldBlocksByRangeRequest),
BlocksByRoot(BlocksByRootRequest), BlocksByRoot(BlocksByRootRequest),
BlobsByRange(BlobsByRangeRequest),
LightClientBootstrap(LightClientBootstrapRequest), LightClientBootstrap(LightClientBootstrapRequest),
Ping(Ping), Ping(Ping),
MetaData(PhantomData<TSpec>), MetaData(PhantomData<TSpec>),
@ -443,6 +488,7 @@ impl<TSpec: EthSpec> InboundRequest<TSpec> {
InboundRequest::Goodbye(_) => 0, InboundRequest::Goodbye(_) => 0,
InboundRequest::BlocksByRange(req) => req.count, InboundRequest::BlocksByRange(req) => req.count,
InboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64, InboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64,
InboundRequest::BlobsByRange(req) => req.count,
InboundRequest::Ping(_) => 1, InboundRequest::Ping(_) => 1,
InboundRequest::MetaData(_) => 1, InboundRequest::MetaData(_) => 1,
InboundRequest::LightClientBootstrap(_) => 1, InboundRequest::LightClientBootstrap(_) => 1,
@ -456,6 +502,7 @@ impl<TSpec: EthSpec> InboundRequest<TSpec> {
InboundRequest::Goodbye(_) => Protocol::Goodbye, InboundRequest::Goodbye(_) => Protocol::Goodbye,
InboundRequest::BlocksByRange(_) => Protocol::BlocksByRange, InboundRequest::BlocksByRange(_) => Protocol::BlocksByRange,
InboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot, InboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot,
InboundRequest::BlobsByRange(_) => Protocol::BlobsByRange,
InboundRequest::Ping(_) => Protocol::Ping, InboundRequest::Ping(_) => Protocol::Ping,
InboundRequest::MetaData(_) => Protocol::MetaData, InboundRequest::MetaData(_) => Protocol::MetaData,
InboundRequest::LightClientBootstrap(_) => Protocol::LightClientBootstrap, InboundRequest::LightClientBootstrap(_) => Protocol::LightClientBootstrap,
@ -470,6 +517,7 @@ impl<TSpec: EthSpec> InboundRequest<TSpec> {
// variants that have `multiple_responses()` can have values. // variants that have `multiple_responses()` can have values.
InboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange, InboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange,
InboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, InboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot,
InboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange,
InboundRequest::Status(_) => unreachable!(), InboundRequest::Status(_) => unreachable!(),
InboundRequest::Goodbye(_) => unreachable!(), InboundRequest::Goodbye(_) => unreachable!(),
InboundRequest::Ping(_) => unreachable!(), InboundRequest::Ping(_) => unreachable!(),
@ -576,6 +624,7 @@ impl<TSpec: EthSpec> std::fmt::Display for InboundRequest<TSpec> {
InboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason), InboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason),
InboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req), InboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req),
InboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), InboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req),
InboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req),
InboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), InboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data),
InboundRequest::MetaData(_) => write!(f, "MetaData request"), InboundRequest::MetaData(_) => write!(f, "MetaData request"),
InboundRequest::LightClientBootstrap(bootstrap) => { InboundRequest::LightClientBootstrap(bootstrap) => {

View File

@ -93,6 +93,8 @@ pub struct RPCRateLimiter {
bbrange_rl: Limiter<PeerId>, bbrange_rl: Limiter<PeerId>,
/// BlocksByRoot rate limiter. /// BlocksByRoot rate limiter.
bbroots_rl: Limiter<PeerId>, bbroots_rl: Limiter<PeerId>,
/// BlobsByRange rate limiter.
blbrange_rl: Limiter<PeerId>,
/// LightClientBootstrap rate limiter. /// LightClientBootstrap rate limiter.
lcbootstrap_rl: Limiter<PeerId>, lcbootstrap_rl: Limiter<PeerId>,
} }
@ -121,6 +123,8 @@ pub struct RPCRateLimiterBuilder {
bbrange_quota: Option<Quota>, bbrange_quota: Option<Quota>,
/// Quota for the BlocksByRoot protocol. /// Quota for the BlocksByRoot protocol.
bbroots_quota: Option<Quota>, bbroots_quota: Option<Quota>,
/// Quota for the BlobsByRange protocol.
blbrange_quota: Option<Quota>,
/// Quota for the LightClientBootstrap protocol. /// Quota for the LightClientBootstrap protocol.
lcbootstrap_quota: Option<Quota>, lcbootstrap_quota: Option<Quota>,
} }
@ -136,6 +140,7 @@ impl RPCRateLimiterBuilder {
Protocol::Goodbye => self.goodbye_quota = q, Protocol::Goodbye => self.goodbye_quota = q,
Protocol::BlocksByRange => self.bbrange_quota = q, Protocol::BlocksByRange => self.bbrange_quota = q,
Protocol::BlocksByRoot => self.bbroots_quota = q, Protocol::BlocksByRoot => self.bbroots_quota = q,
Protocol::BlobsByRange => self.blbrange_quota = q,
Protocol::LightClientBootstrap => self.lcbootstrap_quota = q, Protocol::LightClientBootstrap => self.lcbootstrap_quota = q,
} }
self self
@ -180,6 +185,10 @@ impl RPCRateLimiterBuilder {
.lcbootstrap_quota .lcbootstrap_quota
.ok_or("LightClientBootstrap quota not specified")?; .ok_or("LightClientBootstrap quota not specified")?;
let blbrange_quota = self
.blbrange_quota
.ok_or("BlobsByRange quota not specified")?;
// create the rate limiters // create the rate limiters
let ping_rl = Limiter::from_quota(ping_quota)?; let ping_rl = Limiter::from_quota(ping_quota)?;
let metadata_rl = Limiter::from_quota(metadata_quota)?; let metadata_rl = Limiter::from_quota(metadata_quota)?;
@ -187,6 +196,7 @@ impl RPCRateLimiterBuilder {
let goodbye_rl = Limiter::from_quota(goodbye_quota)?; let goodbye_rl = Limiter::from_quota(goodbye_quota)?;
let bbroots_rl = Limiter::from_quota(bbroots_quota)?; let bbroots_rl = Limiter::from_quota(bbroots_quota)?;
let bbrange_rl = Limiter::from_quota(bbrange_quota)?; let bbrange_rl = Limiter::from_quota(bbrange_quota)?;
let blbrange_rl = Limiter::from_quota(blbrange_quota)?;
let lcbootstrap_rl = Limiter::from_quota(lcbootstrap_quote)?; let lcbootstrap_rl = Limiter::from_quota(lcbootstrap_quote)?;
// check for peers to prune every 30 seconds, starting in 30 seconds // check for peers to prune every 30 seconds, starting in 30 seconds
@ -201,6 +211,7 @@ impl RPCRateLimiterBuilder {
goodbye_rl, goodbye_rl,
bbroots_rl, bbroots_rl,
bbrange_rl, bbrange_rl,
blbrange_rl,
lcbootstrap_rl, lcbootstrap_rl,
init_time: Instant::now(), init_time: Instant::now(),
}) })
@ -254,6 +265,7 @@ impl RPCRateLimiter {
Protocol::Goodbye => &mut self.goodbye_rl, Protocol::Goodbye => &mut self.goodbye_rl,
Protocol::BlocksByRange => &mut self.bbrange_rl, Protocol::BlocksByRange => &mut self.bbrange_rl,
Protocol::BlocksByRoot => &mut self.bbroots_rl, Protocol::BlocksByRoot => &mut self.bbroots_rl,
Protocol::BlobsByRange => &mut self.blbrange_rl,
Protocol::LightClientBootstrap => &mut self.lcbootstrap_rl, Protocol::LightClientBootstrap => &mut self.lcbootstrap_rl,
}; };
check(limiter) check(limiter)
@ -267,6 +279,7 @@ impl RPCRateLimiter {
self.goodbye_rl.prune(time_since_start); self.goodbye_rl.prune(time_since_start);
self.bbrange_rl.prune(time_since_start); self.bbrange_rl.prune(time_since_start);
self.bbroots_rl.prune(time_since_start); self.bbroots_rl.prune(time_since_start);
self.blbrange_rl.prune(time_since_start);
} }
} }

View File

@ -60,6 +60,7 @@ impl<Id: ReqId, TSpec: EthSpec> SelfRateLimiter<Id, TSpec> {
goodbye_quota, goodbye_quota,
blocks_by_range_quota, blocks_by_range_quota,
blocks_by_root_quota, blocks_by_root_quota,
blobs_by_range_quota,
} = config; } = config;
let limiter = RateLimiter::builder() let limiter = RateLimiter::builder()
@ -69,6 +70,7 @@ impl<Id: ReqId, TSpec: EthSpec> SelfRateLimiter<Id, TSpec> {
.set_quota(Protocol::Goodbye, goodbye_quota) .set_quota(Protocol::Goodbye, goodbye_quota)
.set_quota(Protocol::BlocksByRange, blocks_by_range_quota) .set_quota(Protocol::BlocksByRange, blocks_by_range_quota)
.set_quota(Protocol::BlocksByRoot, blocks_by_root_quota) .set_quota(Protocol::BlocksByRoot, blocks_by_root_quota)
.set_quota(Protocol::BlobsByRange, blobs_by_range_quota)
// Manually set the LightClientBootstrap quota, since we use the same rate limiter for // Manually set the LightClientBootstrap quota, since we use the same rate limiter for
// inbound and outbound requests, and the LightClientBootstrap is an only inbound // inbound and outbound requests, and the LightClientBootstrap is an only inbound
// protocol. // protocol.

View File

@ -1,8 +1,10 @@
use std::sync::Arc; use std::sync::Arc;
use libp2p::core::connection::ConnectionId; use libp2p::core::connection::ConnectionId;
use types::{light_client_bootstrap::LightClientBootstrap, EthSpec, SignedBeaconBlock}; use types::light_client_bootstrap::LightClientBootstrap;
use types::{BlobsSidecar, EthSpec, SignedBeaconBlock};
use crate::rpc::methods::BlobsByRangeRequest;
use crate::rpc::{ use crate::rpc::{
methods::{ methods::{
BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest,
@ -32,6 +34,8 @@ pub enum Request {
Status(StatusMessage), Status(StatusMessage),
/// A blocks by range request. /// A blocks by range request.
BlocksByRange(BlocksByRangeRequest), BlocksByRange(BlocksByRangeRequest),
/// A blobs by range request.
BlobsByRange(BlobsByRangeRequest),
/// A request blocks root request. /// A request blocks root request.
BlocksByRoot(BlocksByRootRequest), BlocksByRoot(BlocksByRootRequest),
// light client bootstrap request // light client bootstrap request
@ -49,6 +53,7 @@ impl<TSpec: EthSpec> std::convert::From<Request> for OutboundRequest<TSpec> {
step: 1, step: 1,
}) })
} }
Request::BlobsByRange(r) => OutboundRequest::BlobsByRange(r),
Request::LightClientBootstrap(b) => OutboundRequest::LightClientBootstrap(b), Request::LightClientBootstrap(b) => OutboundRequest::LightClientBootstrap(b),
Request::Status(s) => OutboundRequest::Status(s), Request::Status(s) => OutboundRequest::Status(s),
} }
@ -67,6 +72,8 @@ pub enum Response<TSpec: EthSpec> {
Status(StatusMessage), Status(StatusMessage),
/// A response to a get BLOCKS_BY_RANGE request. A None response signals the end of the batch. /// A response to a get BLOCKS_BY_RANGE request. A None response signals the end of the batch.
BlocksByRange(Option<Arc<SignedBeaconBlock<TSpec>>>), BlocksByRange(Option<Arc<SignedBeaconBlock<TSpec>>>),
/// A response to a get BLOBS_BY_RANGE request. A None response signals the end of the batch.
BlobsByRange(Option<Arc<BlobsSidecar<TSpec>>>),
/// A response to a get BLOCKS_BY_ROOT request. /// A response to a get BLOCKS_BY_ROOT request.
BlocksByRoot(Option<Arc<SignedBeaconBlock<TSpec>>>), BlocksByRoot(Option<Arc<SignedBeaconBlock<TSpec>>>),
/// A response to a LightClientUpdate request. /// A response to a LightClientUpdate request.
@ -84,6 +91,10 @@ impl<TSpec: EthSpec> std::convert::From<Response<TSpec>> for RPCCodedResponse<TS
Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRange(b)), Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRange(b)),
None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange), None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange),
}, },
Response::BlobsByRange(r) => match r {
Some(b) => RPCCodedResponse::Success(RPCResponse::BlobsByRange(b)),
None => RPCCodedResponse::StreamTermination(ResponseTermination::BlobsByRange),
},
Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)), Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)),
Response::LightClientBootstrap(b) => { Response::LightClientBootstrap(b) => {
RPCCodedResponse::Success(RPCResponse::LightClientBootstrap(b)) RPCCodedResponse::Success(RPCResponse::LightClientBootstrap(b))

View File

@ -20,6 +20,8 @@ pub struct GossipCache {
topic_msgs: HashMap<GossipTopic, HashMap<Vec<u8>, Key>>, topic_msgs: HashMap<GossipTopic, HashMap<Vec<u8>, Key>>,
/// Timeout for blocks. /// Timeout for blocks.
beacon_block: Option<Duration>, beacon_block: Option<Duration>,
/// Timeout for blobs.
beacon_block_and_blobs_sidecar: Option<Duration>,
/// Timeout for aggregate attestations. /// Timeout for aggregate attestations.
aggregates: Option<Duration>, aggregates: Option<Duration>,
/// Timeout for attestations. /// Timeout for attestations.
@ -34,6 +36,8 @@ pub struct GossipCache {
signed_contribution_and_proof: Option<Duration>, signed_contribution_and_proof: Option<Duration>,
/// Timeout for sync committee messages. /// Timeout for sync committee messages.
sync_committee_message: Option<Duration>, sync_committee_message: Option<Duration>,
/// Timeout for signed BLS to execution changes.
bls_to_execution_change: Option<Duration>,
/// Timeout for light client finality updates. /// Timeout for light client finality updates.
light_client_finality_update: Option<Duration>, light_client_finality_update: Option<Duration>,
/// Timeout for light client optimistic updates. /// Timeout for light client optimistic updates.
@ -45,6 +49,8 @@ pub struct GossipCacheBuilder {
default_timeout: Option<Duration>, default_timeout: Option<Duration>,
/// Timeout for blocks. /// Timeout for blocks.
beacon_block: Option<Duration>, beacon_block: Option<Duration>,
/// Timeout for blob sidecars.
beacon_block_and_blobs_sidecar: Option<Duration>,
/// Timeout for aggregate attestations. /// Timeout for aggregate attestations.
aggregates: Option<Duration>, aggregates: Option<Duration>,
/// Timeout for attestations. /// Timeout for attestations.
@ -59,6 +65,8 @@ pub struct GossipCacheBuilder {
signed_contribution_and_proof: Option<Duration>, signed_contribution_and_proof: Option<Duration>,
/// Timeout for sync committee messages. /// Timeout for sync committee messages.
sync_committee_message: Option<Duration>, sync_committee_message: Option<Duration>,
/// Timeout for signed BLS to execution changes.
bls_to_execution_change: Option<Duration>,
/// Timeout for light client finality updates. /// Timeout for light client finality updates.
light_client_finality_update: Option<Duration>, light_client_finality_update: Option<Duration>,
/// Timeout for light client optimistic updates. /// Timeout for light client optimistic updates.
@ -121,6 +129,12 @@ impl GossipCacheBuilder {
self self
} }
/// Timeout for BLS to execution change messages.
pub fn bls_to_execution_change_timeout(mut self, timeout: Duration) -> Self {
self.bls_to_execution_change = Some(timeout);
self
}
/// Timeout for light client finality update messages. /// Timeout for light client finality update messages.
pub fn light_client_finality_update_timeout(mut self, timeout: Duration) -> Self { pub fn light_client_finality_update_timeout(mut self, timeout: Duration) -> Self {
self.light_client_finality_update = Some(timeout); self.light_client_finality_update = Some(timeout);
@ -137,6 +151,7 @@ impl GossipCacheBuilder {
let GossipCacheBuilder { let GossipCacheBuilder {
default_timeout, default_timeout,
beacon_block, beacon_block,
beacon_block_and_blobs_sidecar,
aggregates, aggregates,
attestation, attestation,
voluntary_exit, voluntary_exit,
@ -144,6 +159,7 @@ impl GossipCacheBuilder {
attester_slashing, attester_slashing,
signed_contribution_and_proof, signed_contribution_and_proof,
sync_committee_message, sync_committee_message,
bls_to_execution_change,
light_client_finality_update, light_client_finality_update,
light_client_optimistic_update, light_client_optimistic_update,
} = self; } = self;
@ -151,6 +167,7 @@ impl GossipCacheBuilder {
expirations: DelayQueue::default(), expirations: DelayQueue::default(),
topic_msgs: HashMap::default(), topic_msgs: HashMap::default(),
beacon_block: beacon_block.or(default_timeout), beacon_block: beacon_block.or(default_timeout),
beacon_block_and_blobs_sidecar: beacon_block_and_blobs_sidecar.or(default_timeout),
aggregates: aggregates.or(default_timeout), aggregates: aggregates.or(default_timeout),
attestation: attestation.or(default_timeout), attestation: attestation.or(default_timeout),
voluntary_exit: voluntary_exit.or(default_timeout), voluntary_exit: voluntary_exit.or(default_timeout),
@ -158,6 +175,7 @@ impl GossipCacheBuilder {
attester_slashing: attester_slashing.or(default_timeout), attester_slashing: attester_slashing.or(default_timeout),
signed_contribution_and_proof: signed_contribution_and_proof.or(default_timeout), signed_contribution_and_proof: signed_contribution_and_proof.or(default_timeout),
sync_committee_message: sync_committee_message.or(default_timeout), sync_committee_message: sync_committee_message.or(default_timeout),
bls_to_execution_change: bls_to_execution_change.or(default_timeout),
light_client_finality_update: light_client_finality_update.or(default_timeout), light_client_finality_update: light_client_finality_update.or(default_timeout),
light_client_optimistic_update: light_client_optimistic_update.or(default_timeout), light_client_optimistic_update: light_client_optimistic_update.or(default_timeout),
} }
@ -175,6 +193,7 @@ impl GossipCache {
pub fn insert(&mut self, topic: GossipTopic, data: Vec<u8>) { pub fn insert(&mut self, topic: GossipTopic, data: Vec<u8>) {
let expire_timeout = match topic.kind() { let expire_timeout = match topic.kind() {
GossipKind::BeaconBlock => self.beacon_block, GossipKind::BeaconBlock => self.beacon_block,
GossipKind::BeaconBlocksAndBlobsSidecar => self.beacon_block_and_blobs_sidecar,
GossipKind::BeaconAggregateAndProof => self.aggregates, GossipKind::BeaconAggregateAndProof => self.aggregates,
GossipKind::Attestation(_) => self.attestation, GossipKind::Attestation(_) => self.attestation,
GossipKind::VoluntaryExit => self.voluntary_exit, GossipKind::VoluntaryExit => self.voluntary_exit,
@ -182,6 +201,7 @@ impl GossipCache {
GossipKind::AttesterSlashing => self.attester_slashing, GossipKind::AttesterSlashing => self.attester_slashing,
GossipKind::SignedContributionAndProof => self.signed_contribution_and_proof, GossipKind::SignedContributionAndProof => self.signed_contribution_and_proof,
GossipKind::SyncCommitteeMessage(_) => self.sync_committee_message, GossipKind::SyncCommitteeMessage(_) => self.sync_committee_message,
GossipKind::BlsToExecutionChange => self.bls_to_execution_change,
GossipKind::LightClientFinalityUpdate => self.light_client_finality_update, GossipKind::LightClientFinalityUpdate => self.light_client_finality_update,
GossipKind::LightClientOptimisticUpdate => self.light_client_optimistic_update, GossipKind::LightClientOptimisticUpdate => self.light_client_optimistic_update,
}; };

View File

@ -1,3 +1,5 @@
use self::behaviour::Behaviour;
use self::gossip_cache::GossipCache;
use crate::config::{gossipsub_config, NetworkLoad}; use crate::config::{gossipsub_config, NetworkLoad};
use crate::discovery::{ use crate::discovery::{
subnet_predicate, DiscoveredPeers, Discovery, FIND_NODE_QUERY_CLOSEST_PEERS, subnet_predicate, DiscoveredPeers, Discovery, FIND_NODE_QUERY_CLOSEST_PEERS,
@ -7,15 +9,16 @@ use crate::peer_manager::{
ConnectionDirection, PeerManager, PeerManagerEvent, ConnectionDirection, PeerManager, PeerManagerEvent,
}; };
use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS}; use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS};
use crate::rpc::*;
use crate::service::behaviour::BehaviourEvent; use crate::service::behaviour::BehaviourEvent;
pub use crate::service::behaviour::Gossipsub; pub use crate::service::behaviour::Gossipsub;
use crate::types::{ use crate::types::{
subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform, Subnet, subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform, Subnet,
SubnetDiscovery, SubnetDiscovery,
}; };
use crate::EnrExt;
use crate::Eth2Enr; use crate::Eth2Enr;
use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash};
use crate::{rpc::*, EnrExt};
use api_types::{PeerRequestId, Request, RequestId, Response}; use api_types::{PeerRequestId, Request, RequestId, Response};
use futures::stream::StreamExt; use futures::stream::StreamExt;
use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings}; use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings};
@ -31,20 +34,18 @@ use libp2p::multiaddr::{Multiaddr, Protocol as MProtocol};
use libp2p::swarm::{ConnectionLimits, Swarm, SwarmBuilder, SwarmEvent}; use libp2p::swarm::{ConnectionLimits, Swarm, SwarmBuilder, SwarmEvent};
use libp2p::PeerId; use libp2p::PeerId;
use slog::{crit, debug, info, o, trace, warn}; use slog::{crit, debug, info, o, trace, warn};
use std::marker::PhantomData;
use std::path::PathBuf; use std::path::PathBuf;
use std::pin::Pin; use std::pin::Pin;
use std::sync::Arc; use std::{
use std::task::{Context, Poll}; marker::PhantomData,
sync::Arc,
task::{Context, Poll},
};
use types::{ use types::{
consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, Slot, SubnetId, consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, Slot, SubnetId,
}; };
use utils::{build_transport, strip_peer_id, Context as ServiceContext, MAX_CONNECTIONS_PER_PEER}; use utils::{build_transport, strip_peer_id, Context as ServiceContext, MAX_CONNECTIONS_PER_PEER};
use self::behaviour::Behaviour;
use self::gossip_cache::GossipCache;
pub mod api_types; pub mod api_types;
mod behaviour; mod behaviour;
mod gossip_cache; mod gossip_cache;
@ -197,6 +198,7 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
.attester_slashing_timeout(half_epoch * 2) .attester_slashing_timeout(half_epoch * 2)
// .signed_contribution_and_proof_timeout(timeout) // Do not retry // .signed_contribution_and_proof_timeout(timeout) // Do not retry
// .sync_committee_message_timeout(timeout) // Do not retry // .sync_committee_message_timeout(timeout) // Do not retry
.bls_to_execution_change_timeout(half_epoch * 2)
.build() .build()
}; };
@ -996,6 +998,9 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
Request::BlocksByRoot { .. } => { Request::BlocksByRoot { .. } => {
metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"]) metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"])
} }
Request::BlobsByRange { .. } => {
metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_range"])
}
} }
NetworkEvent::RequestReceived { NetworkEvent::RequestReceived {
peer_id, peer_id,
@ -1259,6 +1264,14 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
); );
Some(event) Some(event)
} }
InboundRequest::BlobsByRange(req) => {
let event = self.build_request(
peer_request_id,
peer_id,
Request::BlobsByRange(req),
);
Some(event)
}
InboundRequest::LightClientBootstrap(req) => { InboundRequest::LightClientBootstrap(req) => {
let event = self.build_request( let event = self.build_request(
peer_request_id, peer_request_id,
@ -1291,6 +1304,9 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
RPCResponse::BlocksByRange(resp) => { RPCResponse::BlocksByRange(resp) => {
self.build_response(id, peer_id, Response::BlocksByRange(Some(resp))) self.build_response(id, peer_id, Response::BlocksByRange(Some(resp)))
} }
RPCResponse::BlobsByRange(resp) => {
self.build_response(id, peer_id, Response::BlobsByRange(Some(resp)))
}
RPCResponse::BlocksByRoot(resp) => { RPCResponse::BlocksByRoot(resp) => {
self.build_response(id, peer_id, Response::BlocksByRoot(Some(resp))) self.build_response(id, peer_id, Response::BlocksByRoot(Some(resp)))
} }
@ -1304,6 +1320,7 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
let response = match termination { let response = match termination {
ResponseTermination::BlocksByRange => Response::BlocksByRange(None), ResponseTermination::BlocksByRange => Response::BlocksByRange(None),
ResponseTermination::BlocksByRoot => Response::BlocksByRoot(None), ResponseTermination::BlocksByRoot => Response::BlocksByRoot(None),
ResponseTermination::BlobsByRange => Response::BlobsByRange(None),
}; };
self.build_response(id, peer_id, response) self.build_response(id, peer_id, response)
} }

View File

@ -252,6 +252,7 @@ pub(crate) fn create_whitelist_filter(
add(ProposerSlashing); add(ProposerSlashing);
add(AttesterSlashing); add(AttesterSlashing);
add(SignedContributionAndProof); add(SignedContributionAndProof);
add(BlsToExecutionChange);
add(LightClientFinalityUpdate); add(LightClientFinalityUpdate);
add(LightClientOptimisticUpdate); add(LightClientOptimisticUpdate);
for id in 0..attestation_subnet_count { for id in 0..attestation_subnet_count {

View File

@ -13,7 +13,7 @@ pub type EnrSyncCommitteeBitfield<T> = BitVector<<T as EthSpec>::SyncCommitteeSu
pub type Enr = discv5::enr::Enr<discv5::enr::CombinedKey>; pub type Enr = discv5::enr::Enr<discv5::enr::CombinedKey>;
pub use globals::NetworkGlobals; pub use globals::NetworkGlobals;
pub use pubsub::{PubsubMessage, SnappyTransform}; pub use pubsub::{PubsubMessage, SignedBeaconBlockAndBlobsSidecar, SnappyTransform};
pub use subnet::{Subnet, SubnetDiscovery}; pub use subnet::{Subnet, SubnetDiscovery};
pub use sync_state::{BackFillState, SyncState}; pub use sync_state::{BackFillState, SyncState};
pub use topics::{ pub use topics::{

View File

@ -3,22 +3,39 @@
use crate::types::{GossipEncoding, GossipKind, GossipTopic}; use crate::types::{GossipEncoding, GossipKind, GossipTopic};
use crate::TopicHash; use crate::TopicHash;
use libp2p::gossipsub::{DataTransform, GossipsubMessage, RawGossipsubMessage}; use libp2p::gossipsub::{DataTransform, GossipsubMessage, RawGossipsubMessage};
use serde_derive::{Deserialize, Serialize};
use snap::raw::{decompress_len, Decoder, Encoder}; use snap::raw::{decompress_len, Decoder, Encoder};
use ssz::{Decode, Encode}; use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use std::boxed::Box; use std::boxed::Box;
use std::io::{Error, ErrorKind}; use std::io::{Error, ErrorKind};
use std::sync::Arc; use std::sync::Arc;
use tree_hash_derive::TreeHash;
use types::{ use types::{
Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, LightClientFinalityUpdate, Attestation, AttesterSlashing, BlobsSidecar, EthSpec, ForkContext, ForkName,
LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing,
SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockMerge, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase,
SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, SignedBeaconBlockCapella, SignedBeaconBlockEip4844, SignedBeaconBlockMerge,
SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SubnetId,
SyncCommitteeMessage, SyncSubnetId,
}; };
/// TODO(pawan): move this to consensus/types? strictly not a consensus type
#[derive(Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, PartialEq)]
#[serde(bound = "T: EthSpec")]
pub struct SignedBeaconBlockAndBlobsSidecar<T: EthSpec> {
// TODO(pawan): switch to a SignedBeaconBlock and use ssz offsets for decoding to make this
// future proof?
pub beacon_block: SignedBeaconBlockEip4844<T>,
pub blobs_sidecar: BlobsSidecar<T>,
}
#[derive(Debug, Clone, PartialEq)] #[derive(Debug, Clone, PartialEq)]
pub enum PubsubMessage<T: EthSpec> { pub enum PubsubMessage<T: EthSpec> {
/// Gossipsub message providing notification of a new block. /// Gossipsub message providing notification of a new block.
BeaconBlock(Arc<SignedBeaconBlock<T>>), BeaconBlock(Arc<SignedBeaconBlock<T>>),
/// Gossipsub message providing notification of a new SignedBeaconBlock coupled with a blobs sidecar.
BeaconBlockAndBlobsSidecars(Arc<SignedBeaconBlockAndBlobsSidecar<T>>),
/// Gossipsub message providing notification of a Aggregate attestation and associated proof. /// Gossipsub message providing notification of a Aggregate attestation and associated proof.
AggregateAndProofAttestation(Box<SignedAggregateAndProof<T>>), AggregateAndProofAttestation(Box<SignedAggregateAndProof<T>>),
/// Gossipsub message providing notification of a raw un-aggregated attestation with its shard id. /// Gossipsub message providing notification of a raw un-aggregated attestation with its shard id.
@ -33,6 +50,8 @@ pub enum PubsubMessage<T: EthSpec> {
SignedContributionAndProof(Box<SignedContributionAndProof<T>>), SignedContributionAndProof(Box<SignedContributionAndProof<T>>),
/// Gossipsub message providing notification of unaggregated sync committee signatures with its subnet id. /// Gossipsub message providing notification of unaggregated sync committee signatures with its subnet id.
SyncCommitteeMessage(Box<(SyncSubnetId, SyncCommitteeMessage)>), SyncCommitteeMessage(Box<(SyncSubnetId, SyncCommitteeMessage)>),
/// Gossipsub message for BLS to execution change messages.
BlsToExecutionChange(Box<SignedBlsToExecutionChange>),
/// Gossipsub message providing notification of a light client finality update. /// Gossipsub message providing notification of a light client finality update.
LightClientFinalityUpdate(Box<LightClientFinalityUpdate<T>>), LightClientFinalityUpdate(Box<LightClientFinalityUpdate<T>>),
/// Gossipsub message providing notification of a light client optimistic update. /// Gossipsub message providing notification of a light client optimistic update.
@ -110,6 +129,9 @@ impl<T: EthSpec> PubsubMessage<T> {
pub fn kind(&self) -> GossipKind { pub fn kind(&self) -> GossipKind {
match self { match self {
PubsubMessage::BeaconBlock(_) => GossipKind::BeaconBlock, PubsubMessage::BeaconBlock(_) => GossipKind::BeaconBlock,
PubsubMessage::BeaconBlockAndBlobsSidecars(_) => {
GossipKind::BeaconBlocksAndBlobsSidecar
}
PubsubMessage::AggregateAndProofAttestation(_) => GossipKind::BeaconAggregateAndProof, PubsubMessage::AggregateAndProofAttestation(_) => GossipKind::BeaconAggregateAndProof,
PubsubMessage::Attestation(attestation_data) => { PubsubMessage::Attestation(attestation_data) => {
GossipKind::Attestation(attestation_data.0) GossipKind::Attestation(attestation_data.0)
@ -119,6 +141,7 @@ impl<T: EthSpec> PubsubMessage<T> {
PubsubMessage::AttesterSlashing(_) => GossipKind::AttesterSlashing, PubsubMessage::AttesterSlashing(_) => GossipKind::AttesterSlashing,
PubsubMessage::SignedContributionAndProof(_) => GossipKind::SignedContributionAndProof, PubsubMessage::SignedContributionAndProof(_) => GossipKind::SignedContributionAndProof,
PubsubMessage::SyncCommitteeMessage(data) => GossipKind::SyncCommitteeMessage(data.0), PubsubMessage::SyncCommitteeMessage(data) => GossipKind::SyncCommitteeMessage(data.0),
PubsubMessage::BlsToExecutionChange(_) => GossipKind::BlsToExecutionChange,
PubsubMessage::LightClientFinalityUpdate(_) => GossipKind::LightClientFinalityUpdate, PubsubMessage::LightClientFinalityUpdate(_) => GossipKind::LightClientFinalityUpdate,
PubsubMessage::LightClientOptimisticUpdate(_) => { PubsubMessage::LightClientOptimisticUpdate(_) => {
GossipKind::LightClientOptimisticUpdate GossipKind::LightClientOptimisticUpdate
@ -175,6 +198,16 @@ impl<T: EthSpec> PubsubMessage<T> {
SignedBeaconBlockMerge::from_ssz_bytes(data) SignedBeaconBlockMerge::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?, .map_err(|e| format!("{:?}", e))?,
), ),
Some(ForkName::Eip4844) => {
return Err(
"beacon_block topic is not used from eip4844 fork onwards"
.to_string(),
)
}
Some(ForkName::Capella) => SignedBeaconBlock::<T>::Capella(
SignedBeaconBlockCapella::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
),
None => { None => {
return Err(format!( return Err(format!(
"Unknown gossipsub fork digest: {:?}", "Unknown gossipsub fork digest: {:?}",
@ -184,6 +217,28 @@ impl<T: EthSpec> PubsubMessage<T> {
}; };
Ok(PubsubMessage::BeaconBlock(Arc::new(beacon_block))) Ok(PubsubMessage::BeaconBlock(Arc::new(beacon_block)))
} }
GossipKind::BeaconBlocksAndBlobsSidecar => {
match fork_context.from_context_bytes(gossip_topic.fork_digest) {
Some(ForkName::Eip4844) => {
let block_and_blobs_sidecar =
SignedBeaconBlockAndBlobsSidecar::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?;
Ok(PubsubMessage::BeaconBlockAndBlobsSidecars(Arc::new(
block_and_blobs_sidecar,
)))
}
Some(
ForkName::Base
| ForkName::Altair
| ForkName::Merge
| ForkName::Capella,
)
| None => Err(format!(
"beacon_blobs_and_sidecar topic invalid for given fork digest {:?}",
gossip_topic.fork_digest
)),
}
}
GossipKind::VoluntaryExit => { GossipKind::VoluntaryExit => {
let voluntary_exit = SignedVoluntaryExit::from_ssz_bytes(data) let voluntary_exit = SignedVoluntaryExit::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?; .map_err(|e| format!("{:?}", e))?;
@ -214,6 +269,14 @@ impl<T: EthSpec> PubsubMessage<T> {
sync_committee, sync_committee,
)))) ))))
} }
GossipKind::BlsToExecutionChange => {
let bls_to_execution_change =
SignedBlsToExecutionChange::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?;
Ok(PubsubMessage::BlsToExecutionChange(Box::new(
bls_to_execution_change,
)))
}
GossipKind::LightClientFinalityUpdate => { GossipKind::LightClientFinalityUpdate => {
let light_client_finality_update = let light_client_finality_update =
LightClientFinalityUpdate::from_ssz_bytes(data) LightClientFinalityUpdate::from_ssz_bytes(data)
@ -244,6 +307,7 @@ impl<T: EthSpec> PubsubMessage<T> {
// messages for us. // messages for us.
match &self { match &self {
PubsubMessage::BeaconBlock(data) => data.as_ssz_bytes(), PubsubMessage::BeaconBlock(data) => data.as_ssz_bytes(),
PubsubMessage::BeaconBlockAndBlobsSidecars(data) => data.as_ssz_bytes(),
PubsubMessage::AggregateAndProofAttestation(data) => data.as_ssz_bytes(), PubsubMessage::AggregateAndProofAttestation(data) => data.as_ssz_bytes(),
PubsubMessage::VoluntaryExit(data) => data.as_ssz_bytes(), PubsubMessage::VoluntaryExit(data) => data.as_ssz_bytes(),
PubsubMessage::ProposerSlashing(data) => data.as_ssz_bytes(), PubsubMessage::ProposerSlashing(data) => data.as_ssz_bytes(),
@ -251,6 +315,7 @@ impl<T: EthSpec> PubsubMessage<T> {
PubsubMessage::Attestation(data) => data.1.as_ssz_bytes(), PubsubMessage::Attestation(data) => data.1.as_ssz_bytes(),
PubsubMessage::SignedContributionAndProof(data) => data.as_ssz_bytes(), PubsubMessage::SignedContributionAndProof(data) => data.as_ssz_bytes(),
PubsubMessage::SyncCommitteeMessage(data) => data.1.as_ssz_bytes(), PubsubMessage::SyncCommitteeMessage(data) => data.1.as_ssz_bytes(),
PubsubMessage::BlsToExecutionChange(data) => data.as_ssz_bytes(),
PubsubMessage::LightClientFinalityUpdate(data) => data.as_ssz_bytes(), PubsubMessage::LightClientFinalityUpdate(data) => data.as_ssz_bytes(),
PubsubMessage::LightClientOptimisticUpdate(data) => data.as_ssz_bytes(), PubsubMessage::LightClientOptimisticUpdate(data) => data.as_ssz_bytes(),
} }
@ -266,6 +331,12 @@ impl<T: EthSpec> std::fmt::Display for PubsubMessage<T> {
block.slot(), block.slot(),
block.message().proposer_index() block.message().proposer_index()
), ),
PubsubMessage::BeaconBlockAndBlobsSidecars(block_and_blob) => write!(
f,
"Beacon block and Blobs Sidecar: slot: {}, blobs: {}",
block_and_blob.beacon_block.message.slot,
block_and_blob.blobs_sidecar.blobs.len(),
),
PubsubMessage::AggregateAndProofAttestation(att) => write!( PubsubMessage::AggregateAndProofAttestation(att) => write!(
f, f,
"Aggregate and Proof: slot: {}, index: {}, aggregator_index: {}", "Aggregate and Proof: slot: {}, index: {}, aggregator_index: {}",
@ -287,6 +358,13 @@ impl<T: EthSpec> std::fmt::Display for PubsubMessage<T> {
PubsubMessage::SyncCommitteeMessage(data) => { PubsubMessage::SyncCommitteeMessage(data) => {
write!(f, "Sync committee message: subnet_id: {}", *data.0) write!(f, "Sync committee message: subnet_id: {}", *data.0)
} }
PubsubMessage::BlsToExecutionChange(data) => {
write!(
f,
"Signed BLS to execution change: validator_index: {}, address: {:?}",
data.message.validator_index, data.message.to_execution_address
)
}
PubsubMessage::LightClientFinalityUpdate(_data) => { PubsubMessage::LightClientFinalityUpdate(_data) => {
write!(f, "Light CLient Finality Update") write!(f, "Light CLient Finality Update")
} }

View File

@ -11,6 +11,7 @@ use crate::Subnet;
pub const TOPIC_PREFIX: &str = "eth2"; pub const TOPIC_PREFIX: &str = "eth2";
pub const SSZ_SNAPPY_ENCODING_POSTFIX: &str = "ssz_snappy"; pub const SSZ_SNAPPY_ENCODING_POSTFIX: &str = "ssz_snappy";
pub const BEACON_BLOCK_TOPIC: &str = "beacon_block"; pub const BEACON_BLOCK_TOPIC: &str = "beacon_block";
pub const BEACON_BLOCK_AND_BLOBS_SIDECAR_TOPIC: &str = "beacon_block_and_blobs_sidecar";
pub const BEACON_AGGREGATE_AND_PROOF_TOPIC: &str = "beacon_aggregate_and_proof"; pub const BEACON_AGGREGATE_AND_PROOF_TOPIC: &str = "beacon_aggregate_and_proof";
pub const BEACON_ATTESTATION_PREFIX: &str = "beacon_attestation_"; pub const BEACON_ATTESTATION_PREFIX: &str = "beacon_attestation_";
pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit"; pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit";
@ -18,16 +19,18 @@ pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing";
pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing"; pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing";
pub const SIGNED_CONTRIBUTION_AND_PROOF_TOPIC: &str = "sync_committee_contribution_and_proof"; pub const SIGNED_CONTRIBUTION_AND_PROOF_TOPIC: &str = "sync_committee_contribution_and_proof";
pub const SYNC_COMMITTEE_PREFIX_TOPIC: &str = "sync_committee_"; pub const SYNC_COMMITTEE_PREFIX_TOPIC: &str = "sync_committee_";
pub const BLS_TO_EXECUTION_CHANGE_TOPIC: &str = "bls_to_execution_change";
pub const LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_update"; pub const LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_update";
pub const LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update"; pub const LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update";
pub const CORE_TOPICS: [GossipKind; 6] = [ pub const CORE_TOPICS: [GossipKind; 7] = [
GossipKind::BeaconBlock, GossipKind::BeaconBlock,
GossipKind::BeaconAggregateAndProof, GossipKind::BeaconAggregateAndProof,
GossipKind::VoluntaryExit, GossipKind::VoluntaryExit,
GossipKind::ProposerSlashing, GossipKind::ProposerSlashing,
GossipKind::AttesterSlashing, GossipKind::AttesterSlashing,
GossipKind::SignedContributionAndProof, GossipKind::SignedContributionAndProof,
GossipKind::BlsToExecutionChange,
]; ];
pub const LIGHT_CLIENT_GOSSIP_TOPICS: [GossipKind; 2] = [ pub const LIGHT_CLIENT_GOSSIP_TOPICS: [GossipKind; 2] = [
@ -54,6 +57,8 @@ pub struct GossipTopic {
pub enum GossipKind { pub enum GossipKind {
/// Topic for publishing beacon blocks. /// Topic for publishing beacon blocks.
BeaconBlock, BeaconBlock,
/// Topic for publishing beacon block coupled with blob sidecars.
BeaconBlocksAndBlobsSidecar,
/// Topic for publishing aggregate attestations and proofs. /// Topic for publishing aggregate attestations and proofs.
BeaconAggregateAndProof, BeaconAggregateAndProof,
/// Topic for publishing raw attestations on a particular subnet. /// Topic for publishing raw attestations on a particular subnet.
@ -70,6 +75,8 @@ pub enum GossipKind {
/// Topic for publishing unaggregated sync committee signatures on a particular subnet. /// Topic for publishing unaggregated sync committee signatures on a particular subnet.
#[strum(serialize = "sync_committee")] #[strum(serialize = "sync_committee")]
SyncCommitteeMessage(SyncSubnetId), SyncCommitteeMessage(SyncSubnetId),
/// Topic for validator messages which change their withdrawal address.
BlsToExecutionChange,
/// Topic for publishing finality updates for light clients. /// Topic for publishing finality updates for light clients.
LightClientFinalityUpdate, LightClientFinalityUpdate,
/// Topic for publishing optimistic updates for light clients. /// Topic for publishing optimistic updates for light clients.
@ -143,10 +150,12 @@ impl GossipTopic {
let kind = match topic_parts[3] { let kind = match topic_parts[3] {
BEACON_BLOCK_TOPIC => GossipKind::BeaconBlock, BEACON_BLOCK_TOPIC => GossipKind::BeaconBlock,
BEACON_AGGREGATE_AND_PROOF_TOPIC => GossipKind::BeaconAggregateAndProof, BEACON_AGGREGATE_AND_PROOF_TOPIC => GossipKind::BeaconAggregateAndProof,
BEACON_BLOCK_AND_BLOBS_SIDECAR_TOPIC => GossipKind::BeaconBlocksAndBlobsSidecar,
SIGNED_CONTRIBUTION_AND_PROOF_TOPIC => GossipKind::SignedContributionAndProof, SIGNED_CONTRIBUTION_AND_PROOF_TOPIC => GossipKind::SignedContributionAndProof,
VOLUNTARY_EXIT_TOPIC => GossipKind::VoluntaryExit, VOLUNTARY_EXIT_TOPIC => GossipKind::VoluntaryExit,
PROPOSER_SLASHING_TOPIC => GossipKind::ProposerSlashing, PROPOSER_SLASHING_TOPIC => GossipKind::ProposerSlashing,
ATTESTER_SLASHING_TOPIC => GossipKind::AttesterSlashing, ATTESTER_SLASHING_TOPIC => GossipKind::AttesterSlashing,
BLS_TO_EXECUTION_CHANGE_TOPIC => GossipKind::BlsToExecutionChange,
LIGHT_CLIENT_FINALITY_UPDATE => GossipKind::LightClientFinalityUpdate, LIGHT_CLIENT_FINALITY_UPDATE => GossipKind::LightClientFinalityUpdate,
LIGHT_CLIENT_OPTIMISTIC_UPDATE => GossipKind::LightClientOptimisticUpdate, LIGHT_CLIENT_OPTIMISTIC_UPDATE => GossipKind::LightClientOptimisticUpdate,
topic => match committee_topic_index(topic) { topic => match committee_topic_index(topic) {
@ -198,6 +207,7 @@ impl std::fmt::Display for GossipTopic {
let kind = match self.kind { let kind = match self.kind {
GossipKind::BeaconBlock => BEACON_BLOCK_TOPIC.into(), GossipKind::BeaconBlock => BEACON_BLOCK_TOPIC.into(),
GossipKind::BeaconBlocksAndBlobsSidecar => BEACON_BLOCK_AND_BLOBS_SIDECAR_TOPIC.into(),
GossipKind::BeaconAggregateAndProof => BEACON_AGGREGATE_AND_PROOF_TOPIC.into(), GossipKind::BeaconAggregateAndProof => BEACON_AGGREGATE_AND_PROOF_TOPIC.into(),
GossipKind::VoluntaryExit => VOLUNTARY_EXIT_TOPIC.into(), GossipKind::VoluntaryExit => VOLUNTARY_EXIT_TOPIC.into(),
GossipKind::ProposerSlashing => PROPOSER_SLASHING_TOPIC.into(), GossipKind::ProposerSlashing => PROPOSER_SLASHING_TOPIC.into(),
@ -207,6 +217,7 @@ impl std::fmt::Display for GossipTopic {
GossipKind::SyncCommitteeMessage(index) => { GossipKind::SyncCommitteeMessage(index) => {
format!("{}{}", SYNC_COMMITTEE_PREFIX_TOPIC, *index) format!("{}{}", SYNC_COMMITTEE_PREFIX_TOPIC, *index)
} }
GossipKind::BlsToExecutionChange => BLS_TO_EXECUTION_CHANGE_TOPIC.into(),
GossipKind::LightClientFinalityUpdate => LIGHT_CLIENT_FINALITY_UPDATE.into(), GossipKind::LightClientFinalityUpdate => LIGHT_CLIENT_FINALITY_UPDATE.into(),
GossipKind::LightClientOptimisticUpdate => LIGHT_CLIENT_OPTIMISTIC_UPDATE.into(), GossipKind::LightClientOptimisticUpdate => LIGHT_CLIENT_OPTIMISTIC_UPDATE.into(),
}; };
@ -281,6 +292,7 @@ mod tests {
VoluntaryExit, VoluntaryExit,
ProposerSlashing, ProposerSlashing,
AttesterSlashing, AttesterSlashing,
BeaconBlocksAndBlobsSidecar,
] ]
.iter() .iter()
{ {

View File

@ -25,14 +25,20 @@ pub fn fork_context(fork_name: ForkName) -> ForkContext {
let mut chain_spec = E::default_spec(); let mut chain_spec = E::default_spec();
let altair_fork_epoch = Epoch::new(1); let altair_fork_epoch = Epoch::new(1);
let merge_fork_epoch = Epoch::new(2); let merge_fork_epoch = Epoch::new(2);
let capella_fork_epoch = Epoch::new(3);
let eip4844_fork_epoch = Epoch::new(4);
chain_spec.altair_fork_epoch = Some(altair_fork_epoch); chain_spec.altair_fork_epoch = Some(altair_fork_epoch);
chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch); chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch);
chain_spec.capella_fork_epoch = Some(capella_fork_epoch);
chain_spec.eip4844_fork_epoch = Some(eip4844_fork_epoch);
let current_slot = match fork_name { let current_slot = match fork_name {
ForkName::Base => Slot::new(0), ForkName::Base => Slot::new(0),
ForkName::Altair => altair_fork_epoch.start_slot(E::slots_per_epoch()), ForkName::Altair => altair_fork_epoch.start_slot(E::slots_per_epoch()),
ForkName::Merge => merge_fork_epoch.start_slot(E::slots_per_epoch()), ForkName::Merge => merge_fork_epoch.start_slot(E::slots_per_epoch()),
ForkName::Capella => capella_fork_epoch.start_slot(E::slots_per_epoch()),
ForkName::Eip4844 => eip4844_fork_epoch.start_slot(E::slots_per_epoch()),
}; };
ForkContext::new::<E>(current_slot, Hash256::zero(), &chain_spec) ForkContext::new::<E>(current_slot, Hash256::zero(), &chain_spec)
} }

View File

@ -9,8 +9,8 @@ use std::time::Duration;
use tokio::runtime::Runtime; use tokio::runtime::Runtime;
use tokio::time::sleep; use tokio::time::sleep;
use types::{ use types::{
BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, Epoch, EthSpec, ForkContext, BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EmptyBlock, Epoch, EthSpec,
ForkName, Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, Slot, ForkContext, ForkName, Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, Slot,
}; };
mod common; mod common;

View File

@ -45,6 +45,7 @@ tokio-util = { version = "0.6.3", features = ["time"] }
derivative = "2.2.0" derivative = "2.2.0"
delay_map = "0.1.1" delay_map = "0.1.1"
ethereum-types = { version = "0.14.1", optional = true } ethereum-types = { version = "0.14.1", optional = true }
operation_pool = { path = "../operation_pool" }
execution_layer = { path = "../execution_layer" } execution_layer = { path = "../execution_layer" }
[features] [features]

View File

@ -45,7 +45,9 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, GossipVerifiedBlock, NotifyExe
use derivative::Derivative; use derivative::Derivative;
use futures::stream::{Stream, StreamExt}; use futures::stream::{Stream, StreamExt};
use futures::task::Poll; use futures::task::Poll;
use lighthouse_network::rpc::methods::BlobsByRangeRequest;
use lighthouse_network::rpc::LightClientBootstrapRequest; use lighthouse_network::rpc::LightClientBootstrapRequest;
use lighthouse_network::SignedBeaconBlockAndBlobsSidecar;
use lighthouse_network::{ use lighthouse_network::{
rpc::{BlocksByRangeRequest, BlocksByRootRequest, StatusMessage}, rpc::{BlocksByRangeRequest, BlocksByRootRequest, StatusMessage},
Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, Client, MessageId, NetworkGlobals, PeerId, PeerRequestId,
@ -63,8 +65,8 @@ use task_executor::TaskExecutor;
use tokio::sync::mpsc; use tokio::sync::mpsc;
use types::{ use types::{
Attestation, AttesterSlashing, Hash256, LightClientFinalityUpdate, LightClientOptimisticUpdate, Attestation, AttesterSlashing, Hash256, LightClientFinalityUpdate, LightClientOptimisticUpdate,
ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange,
SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId,
}; };
use work_reprocessing_queue::{ use work_reprocessing_queue::{
spawn_reprocess_scheduler, QueuedAggregate, QueuedLightClientUpdate, QueuedRpcBlock, spawn_reprocess_scheduler, QueuedAggregate, QueuedLightClientUpdate, QueuedRpcBlock,
@ -114,6 +116,10 @@ const MAX_AGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN: usize = 1_024;
/// before we start dropping them. /// before we start dropping them.
const MAX_GOSSIP_BLOCK_QUEUE_LEN: usize = 1_024; const MAX_GOSSIP_BLOCK_QUEUE_LEN: usize = 1_024;
/// The maximum number of queued `SignedBeaconBlockAndBlobsSidecar` objects received on gossip that
/// will be stored before we start dropping them.
const MAX_GOSSIP_BLOCK_AND_BLOB_QUEUE_LEN: usize = 1_024;
/// The maximum number of queued `SignedBeaconBlock` objects received prior to their slot (but /// The maximum number of queued `SignedBeaconBlock` objects received prior to their slot (but
/// within acceptable clock disparity) that will be queued before we start dropping them. /// within acceptable clock disparity) that will be queued before we start dropping them.
const MAX_DELAYED_BLOCK_QUEUE_LEN: usize = 1_024; const MAX_DELAYED_BLOCK_QUEUE_LEN: usize = 1_024;
@ -166,10 +172,18 @@ const MAX_STATUS_QUEUE_LEN: usize = 1_024;
/// will be stored before we start dropping them. /// will be stored before we start dropping them.
const MAX_BLOCKS_BY_RANGE_QUEUE_LEN: usize = 1_024; const MAX_BLOCKS_BY_RANGE_QUEUE_LEN: usize = 1_024;
const MAX_BLOBS_BY_RANGE_QUEUE_LEN: usize = 1_024;
/// The maximum number of queued `BlocksByRootRequest` objects received from the network RPC that /// The maximum number of queued `BlocksByRootRequest` objects received from the network RPC that
/// will be stored before we start dropping them. /// will be stored before we start dropping them.
const MAX_BLOCKS_BY_ROOTS_QUEUE_LEN: usize = 1_024; const MAX_BLOCKS_BY_ROOTS_QUEUE_LEN: usize = 1_024;
/// Maximum number of `SignedBlsToExecutionChange` messages to queue before dropping them.
///
/// This value is set high to accommodate the large spike that is expected immediately after Capella
/// is activated.
const MAX_BLS_TO_EXECUTION_CHANGE_QUEUE_LEN: usize = 16_384;
/// The maximum number of queued `LightClientBootstrapRequest` objects received from the network RPC that /// The maximum number of queued `LightClientBootstrapRequest` objects received from the network RPC that
/// will be stored before we start dropping them. /// will be stored before we start dropping them.
const MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN: usize = 1_024; const MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN: usize = 1_024;
@ -202,6 +216,7 @@ pub const GOSSIP_ATTESTATION_BATCH: &str = "gossip_attestation_batch";
pub const GOSSIP_AGGREGATE: &str = "gossip_aggregate"; pub const GOSSIP_AGGREGATE: &str = "gossip_aggregate";
pub const GOSSIP_AGGREGATE_BATCH: &str = "gossip_aggregate_batch"; pub const GOSSIP_AGGREGATE_BATCH: &str = "gossip_aggregate_batch";
pub const GOSSIP_BLOCK: &str = "gossip_block"; pub const GOSSIP_BLOCK: &str = "gossip_block";
pub const GOSSIP_BLOCK_AND_BLOBS_SIDECAR: &str = "gossip_block_and_blobs_sidecar";
pub const DELAYED_IMPORT_BLOCK: &str = "delayed_import_block"; pub const DELAYED_IMPORT_BLOCK: &str = "delayed_import_block";
pub const GOSSIP_VOLUNTARY_EXIT: &str = "gossip_voluntary_exit"; pub const GOSSIP_VOLUNTARY_EXIT: &str = "gossip_voluntary_exit";
pub const GOSSIP_PROPOSER_SLASHING: &str = "gossip_proposer_slashing"; pub const GOSSIP_PROPOSER_SLASHING: &str = "gossip_proposer_slashing";
@ -215,10 +230,12 @@ pub const CHAIN_SEGMENT: &str = "chain_segment";
pub const STATUS_PROCESSING: &str = "status_processing"; pub const STATUS_PROCESSING: &str = "status_processing";
pub const BLOCKS_BY_RANGE_REQUEST: &str = "blocks_by_range_request"; pub const BLOCKS_BY_RANGE_REQUEST: &str = "blocks_by_range_request";
pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request"; pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request";
pub const BLOBS_BY_RANGE_REQUEST: &str = "blobs_by_range_request";
pub const LIGHT_CLIENT_BOOTSTRAP_REQUEST: &str = "light_client_bootstrap"; pub const LIGHT_CLIENT_BOOTSTRAP_REQUEST: &str = "light_client_bootstrap";
pub const UNKNOWN_BLOCK_ATTESTATION: &str = "unknown_block_attestation"; pub const UNKNOWN_BLOCK_ATTESTATION: &str = "unknown_block_attestation";
pub const UNKNOWN_BLOCK_AGGREGATE: &str = "unknown_block_aggregate"; pub const UNKNOWN_BLOCK_AGGREGATE: &str = "unknown_block_aggregate";
pub const UNKNOWN_LIGHT_CLIENT_UPDATE: &str = "unknown_light_client_update"; pub const UNKNOWN_LIGHT_CLIENT_UPDATE: &str = "unknown_light_client_update";
pub const GOSSIP_BLS_TO_EXECUTION_CHANGE: &str = "gossip_bls_to_execution_change";
/// A simple first-in-first-out queue with a maximum length. /// A simple first-in-first-out queue with a maximum length.
struct FifoQueue<T> { struct FifoQueue<T> {
@ -422,6 +439,26 @@ impl<T: BeaconChainTypes> WorkEvent<T> {
} }
} }
/// Create a new `Work` event for some blobs sidecar.
pub fn gossip_block_and_blobs_sidecar(
message_id: MessageId,
peer_id: PeerId,
peer_client: Client,
block_and_blobs: Arc<SignedBeaconBlockAndBlobsSidecar<T::EthSpec>>,
seen_timestamp: Duration,
) -> Self {
Self {
drop_during_sync: false,
work: Work::GossipBlockAndBlobsSidecar {
message_id,
peer_id,
peer_client,
block_and_blobs,
seen_timestamp,
},
}
}
/// Create a new `Work` event for some sync committee signature. /// Create a new `Work` event for some sync committee signature.
pub fn gossip_sync_signature( pub fn gossip_sync_signature(
message_id: MessageId, message_id: MessageId,
@ -544,6 +581,22 @@ impl<T: BeaconChainTypes> WorkEvent<T> {
} }
} }
/// Create a new `Work` event for some BLS to execution change.
pub fn gossip_bls_to_execution_change(
message_id: MessageId,
peer_id: PeerId,
bls_to_execution_change: Box<SignedBlsToExecutionChange>,
) -> Self {
Self {
drop_during_sync: false,
work: Work::GossipBlsToExecutionChange {
message_id,
peer_id,
bls_to_execution_change,
},
}
}
/// Create a new `Work` event for some block, where the result from computation (if any) is /// Create a new `Work` event for some block, where the result from computation (if any) is
/// sent to the other side of `result_tx`. /// sent to the other side of `result_tx`.
pub fn rpc_beacon_block( pub fn rpc_beacon_block(
@ -615,6 +668,21 @@ impl<T: BeaconChainTypes> WorkEvent<T> {
} }
} }
pub fn blobs_by_range_request(
peer_id: PeerId,
request_id: PeerRequestId,
request: BlobsByRangeRequest,
) -> Self {
Self {
drop_during_sync: false,
work: Work::BlobsByRangeRequest {
peer_id,
request_id,
request,
},
}
}
/// Create a new work event to process `LightClientBootstrap`s from the RPC network. /// Create a new work event to process `LightClientBootstrap`s from the RPC network.
pub fn lightclient_bootstrap_request( pub fn lightclient_bootstrap_request(
peer_id: PeerId, peer_id: PeerId,
@ -770,6 +838,13 @@ pub enum Work<T: BeaconChainTypes> {
block: Arc<SignedBeaconBlock<T::EthSpec>>, block: Arc<SignedBeaconBlock<T::EthSpec>>,
seen_timestamp: Duration, seen_timestamp: Duration,
}, },
GossipBlockAndBlobsSidecar {
message_id: MessageId,
peer_id: PeerId,
peer_client: Client,
block_and_blobs: Arc<SignedBeaconBlockAndBlobsSidecar<T::EthSpec>>,
seen_timestamp: Duration,
},
DelayedImportBlock { DelayedImportBlock {
peer_id: PeerId, peer_id: PeerId,
block: Box<GossipVerifiedBlock<T>>, block: Box<GossipVerifiedBlock<T>>,
@ -840,6 +915,16 @@ pub enum Work<T: BeaconChainTypes> {
request_id: PeerRequestId, request_id: PeerRequestId,
request: BlocksByRootRequest, request: BlocksByRootRequest,
}, },
BlobsByRangeRequest {
peer_id: PeerId,
request_id: PeerRequestId,
request: BlobsByRangeRequest,
},
GossipBlsToExecutionChange {
message_id: MessageId,
peer_id: PeerId,
bls_to_execution_change: Box<SignedBlsToExecutionChange>,
},
LightClientBootstrapRequest { LightClientBootstrapRequest {
peer_id: PeerId, peer_id: PeerId,
request_id: PeerRequestId, request_id: PeerRequestId,
@ -856,6 +941,7 @@ impl<T: BeaconChainTypes> Work<T> {
Work::GossipAggregate { .. } => GOSSIP_AGGREGATE, Work::GossipAggregate { .. } => GOSSIP_AGGREGATE,
Work::GossipAggregateBatch { .. } => GOSSIP_AGGREGATE_BATCH, Work::GossipAggregateBatch { .. } => GOSSIP_AGGREGATE_BATCH,
Work::GossipBlock { .. } => GOSSIP_BLOCK, Work::GossipBlock { .. } => GOSSIP_BLOCK,
Work::GossipBlockAndBlobsSidecar { .. } => GOSSIP_BLOCK_AND_BLOBS_SIDECAR,
Work::DelayedImportBlock { .. } => DELAYED_IMPORT_BLOCK, Work::DelayedImportBlock { .. } => DELAYED_IMPORT_BLOCK,
Work::GossipVoluntaryExit { .. } => GOSSIP_VOLUNTARY_EXIT, Work::GossipVoluntaryExit { .. } => GOSSIP_VOLUNTARY_EXIT,
Work::GossipProposerSlashing { .. } => GOSSIP_PROPOSER_SLASHING, Work::GossipProposerSlashing { .. } => GOSSIP_PROPOSER_SLASHING,
@ -869,9 +955,11 @@ impl<T: BeaconChainTypes> Work<T> {
Work::Status { .. } => STATUS_PROCESSING, Work::Status { .. } => STATUS_PROCESSING,
Work::BlocksByRangeRequest { .. } => BLOCKS_BY_RANGE_REQUEST, Work::BlocksByRangeRequest { .. } => BLOCKS_BY_RANGE_REQUEST,
Work::BlocksByRootsRequest { .. } => BLOCKS_BY_ROOTS_REQUEST, Work::BlocksByRootsRequest { .. } => BLOCKS_BY_ROOTS_REQUEST,
Work::BlobsByRangeRequest { .. } => BLOBS_BY_RANGE_REQUEST,
Work::LightClientBootstrapRequest { .. } => LIGHT_CLIENT_BOOTSTRAP_REQUEST, Work::LightClientBootstrapRequest { .. } => LIGHT_CLIENT_BOOTSTRAP_REQUEST,
Work::UnknownBlockAttestation { .. } => UNKNOWN_BLOCK_ATTESTATION, Work::UnknownBlockAttestation { .. } => UNKNOWN_BLOCK_ATTESTATION,
Work::UnknownBlockAggregate { .. } => UNKNOWN_BLOCK_AGGREGATE, Work::UnknownBlockAggregate { .. } => UNKNOWN_BLOCK_AGGREGATE,
Work::GossipBlsToExecutionChange { .. } => GOSSIP_BLS_TO_EXECUTION_CHANGE,
Work::UnknownLightClientOptimisticUpdate { .. } => UNKNOWN_LIGHT_CLIENT_UPDATE, Work::UnknownLightClientOptimisticUpdate { .. } => UNKNOWN_LIGHT_CLIENT_UPDATE,
} }
} }
@ -1015,11 +1103,18 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
let mut chain_segment_queue = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); let mut chain_segment_queue = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN);
let mut backfill_chain_segment = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); let mut backfill_chain_segment = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN);
let mut gossip_block_queue = FifoQueue::new(MAX_GOSSIP_BLOCK_QUEUE_LEN); let mut gossip_block_queue = FifoQueue::new(MAX_GOSSIP_BLOCK_QUEUE_LEN);
let mut gossip_block_and_blobs_sidecar_queue =
FifoQueue::new(MAX_GOSSIP_BLOCK_AND_BLOB_QUEUE_LEN);
let mut delayed_block_queue = FifoQueue::new(MAX_DELAYED_BLOCK_QUEUE_LEN); let mut delayed_block_queue = FifoQueue::new(MAX_DELAYED_BLOCK_QUEUE_LEN);
let mut status_queue = FifoQueue::new(MAX_STATUS_QUEUE_LEN); let mut status_queue = FifoQueue::new(MAX_STATUS_QUEUE_LEN);
let mut bbrange_queue = FifoQueue::new(MAX_BLOCKS_BY_RANGE_QUEUE_LEN); let mut bbrange_queue = FifoQueue::new(MAX_BLOCKS_BY_RANGE_QUEUE_LEN);
let mut bbroots_queue = FifoQueue::new(MAX_BLOCKS_BY_ROOTS_QUEUE_LEN); let mut bbroots_queue = FifoQueue::new(MAX_BLOCKS_BY_ROOTS_QUEUE_LEN);
let mut blbrange_queue = FifoQueue::new(MAX_BLOBS_BY_RANGE_QUEUE_LEN);
let mut gossip_bls_to_execution_change_queue =
FifoQueue::new(MAX_BLS_TO_EXECUTION_CHANGE_QUEUE_LEN);
let mut lcbootstrap_queue = FifoQueue::new(MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN); let mut lcbootstrap_queue = FifoQueue::new(MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN);
// Channels for sending work to the re-process scheduler (`work_reprocessing_tx`) and to // Channels for sending work to the re-process scheduler (`work_reprocessing_tx`) and to
// receive them back once they are ready (`ready_work_rx`). // receive them back once they are ready (`ready_work_rx`).
@ -1122,6 +1217,8 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
// required to verify some attestations. // required to verify some attestations.
} else if let Some(item) = gossip_block_queue.pop() { } else if let Some(item) = gossip_block_queue.pop() {
self.spawn_worker(item, toolbox); self.spawn_worker(item, toolbox);
} else if let Some(item) = gossip_block_and_blobs_sidecar_queue.pop() {
self.spawn_worker(item, toolbox);
// Check the aggregates, *then* the unaggregates since we assume that // Check the aggregates, *then* the unaggregates since we assume that
// aggregates are more valuable to local validators and effectively give us // aggregates are more valuable to local validators and effectively give us
// more information with less signature verification time. // more information with less signature verification time.
@ -1252,9 +1349,12 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
self.spawn_worker(item, toolbox); self.spawn_worker(item, toolbox);
} else if let Some(item) = gossip_proposer_slashing_queue.pop() { } else if let Some(item) = gossip_proposer_slashing_queue.pop() {
self.spawn_worker(item, toolbox); self.spawn_worker(item, toolbox);
// Check exits last since our validators don't get rewards from them. // Check exits and address changes late since our validators don't get
// rewards from them.
} else if let Some(item) = gossip_voluntary_exit_queue.pop() { } else if let Some(item) = gossip_voluntary_exit_queue.pop() {
self.spawn_worker(item, toolbox); self.spawn_worker(item, toolbox);
} else if let Some(item) = gossip_bls_to_execution_change_queue.pop() {
self.spawn_worker(item, toolbox);
// Handle backfill sync chain segments. // Handle backfill sync chain segments.
} else if let Some(item) = backfill_chain_segment.pop() { } else if let Some(item) = backfill_chain_segment.pop() {
self.spawn_worker(item, toolbox); self.spawn_worker(item, toolbox);
@ -1328,6 +1428,9 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
Work::GossipBlock { .. } => { Work::GossipBlock { .. } => {
gossip_block_queue.push(work, work_id, &self.log) gossip_block_queue.push(work, work_id, &self.log)
} }
Work::GossipBlockAndBlobsSidecar { .. } => {
gossip_block_and_blobs_sidecar_queue.push(work, work_id, &self.log)
}
Work::DelayedImportBlock { .. } => { Work::DelayedImportBlock { .. } => {
delayed_block_queue.push(work, work_id, &self.log) delayed_block_queue.push(work, work_id, &self.log)
} }
@ -1367,6 +1470,9 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
Work::BlocksByRootsRequest { .. } => { Work::BlocksByRootsRequest { .. } => {
bbroots_queue.push(work, work_id, &self.log) bbroots_queue.push(work, work_id, &self.log)
} }
Work::BlobsByRangeRequest { .. } => {
blbrange_queue.push(work, work_id, &self.log)
}
Work::LightClientBootstrapRequest { .. } => { Work::LightClientBootstrapRequest { .. } => {
lcbootstrap_queue.push(work, work_id, &self.log) lcbootstrap_queue.push(work, work_id, &self.log)
} }
@ -1376,6 +1482,9 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
Work::UnknownBlockAggregate { .. } => { Work::UnknownBlockAggregate { .. } => {
unknown_block_aggregate_queue.push(work) unknown_block_aggregate_queue.push(work)
} }
Work::GossipBlsToExecutionChange { .. } => {
gossip_bls_to_execution_change_queue.push(work, work_id, &self.log)
}
Work::UnknownLightClientOptimisticUpdate { .. } => { Work::UnknownLightClientOptimisticUpdate { .. } => {
unknown_light_client_update_queue.push(work, work_id, &self.log) unknown_light_client_update_queue.push(work, work_id, &self.log)
} }
@ -1431,6 +1540,10 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
&metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_QUEUE_TOTAL, &metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_QUEUE_TOTAL,
gossip_attester_slashing_queue.len() as i64, gossip_attester_slashing_queue.len() as i64,
); );
metrics::set_gauge(
&metrics::BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_QUEUE_TOTAL,
gossip_bls_to_execution_change_queue.len() as i64,
);
if aggregate_queue.is_full() && aggregate_debounce.elapsed() { if aggregate_queue.is_full() && aggregate_debounce.elapsed() {
error!( error!(
@ -1592,6 +1705,12 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
) )
.await .await
}), }),
/*
* Verification for blobs sidecars received on gossip.
*/
Work::GossipBlockAndBlobsSidecar { .. } => {
warn!(self.log, "Unexpected block and blobs on gossip")
}
/* /*
* Import for blocks that we received earlier than their intended slot. * Import for blocks that we received earlier than their intended slot.
*/ */
@ -1669,6 +1788,20 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
seen_timestamp, seen_timestamp,
) )
}), }),
/*
* BLS to execution change verification.
*/
Work::GossipBlsToExecutionChange {
message_id,
peer_id,
bls_to_execution_change,
} => task_spawner.spawn_blocking(move || {
worker.process_gossip_bls_to_execution_change(
message_id,
peer_id,
*bls_to_execution_change,
)
}),
/* /*
* Light client finality update verification. * Light client finality update verification.
*/ */
@ -1779,6 +1912,9 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
request, request,
) )
}), }),
Work::BlobsByRangeRequest { .. } => {
warn!(self.log.clone(), "Unexpected BlobsByRange Request")
}
/* /*
* Processing of lightclient bootstrap requests from other peers. * Processing of lightclient bootstrap requests from other peers.
*/ */

View File

@ -12,6 +12,7 @@ use beacon_chain::{
GossipVerifiedBlock, NotifyExecutionLayer, GossipVerifiedBlock, NotifyExecutionLayer,
}; };
use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource};
use operation_pool::ReceivedPreCapella;
use slog::{crit, debug, error, info, trace, warn}; use slog::{crit, debug, error, info, trace, warn};
use slot_clock::SlotClock; use slot_clock::SlotClock;
use ssz::Encode; use ssz::Encode;
@ -22,8 +23,8 @@ use tokio::sync::mpsc;
use types::{ use types::{
Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, LightClientFinalityUpdate, Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, LightClientFinalityUpdate,
LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock,
SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId, SyncCommitteeMessage, SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId,
SyncSubnetId, SyncCommitteeMessage, SyncSubnetId,
}; };
use super::{ use super::{
@ -676,6 +677,7 @@ impl<T: BeaconChainTypes> Worker<T> {
.await .await
{ {
let block_root = gossip_verified_block.block_root; let block_root = gossip_verified_block.block_root;
if let Some(handle) = duplicate_cache.check_and_insert(block_root) { if let Some(handle) = duplicate_cache.check_and_insert(block_root) {
self.process_gossip_verified_block( self.process_gossip_verified_block(
peer_id, peer_id,
@ -1190,6 +1192,83 @@ impl<T: BeaconChainTypes> Worker<T> {
metrics::inc_counter(&metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_IMPORTED_TOTAL); metrics::inc_counter(&metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_IMPORTED_TOTAL);
} }
pub fn process_gossip_bls_to_execution_change(
self,
message_id: MessageId,
peer_id: PeerId,
bls_to_execution_change: SignedBlsToExecutionChange,
) {
let validator_index = bls_to_execution_change.message.validator_index;
let address = bls_to_execution_change.message.to_execution_address;
let change = match self
.chain
.verify_bls_to_execution_change_for_gossip(bls_to_execution_change)
{
Ok(ObservationOutcome::New(change)) => change,
Ok(ObservationOutcome::AlreadyKnown) => {
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
debug!(
self.log,
"Dropping BLS to execution change";
"validator_index" => validator_index,
"peer" => %peer_id
);
return;
}
Err(e) => {
debug!(
self.log,
"Dropping invalid BLS to execution change";
"validator_index" => validator_index,
"peer" => %peer_id,
"error" => ?e
);
// We ignore pre-capella messages without penalizing peers.
if matches!(e, BeaconChainError::BlsToExecutionPriorToCapella) {
self.propagate_validation_result(
message_id,
peer_id,
MessageAcceptance::Ignore,
);
} else {
// We penalize the peer slightly to prevent overuse of invalids.
self.propagate_validation_result(
message_id,
peer_id,
MessageAcceptance::Reject,
);
self.gossip_penalize_peer(
peer_id,
PeerAction::HighToleranceError,
"invalid_bls_to_execution_change",
);
}
return;
}
};
metrics::inc_counter(&metrics::BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_VERIFIED_TOTAL);
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept);
// Address change messages from gossip are only processed *after* the
// Capella fork epoch.
let received_pre_capella = ReceivedPreCapella::No;
self.chain
.import_bls_to_execution_change(change, received_pre_capella);
debug!(
self.log,
"Successfully imported BLS to execution change";
"validator_index" => validator_index,
"address" => ?address,
);
metrics::inc_counter(&metrics::BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_IMPORTED_TOTAL);
}
/// Process the sync committee signature received from the gossip network and: /// Process the sync committee signature received from the gossip network and:
/// ///
/// - If it passes gossip propagation criteria, tell the network thread to forward it. /// - If it passes gossip propagation criteria, tell the network thread to forward it.

View File

@ -145,6 +145,19 @@ lazy_static! {
"beacon_processor_attester_slashing_imported_total", "beacon_processor_attester_slashing_imported_total",
"Total number of attester slashings imported to the op pool." "Total number of attester slashings imported to the op pool."
); );
// Gossip BLS to execution changes.
pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_bls_to_execution_change_queue_total",
"Count of address changes from gossip waiting to be verified."
);
pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_VERIFIED_TOTAL: Result<IntCounter> = try_create_int_counter(
"beacon_processor_bls_to_execution_change_verified_total",
"Total number of address changes verified for propagation."
);
pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_IMPORTED_TOTAL: Result<IntCounter> = try_create_int_counter(
"beacon_processor_bls_to_execution_change_imported_total",
"Total number of address changes imported to the op pool."
);
// Rpc blocks. // Rpc blocks.
pub static ref BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( pub static ref BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_rpc_block_queue_total", "beacon_processor_rpc_block_queue_total",
@ -154,6 +167,15 @@ lazy_static! {
"beacon_processor_rpc_block_imported_total", "beacon_processor_rpc_block_imported_total",
"Total number of gossip blocks imported to fork choice, etc." "Total number of gossip blocks imported to fork choice, etc."
); );
// Rpc blobs.
pub static ref BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_rpc_blob_queue_total",
"Count of blobs from the rpc waiting to be verified."
);
pub static ref BEACON_PROCESSOR_RPC_BLOB_IMPORTED_TOTAL: Result<IntCounter> = try_create_int_counter(
"beacon_processor_rpc_blob_imported_total",
"Total number of gossip blobs imported."
);
// Chain segments. // Chain segments.
pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge( pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_chain_segment_queue_total", "beacon_processor_chain_segment_queue_total",

View File

@ -168,6 +168,9 @@ impl<T: BeaconChainTypes> Router<T> {
Request::BlocksByRoot(request) => self Request::BlocksByRoot(request) => self
.processor .processor
.on_blocks_by_root_request(peer_id, id, request), .on_blocks_by_root_request(peer_id, id, request),
Request::BlobsByRange(request) => self
.processor
.on_blobs_by_range_request(peer_id, id, request),
Request::LightClientBootstrap(request) => self Request::LightClientBootstrap(request) => self
.processor .processor
.on_lightclient_bootstrap(peer_id, id, request), .on_lightclient_bootstrap(peer_id, id, request),
@ -195,6 +198,10 @@ impl<T: BeaconChainTypes> Router<T> {
self.processor self.processor
.on_blocks_by_root_response(peer_id, request_id, beacon_block); .on_blocks_by_root_response(peer_id, request_id, beacon_block);
} }
Response::BlobsByRange(beacon_blob) => {
self.processor
.on_blobs_by_range_response(peer_id, request_id, beacon_blob);
}
Response::LightClientBootstrap(_) => unreachable!(), Response::LightClientBootstrap(_) => unreachable!(),
} }
} }
@ -233,6 +240,14 @@ impl<T: BeaconChainTypes> Router<T> {
block, block,
); );
} }
PubsubMessage::BeaconBlockAndBlobsSidecars(block_and_blobs) => {
self.processor.on_block_and_blobs_sidecar_gossip(
id,
peer_id,
self.network_globals.client(&peer_id),
block_and_blobs,
);
}
PubsubMessage::VoluntaryExit(exit) => { PubsubMessage::VoluntaryExit(exit) => {
debug!(self.log, "Received a voluntary exit"; "peer_id" => %peer_id); debug!(self.log, "Received a voluntary exit"; "peer_id" => %peer_id);
self.processor.on_voluntary_exit_gossip(id, peer_id, exit); self.processor.on_voluntary_exit_gossip(id, peer_id, exit);
@ -280,6 +295,18 @@ impl<T: BeaconChainTypes> Router<T> {
sync_committtee_msg.0, sync_committtee_msg.0,
); );
} }
PubsubMessage::BlsToExecutionChange(bls_to_execution_change) => {
trace!(
self.log,
"Received BLS to execution change";
"peer_id" => %peer_id
);
self.processor.on_bls_to_execution_change_gossip(
id,
peer_id,
bls_to_execution_change,
);
}
PubsubMessage::LightClientFinalityUpdate(light_client_finality_update) => { PubsubMessage::LightClientFinalityUpdate(light_client_finality_update) => {
trace!( trace!(
self.log, self.log,

View File

@ -6,7 +6,8 @@ use crate::status::status_message;
use crate::sync::manager::RequestId as SyncId; use crate::sync::manager::RequestId as SyncId;
use crate::sync::SyncMessage; use crate::sync::SyncMessage;
use beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_chain::{BeaconChain, BeaconChainTypes};
use lighthouse_network::rpc::*; use lighthouse_network::rpc::methods::BlobsByRangeRequest;
use lighthouse_network::{rpc::*, SignedBeaconBlockAndBlobsSidecar};
use lighthouse_network::{ use lighthouse_network::{
Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, Request, Response, Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, Request, Response,
}; };
@ -17,9 +18,10 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH};
use store::SyncCommitteeMessage; use store::SyncCommitteeMessage;
use tokio::sync::mpsc; use tokio::sync::mpsc;
use types::{ use types::{
Attestation, AttesterSlashing, EthSpec, LightClientFinalityUpdate, LightClientOptimisticUpdate, Attestation, AttesterSlashing, BlobsSidecar, EthSpec, LightClientFinalityUpdate,
ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock,
SignedVoluntaryExit, SubnetId, SyncSubnetId, SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SubnetId,
SyncSubnetId,
}; };
/// Processes validated messages from the network. It relays necessary data to the syncing thread /// Processes validated messages from the network. It relays necessary data to the syncing thread
@ -161,6 +163,17 @@ impl<T: BeaconChainTypes> Processor<T> {
)) ))
} }
/// Handle a `BlobsByRange` RPC request received from a peer.
///
/// The request is not processed here: it is wrapped in a work event and
/// queued on the beacon processor via `send_beacon_processor_work`.
pub fn on_blobs_by_range_request(
    &mut self,
    peer_id: PeerId,
    request_id: PeerRequestId,
    request: BlobsByRangeRequest,
) {
    self.send_beacon_processor_work(BeaconWorkEvent::blobs_by_range_request(
        peer_id, request_id, request,
    ))
}
/// Handle a `LightClientBootstrap` request from the peer. /// Handle a `LightClientBootstrap` request from the peer.
pub fn on_lightclient_bootstrap( pub fn on_lightclient_bootstrap(
&mut self, &mut self,
@ -217,6 +230,33 @@ impl<T: BeaconChainTypes> Processor<T> {
}); });
} }
/// Handle a `BlobsByRange` response from a peer.
///
/// Responses are forwarded to the sync manager as a `SyncMessage::RpcBlob`.
/// A response carrying a non-sync request id is unexpected and is only
/// logged at debug level.
pub fn on_blobs_by_range_response(
    &mut self,
    peer_id: PeerId,
    request_id: RequestId,
    blob_wrapper: Option<Arc<BlobsSidecar<T::EthSpec>>>,
) {
    trace!(
        self.log,
        "Received BlobsByRange Response";
        "peer" => %peer_id,
    );

    match request_id {
        RequestId::Sync(id) => {
            let message = SyncMessage::RpcBlob {
                peer_id,
                request_id: id,
                blob_sidecar: blob_wrapper,
                seen_timestamp: timestamp_now(),
            };
            self.send_to_sync(message);
        }
        _ => {
            debug!(
                self.log,
                "All blobs by range responses should belong to sync"
            );
        }
    }
}
/// Handle a `BlocksByRoot` response from the peer. /// Handle a `BlocksByRoot` response from the peer.
pub fn on_blocks_by_root_response( pub fn on_blocks_by_root_response(
&mut self, &mut self,
@ -268,6 +308,22 @@ impl<T: BeaconChainTypes> Processor<T> {
)) ))
} }
/// Handle a gossiped signed beacon block bundled with its blobs sidecar.
///
/// Stamps the message with the current time (`timestamp_now()`) and queues
/// it on the beacon processor as a work event.
pub fn on_block_and_blobs_sidecar_gossip(
    &mut self,
    message_id: MessageId,
    peer_id: PeerId,
    peer_client: Client,
    block_and_blobs: Arc<SignedBeaconBlockAndBlobsSidecar<T::EthSpec>>,
) {
    self.send_beacon_processor_work(BeaconWorkEvent::gossip_block_and_blobs_sidecar(
        message_id,
        peer_id,
        peer_client,
        block_and_blobs,
        timestamp_now(),
    ))
}
pub fn on_unaggregated_attestation_gossip( pub fn on_unaggregated_attestation_gossip(
&mut self, &mut self,
message_id: MessageId, message_id: MessageId,
@ -369,6 +425,19 @@ impl<T: BeaconChainTypes> Processor<T> {
)) ))
} }
/// Handle a gossiped `SignedBlsToExecutionChange`.
///
/// Queues the message on the beacon processor as a work event; no
/// processing happens in this method.
pub fn on_bls_to_execution_change_gossip(
    &mut self,
    message_id: MessageId,
    peer_id: PeerId,
    bls_to_execution_change: Box<SignedBlsToExecutionChange>,
) {
    self.send_beacon_processor_work(BeaconWorkEvent::gossip_bls_to_execution_change(
        message_id,
        peer_id,
        bls_to_execution_change,
    ))
}
pub fn on_light_client_finality_update_gossip( pub fn on_light_client_finality_update_gossip(
&mut self, &mut self,
message_id: MessageId, message_id: MessageId,

View File

@ -47,13 +47,13 @@ use lighthouse_network::rpc::methods::MAX_REQUEST_BLOCKS;
use lighthouse_network::types::{NetworkGlobals, SyncState}; use lighthouse_network::types::{NetworkGlobals, SyncState};
use lighthouse_network::SyncInfo; use lighthouse_network::SyncInfo;
use lighthouse_network::{PeerAction, PeerId}; use lighthouse_network::{PeerAction, PeerId};
use slog::{crit, debug, error, info, trace, Logger}; use slog::{crit, debug, error, info, trace, warn, Logger};
use std::boxed::Box; use std::boxed::Box;
use std::ops::Sub; use std::ops::Sub;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use tokio::sync::mpsc; use tokio::sync::mpsc;
use types::{EthSpec, Hash256, SignedBeaconBlock, Slot}; use types::{BlobsSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot};
/// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync
/// from a peer. If a peer is within this tolerance (forwards or backwards), it is treated as a /// from a peer. If a peer is within this tolerance (forwards or backwards), it is treated as a
@ -93,6 +93,14 @@ pub enum SyncMessage<T: EthSpec> {
seen_timestamp: Duration, seen_timestamp: Duration,
}, },
/// A blob has been received from RPC.
RpcBlob {
peer_id: PeerId,
request_id: RequestId,
blob_sidecar: Option<Arc<BlobsSidecar<T>>>,
seen_timestamp: Duration,
},
/// A block with an unknown parent has been received. /// A block with an unknown parent has been received.
UnknownBlock(PeerId, Arc<SignedBeaconBlock<T>>, Hash256), UnknownBlock(PeerId, Arc<SignedBeaconBlock<T>>, Hash256),
@ -584,6 +592,9 @@ impl<T: BeaconChainTypes> SyncManager<T> {
.block_lookups .block_lookups
.parent_chain_processed(chain_hash, result, &mut self.network), .parent_chain_processed(chain_hash, result, &mut self.network),
}, },
SyncMessage::RpcBlob { .. } => {
warn!(self.log, "Unexpected blob message received");
}
} }
} }

View File

@ -13,12 +13,13 @@ parking_lot = "0.12.0"
types = { path = "../../consensus/types" } types = { path = "../../consensus/types" }
state_processing = { path = "../../consensus/state_processing" } state_processing = { path = "../../consensus/state_processing" }
eth2_ssz = "0.4.1" eth2_ssz = "0.4.1"
eth2_ssz_derive = "0.3.0" eth2_ssz_derive = "0.3.1"
rayon = "1.5.0" rayon = "1.5.0"
serde = "1.0.116" serde = "1.0.116"
serde_derive = "1.0.116" serde_derive = "1.0.116"
store = { path = "../store" } store = { path = "../store" }
bitvec = "1" bitvec = "1"
rand = "0.8.5"
[dev-dependencies] [dev-dependencies]
beacon_chain = { path = "../beacon_chain" } beacon_chain = { path = "../beacon_chain" }

View File

@ -0,0 +1,147 @@
use state_processing::SigVerifiedOp;
use std::collections::{hash_map::Entry, HashMap, HashSet};
use std::sync::Arc;
use types::{
AbstractExecPayload, BeaconState, ChainSpec, EthSpec, SignedBeaconBlock,
SignedBlsToExecutionChange,
};
/// Indicates if a `BlsToExecutionChange` was received before or after the
/// Capella fork. This is used to know which messages we should broadcast at the
/// Capella fork epoch.
//
// `Debug`, `PartialEq` and `Eq` are derived so the flag can be logged and
// compared directly (e.g. with `assert_eq!` in tests) rather than only via
// `matches!`. This is backward-compatible: existing derives are retained.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ReceivedPreCapella {
    Yes,
    No,
}
/// Pool of BLS to execution changes that maintains a LIFO queue and an index by validator.
///
/// Using the LIFO queue for block production disincentivises spam on P2P at the Capella fork,
/// and is less-relevant after that.
///
/// Invariant: `queue` and `by_validator_index` hold clones of the same `Arc`s — each
/// validator has at most one pooled change, present in both structures.
#[derive(Debug, Default)]
pub struct BlsToExecutionChanges<T: EthSpec> {
    /// Map from validator index to BLS to execution change.
    by_validator_index: HashMap<u64, Arc<SigVerifiedOp<SignedBlsToExecutionChange, T>>>,
    /// Last-in-first-out (LIFO) queue of verified messages.
    queue: Vec<Arc<SigVerifiedOp<SignedBlsToExecutionChange, T>>>,
    /// Contains a set of validator indices which need to have their changes
    /// broadcast at the capella epoch.
    received_pre_capella_indices: HashSet<u64>,
}
impl<T: EthSpec> BlsToExecutionChanges<T> {
    /// Compare `address_change` against the pooled change for the same validator.
    ///
    /// Returns `None` if no change is pooled for the validator, otherwise
    /// `Some(true)`/`Some(false)` depending on whether the pooled change is
    /// identical to `address_change`.
    pub fn existing_change_equals(
        &self,
        address_change: &SignedBlsToExecutionChange,
    ) -> Option<bool> {
        self.by_validator_index
            .get(&address_change.message.validator_index)
            .map(|existing| existing.as_inner() == address_change)
    }

    /// Insert a verified change into the pool.
    ///
    /// Returns `true` if the change was inserted. If a change for the same
    /// validator is already pooled, the pool is left untouched and `false`
    /// is returned.
    pub fn insert(
        &mut self,
        verified_change: SigVerifiedOp<SignedBlsToExecutionChange, T>,
        received_pre_capella: ReceivedPreCapella,
    ) -> bool {
        let validator_index = verified_change.as_inner().message.validator_index;
        // Wrap in an `Arc` once on insert.
        let verified_change = Arc::new(verified_change);
        match self.by_validator_index.entry(validator_index) {
            Entry::Vacant(entry) => {
                // The same `Arc` is shared between the queue and the index.
                self.queue.push(verified_change.clone());
                entry.insert(verified_change);
                if matches!(received_pre_capella, ReceivedPreCapella::Yes) {
                    self.received_pre_capella_indices.insert(validator_index);
                }
                true
            }
            Entry::Occupied(_) => false,
        }
    }

    /// FIFO ordering, used for persistence to disk.
    pub fn iter_fifo(
        &self,
    ) -> impl Iterator<Item = &Arc<SigVerifiedOp<SignedBlsToExecutionChange, T>>> {
        self.queue.iter()
    }

    /// LIFO ordering, used for block packing.
    pub fn iter_lifo(
        &self,
    ) -> impl Iterator<Item = &Arc<SigVerifiedOp<SignedBlsToExecutionChange, T>>> {
        self.queue.iter().rev()
    }

    /// Returns only those which are flagged for broadcasting at the Capella
    /// fork. Uses FIFO ordering, although we expect this list to be shuffled by
    /// the caller.
    pub fn iter_received_pre_capella(
        &self,
    ) -> impl Iterator<Item = &Arc<SigVerifiedOp<SignedBlsToExecutionChange, T>>> {
        self.queue.iter().filter(|address_change| {
            self.received_pre_capella_indices
                .contains(&address_change.as_inner().message.validator_index)
        })
    }

    /// Returns the set of indices which should have their address changes
    /// broadcast at the Capella fork.
    pub fn iter_pre_capella_indices(&self) -> impl Iterator<Item = &u64> {
        self.received_pre_capella_indices.iter()
    }

    /// Prune BLS to execution changes that have been applied to the state more than 1 block ago.
    ///
    /// The block check is necessary to avoid pruning too eagerly and losing the ability to include
    /// address changes during re-orgs. This isn't *perfect* so some address changes could
    /// still get stuck if there are gnarly re-orgs and the changes can't be widely republished
    /// due to the gossip duplicate rules.
    pub fn prune<Payload: AbstractExecPayload<T>>(
        &mut self,
        head_block: &SignedBeaconBlock<T, Payload>,
        head_state: &BeaconState<T>,
        spec: &ChainSpec,
    ) {
        let mut validator_indices_pruned = vec![];

        self.queue.retain(|address_change| {
            let validator_index = address_change.as_inner().message.validator_index;
            head_state
                .validators()
                .get(validator_index as usize)
                // Unknown validator index: keep the change (be conservative).
                .map_or(true, |validator| {
                    // Prune only once the validator's credentials have switched to an eth1
                    // withdrawal credential AND the change is not in the head block itself
                    // (so a one-block re-org can still re-include it).
                    let prune = validator.has_eth1_withdrawal_credential(spec)
                        && head_block
                            .message()
                            .body()
                            .bls_to_execution_changes()
                            .map_or(true, |recent_changes| {
                                !recent_changes
                                    .iter()
                                    .any(|c| c.message.validator_index == validator_index)
                            });
                    if prune {
                        validator_indices_pruned.push(validator_index);
                    }
                    !prune
                })
        });

        // Keep the index in sync with the queue.
        for validator_index in validator_indices_pruned {
            self.by_validator_index.remove(&validator_index);
        }
    }

    /// Removes `broadcasted` validators from the set of validators that should
    /// have their BLS changes broadcast at the Capella fork boundary.
    pub fn register_indices_broadcasted_at_capella(&mut self, broadcasted: &HashSet<u64>) {
        self.received_pre_capella_indices = self
            .received_pre_capella_indices
            .difference(broadcasted)
            .copied()
            .collect();
    }
}

Some files were not shown because too many files have changed in this diff Show More