Merge pull request #4477 from realbigsean/merge-unstable-deneb-june-6th

Merge unstable deneb june 6th
This commit is contained in:
realbigsean 2023-07-17 16:32:26 -04:00 committed by GitHub
commit f98671f5ab
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
149 changed files with 6282 additions and 2786 deletions

View File

@ -58,8 +58,8 @@ jobs:
uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install anvil - name: Install Foundry (anvil)
run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil uses: foundry-rs/foundry-toolchain@v1
- name: Run tests in release - name: Run tests in release
run: make test-release run: make test-release
release-tests-windows: release-tests-windows:
@ -78,9 +78,8 @@ jobs:
run: | run: |
choco install python protoc visualstudio2019-workload-vctools -y choco install python protoc visualstudio2019-workload-vctools -y
npm config set msvs_version 2019 npm config set msvs_version 2019
- name: Install anvil - name: Install Foundry (anvil)
# Extra feature to work around https://github.com/foundry-rs/foundry/issues/5115 uses: foundry-rs/foundry-toolchain@v1
run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil --features ethers/ipc
- name: Install make - name: Install make
run: choco install -y make run: choco install -y make
- uses: KyleMayes/install-llvm-action@v1 - uses: KyleMayes/install-llvm-action@v1
@ -155,8 +154,8 @@ jobs:
uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install anvil - name: Install Foundry (anvil)
run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil uses: foundry-rs/foundry-toolchain@v1
- name: Run tests in debug - name: Run tests in debug
run: make test-debug run: make test-debug
state-transition-vectors-ubuntu: state-transition-vectors-ubuntu:
@ -211,8 +210,8 @@ jobs:
uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install anvil - name: Install Foundry (anvil)
run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil uses: foundry-rs/foundry-toolchain@v1
- name: Run the beacon chain sim that starts from an eth1 contract - name: Run the beacon chain sim that starts from an eth1 contract
run: cargo run --release --bin simulator eth1-sim run: cargo run --release --bin simulator eth1-sim
merge-transition-ubuntu: merge-transition-ubuntu:
@ -227,8 +226,8 @@ jobs:
uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install anvil - name: Install Foundry (anvil)
run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil uses: foundry-rs/foundry-toolchain@v1
- name: Run the beacon chain sim and go through the merge transition - name: Run the beacon chain sim and go through the merge transition
run: cargo run --release --bin simulator eth1-sim --post-merge run: cargo run --release --bin simulator eth1-sim --post-merge
no-eth1-simulator-ubuntu: no-eth1-simulator-ubuntu:
@ -257,8 +256,8 @@ jobs:
uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install anvil - name: Install Foundry (anvil)
run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil uses: foundry-rs/foundry-toolchain@v1
- name: Run the syncing simulator - name: Run the syncing simulator
run: cargo run --release --bin simulator syncing-sim run: cargo run --release --bin simulator syncing-sim
doppelganger-protection-test: doppelganger-protection-test:

134
Cargo.lock generated
View File

@ -257,6 +257,15 @@ dependencies = [
"winapi", "winapi",
] ]
[[package]]
name = "anvil-rpc"
version = "0.1.0"
source = "git+https://github.com/foundry-rs/foundry?rev=b45456717ffae1af65acdc71099f8cb95e6683a0#b45456717ffae1af65acdc71099f8cb95e6683a0"
dependencies = [
"serde",
"serde_json",
]
[[package]] [[package]]
name = "anyhow" name = "anyhow"
version = "1.0.71" version = "1.0.71"
@ -506,9 +515,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]] [[package]]
name = "axum" name = "axum"
version = "0.5.17" version = "0.6.18"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acee9fd5073ab6b045a275b3e709c163dd36c90685219cb21804a147b58dba43" checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39"
dependencies = [ dependencies = [
"async-trait", "async-trait",
"axum-core", "axum-core",
@ -524,22 +533,23 @@ dependencies = [
"mime", "mime",
"percent-encoding", "percent-encoding",
"pin-project-lite 0.2.9", "pin-project-lite 0.2.9",
"rustversion",
"serde", "serde",
"serde_json", "serde_json",
"serde_path_to_error",
"serde_urlencoded", "serde_urlencoded",
"sync_wrapper", "sync_wrapper",
"tokio", "tokio",
"tower", "tower",
"tower-http",
"tower-layer", "tower-layer",
"tower-service", "tower-service",
] ]
[[package]] [[package]]
name = "axum-core" name = "axum-core"
version = "0.2.9" version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37e5939e02c56fecd5c017c37df4238c0a839fa76b7f97acdd7efb804fd181cc" checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c"
dependencies = [ dependencies = [
"async-trait", "async-trait",
"bytes", "bytes",
@ -547,6 +557,7 @@ dependencies = [
"http", "http",
"http-body", "http-body",
"mime", "mime",
"rustversion",
"tower-layer", "tower-layer",
"tower-service", "tower-service",
] ]
@ -605,7 +616,7 @@ checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b"
[[package]] [[package]]
name = "beacon-api-client" name = "beacon-api-client"
version = "0.1.0" version = "0.1.0"
source = "git+https://github.com/ralexstokes/beacon-api-client#30679e9e25d61731cde54e14cd8a3688a39d8e5b" source = "git+https://github.com/ralexstokes/beacon-api-client?rev=93d7e8c#93d7e8c38fe9782c4862909663e7b57c44f805a9"
dependencies = [ dependencies = [
"ethereum-consensus", "ethereum-consensus",
"http", "http",
@ -684,7 +695,7 @@ dependencies = [
[[package]] [[package]]
name = "beacon_node" name = "beacon_node"
version = "4.2.0" version = "4.3.0"
dependencies = [ dependencies = [
"beacon_chain", "beacon_chain",
"clap", "clap",
@ -717,6 +728,30 @@ dependencies = [
"unused_port", "unused_port",
] ]
[[package]]
name = "beacon_processor"
version = "0.1.0"
dependencies = [
"derivative",
"ethereum_ssz",
"fnv",
"futures",
"hex",
"itertools",
"lazy_static",
"lighthouse_metrics",
"lighthouse_network",
"logging",
"parking_lot 0.12.1",
"slog",
"slot_clock",
"strum",
"task_executor",
"tokio",
"tokio-util 0.6.10",
"types",
]
[[package]] [[package]]
name = "bincode" name = "bincode"
version = "1.3.3" version = "1.3.3"
@ -891,7 +926,7 @@ dependencies = [
[[package]] [[package]]
name = "boot_node" name = "boot_node"
version = "4.2.0" version = "4.3.0"
dependencies = [ dependencies = [
"beacon_node", "beacon_node",
"clap", "clap",
@ -1218,6 +1253,7 @@ name = "client"
version = "0.2.0" version = "0.2.0"
dependencies = [ dependencies = [
"beacon_chain", "beacon_chain",
"beacon_processor",
"directory", "directory",
"dirs", "dirs",
"environment", "environment",
@ -1235,6 +1271,7 @@ dependencies = [
"logging", "logging",
"monitoring_api", "monitoring_api",
"network", "network",
"num_cpus",
"operation_pool", "operation_pool",
"parking_lot 0.12.1", "parking_lot 0.12.1",
"sensitive_url", "sensitive_url",
@ -2579,7 +2616,7 @@ dependencies = [
[[package]] [[package]]
name = "ethereum-consensus" name = "ethereum-consensus"
version = "0.1.1" version = "0.1.1"
source = "git+https://github.com/ralexstokes//ethereum-consensus?rev=9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d#9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d" source = "git+https://github.com/ralexstokes/ethereum-consensus?rev=e380108#e380108d15fcc40349927fdf3d11c71f9edb67c2"
dependencies = [ dependencies = [
"async-stream", "async-stream",
"blst", "blst",
@ -2592,8 +2629,9 @@ dependencies = [
"rand 0.8.5", "rand 0.8.5",
"serde", "serde",
"serde_json", "serde_json",
"serde_yaml",
"sha2 0.9.9", "sha2 0.9.9",
"ssz-rs", "ssz_rs",
"thiserror", "thiserror",
"tokio", "tokio",
"tokio-stream", "tokio-stream",
@ -2867,7 +2905,7 @@ dependencies = [
"serde_json", "serde_json",
"slog", "slog",
"slot_clock", "slot_clock",
"ssz-rs", "ssz_rs",
"ssz_types", "ssz_types",
"state_processing", "state_processing",
"strum", "strum",
@ -3592,12 +3630,6 @@ dependencies = [
"pin-project-lite 0.2.9", "pin-project-lite 0.2.9",
] ]
[[package]]
name = "http-range-header"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29"
[[package]] [[package]]
name = "http_api" name = "http_api"
version = "0.1.0" version = "0.1.0"
@ -4165,7 +4197,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
[[package]] [[package]]
name = "lcli" name = "lcli"
version = "4.2.0" version = "4.3.0"
dependencies = [ dependencies = [
"account_utils", "account_utils",
"beacon_chain", "beacon_chain",
@ -4812,7 +4844,7 @@ dependencies = [
[[package]] [[package]]
name = "lighthouse" name = "lighthouse"
version = "4.2.0" version = "4.3.0"
dependencies = [ dependencies = [
"account_manager", "account_manager",
"account_utils", "account_utils",
@ -5078,9 +5110,9 @@ checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5"
[[package]] [[package]]
name = "matchit" name = "matchit"
version = "0.5.0" version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40"
[[package]] [[package]]
name = "md-5" name = "md-5"
@ -5169,16 +5201,20 @@ dependencies = [
[[package]] [[package]]
name = "mev-rs" name = "mev-rs"
version = "0.2.1" version = "0.3.0"
source = "git+https://github.com/ralexstokes//mev-rs?rev=7813d4a4a564e0754e9aaab2d95520ba437c3889#7813d4a4a564e0754e9aaab2d95520ba437c3889" source = "git+https://github.com/ralexstokes/mev-rs?rev=216657016d5c0889b505857c89ae42c7aa2764af#216657016d5c0889b505857c89ae42c7aa2764af"
dependencies = [ dependencies = [
"anvil-rpc",
"async-trait", "async-trait",
"axum", "axum",
"beacon-api-client", "beacon-api-client",
"ethereum-consensus", "ethereum-consensus",
"hyper", "hyper",
"parking_lot 0.12.1",
"reqwest",
"serde", "serde",
"ssz-rs", "serde_json",
"ssz_rs",
"thiserror", "thiserror",
"tokio", "tokio",
"tracing", "tracing",
@ -5539,6 +5575,7 @@ name = "network"
version = "0.2.0" version = "0.2.0"
dependencies = [ dependencies = [
"beacon_chain", "beacon_chain",
"beacon_processor",
"delay_map", "delay_map",
"derivative", "derivative",
"environment", "environment",
@ -5562,6 +5599,7 @@ dependencies = [
"matches", "matches",
"num_cpus", "num_cpus",
"operation_pool", "operation_pool",
"parking_lot 0.12.1",
"rand 0.8.5", "rand 0.8.5",
"rlp", "rlp",
"slog", "slog",
@ -5627,6 +5665,7 @@ dependencies = [
"execution_layer", "execution_layer",
"sensitive_url", "sensitive_url",
"tempfile", "tempfile",
"tokio",
"types", "types",
"validator_client", "validator_client",
"validator_dir", "validator_dir",
@ -7464,6 +7503,16 @@ dependencies = [
"serde", "serde",
] ]
[[package]]
name = "serde_path_to_error"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b1b6471d7496b051e03f1958802a73f88b947866f5146f329e47e36554f4e55"
dependencies = [
"itoa",
"serde",
]
[[package]] [[package]]
name = "serde_repr" name = "serde_repr"
version = "0.1.12" version = "0.1.12"
@ -7973,23 +8022,24 @@ dependencies = [
] ]
[[package]] [[package]]
name = "ssz-rs" name = "ssz_rs"
version = "0.8.0" version = "0.9.0"
source = "git+https://github.com/ralexstokes//ssz-rs?rev=adf1a0b14cef90b9536f28ef89da1fab316465e1#adf1a0b14cef90b9536f28ef89da1fab316465e1" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "057291e5631f280978fa9c8009390663ca4613359fc1318e36a8c24c392f6d1f"
dependencies = [ dependencies = [
"bitvec 1.0.1", "bitvec 1.0.1",
"hex", "hex",
"num-bigint", "num-bigint",
"serde", "serde",
"sha2 0.9.9", "sha2 0.9.9",
"ssz-rs-derive", "ssz_rs_derive",
"thiserror",
] ]
[[package]] [[package]]
name = "ssz-rs-derive" name = "ssz_rs_derive"
version = "0.8.0" version = "0.9.0"
source = "git+https://github.com/ralexstokes//ssz-rs?rev=adf1a0b14cef90b9536f28ef89da1fab316465e1#adf1a0b14cef90b9536f28ef89da1fab316465e1" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f07d54c4d01a1713eb363b55ba51595da15f6f1211435b71466460da022aa140"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",
@ -8784,25 +8834,6 @@ dependencies = [
"tracing", "tracing",
] ]
[[package]]
name = "tower-http"
version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858"
dependencies = [
"bitflags 1.3.2",
"bytes",
"futures-core",
"futures-util",
"http",
"http-body",
"http-range-header",
"pin-project-lite 0.2.9",
"tower",
"tower-layer",
"tower-service",
]
[[package]] [[package]]
name = "tower-layer" name = "tower-layer"
version = "0.3.2" version = "0.3.2"
@ -9108,6 +9139,7 @@ dependencies = [
"smallvec", "smallvec",
"ssz_types", "ssz_types",
"state_processing", "state_processing",
"strum",
"superstruct 0.6.0", "superstruct 0.6.0",
"swap_or_not_shuffle", "swap_or_not_shuffle",
"tempfile", "tempfile",

View File

@ -4,6 +4,7 @@ members = [
"beacon_node", "beacon_node",
"beacon_node/beacon_chain", "beacon_node/beacon_chain",
"beacon_node/beacon_processor",
"beacon_node/builder_client", "beacon_node/builder_client",
"beacon_node/client", "beacon_node/client",
"beacon_node/eth1", "beacon_node/eth1",
@ -92,13 +93,6 @@ resolver = "2"
warp = { git = "https://github.com/macladson/warp", rev="7e75acc368229a46a236a8c991bf251fe7fe50ef" } warp = { git = "https://github.com/macladson/warp", rev="7e75acc368229a46a236a8c991bf251fe7fe50ef" }
arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="f002b99989b561ddce62e4cf2887b0f8860ae991" } arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="f002b99989b561ddce62e4cf2887b0f8860ae991" }
[patch."https://github.com/ralexstokes/mev-rs"]
mev-rs = { git = "https://github.com/ralexstokes//mev-rs", rev = "7813d4a4a564e0754e9aaab2d95520ba437c3889" }
[patch."https://github.com/ralexstokes/ethereum-consensus"]
ethereum-consensus = { git = "https://github.com/ralexstokes//ethereum-consensus", rev = "9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d" }
[patch."https://github.com/ralexstokes/ssz-rs"]
ssz-rs = { git = "https://github.com/ralexstokes//ssz-rs", rev = "adf1a0b14cef90b9536f28ef89da1fab316465e1" }
[profile.maxperf] [profile.maxperf]
inherits = "release" inherits = "release"
lto = "fat" lto = "fat"

View File

@ -1,6 +1,6 @@
[package] [package]
name = "beacon_node" name = "beacon_node"
version = "4.2.0" version = "4.3.0"
authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com"] authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com"]
edition = "2021" edition = "2021"

View File

@ -86,7 +86,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let ideal_reward = reward_numerator let ideal_reward = reward_numerator
.safe_div(active_increments)? .safe_div(active_increments)?
.safe_div(WEIGHT_DENOMINATOR)?; .safe_div(WEIGHT_DENOMINATOR)?;
if !state.is_in_inactivity_leak(previous_epoch, spec) { if !state.is_in_inactivity_leak(previous_epoch, spec)? {
ideal_rewards_hashmap ideal_rewards_hashmap
.insert((flag_index, effective_balance), (ideal_reward, penalty)); .insert((flag_index, effective_balance), (ideal_reward, penalty));
} else { } else {

View File

@ -236,6 +236,17 @@ pub struct PrePayloadAttributes {
pub parent_block_number: u64, pub parent_block_number: u64,
} }
/// Information about a state/block at a specific slot.
#[derive(Debug, Clone, Copy)]
pub struct FinalizationAndCanonicity {
/// True if the slot of the state or block is finalized.
///
/// This alone DOES NOT imply that the state/block is finalized, use `self.is_finalized()`.
pub slot_is_finalized: bool,
/// True if the state or block is canonical at its slot.
pub canonical: bool,
}
/// Define whether a forkchoiceUpdate needs to be checked for an override (`Yes`) or has already /// Define whether a forkchoiceUpdate needs to be checked for an override (`Yes`) or has already
/// been checked (`AlreadyApplied`). It is safe to specify `Yes` even if re-orgs are disabled. /// been checked (`AlreadyApplied`). It is safe to specify `Yes` even if re-orgs are disabled.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
@ -470,6 +481,12 @@ pub struct BeaconChain<T: BeaconChainTypes> {
type BeaconBlockAndState<T, Payload> = (BeaconBlock<T, Payload>, BeaconState<T>); type BeaconBlockAndState<T, Payload> = (BeaconBlock<T, Payload>, BeaconState<T>);
impl FinalizationAndCanonicity {
pub fn is_finalized(self) -> bool {
self.slot_is_finalized && self.canonical
}
}
impl<T: BeaconChainTypes> BeaconChain<T> { impl<T: BeaconChainTypes> BeaconChain<T> {
/// Checks if a block is finalized. /// Checks if a block is finalized.
/// The finalization check is done with the block slot. The block root is used to verify that /// The finalization check is done with the block slot. The block root is used to verify that
@ -499,16 +516,30 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
state_root: &Hash256, state_root: &Hash256,
state_slot: Slot, state_slot: Slot,
) -> Result<bool, Error> { ) -> Result<bool, Error> {
self.state_finalization_and_canonicity(state_root, state_slot)
.map(FinalizationAndCanonicity::is_finalized)
}
/// Fetch the finalization and canonicity status of the state with `state_root`.
pub fn state_finalization_and_canonicity(
&self,
state_root: &Hash256,
state_slot: Slot,
) -> Result<FinalizationAndCanonicity, Error> {
let finalized_slot = self let finalized_slot = self
.canonical_head .canonical_head
.cached_head() .cached_head()
.finalized_checkpoint() .finalized_checkpoint()
.epoch .epoch
.start_slot(T::EthSpec::slots_per_epoch()); .start_slot(T::EthSpec::slots_per_epoch());
let is_canonical = self let slot_is_finalized = state_slot <= finalized_slot;
let canonical = self
.state_root_at_slot(state_slot)? .state_root_at_slot(state_slot)?
.map_or(false, |canonical_root| state_root == &canonical_root); .map_or(false, |canonical_root| state_root == &canonical_root);
Ok(state_slot <= finalized_slot && is_canonical) Ok(FinalizationAndCanonicity {
slot_is_finalized,
canonical,
})
} }
/// Persists the head tracker and fork choice. /// Persists the head tracker and fork choice.
@ -1981,7 +2012,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
self: &Arc<Self>, self: &Arc<Self>,
blob_sidecar: SignedBlobSidecar<T::EthSpec>, blob_sidecar: SignedBlobSidecar<T::EthSpec>,
subnet_id: u64, subnet_id: u64,
) -> Result<GossipVerifiedBlob<T::EthSpec>, BlobError<T::EthSpec>> { ) -> Result<GossipVerifiedBlob<T>, BlobError<T::EthSpec>> {
blob_verification::validate_blob_sidecar_for_gossip(blob_sidecar, subnet_id, self) blob_verification::validate_blob_sidecar_for_gossip(blob_sidecar, subnet_id, self)
} }
@ -2664,6 +2695,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
signature_verified_block.block_root(), signature_verified_block.block_root(),
signature_verified_block, signature_verified_block,
notify_execution_layer, notify_execution_layer,
|| Ok(()),
) )
.await .await
{ {
@ -2710,7 +2742,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// Returns an `Err` if the given block was invalid, or an error was encountered during /// Returns an `Err` if the given block was invalid, or an error was encountered during
pub async fn verify_block_for_gossip( pub async fn verify_block_for_gossip(
self: &Arc<Self>, self: &Arc<Self>,
block: BlockWrapper<T::EthSpec>, block: Arc<SignedBeaconBlock<T::EthSpec>>,
) -> Result<GossipVerifiedBlock<T>, BlockError<T::EthSpec>> { ) -> Result<GossipVerifiedBlock<T>, BlockError<T::EthSpec>> {
let chain = self.clone(); let chain = self.clone();
self.task_executor self.task_executor
@ -2754,7 +2786,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
pub async fn process_blob( pub async fn process_blob(
self: &Arc<Self>, self: &Arc<Self>,
blob: GossipVerifiedBlob<T::EthSpec>, blob: GossipVerifiedBlob<T>,
) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> { ) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
self.check_availability_and_maybe_import(blob.slot(), |chain| { self.check_availability_and_maybe_import(blob.slot(), |chain| {
chain.data_availability_checker.put_gossip_blob(blob) chain.data_availability_checker.put_gossip_blob(blob)
@ -2783,6 +2815,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
block_root: Hash256, block_root: Hash256,
unverified_block: B, unverified_block: B,
notify_execution_layer: NotifyExecutionLayer, notify_execution_layer: NotifyExecutionLayer,
publish_fn: impl FnOnce() -> Result<(), BlockError<T::EthSpec>> + Send + 'static,
) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> { ) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
// Start the Prometheus timer. // Start the Prometheus timer.
let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES);
@ -2798,6 +2831,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
notify_execution_layer, notify_execution_layer,
)?; )?;
//TODO(sean) error handling?
publish_fn()?;
let executed_block = self let executed_block = self
.clone() .clone()
.into_executed_block(execution_pending) .into_executed_block(execution_pending)
@ -3073,7 +3109,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
block_delay, block_delay,
&state, &state,
payload_verification_status, payload_verification_status,
self.config.progressive_balances_mode,
&self.spec, &self.spec,
&self.log,
) )
.map_err(|e| BlockError::BeaconChainError(e.into()))?; .map_err(|e| BlockError::BeaconChainError(e.into()))?;
} }
@ -6012,13 +6050,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// Since we are likely calling this during the slot we are going to propose in, don't take into /// Since we are likely calling this during the slot we are going to propose in, don't take into
/// account the current slot when accounting for skips. /// account the current slot when accounting for skips.
pub fn is_healthy(&self, parent_root: &Hash256) -> Result<ChainHealth, Error> { pub fn is_healthy(&self, parent_root: &Hash256) -> Result<ChainHealth, Error> {
let cached_head = self.canonical_head.cached_head();
// Check if the merge has been finalized. // Check if the merge has been finalized.
if let Some(finalized_hash) = self if let Some(finalized_hash) = cached_head.forkchoice_update_parameters().finalized_hash {
.canonical_head
.cached_head()
.forkchoice_update_parameters()
.finalized_hash
{
if ExecutionBlockHash::zero() == finalized_hash { if ExecutionBlockHash::zero() == finalized_hash {
return Ok(ChainHealth::PreMerge); return Ok(ChainHealth::PreMerge);
} }
@ -6045,17 +6079,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// Check slots at the head of the chain. // Check slots at the head of the chain.
let prev_slot = current_slot.saturating_sub(Slot::new(1)); let prev_slot = current_slot.saturating_sub(Slot::new(1));
let head_skips = prev_slot.saturating_sub(self.canonical_head.cached_head().head_slot()); let head_skips = prev_slot.saturating_sub(cached_head.head_slot());
let head_skips_check = head_skips.as_usize() <= self.config.builder_fallback_skips; let head_skips_check = head_skips.as_usize() <= self.config.builder_fallback_skips;
// Check if finalization is advancing. // Check if finalization is advancing.
let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
let epochs_since_finalization = current_epoch.saturating_sub( let epochs_since_finalization =
self.canonical_head current_epoch.saturating_sub(cached_head.finalized_checkpoint().epoch);
.cached_head()
.finalized_checkpoint()
.epoch,
);
let finalization_check = epochs_since_finalization.as_usize() let finalization_check = epochs_since_finalization.as_usize()
<= self.config.builder_fallback_epochs_since_finalization; <= self.config.builder_fallback_epochs_since_finalization;

View File

@ -16,7 +16,7 @@ use eth2::types::BlockContentsTuple;
use kzg::Kzg; use kzg::Kzg;
use slog::{debug, warn}; use slog::{debug, warn};
use ssz_derive::{Decode, Encode}; use ssz_derive::{Decode, Encode};
use ssz_types::FixedVector; use ssz_types::{FixedVector, VariableList};
use std::borrow::Cow; use std::borrow::Cow;
use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList}; use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList};
use types::{ use types::{
@ -125,25 +125,40 @@ impl<T: EthSpec> From<BeaconStateError> for BlobError<T> {
} }
} }
pub type GossipVerifiedBlobList<T> = VariableList<
GossipVerifiedBlob<T>,
<<T as BeaconChainTypes>::EthSpec as EthSpec>::MaxBlobsPerBlock,
>;
/// A wrapper around a `BlobSidecar` that indicates it has been approved for re-gossiping on /// A wrapper around a `BlobSidecar` that indicates it has been approved for re-gossiping on
/// the p2p network. /// the p2p network.
#[derive(Debug, Clone)] #[derive(Debug)]
pub struct GossipVerifiedBlob<T: EthSpec> { pub struct GossipVerifiedBlob<T: BeaconChainTypes> {
blob: Arc<BlobSidecar<T>>, blob: SignedBlobSidecar<T::EthSpec>,
} }
impl<T: EthSpec> GossipVerifiedBlob<T> { impl<T: BeaconChainTypes> GossipVerifiedBlob<T> {
pub fn new(
blob: SignedBlobSidecar<T::EthSpec>,
chain: &BeaconChain<T>,
) -> Result<Self, BlobError<T::EthSpec>> {
let blob_index = blob.message.index;
validate_blob_sidecar_for_gossip(blob, blob_index, chain)
}
pub fn id(&self) -> BlobIdentifier { pub fn id(&self) -> BlobIdentifier {
self.blob.id() self.blob.message.id()
} }
pub fn block_root(&self) -> Hash256 { pub fn block_root(&self) -> Hash256 {
self.blob.block_root self.blob.message.block_root
} }
pub fn to_blob(self) -> Arc<BlobSidecar<T>> { pub fn to_blob(self) -> Arc<BlobSidecar<T::EthSpec>> {
self.blob self.blob.message
}
pub fn signed_blob(&self) -> SignedBlobSidecar<T::EthSpec> {
self.blob.clone()
} }
pub fn slot(&self) -> Slot { pub fn slot(&self) -> Slot {
self.blob.slot self.blob.message.slot
} }
} }
@ -151,7 +166,7 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
signed_blob_sidecar: SignedBlobSidecar<T::EthSpec>, signed_blob_sidecar: SignedBlobSidecar<T::EthSpec>,
subnet: u64, subnet: u64,
chain: &BeaconChain<T>, chain: &BeaconChain<T>,
) -> Result<GossipVerifiedBlob<T::EthSpec>, BlobError<T::EthSpec>> { ) -> Result<GossipVerifiedBlob<T>, BlobError<T::EthSpec>> {
let blob_slot = signed_blob_sidecar.message.slot; let blob_slot = signed_blob_sidecar.message.slot;
let blob_index = signed_blob_sidecar.message.index; let blob_index = signed_blob_sidecar.message.index;
let block_parent_root = signed_blob_sidecar.message.block_parent_root; let block_parent_root = signed_blob_sidecar.message.block_parent_root;
@ -366,7 +381,7 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
// Note: If this BlobSidecar goes on to fail full verification, we do not evict it from the seen_cache // Note: If this BlobSidecar goes on to fail full verification, we do not evict it from the seen_cache
// as alternate blob_sidecars for the same identifier can still be retrieved // as alternate blob_sidecars for the same identifier can still be retrieved
// over rpc. Evicting them from this cache would allow faster propagation over gossip. So we allow // over rpc. Evicting them from this cache would allow faster propagation over gossip. So we allow
// retreieval of potentially valid blocks over rpc, but try to punish the proposer for signing // retrieval of potentially valid blocks over rpc, but try to punish the proposer for signing
// invalid messages. Issue for more background // invalid messages. Issue for more background
// https://github.com/ethereum/consensus-specs/issues/3261 // https://github.com/ethereum/consensus-specs/issues/3261
if chain if chain
@ -383,7 +398,7 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
} }
Ok(GossipVerifiedBlob { Ok(GossipVerifiedBlob {
blob: signed_blob_sidecar.message, blob: signed_blob_sidecar,
}) })
} }
@ -511,7 +526,6 @@ pub fn verify_kzg_for_blob_list<T: EthSpec>(
let (blobs, (commitments, proofs)): (Vec<_>, (Vec<_>, Vec<_>)) = blob_list let (blobs, (commitments, proofs)): (Vec<_>, (Vec<_>, Vec<_>)) = blob_list
.clone() .clone()
.into_iter() .into_iter()
//TODO(sean) remove clone
.map(|blob| (blob.blob.clone(), (blob.kzg_commitment, blob.kzg_proof))) .map(|blob| (blob.blob.clone(), (blob.kzg_commitment, blob.kzg_proof)))
.unzip(); .unzip();
if validate_blobs::<T>( if validate_blobs::<T>(

View File

@ -48,7 +48,10 @@
// returned alongside. // returned alongside.
#![allow(clippy::result_large_err)] #![allow(clippy::result_large_err)]
use crate::blob_verification::{AsBlock, BlobError, BlockWrapper, MaybeAvailableBlock}; use crate::blob_verification::{
AsBlock, BlobError, BlockWrapper, GossipVerifiedBlob, GossipVerifiedBlobList,
MaybeAvailableBlock,
};
use crate::data_availability_checker::{ use crate::data_availability_checker::{
AvailabilityCheckError, AvailabilityPendingBlock, AvailableBlock, AvailabilityCheckError, AvailabilityPendingBlock, AvailableBlock,
}; };
@ -57,6 +60,7 @@ use crate::execution_payload::{
is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block,
AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier, AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier,
}; };
use crate::observed_block_producers::SeenBlock;
use crate::snapshot_cache::PreProcessingSnapshot; use crate::snapshot_cache::PreProcessingSnapshot;
use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS;
use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::validator_pubkey_cache::ValidatorPubkeyCache;
@ -68,7 +72,7 @@ use crate::{
metrics, BeaconChain, BeaconChainError, BeaconChainTypes, metrics, BeaconChain, BeaconChainError, BeaconChainTypes,
}; };
use derivative::Derivative; use derivative::Derivative;
use eth2::types::EventKind; use eth2::types::{EventKind, SignedBlockContents};
use execution_layer::PayloadStatus; use execution_layer::PayloadStatus;
pub use fork_choice::{AttestationFromBlock, PayloadVerificationStatus}; pub use fork_choice::{AttestationFromBlock, PayloadVerificationStatus};
use parking_lot::RwLockReadGuard; use parking_lot::RwLockReadGuard;
@ -78,6 +82,7 @@ use slog::{debug, error, warn, Logger};
use slot_clock::SlotClock; use slot_clock::SlotClock;
use ssz::Encode; use ssz::Encode;
use ssz_derive::{Decode, Encode}; use ssz_derive::{Decode, Encode};
use ssz_types::VariableList;
use state_processing::per_block_processing::{errors::IntoWithIndex, is_merge_transition_block}; use state_processing::per_block_processing::{errors::IntoWithIndex, is_merge_transition_block};
use state_processing::{ use state_processing::{
block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError}, block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError},
@ -91,7 +96,7 @@ use std::fs;
use std::io::Write; use std::io::Write;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; use store::{Error as DBError, HotStateSummary, KeyValueStore, SignedBlobSidecarList, StoreOp};
use task_executor::JoinHandle; use task_executor::JoinHandle;
use tree_hash::TreeHash; use tree_hash::TreeHash;
use types::blob_sidecar::BlobIdentifier; use types::blob_sidecar::BlobIdentifier;
@ -189,13 +194,6 @@ pub enum BlockError<T: EthSpec> {
/// ///
/// The block is valid and we have already imported a block with this hash. /// The block is valid and we have already imported a block with this hash.
BlockIsAlreadyKnown, BlockIsAlreadyKnown,
/// A block for this proposer and slot has already been observed.
///
/// ## Peer scoring
///
/// The `proposer` has already proposed a block at this slot. The existing block may or may not
/// be equal to the given block.
RepeatProposal { proposer: u64, slot: Slot },
/// The block slot exceeds the MAXIMUM_BLOCK_SLOT_NUMBER. /// The block slot exceeds the MAXIMUM_BLOCK_SLOT_NUMBER.
/// ///
/// ## Peer scoring /// ## Peer scoring
@ -291,6 +289,14 @@ pub enum BlockError<T: EthSpec> {
/// problems to worry about than losing peers, and we're doing the network a favour by /// problems to worry about than losing peers, and we're doing the network a favour by
/// disconnecting. /// disconnecting.
ParentExecutionPayloadInvalid { parent_root: Hash256 }, ParentExecutionPayloadInvalid { parent_root: Hash256 },
/// The block is a slashable equivocation from the proposer.
///
/// ## Peer scoring
///
/// Honest peers shouldn't forward more than 1 equivocating block from the same proposer, so
/// we penalise them with a mid-tolerance error.
Slashable,
//TODO(sean) peer scoring docs
/// A blob alone failed validation. /// A blob alone failed validation.
BlobValidation(BlobError<T>), BlobValidation(BlobError<T>),
/// The block and blob together failed validation. /// The block and blob together failed validation.
@ -625,6 +631,13 @@ pub struct GossipVerifiedBlock<T: BeaconChainTypes> {
consensus_context: ConsensusContext<T::EthSpec>, consensus_context: ConsensusContext<T::EthSpec>,
} }
impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
/// Useful for publishing after gossip verification.
pub fn into_block_wrapper(self) -> BlockWrapper<T::EthSpec> {
self.block.into_block_wrapper()
}
}
/// A wrapper around a `SignedBeaconBlock` that indicates that all signatures (except the deposit /// A wrapper around a `SignedBeaconBlock` that indicates that all signatures (except the deposit
/// signatures) have been verified. /// signatures) have been verified.
pub struct SignatureVerifiedBlock<T: BeaconChainTypes> { pub struct SignatureVerifiedBlock<T: BeaconChainTypes> {
@ -794,6 +807,69 @@ pub struct BlockImportData<E: EthSpec> {
pub consensus_context: ConsensusContext<E>, pub consensus_context: ConsensusContext<E>,
} }
pub type GossipVerifiedBlockContents<T> =
(GossipVerifiedBlock<T>, Option<GossipVerifiedBlobList<T>>);
pub trait IntoGossipVerifiedBlockContents<T: BeaconChainTypes>: Sized {
fn into_gossip_verified_block(
self,
chain: &BeaconChain<T>,
) -> Result<GossipVerifiedBlockContents<T>, BlockError<T::EthSpec>>;
fn inner_block(&self) -> &SignedBeaconBlock<T::EthSpec>;
fn inner_blobs(&self) -> Option<SignedBlobSidecarList<T::EthSpec>>;
}
impl<T: BeaconChainTypes> IntoGossipVerifiedBlockContents<T> for GossipVerifiedBlockContents<T> {
fn into_gossip_verified_block(
self,
_chain: &BeaconChain<T>,
) -> Result<GossipVerifiedBlockContents<T>, BlockError<T::EthSpec>> {
Ok(self)
}
fn inner_block(&self) -> &SignedBeaconBlock<T::EthSpec> {
self.0.block.as_block()
}
fn inner_blobs(&self) -> Option<SignedBlobSidecarList<T::EthSpec>> {
self.1.as_ref().map(|blobs| {
VariableList::from(
blobs
.into_iter()
.map(GossipVerifiedBlob::signed_blob)
.collect::<Vec<_>>(),
)
})
}
}
impl<T: BeaconChainTypes> IntoGossipVerifiedBlockContents<T> for SignedBlockContents<T::EthSpec> {
fn into_gossip_verified_block(
self,
chain: &BeaconChain<T>,
) -> Result<GossipVerifiedBlockContents<T>, BlockError<T::EthSpec>> {
let (block, blobs) = self.deconstruct();
let gossip_verified_block = GossipVerifiedBlock::new(Arc::new(block), chain)?;
let gossip_verified_blobs = blobs
.map(|blobs| {
Ok::<_, BlobError<T::EthSpec>>(VariableList::from(
blobs
.into_iter()
.map(|blob| GossipVerifiedBlob::new(blob, chain))
.collect::<Result<Vec<_>, BlobError<T::EthSpec>>>()?,
))
})
.transpose()?;
Ok((gossip_verified_block, gossip_verified_blobs))
}
fn inner_block(&self) -> &SignedBeaconBlock<T::EthSpec> {
self.signed_block()
}
fn inner_blobs(&self) -> Option<SignedBlobSidecarList<T::EthSpec>> {
self.blobs_cloned()
}
}
/// Implemented on types that can be converted into a `ExecutionPendingBlock`. /// Implemented on types that can be converted into a `ExecutionPendingBlock`.
/// ///
/// Used to allow functions to accept blocks at various stages of verification. /// Used to allow functions to accept blocks at various stages of verification.
@ -832,10 +908,12 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
/// ///
/// Returns an error if the block is invalid, or i8f the block was unable to be verified. /// Returns an error if the block is invalid, or i8f the block was unable to be verified.
pub fn new( pub fn new(
block: BlockWrapper<T::EthSpec>, block: Arc<SignedBeaconBlock<T::EthSpec>>,
chain: &BeaconChain<T>, chain: &BeaconChain<T>,
) -> Result<Self, BlockError<T::EthSpec>> { ) -> Result<Self, BlockError<T::EthSpec>> {
let maybe_available = chain.data_availability_checker.check_availability(block)?; let maybe_available = chain
.data_availability_checker
.check_availability(block.into())?;
// If the block is valid for gossip we don't supply it to the slasher here because // If the block is valid for gossip we don't supply it to the slasher here because
// we assume it will be transformed into a fully verified block. We *do* need to supply // we assume it will be transformed into a fully verified block. We *do* need to supply
// it to the slasher if an error occurs, because that's the end of this block's journey, // it to the slasher if an error occurs, because that's the end of this block's journey,
@ -892,19 +970,6 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
return Err(BlockError::BlockIsAlreadyKnown); return Err(BlockError::BlockIsAlreadyKnown);
} }
// Check that we have not already received a block with a valid signature for this slot.
if chain
.observed_block_producers
.read()
.proposer_has_been_observed(block.message())
.map_err(|e| BlockError::BeaconChainError(e.into()))?
{
return Err(BlockError::RepeatProposal {
proposer: block.message().proposer_index(),
slot: block.slot(),
});
}
// Do not process a block that doesn't descend from the finalized root. // Do not process a block that doesn't descend from the finalized root.
// //
// We check this *before* we load the parent so that we can return a more detailed error. // We check this *before* we load the parent so that we can return a more detailed error.
@ -1020,17 +1085,18 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
// //
// It's important to double-check that the proposer still hasn't been observed so we don't // It's important to double-check that the proposer still hasn't been observed so we don't
// have a race-condition when verifying two blocks simultaneously. // have a race-condition when verifying two blocks simultaneously.
if chain match chain
.observed_block_producers .observed_block_producers
.write() .write()
.observe_proposer(block.message()) .observe_proposal(block_root, block.message())
.map_err(|e| BlockError::BeaconChainError(e.into()))? .map_err(|e| BlockError::BeaconChainError(e.into()))?
{ {
return Err(BlockError::RepeatProposal { SeenBlock::Slashable => {
proposer: block.message().proposer_index(), return Err(BlockError::Slashable);
slot: block.slot(), }
}); SeenBlock::Duplicate => return Err(BlockError::BlockIsAlreadyKnown),
} SeenBlock::UniqueNonSlashable => {}
};
if block.message().proposer_index() != expected_proposer as u64 { if block.message().proposer_index() != expected_proposer as u64 {
return Err(BlockError::IncorrectBlockProposer { return Err(BlockError::IncorrectBlockProposer {
@ -1293,6 +1359,12 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
chain: &Arc<BeaconChain<T>>, chain: &Arc<BeaconChain<T>>,
notify_execution_layer: NotifyExecutionLayer, notify_execution_layer: NotifyExecutionLayer,
) -> Result<Self, BlockError<T::EthSpec>> { ) -> Result<Self, BlockError<T::EthSpec>> {
chain
.observed_block_producers
.write()
.observe_proposal(block_root, block.message())
.map_err(|e| BlockError::BeaconChainError(e.into()))?;
if let Some(parent) = chain if let Some(parent) = chain
.canonical_head .canonical_head
.fork_choice_read_lock() .fork_choice_read_lock()

View File

@ -343,7 +343,7 @@ where
let beacon_block = genesis_block(&mut beacon_state, &self.spec)?; let beacon_block = genesis_block(&mut beacon_state, &self.spec)?;
beacon_state beacon_state
.build_all_caches(&self.spec) .build_caches(&self.spec)
.map_err(|e| format!("Failed to build genesis state caches: {:?}", e))?; .map_err(|e| format!("Failed to build genesis state caches: {:?}", e))?;
let beacon_state_root = beacon_block.message().state_root(); let beacon_state_root = beacon_block.message().state_root();
@ -433,7 +433,7 @@ where
// Prime all caches before storing the state in the database and computing the tree hash // Prime all caches before storing the state in the database and computing the tree hash
// root. // root.
weak_subj_state weak_subj_state
.build_all_caches(&self.spec) .build_caches(&self.spec)
.map_err(|e| format!("Error building caches on checkpoint state: {e:?}"))?; .map_err(|e| format!("Error building caches on checkpoint state: {e:?}"))?;
weak_subj_state weak_subj_state
.update_tree_hash_cache() .update_tree_hash_cache()
@ -701,6 +701,8 @@ where
store.clone(), store.clone(),
Some(current_slot), Some(current_slot),
&self.spec, &self.spec,
self.chain_config.progressive_balances_mode,
&log,
)?; )?;
} }
@ -714,7 +716,7 @@ where
head_snapshot head_snapshot
.beacon_state .beacon_state
.build_all_caches(&self.spec) .build_caches(&self.spec)
.map_err(|e| format!("Failed to build state caches: {:?}", e))?; .map_err(|e| format!("Failed to build state caches: {:?}", e))?;
// Perform a check to ensure that the finalization points of the head and fork choice are // Perform a check to ensure that the finalization points of the head and fork choice are
@ -840,9 +842,7 @@ where
observed_sync_aggregators: <_>::default(), observed_sync_aggregators: <_>::default(),
// TODO: allow for persisting and loading the pool from disk. // TODO: allow for persisting and loading the pool from disk.
observed_block_producers: <_>::default(), observed_block_producers: <_>::default(),
// TODO: allow for persisting and loading the pool from disk.
observed_blob_sidecars: <_>::default(), observed_blob_sidecars: <_>::default(),
// TODO: allow for persisting and loading the pool from disk.
observed_voluntary_exits: <_>::default(), observed_voluntary_exits: <_>::default(),
observed_proposer_slashings: <_>::default(), observed_proposer_slashings: <_>::default(),
observed_attester_slashings: <_>::default(), observed_attester_slashings: <_>::default(),

View File

@ -1,7 +1,7 @@
pub use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; pub use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold};
use serde_derive::{Deserialize, Serialize}; use serde_derive::{Deserialize, Serialize};
use std::time::Duration; use std::time::Duration;
use types::{Checkpoint, Epoch}; use types::{Checkpoint, Epoch, ProgressiveBalancesMode};
pub const DEFAULT_RE_ORG_THRESHOLD: ReOrgThreshold = ReOrgThreshold(20); pub const DEFAULT_RE_ORG_THRESHOLD: ReOrgThreshold = ReOrgThreshold(20);
pub const DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION: Epoch = Epoch::new(2); pub const DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION: Epoch = Epoch::new(2);
@ -81,6 +81,10 @@ pub struct ChainConfig {
pub always_prepare_payload: bool, pub always_prepare_payload: bool,
/// Whether backfill sync processing should be rate-limited. /// Whether backfill sync processing should be rate-limited.
pub enable_backfill_rate_limiting: bool, pub enable_backfill_rate_limiting: bool,
/// Whether to use `ProgressiveBalancesCache` in unrealized FFG progression calculation.
pub progressive_balances_mode: ProgressiveBalancesMode,
/// Number of epochs between each migration of data from the hot database to the freezer.
pub epochs_per_migration: u64,
} }
impl Default for ChainConfig { impl Default for ChainConfig {
@ -111,6 +115,8 @@ impl Default for ChainConfig {
genesis_backfill: false, genesis_backfill: false,
always_prepare_payload: false, always_prepare_payload: false,
enable_backfill_rate_limiting: true, enable_backfill_rate_limiting: true,
progressive_balances_mode: ProgressiveBalancesMode::Checked,
epochs_per_migration: crate::migrate::DEFAULT_EPOCHS_PER_MIGRATION,
} }
} }
} }

View File

@ -205,7 +205,7 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
/// This should only accept gossip verified blobs, so we should not have to worry about dupes. /// This should only accept gossip verified blobs, so we should not have to worry about dupes.
pub fn put_gossip_blob( pub fn put_gossip_blob(
&self, &self,
gossip_blob: GossipVerifiedBlob<T::EthSpec>, gossip_blob: GossipVerifiedBlob<T>,
) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> { ) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> {
// Verify the KZG commitments. // Verify the KZG commitments.
let kzg_verified_blob = if let Some(kzg) = self.kzg.as_ref() { let kzg_verified_blob = if let Some(kzg) = self.kzg.as_ref() {
@ -531,6 +531,17 @@ pub enum VerifiedBlobs<E: EthSpec> {
PreDeneb, PreDeneb,
} }
impl<E: EthSpec> VerifiedBlobs<E> {
pub fn to_blobs(self) -> Option<BlobSidecarList<E>> {
match self {
Self::Available(blobs) => Some(blobs),
Self::NotRequired => None,
Self::EmptyBlobs => None,
Self::PreDeneb => None,
}
}
}
/// A fully available block that is ready to be imported into fork choice. /// A fully available block that is ready to be imported into fork choice.
#[derive(Clone, Debug, PartialEq)] #[derive(Clone, Debug, PartialEq)]
pub struct AvailableBlock<E: EthSpec> { pub struct AvailableBlock<E: EthSpec> {

View File

@ -1067,7 +1067,7 @@ mod test {
log: Logger, log: Logger,
) -> ( ) -> (
AvailabilityPendingExecutedBlock<E>, AvailabilityPendingExecutedBlock<E>,
Vec<GossipVerifiedBlob<E>>, Vec<GossipVerifiedBlob<BaseHarnessType<E, Hot, Cold>>>,
) )
where where
E: EthSpec, E: EthSpec,

View File

@ -216,6 +216,7 @@ pub enum BeaconChainError {
BlsToExecutionConflictsWithPool, BlsToExecutionConflictsWithPool,
InconsistentFork(InconsistentFork), InconsistentFork(InconsistentFork),
ProposerHeadForkChoiceError(fork_choice::Error<proto_array::Error>), ProposerHeadForkChoiceError(fork_choice::Error<proto_array::Error>),
UnableToPublish,
AvailabilityCheckError(AvailabilityCheckError), AvailabilityCheckError(AvailabilityCheckError),
} }

View File

@ -10,7 +10,10 @@ use state_processing::{
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use store::{iter::ParentRootBlockIterator, HotColdDB, ItemStore}; use store::{iter::ParentRootBlockIterator, HotColdDB, ItemStore};
use types::{BeaconState, ChainSpec, EthSpec, ForkName, Hash256, SignedBeaconBlock, Slot}; use types::{
BeaconState, ChainSpec, EthSpec, ForkName, Hash256, ProgressiveBalancesMode, SignedBeaconBlock,
Slot,
};
const CORRUPT_DB_MESSAGE: &str = "The database could be corrupt. Check its file permissions or \ const CORRUPT_DB_MESSAGE: &str = "The database could be corrupt. Check its file permissions or \
consider deleting it by running with the --purge-db flag."; consider deleting it by running with the --purge-db flag.";
@ -100,6 +103,8 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
store: Arc<HotColdDB<E, Hot, Cold>>, store: Arc<HotColdDB<E, Hot, Cold>>,
current_slot: Option<Slot>, current_slot: Option<Slot>,
spec: &ChainSpec, spec: &ChainSpec,
progressive_balances_mode: ProgressiveBalancesMode,
log: &Logger,
) -> Result<ForkChoice<BeaconForkChoiceStore<E, Hot, Cold>, E>, String> { ) -> Result<ForkChoice<BeaconForkChoiceStore<E, Hot, Cold>, E>, String> {
// Fetch finalized block. // Fetch finalized block.
let finalized_checkpoint = head_state.finalized_checkpoint(); let finalized_checkpoint = head_state.finalized_checkpoint();
@ -197,7 +202,9 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
Duration::from_secs(0), Duration::from_secs(0),
&state, &state,
payload_verification_status, payload_verification_status,
progressive_balances_mode,
spec, spec,
log,
) )
.map_err(|e| format!("Error applying replayed block to fork choice: {:?}", e))?; .map_err(|e| format!("Error applying replayed block to fork choice: {:?}", e))?;
} }

View File

@ -71,7 +71,7 @@ pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceSto
pub use block_verification::{ pub use block_verification::{
get_block_root, AvailabilityPendingExecutedBlock, BlockError, ExecutedBlock, get_block_root, AvailabilityPendingExecutedBlock, BlockError, ExecutedBlock,
ExecutionPayloadError, ExecutionPendingBlock, GossipVerifiedBlock, IntoExecutionPendingBlock, ExecutionPayloadError, ExecutionPendingBlock, GossipVerifiedBlock, IntoExecutionPendingBlock,
PayloadVerificationOutcome, PayloadVerificationStatus, IntoGossipVerifiedBlockContents, PayloadVerificationOutcome, PayloadVerificationStatus,
}; };
pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock}; pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock};
pub use eth1_chain::{Eth1Chain, Eth1ChainBackend}; pub use eth1_chain::{Eth1Chain, Eth1ChainBackend};
@ -81,6 +81,7 @@ pub use execution_payload::NotifyExecutionLayer;
pub use fork_choice::{ExecutionStatus, ForkchoiceUpdateParameters}; pub use fork_choice::{ExecutionStatus, ForkchoiceUpdateParameters};
pub use kzg::TrustedSetup; pub use kzg::TrustedSetup;
pub use metrics::scrape_for_metrics; pub use metrics::scrape_for_metrics;
pub use migrate::MigratorConfig;
pub use parking_lot; pub use parking_lot;
pub use slot_clock; pub use slot_clock;
pub use state_processing::per_block_processing::errors::{ pub use state_processing::per_block_processing::errors::{

View File

@ -25,10 +25,15 @@ const MIN_COMPACTION_PERIOD_SECONDS: u64 = 7200;
/// Compact after a large finality gap, if we respect `MIN_COMPACTION_PERIOD_SECONDS`. /// Compact after a large finality gap, if we respect `MIN_COMPACTION_PERIOD_SECONDS`.
const COMPACTION_FINALITY_DISTANCE: u64 = 1024; const COMPACTION_FINALITY_DISTANCE: u64 = 1024;
/// Default number of epochs to wait between finalization migrations.
pub const DEFAULT_EPOCHS_PER_MIGRATION: u64 = 1;
/// The background migrator runs a thread to perform pruning and migrate state from the hot /// The background migrator runs a thread to perform pruning and migrate state from the hot
/// to the cold database. /// to the cold database.
pub struct BackgroundMigrator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> { pub struct BackgroundMigrator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
db: Arc<HotColdDB<E, Hot, Cold>>, db: Arc<HotColdDB<E, Hot, Cold>>,
/// Record of when the last migration ran, for enforcing `epochs_per_migration`.
prev_migration: Arc<Mutex<PrevMigration>>,
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
tx_thread: Option<Mutex<(mpsc::Sender<Notification>, thread::JoinHandle<()>)>>, tx_thread: Option<Mutex<(mpsc::Sender<Notification>, thread::JoinHandle<()>)>>,
/// Genesis block root, for persisting the `PersistedBeaconChain`. /// Genesis block root, for persisting the `PersistedBeaconChain`.
@ -36,9 +41,22 @@ pub struct BackgroundMigrator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>
log: Logger, log: Logger,
} }
#[derive(Debug, Default, Clone, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq, Eq)]
pub struct MigratorConfig { pub struct MigratorConfig {
pub blocking: bool, pub blocking: bool,
/// Run migrations at most once per `epochs_per_migration`.
///
/// If set to 0 or 1, then run every finalization.
pub epochs_per_migration: u64,
}
impl Default for MigratorConfig {
fn default() -> Self {
Self {
blocking: false,
epochs_per_migration: DEFAULT_EPOCHS_PER_MIGRATION,
}
}
} }
impl MigratorConfig { impl MigratorConfig {
@ -46,6 +64,19 @@ impl MigratorConfig {
self.blocking = true; self.blocking = true;
self self
} }
pub fn epochs_per_migration(mut self, epochs_per_migration: u64) -> Self {
self.epochs_per_migration = epochs_per_migration;
self
}
}
/// Record of when the last migration ran.
pub struct PrevMigration {
/// The epoch at which the last finalization migration ran.
epoch: Epoch,
/// The number of epochs to wait between runs.
epochs_per_migration: u64,
} }
/// Pruning can be successful, or in rare cases deferred to a later point. /// Pruning can be successful, or in rare cases deferred to a later point.
@ -93,6 +124,7 @@ pub struct FinalizationNotification {
finalized_state_root: BeaconStateHash, finalized_state_root: BeaconStateHash,
finalized_checkpoint: Checkpoint, finalized_checkpoint: Checkpoint,
head_tracker: Arc<HeadTracker>, head_tracker: Arc<HeadTracker>,
prev_migration: Arc<Mutex<PrevMigration>>,
genesis_block_root: Hash256, genesis_block_root: Hash256,
} }
@ -104,6 +136,11 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
genesis_block_root: Hash256, genesis_block_root: Hash256,
log: Logger, log: Logger,
) -> Self { ) -> Self {
// Estimate last migration run from DB split slot.
let prev_migration = Arc::new(Mutex::new(PrevMigration {
epoch: db.get_split_slot().epoch(E::slots_per_epoch()),
epochs_per_migration: config.epochs_per_migration,
}));
let tx_thread = if config.blocking { let tx_thread = if config.blocking {
None None
} else { } else {
@ -112,6 +149,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
Self { Self {
db, db,
tx_thread, tx_thread,
prev_migration,
genesis_block_root, genesis_block_root,
log, log,
} }
@ -132,6 +170,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
finalized_state_root, finalized_state_root,
finalized_checkpoint, finalized_checkpoint,
head_tracker, head_tracker,
prev_migration: self.prev_migration.clone(),
genesis_block_root: self.genesis_block_root, genesis_block_root: self.genesis_block_root,
}; };
@ -227,6 +266,26 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
notif: FinalizationNotification, notif: FinalizationNotification,
log: &Logger, log: &Logger,
) { ) {
// Do not run too frequently.
let epoch = notif.finalized_checkpoint.epoch;
let mut prev_migration = notif.prev_migration.lock();
if epoch < prev_migration.epoch + prev_migration.epochs_per_migration {
debug!(
log,
"Database consolidation deferred";
"last_finalized_epoch" => prev_migration.epoch,
"new_finalized_epoch" => epoch,
"epochs_per_migration" => prev_migration.epochs_per_migration,
);
return;
}
// Update the previous migration epoch immediately to avoid holding the lock. If the
// migration doesn't succeed then the next migration will be retried at the next scheduled
// run.
prev_migration.epoch = epoch;
drop(prev_migration);
debug!(log, "Database consolidation started"); debug!(log, "Database consolidation started");
let finalized_state_root = notif.finalized_state_root; let finalized_state_root = notif.finalized_state_root;

View File

@ -1,9 +1,10 @@
//! Provides the `ObservedBlockProducers` struct which allows for rejecting gossip blocks from //! Provides the `ObservedBlockProducers` struct which allows for rejecting gossip blocks from
//! validators that have already produced a block. //! validators that have already produced a block.
use std::collections::hash_map::Entry;
use std::collections::{HashMap, HashSet}; use std::collections::{HashMap, HashSet};
use std::marker::PhantomData; use std::marker::PhantomData;
use types::{BeaconBlockRef, Epoch, EthSpec, Slot, Unsigned}; use types::{BeaconBlockRef, Epoch, EthSpec, Hash256, Slot, Unsigned};
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub enum Error { pub enum Error {
@ -14,6 +15,12 @@ pub enum Error {
ValidatorIndexTooHigh(u64), ValidatorIndexTooHigh(u64),
} }
#[derive(Eq, Hash, PartialEq, Debug, Default)]
struct ProposalKey {
slot: Slot,
proposer: u64,
}
/// Maintains a cache of observed `(block.slot, block.proposer)`. /// Maintains a cache of observed `(block.slot, block.proposer)`.
/// ///
/// The cache supports pruning based upon the finalized epoch. It does not automatically prune, you /// The cache supports pruning based upon the finalized epoch. It does not automatically prune, you
@ -27,7 +34,7 @@ pub enum Error {
/// known_distinct_shufflings` which is much smaller. /// known_distinct_shufflings` which is much smaller.
pub struct ObservedBlockProducers<E: EthSpec> { pub struct ObservedBlockProducers<E: EthSpec> {
finalized_slot: Slot, finalized_slot: Slot,
items: HashMap<Slot, HashSet<u64>>, items: HashMap<ProposalKey, HashSet<Hash256>>,
_phantom: PhantomData<E>, _phantom: PhantomData<E>,
} }
@ -42,6 +49,24 @@ impl<E: EthSpec> Default for ObservedBlockProducers<E> {
} }
} }
pub enum SeenBlock {
Duplicate,
Slashable,
UniqueNonSlashable,
}
impl SeenBlock {
pub fn proposer_previously_observed(self) -> bool {
match self {
Self::Duplicate | Self::Slashable => true,
Self::UniqueNonSlashable => false,
}
}
pub fn is_slashable(&self) -> bool {
matches!(self, Self::Slashable)
}
}
impl<E: EthSpec> ObservedBlockProducers<E> { impl<E: EthSpec> ObservedBlockProducers<E> {
/// Observe that the `block` was produced by `block.proposer_index` at `block.slot`. This will /// Observe that the `block` was produced by `block.proposer_index` at `block.slot`. This will
/// update `self` so future calls to it indicate that this block is known. /// update `self` so future calls to it indicate that this block is known.
@ -52,16 +77,44 @@ impl<E: EthSpec> ObservedBlockProducers<E> {
/// ///
/// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`. /// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`.
/// - `block.slot` is equal to or less than the latest pruned `finalized_slot`. /// - `block.slot` is equal to or less than the latest pruned `finalized_slot`.
pub fn observe_proposer(&mut self, block: BeaconBlockRef<'_, E>) -> Result<bool, Error> { pub fn observe_proposal(
&mut self,
block_root: Hash256,
block: BeaconBlockRef<'_, E>,
) -> Result<SeenBlock, Error> {
self.sanitize_block(block)?; self.sanitize_block(block)?;
let did_not_exist = self let key = ProposalKey {
.items slot: block.slot(),
.entry(block.slot()) proposer: block.proposer_index(),
.or_insert_with(|| HashSet::with_capacity(E::SlotsPerEpoch::to_usize())) };
.insert(block.proposer_index());
Ok(!did_not_exist) let entry = self.items.entry(key);
let slashable_proposal = match entry {
Entry::Occupied(mut occupied_entry) => {
let block_roots = occupied_entry.get_mut();
let newly_inserted = block_roots.insert(block_root);
let is_equivocation = block_roots.len() > 1;
if is_equivocation {
SeenBlock::Slashable
} else if !newly_inserted {
SeenBlock::Duplicate
} else {
SeenBlock::UniqueNonSlashable
}
}
Entry::Vacant(vacant_entry) => {
let block_roots = HashSet::from([block_root]);
vacant_entry.insert(block_roots);
SeenBlock::UniqueNonSlashable
}
};
Ok(slashable_proposal)
} }
/// Returns `Ok(true)` if the `block` has been observed before, `Ok(false)` if not. Does not /// Returns `Ok(true)` if the `block` has been observed before, `Ok(false)` if not. Does not
@ -72,15 +125,33 @@ impl<E: EthSpec> ObservedBlockProducers<E> {
/// ///
/// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`. /// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`.
/// - `block.slot` is equal to or less than the latest pruned `finalized_slot`. /// - `block.slot` is equal to or less than the latest pruned `finalized_slot`.
pub fn proposer_has_been_observed(&self, block: BeaconBlockRef<'_, E>) -> Result<bool, Error> { pub fn proposer_has_been_observed(
&self,
block: BeaconBlockRef<'_, E>,
block_root: Hash256,
) -> Result<SeenBlock, Error> {
self.sanitize_block(block)?; self.sanitize_block(block)?;
let exists = self let key = ProposalKey {
.items slot: block.slot(),
.get(&block.slot()) proposer: block.proposer_index(),
.map_or(false, |set| set.contains(&block.proposer_index())); };
Ok(exists) if let Some(block_roots) = self.items.get(&key) {
let block_already_known = block_roots.contains(&block_root);
let no_prev_known_blocks =
block_roots.difference(&HashSet::from([block_root])).count() == 0;
if !no_prev_known_blocks {
Ok(SeenBlock::Slashable)
} else if block_already_known {
Ok(SeenBlock::Duplicate)
} else {
Ok(SeenBlock::UniqueNonSlashable)
}
} else {
Ok(SeenBlock::UniqueNonSlashable)
}
} }
/// Returns `Ok(())` if the given `block` is sane. /// Returns `Ok(())` if the given `block` is sane.
@ -112,15 +183,15 @@ impl<E: EthSpec> ObservedBlockProducers<E> {
} }
self.finalized_slot = finalized_slot; self.finalized_slot = finalized_slot;
self.items.retain(|slot, _set| *slot > finalized_slot); self.items.retain(|key, _| key.slot > finalized_slot);
} }
/// Returns `true` if the given `validator_index` has been stored in `self` at `epoch`. /// Returns `true` if the given `validator_index` has been stored in `self` at `epoch`.
/// ///
/// This is useful for doppelganger detection. /// This is useful for doppelganger detection.
pub fn index_seen_at_epoch(&self, validator_index: u64, epoch: Epoch) -> bool { pub fn index_seen_at_epoch(&self, validator_index: u64, epoch: Epoch) -> bool {
self.items.iter().any(|(slot, producers)| { self.items.iter().any(|(key, _)| {
slot.epoch(E::slots_per_epoch()) == epoch && producers.contains(&validator_index) key.slot.epoch(E::slots_per_epoch()) == epoch && key.proposer == validator_index
}) })
} }
} }
@ -148,9 +219,12 @@ mod tests {
// Slot 0, proposer 0 // Slot 0, proposer 0
let block_a = get_block(0, 0); let block_a = get_block(0, 0);
let block_root = block_a.canonical_root();
assert_eq!( assert_eq!(
cache.observe_proposer(block_a.to_ref()), cache
.observe_proposal(block_root, block_a.to_ref())
.map(SeenBlock::proposer_previously_observed),
Ok(false), Ok(false),
"can observe proposer, indicates proposer unobserved" "can observe proposer, indicates proposer unobserved"
); );
@ -164,7 +238,10 @@ mod tests {
assert_eq!( assert_eq!(
cache cache
.items .items
.get(&Slot::new(0)) .get(&ProposalKey {
slot: Slot::new(0),
proposer: 0
})
.expect("slot zero should be present") .expect("slot zero should be present")
.len(), .len(),
1, 1,
@ -182,7 +259,10 @@ mod tests {
assert_eq!( assert_eq!(
cache cache
.items .items
.get(&Slot::new(0)) .get(&ProposalKey {
slot: Slot::new(0),
proposer: 0
})
.expect("slot zero should be present") .expect("slot zero should be present")
.len(), .len(),
1, 1,
@ -207,9 +287,12 @@ mod tests {
// First slot of finalized epoch, proposer 0 // First slot of finalized epoch, proposer 0
let block_b = get_block(E::slots_per_epoch(), 0); let block_b = get_block(E::slots_per_epoch(), 0);
let block_root_b = block_b.canonical_root();
assert_eq!( assert_eq!(
cache.observe_proposer(block_b.to_ref()), cache
.observe_proposal(block_root_b, block_b.to_ref())
.map(SeenBlock::proposer_previously_observed),
Err(Error::FinalizedBlock { Err(Error::FinalizedBlock {
slot: E::slots_per_epoch().into(), slot: E::slots_per_epoch().into(),
finalized_slot: E::slots_per_epoch().into(), finalized_slot: E::slots_per_epoch().into(),
@ -229,7 +312,9 @@ mod tests {
let block_b = get_block(three_epochs, 0); let block_b = get_block(three_epochs, 0);
assert_eq!( assert_eq!(
cache.observe_proposer(block_b.to_ref()), cache
.observe_proposal(block_root_b, block_b.to_ref())
.map(SeenBlock::proposer_previously_observed),
Ok(false), Ok(false),
"can insert non-finalized block" "can insert non-finalized block"
); );
@ -238,7 +323,10 @@ mod tests {
assert_eq!( assert_eq!(
cache cache
.items .items
.get(&Slot::new(three_epochs)) .get(&ProposalKey {
slot: Slot::new(three_epochs),
proposer: 0
})
.expect("the three epochs slot should be present") .expect("the three epochs slot should be present")
.len(), .len(),
1, 1,
@ -262,7 +350,10 @@ mod tests {
assert_eq!( assert_eq!(
cache cache
.items .items
.get(&Slot::new(three_epochs)) .get(&ProposalKey {
slot: Slot::new(three_epochs),
proposer: 0
})
.expect("the three epochs slot should be present") .expect("the three epochs slot should be present")
.len(), .len(),
1, 1,
@ -276,24 +367,33 @@ mod tests {
// Slot 0, proposer 0 // Slot 0, proposer 0
let block_a = get_block(0, 0); let block_a = get_block(0, 0);
let block_root_a = block_a.canonical_root();
assert_eq!( assert_eq!(
cache.proposer_has_been_observed(block_a.to_ref()), cache
.proposer_has_been_observed(block_a.to_ref(), block_a.canonical_root())
.map(|x| x.proposer_previously_observed()),
Ok(false), Ok(false),
"no observation in empty cache" "no observation in empty cache"
); );
assert_eq!( assert_eq!(
cache.observe_proposer(block_a.to_ref()), cache
.observe_proposal(block_root_a, block_a.to_ref())
.map(SeenBlock::proposer_previously_observed),
Ok(false), Ok(false),
"can observe proposer, indicates proposer unobserved" "can observe proposer, indicates proposer unobserved"
); );
assert_eq!( assert_eq!(
cache.proposer_has_been_observed(block_a.to_ref()), cache
.proposer_has_been_observed(block_a.to_ref(), block_a.canonical_root())
.map(|x| x.proposer_previously_observed()),
Ok(true), Ok(true),
"observed block is indicated as true" "observed block is indicated as true"
); );
assert_eq!( assert_eq!(
cache.observe_proposer(block_a.to_ref()), cache
.observe_proposal(block_root_a, block_a.to_ref())
.map(SeenBlock::proposer_previously_observed),
Ok(true), Ok(true),
"observing again indicates true" "observing again indicates true"
); );
@ -303,7 +403,10 @@ mod tests {
assert_eq!( assert_eq!(
cache cache
.items .items
.get(&Slot::new(0)) .get(&ProposalKey {
slot: Slot::new(0),
proposer: 0
})
.expect("slot zero should be present") .expect("slot zero should be present")
.len(), .len(),
1, 1,
@ -312,24 +415,33 @@ mod tests {
// Slot 1, proposer 0 // Slot 1, proposer 0
let block_b = get_block(1, 0); let block_b = get_block(1, 0);
let block_root_b = block_b.canonical_root();
assert_eq!( assert_eq!(
cache.proposer_has_been_observed(block_b.to_ref()), cache
.proposer_has_been_observed(block_b.to_ref(), block_b.canonical_root())
.map(|x| x.proposer_previously_observed()),
Ok(false), Ok(false),
"no observation for new slot" "no observation for new slot"
); );
assert_eq!( assert_eq!(
cache.observe_proposer(block_b.to_ref()), cache
.observe_proposal(block_root_b, block_b.to_ref())
.map(SeenBlock::proposer_previously_observed),
Ok(false), Ok(false),
"can observe proposer for new slot, indicates proposer unobserved" "can observe proposer for new slot, indicates proposer unobserved"
); );
assert_eq!( assert_eq!(
cache.proposer_has_been_observed(block_b.to_ref()), cache
.proposer_has_been_observed(block_b.to_ref(), block_b.canonical_root())
.map(|x| x.proposer_previously_observed()),
Ok(true), Ok(true),
"observed block in slot 1 is indicated as true" "observed block in slot 1 is indicated as true"
); );
assert_eq!( assert_eq!(
cache.observe_proposer(block_b.to_ref()), cache
.observe_proposal(block_root_b, block_b.to_ref())
.map(SeenBlock::proposer_previously_observed),
Ok(true), Ok(true),
"observing slot 1 again indicates true" "observing slot 1 again indicates true"
); );
@ -339,7 +451,10 @@ mod tests {
assert_eq!( assert_eq!(
cache cache
.items .items
.get(&Slot::new(0)) .get(&ProposalKey {
slot: Slot::new(0),
proposer: 0
})
.expect("slot zero should be present") .expect("slot zero should be present")
.len(), .len(),
1, 1,
@ -348,7 +463,10 @@ mod tests {
assert_eq!( assert_eq!(
cache cache
.items .items
.get(&Slot::new(1)) .get(&ProposalKey {
slot: Slot::new(1),
proposer: 0
})
.expect("slot zero should be present") .expect("slot zero should be present")
.len(), .len(),
1, 1,
@ -357,45 +475,54 @@ mod tests {
// Slot 0, proposer 1 // Slot 0, proposer 1
let block_c = get_block(0, 1); let block_c = get_block(0, 1);
let block_root_c = block_c.canonical_root();
assert_eq!( assert_eq!(
cache.proposer_has_been_observed(block_c.to_ref()), cache
.proposer_has_been_observed(block_c.to_ref(), block_c.canonical_root())
.map(|x| x.proposer_previously_observed()),
Ok(false), Ok(false),
"no observation for new proposer" "no observation for new proposer"
); );
assert_eq!( assert_eq!(
cache.observe_proposer(block_c.to_ref()), cache
.observe_proposal(block_root_c, block_c.to_ref())
.map(SeenBlock::proposer_previously_observed),
Ok(false), Ok(false),
"can observe new proposer, indicates proposer unobserved" "can observe new proposer, indicates proposer unobserved"
); );
assert_eq!( assert_eq!(
cache.proposer_has_been_observed(block_c.to_ref()), cache
.proposer_has_been_observed(block_c.to_ref(), block_c.canonical_root())
.map(|x| x.proposer_previously_observed()),
Ok(true), Ok(true),
"observed new proposer block is indicated as true" "observed new proposer block is indicated as true"
); );
assert_eq!( assert_eq!(
cache.observe_proposer(block_c.to_ref()), cache
.observe_proposal(block_root_c, block_c.to_ref())
.map(SeenBlock::proposer_previously_observed),
Ok(true), Ok(true),
"observing new proposer again indicates true" "observing new proposer again indicates true"
); );
assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
assert_eq!(cache.items.len(), 2, "two slots should be present"); assert_eq!(cache.items.len(), 3, "three slots should be present");
assert_eq!( assert_eq!(
cache cache
.items .items
.get(&Slot::new(0)) .iter()
.expect("slot zero should be present") .filter(|(k, _)| k.slot == cache.finalized_slot)
.len(), .count(),
2, 2,
"two proposers should be present in slot 0" "two proposers should be present in slot 0"
); );
assert_eq!( assert_eq!(
cache cache
.items .items
.get(&Slot::new(1)) .iter()
.expect("slot zero should be present") .filter(|(k, _)| k.slot == Slot::new(1))
.len(), .count(),
1, 1,
"only one proposer should be present in slot 1" "only one proposer should be present in slot 1"
); );

View File

@ -519,6 +519,7 @@ where
let validator_keypairs = self let validator_keypairs = self
.validator_keypairs .validator_keypairs
.expect("cannot build without validator keypairs"); .expect("cannot build without validator keypairs");
let chain_config = self.chain_config.unwrap_or_default();
let trusted_setup: TrustedSetup = let trusted_setup: TrustedSetup =
serde_json::from_reader(eth2_network_config::get_trusted_setup::<E::Kzg>()) serde_json::from_reader(eth2_network_config::get_trusted_setup::<E::Kzg>())
.map_err(|e| format!("Unable to read trusted setup file: {}", e)) .map_err(|e| format!("Unable to read trusted setup file: {}", e))
@ -528,13 +529,17 @@ where
.logger(log.clone()) .logger(log.clone())
.custom_spec(spec) .custom_spec(spec)
.store(self.store.expect("cannot build without store")) .store(self.store.expect("cannot build without store"))
.store_migrator_config(MigratorConfig::default().blocking()) .store_migrator_config(
MigratorConfig::default()
.blocking()
.epochs_per_migration(chain_config.epochs_per_migration),
)
.task_executor(self.runtime.task_executor.clone()) .task_executor(self.runtime.task_executor.clone())
.execution_layer(self.execution_layer) .execution_layer(self.execution_layer)
.dummy_eth1_backend() .dummy_eth1_backend()
.expect("should build dummy backend") .expect("should build dummy backend")
.shutdown_sender(shutdown_tx) .shutdown_sender(shutdown_tx)
.chain_config(self.chain_config.unwrap_or_default()) .chain_config(chain_config)
.event_handler(Some(ServerSentEventHandler::new_with_capacity( .event_handler(Some(ServerSentEventHandler::new_with_capacity(
log.clone(), log.clone(),
5, 5,
@ -808,6 +813,15 @@ where
state.get_block_root(slot).unwrap() == state.get_block_root(slot - 1).unwrap() state.get_block_root(slot).unwrap() == state.get_block_root(slot - 1).unwrap()
} }
pub async fn make_blinded_block(
&self,
state: BeaconState<E>,
slot: Slot,
) -> (BlockContentsTuple<E, BlindedPayload<E>>, BeaconState<E>) {
let (unblinded, new_state) = self.make_block(state, slot).await;
((unblinded.0.into(), unblinded.1), new_state)
}
/// Returns a newly created block, signed by the proposer for the given slot. /// Returns a newly created block, signed by the proposer for the given slot.
pub async fn make_block( pub async fn make_block(
&self, &self,
@ -820,9 +834,7 @@ where
complete_state_advance(&mut state, None, slot, &self.spec) complete_state_advance(&mut state, None, slot, &self.spec)
.expect("should be able to advance state to slot"); .expect("should be able to advance state to slot");
state state.build_caches(&self.spec).expect("should build caches");
.build_all_caches(&self.spec)
.expect("should build caches");
let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap();
@ -899,16 +911,14 @@ where
&self, &self,
mut state: BeaconState<E>, mut state: BeaconState<E>,
slot: Slot, slot: Slot,
) -> (SignedBeaconBlock<E>, BeaconState<E>) { ) -> (BlockContentsTuple<E, FullPayload<E>>, BeaconState<E>) {
assert_ne!(slot, 0, "can't produce a block at slot 0"); assert_ne!(slot, 0, "can't produce a block at slot 0");
assert!(slot >= state.slot()); assert!(slot >= state.slot());
complete_state_advance(&mut state, None, slot, &self.spec) complete_state_advance(&mut state, None, slot, &self.spec)
.expect("should be able to advance state to slot"); .expect("should be able to advance state to slot");
state state.build_caches(&self.spec).expect("should build caches");
.build_all_caches(&self.spec)
.expect("should build caches");
let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap();
@ -941,7 +951,44 @@ where
&self.spec, &self.spec,
); );
(signed_block, pre_state) let block_contents: BlockContentsTuple<E, FullPayload<E>> = match &signed_block {
SignedBeaconBlock::Base(_)
| SignedBeaconBlock::Altair(_)
| SignedBeaconBlock::Merge(_)
| SignedBeaconBlock::Capella(_) => (signed_block, None),
SignedBeaconBlock::Deneb(_) => {
if let Some(blobs) = self
.chain
.proposal_blob_cache
.pop(&signed_block.canonical_root())
{
let signed_blobs: SignedBlobSidecarList<E> = Vec::from(blobs)
.into_iter()
.map(|blob| {
blob.sign(
&self.validator_keypairs[proposer_index].sk,
&state.fork(),
state.genesis_validators_root(),
&self.spec,
)
})
.collect::<Vec<_>>()
.into();
let mut guard = self.blob_signature_cache.write();
for blob in &signed_blobs {
guard.insert(
BlobSignatureKey::new(blob.message.block_root, blob.message.index),
blob.signature.clone(),
);
}
(signed_block, Some(signed_blobs))
} else {
(signed_block, None)
}
}
};
(block_contents, pre_state)
} }
/// Create a randao reveal for a block at `slot`. /// Create a randao reveal for a block at `slot`.
@ -1626,6 +1673,36 @@ where
.sign(sk, &fork, genesis_validators_root, &self.chain.spec) .sign(sk, &fork, genesis_validators_root, &self.chain.spec)
} }
pub fn add_proposer_slashing(&self, validator_index: u64) -> Result<(), String> {
let propposer_slashing = self.make_proposer_slashing(validator_index);
if let ObservationOutcome::New(verified_proposer_slashing) = self
.chain
.verify_proposer_slashing_for_gossip(propposer_slashing)
.expect("should verify proposer slashing for gossip")
{
self.chain
.import_proposer_slashing(verified_proposer_slashing);
Ok(())
} else {
Err("should observe new proposer slashing".to_string())
}
}
pub fn add_attester_slashing(&self, validator_indices: Vec<u64>) -> Result<(), String> {
let attester_slashing = self.make_attester_slashing(validator_indices);
if let ObservationOutcome::New(verified_attester_slashing) = self
.chain
.verify_attester_slashing_for_gossip(attester_slashing)
.expect("should verify attester slashing for gossip")
{
self.chain
.import_attester_slashing(verified_attester_slashing);
Ok(())
} else {
Err("should observe new attester slashing".to_string())
}
}
pub fn add_bls_to_execution_change( pub fn add_bls_to_execution_change(
&self, &self,
validator_index: u64, validator_index: u64,
@ -1702,11 +1779,12 @@ where
state: BeaconState<E>, state: BeaconState<E>,
slot: Slot, slot: Slot,
block_modifier: impl FnOnce(&mut BeaconBlock<E>), block_modifier: impl FnOnce(&mut BeaconBlock<E>),
) -> (SignedBeaconBlock<E>, BeaconState<E>) { ) -> (BlockContentsTuple<E, FullPayload<E>>, BeaconState<E>) {
assert_ne!(slot, 0, "can't produce a block at slot 0"); assert_ne!(slot, 0, "can't produce a block at slot 0");
assert!(slot >= state.slot()); assert!(slot >= state.slot());
let (block, state) = self.make_block_return_pre_state(state, slot).await; let ((block, blobs), state) = self.make_block_return_pre_state(state, slot).await;
let (mut block, _) = block.deconstruct(); let (mut block, _) = block.deconstruct();
block_modifier(&mut block); block_modifier(&mut block);
@ -1719,7 +1797,7 @@ where
state.genesis_validators_root(), state.genesis_validators_root(),
&self.spec, &self.spec,
); );
(signed_block, state) ((signed_block, blobs), state)
} }
pub fn make_deposits<'a>( pub fn make_deposits<'a>(
@ -1804,7 +1882,9 @@ where
self.set_current_slot(slot); self.set_current_slot(slot);
let block_hash: SignedBeaconBlockHash = self let block_hash: SignedBeaconBlockHash = self
.chain .chain
.process_block(block_root, block.into(), NotifyExecutionLayer::Yes) .process_block(block_root, block.into(), NotifyExecutionLayer::Yes, || {
Ok(())
})
.await? .await?
.try_into() .try_into()
.unwrap(); .unwrap();
@ -1823,6 +1903,7 @@ where
wrapped_block.canonical_root(), wrapped_block.canonical_root(),
wrapped_block, wrapped_block,
NotifyExecutionLayer::Yes, NotifyExecutionLayer::Yes,
|| Ok(()),
) )
.await? .await?
.try_into() .try_into()

View File

@ -459,6 +459,7 @@ async fn assert_invalid_signature(
chain_segment_blobs[block_index].clone(), chain_segment_blobs[block_index].clone(),
), ),
NotifyExecutionLayer::Yes, NotifyExecutionLayer::Yes,
|| Ok(()),
) )
.await; .await;
assert!( assert!(
@ -526,6 +527,7 @@ async fn invalid_signature_gossip_block() {
signed_block.canonical_root(), signed_block.canonical_root(),
Arc::new(signed_block), Arc::new(signed_block),
NotifyExecutionLayer::Yes, NotifyExecutionLayer::Yes,
|| Ok(()),
) )
.await, .await,
Err(BlockError::InvalidSignature) Err(BlockError::InvalidSignature)
@ -849,7 +851,7 @@ async fn block_gossip_verification() {
{ {
let gossip_verified = harness let gossip_verified = harness
.chain .chain
.verify_block_for_gossip(snapshot.beacon_block.clone().into()) .verify_block_for_gossip(snapshot.beacon_block.clone())
.await .await
.expect("should obtain gossip verified block"); .expect("should obtain gossip verified block");
@ -859,6 +861,7 @@ async fn block_gossip_verification() {
gossip_verified.block_root, gossip_verified.block_root,
gossip_verified, gossip_verified,
NotifyExecutionLayer::Yes, NotifyExecutionLayer::Yes,
|| Ok(()),
) )
.await .await
.expect("should import valid gossip verified block"); .expect("should import valid gossip verified block");
@ -1069,22 +1072,14 @@ async fn block_gossip_verification() {
assert!( assert!(
matches!( matches!(
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone()).into()).await), unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone()).into()).await),
BlockError::RepeatProposal { BlockError::BlockIsAlreadyKnown,
proposer,
slot,
}
if proposer == other_proposer && slot == block.message().slot()
), ),
"should register any valid signature against the proposer, even if the block failed later verification" "should register any valid signature against the proposer, even if the block failed later verification"
); );
let block = chain_segment[block_index].beacon_block.clone(); let block = chain_segment[block_index].beacon_block.clone();
assert!( assert!(
harness harness.chain.verify_block_for_gossip(block).await.is_ok(),
.chain
.verify_block_for_gossip(block.into())
.await
.is_ok(),
"the valid block should be processed" "the valid block should be processed"
); );
@ -1106,11 +1101,7 @@ async fn block_gossip_verification() {
.await .await
.err() .err()
.expect("should error when processing known block"), .expect("should error when processing known block"),
BlockError::RepeatProposal { BlockError::BlockIsAlreadyKnown
proposer,
slot,
}
if proposer == block.message().proposer_index() && slot == block.message().slot()
), ),
"the second proposal by this validator should be rejected" "the second proposal by this validator should be rejected"
); );
@ -1139,7 +1130,7 @@ async fn verify_block_for_gossip_slashing_detection() {
let verified_block = harness let verified_block = harness
.chain .chain
.verify_block_for_gossip(Arc::new(block1).into()) .verify_block_for_gossip(Arc::new(block1))
.await .await
.unwrap(); .unwrap();
@ -1159,13 +1150,14 @@ async fn verify_block_for_gossip_slashing_detection() {
verified_block.block_root, verified_block.block_root,
verified_block, verified_block,
NotifyExecutionLayer::Yes, NotifyExecutionLayer::Yes,
|| Ok(()),
) )
.await .await
.unwrap(); .unwrap();
unwrap_err( unwrap_err(
harness harness
.chain .chain
.verify_block_for_gossip(Arc::new(block2).into()) .verify_block_for_gossip(Arc::new(block2))
.await, .await,
); );
@ -1188,7 +1180,7 @@ async fn verify_block_for_gossip_doppelganger_detection() {
let verified_block = harness let verified_block = harness
.chain .chain
.verify_block_for_gossip(Arc::new(block).into()) .verify_block_for_gossip(Arc::new(block))
.await .await
.unwrap(); .unwrap();
let attestations = verified_block.block.message().body().attestations().clone(); let attestations = verified_block.block.message().body().attestations().clone();
@ -1198,6 +1190,7 @@ async fn verify_block_for_gossip_doppelganger_detection() {
verified_block.block_root, verified_block.block_root,
verified_block, verified_block,
NotifyExecutionLayer::Yes, NotifyExecutionLayer::Yes,
|| Ok(()),
) )
.await .await
.unwrap(); .unwrap();
@ -1345,6 +1338,7 @@ async fn add_base_block_to_altair_chain() {
base_block.canonical_root(), base_block.canonical_root(),
Arc::new(base_block.clone()), Arc::new(base_block.clone()),
NotifyExecutionLayer::Yes, NotifyExecutionLayer::Yes,
|| Ok(()),
) )
.await .await
.err() .err()
@ -1479,6 +1473,7 @@ async fn add_altair_block_to_base_chain() {
altair_block.canonical_root(), altair_block.canonical_root(),
Arc::new(altair_block.clone()), Arc::new(altair_block.clone()),
NotifyExecutionLayer::Yes, NotifyExecutionLayer::Yes,
|| Ok(()),
) )
.await .await
.err() .err()

View File

@ -133,13 +133,8 @@ async fn base_altair_merge_capella() {
for _ in (merge_fork_slot.as_u64() + 3)..capella_fork_slot.as_u64() { for _ in (merge_fork_slot.as_u64() + 3)..capella_fork_slot.as_u64() {
harness.extend_slots(1).await; harness.extend_slots(1).await;
let block = &harness.chain.head_snapshot().beacon_block; let block = &harness.chain.head_snapshot().beacon_block;
let full_payload: FullPayload<E> = block let full_payload: FullPayload<E> =
.message() block.message().body().execution_payload().unwrap().into();
.body()
.execution_payload()
.unwrap()
.clone()
.into();
// pre-capella shouldn't have withdrawals // pre-capella shouldn't have withdrawals
assert!(full_payload.withdrawals_root().is_err()); assert!(full_payload.withdrawals_root().is_err());
execution_payloads.push(full_payload); execution_payloads.push(full_payload);
@ -151,13 +146,8 @@ async fn base_altair_merge_capella() {
for _ in 0..16 { for _ in 0..16 {
harness.extend_slots(1).await; harness.extend_slots(1).await;
let block = &harness.chain.head_snapshot().beacon_block; let block = &harness.chain.head_snapshot().beacon_block;
let full_payload: FullPayload<E> = block let full_payload: FullPayload<E> =
.message() block.message().body().execution_payload().unwrap().into();
.body()
.execution_payload()
.unwrap()
.clone()
.into();
// post-capella should have withdrawals // post-capella should have withdrawals
assert!(full_payload.withdrawals_root().is_ok()); assert!(full_payload.withdrawals_root().is_ok());
execution_payloads.push(full_payload); execution_payloads.push(full_payload);

View File

@ -698,6 +698,7 @@ async fn invalidates_all_descendants() {
fork_block.canonical_root(), fork_block.canonical_root(),
Arc::new(fork_block), Arc::new(fork_block),
NotifyExecutionLayer::Yes, NotifyExecutionLayer::Yes,
|| Ok(()),
) )
.await .await
.unwrap() .unwrap()
@ -797,6 +798,7 @@ async fn switches_heads() {
fork_block.canonical_root(), fork_block.canonical_root(),
Arc::new(fork_block), Arc::new(fork_block),
NotifyExecutionLayer::Yes, NotifyExecutionLayer::Yes,
|| Ok(()),
) )
.await .await
.unwrap() .unwrap()
@ -1055,7 +1057,9 @@ async fn invalid_parent() {
// Ensure the block built atop an invalid payload is invalid for import. // Ensure the block built atop an invalid payload is invalid for import.
assert!(matches!( assert!(matches!(
rig.harness.chain.process_block(block.canonical_root(), block.clone(), NotifyExecutionLayer::Yes).await, rig.harness.chain.process_block(block.canonical_root(), block.clone(), NotifyExecutionLayer::Yes,
|| Ok(()),
).await,
Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root })
if invalid_root == parent_root if invalid_root == parent_root
)); ));
@ -1069,8 +1073,9 @@ async fn invalid_parent() {
Duration::from_secs(0), Duration::from_secs(0),
&state, &state,
PayloadVerificationStatus::Optimistic, PayloadVerificationStatus::Optimistic,
rig.harness.chain.config.progressive_balances_mode,
&rig.harness.chain.spec, &rig.harness.chain.spec,
rig.harness.logger()
), ),
Err(ForkChoiceError::ProtoArrayStringError(message)) Err(ForkChoiceError::ProtoArrayStringError(message))
if message.contains(&format!( if message.contains(&format!(
@ -1341,7 +1346,12 @@ async fn build_optimistic_chain(
for block in blocks { for block in blocks {
rig.harness rig.harness
.chain .chain
.process_block(block.canonical_root(), block, NotifyExecutionLayer::Yes) .process_block(
block.canonical_root(),
block,
NotifyExecutionLayer::Yes,
|| Ok(()),
)
.await .await
.unwrap(); .unwrap();
} }
@ -1901,6 +1911,7 @@ async fn recover_from_invalid_head_by_importing_blocks() {
fork_block.canonical_root(), fork_block.canonical_root(),
fork_block.clone(), fork_block.clone(),
NotifyExecutionLayer::Yes, NotifyExecutionLayer::Yes,
|| Ok(()),
) )
.await .await
.unwrap(); .unwrap();

View File

@ -2178,6 +2178,7 @@ async fn weak_subjectivity_sync() {
full_block.canonical_root(), full_block.canonical_root(),
BlockWrapper::new(Arc::new(full_block), blobs), BlockWrapper::new(Arc::new(full_block), blobs),
NotifyExecutionLayer::Yes, NotifyExecutionLayer::Yes,
|| Ok(()),
) )
.await .await
.unwrap(); .unwrap();

View File

@ -686,6 +686,7 @@ async fn run_skip_slot_test(skip_slots: u64) {
harness_a.chain.head_snapshot().beacon_block_root, harness_a.chain.head_snapshot().beacon_block_root,
harness_a.get_head_block(), harness_a.get_head_block(),
NotifyExecutionLayer::Yes, NotifyExecutionLayer::Yes,
|| Ok(()),
) )
.await .await
.unwrap(); .unwrap();

View File

@ -0,0 +1,24 @@
[package]
name = "beacon_processor"
version = "0.1.0"
edition = "2021"
[dependencies]
slog = { version = "2.5.2", features = ["max_level_trace"] }
itertools = "0.10.0"
logging = { path = "../../common/logging" }
tokio = { version = "1.14.0", features = ["full"] }
tokio-util = { version = "0.6.3", features = ["time"] }
futures = "0.3.7"
fnv = "1.0.7"
strum = "0.24.0"
task_executor = { path = "../../common/task_executor" }
slot_clock = { path = "../../common/slot_clock" }
lighthouse_network = { path = "../lighthouse_network" }
hex = "0.4.2"
derivative = "2.2.0"
types = { path = "../../consensus/types" }
ethereum_ssz = "0.5.0"
lazy_static = "1.4.0"
lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
parking_lot = "0.12.0"

View File

@ -0,0 +1,151 @@
pub use lighthouse_metrics::*;
lazy_static::lazy_static! {
/*
* Gossip processor
*/
pub static ref BEACON_PROCESSOR_WORK_EVENTS_RX_COUNT: Result<IntCounterVec> = try_create_int_counter_vec(
"beacon_processor_work_events_rx_count",
"Count of work events received (but not necessarily processed)",
&["type"]
);
pub static ref BEACON_PROCESSOR_WORK_EVENTS_IGNORED_COUNT: Result<IntCounterVec> = try_create_int_counter_vec(
"beacon_processor_work_events_ignored_count",
"Count of work events purposefully ignored",
&["type"]
);
pub static ref BEACON_PROCESSOR_WORK_EVENTS_STARTED_COUNT: Result<IntCounterVec> = try_create_int_counter_vec(
"beacon_processor_work_events_started_count",
"Count of work events which have been started by a worker",
&["type"]
);
pub static ref BEACON_PROCESSOR_WORKER_TIME: Result<HistogramVec> = try_create_histogram_vec(
"beacon_processor_worker_time",
"Time taken for a worker to fully process some parcel of work.",
&["type"]
);
pub static ref BEACON_PROCESSOR_WORKERS_SPAWNED_TOTAL: Result<IntCounter> = try_create_int_counter(
"beacon_processor_workers_spawned_total",
"The number of workers ever spawned by the gossip processing pool."
);
pub static ref BEACON_PROCESSOR_WORKERS_ACTIVE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_workers_active_total",
"Count of active workers in the gossip processing pool."
);
pub static ref BEACON_PROCESSOR_IDLE_EVENTS_TOTAL: Result<IntCounter> = try_create_int_counter(
"beacon_processor_idle_events_total",
"Count of idle events processed by the gossip processor manager."
);
pub static ref BEACON_PROCESSOR_EVENT_HANDLING_SECONDS: Result<Histogram> = try_create_histogram(
"beacon_processor_event_handling_seconds",
"Time spent handling a new message and allocating it to a queue or worker."
);
// Gossip blocks.
pub static ref BEACON_PROCESSOR_GOSSIP_BLOCK_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_gossip_block_queue_total",
"Count of blocks from gossip waiting to be verified."
);
// Gossip blobs.
pub static ref BEACON_PROCESSOR_GOSSIP_BLOB_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_gossip_blob_queue_total",
"Count of blocks from gossip waiting to be verified."
);
// Gossip Exits.
pub static ref BEACON_PROCESSOR_EXIT_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_exit_queue_total",
"Count of exits from gossip waiting to be verified."
);
// Gossip proposer slashings.
pub static ref BEACON_PROCESSOR_PROPOSER_SLASHING_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_proposer_slashing_queue_total",
"Count of proposer slashings from gossip waiting to be verified."
);
// Gossip attester slashings.
pub static ref BEACON_PROCESSOR_ATTESTER_SLASHING_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_attester_slashing_queue_total",
"Count of attester slashings from gossip waiting to be verified."
);
// Gossip BLS to execution changes.
pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_bls_to_execution_change_queue_total",
"Count of address changes from gossip waiting to be verified."
);
// Rpc blocks.
pub static ref BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_rpc_block_queue_total",
"Count of blocks from the rpc waiting to be verified."
);
// Rpc blobs.
pub static ref BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_rpc_blob_queue_total",
"Count of blobs from the rpc waiting to be verified."
);
// Chain segments.
pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_chain_segment_queue_total",
"Count of chain segments from the rpc waiting to be verified."
);
pub static ref BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_backfill_chain_segment_queue_total",
"Count of backfill chain segments from the rpc waiting to be verified."
);
// Unaggregated attestations.
pub static ref BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_unaggregated_attestation_queue_total",
"Count of unagg. attestations waiting to be processed."
);
// Aggregated attestations.
pub static ref BEACON_PROCESSOR_AGGREGATED_ATTESTATION_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_aggregated_attestation_queue_total",
"Count of agg. attestations waiting to be processed."
);
// Sync committee messages.
pub static ref BEACON_PROCESSOR_SYNC_MESSAGE_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_sync_message_queue_total",
"Count of sync committee messages waiting to be processed."
);
// Sync contribution.
pub static ref BEACON_PROCESSOR_SYNC_CONTRIBUTION_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_sync_contribution_queue_total",
"Count of sync committee contributions waiting to be processed."
);
/*
* Attestation reprocessing queue metrics.
*/
pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL: Result<IntGaugeVec> =
try_create_int_gauge_vec(
"beacon_processor_reprocessing_queue_total",
"Count of items in a reprocessing queue.",
&["type"]
);
pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_ATTESTATIONS: Result<IntCounter> = try_create_int_counter(
"beacon_processor_reprocessing_queue_expired_attestations",
"Number of queued attestations which have expired before a matching block has been found."
);
pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_ATTESTATIONS: Result<IntCounter> = try_create_int_counter(
"beacon_processor_reprocessing_queue_matched_attestations",
"Number of queued attestations where as matching block has been imported."
);
/*
* Light client update reprocessing queue metrics.
*/
pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_OPTIMISTIC_UPDATES: Result<IntCounter> = try_create_int_counter(
"beacon_processor_reprocessing_queue_expired_optimistic_updates",
"Number of queued light client optimistic updates which have expired before a matching block has been found."
);
pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_OPTIMISTIC_UPDATES: Result<IntCounter> = try_create_int_counter(
"beacon_processor_reprocessing_queue_matched_optimistic_updates",
"Number of queued light client optimistic updates where as matching block has been imported."
);
/// Errors and Debugging Stats
pub static ref BEACON_PROCESSOR_SEND_ERROR_PER_WORK_TYPE: Result<IntCounterVec> =
try_create_int_counter_vec(
"beacon_processor_send_error_per_work_type",
"Total number of beacon processor send error per work type",
&["type"]
);
}

View File

@ -10,17 +10,12 @@
//! //!
//! Aggregated and unaggregated attestations that failed verification due to referencing an unknown //! Aggregated and unaggregated attestations that failed verification due to referencing an unknown
//! block will be re-queued until their block is imported, or until they expire. //! block will be re-queued until their block is imported, or until they expire.
use super::MAX_SCHEDULED_WORK_QUEUE_LEN;
use crate::beacon_processor::{ChainSegmentProcessId, Work, WorkEvent};
use crate::metrics; use crate::metrics;
use crate::sync::manager::BlockProcessType; use crate::{AsyncFn, BlockingFn, Work, WorkEvent};
use beacon_chain::blob_verification::{AsBlock, BlockWrapper};
use beacon_chain::{BeaconChainTypes, GossipVerifiedBlock, MAXIMUM_GOSSIP_CLOCK_DISPARITY};
use fnv::FnvHashMap; use fnv::FnvHashMap;
use futures::task::Poll; use futures::task::Poll;
use futures::{Stream, StreamExt}; use futures::{Stream, StreamExt};
use itertools::Itertools; use itertools::Itertools;
use lighthouse_network::{MessageId, PeerId};
use logging::TimeLatch; use logging::TimeLatch;
use slog::{crit, debug, error, trace, warn, Logger}; use slog::{crit, debug, error, trace, warn, Logger};
use slot_clock::SlotClock; use slot_clock::SlotClock;
@ -34,9 +29,7 @@ use task_executor::TaskExecutor;
use tokio::sync::mpsc::{self, Receiver, Sender}; use tokio::sync::mpsc::{self, Receiver, Sender};
use tokio::time::error::Error as TimeError; use tokio::time::error::Error as TimeError;
use tokio_util::time::delay_queue::{DelayQueue, Key as DelayKey}; use tokio_util::time::delay_queue::{DelayQueue, Key as DelayKey};
use types::{ use types::{EthSpec, Hash256, Slot};
Attestation, EthSpec, Hash256, LightClientOptimisticUpdate, SignedAggregateAndProof, SubnetId,
};
const TASK_NAME: &str = "beacon_processor_reprocess_queue"; const TASK_NAME: &str = "beacon_processor_reprocess_queue";
const GOSSIP_BLOCKS: &str = "gossip_blocks"; const GOSSIP_BLOCKS: &str = "gossip_blocks";
@ -46,7 +39,7 @@ const LIGHT_CLIENT_UPDATES: &str = "lc_updates";
/// Queue blocks for re-processing with an `ADDITIONAL_QUEUED_BLOCK_DELAY` after the slot starts. /// Queue blocks for re-processing with an `ADDITIONAL_QUEUED_BLOCK_DELAY` after the slot starts.
/// This is to account for any slight drift in the system clock. /// This is to account for any slight drift in the system clock.
const ADDITIONAL_QUEUED_BLOCK_DELAY: Duration = Duration::from_millis(5); pub const ADDITIONAL_QUEUED_BLOCK_DELAY: Duration = Duration::from_millis(5);
/// For how long to queue aggregated and unaggregated attestations for re-processing. /// For how long to queue aggregated and unaggregated attestations for re-processing.
pub const QUEUED_ATTESTATION_DELAY: Duration = Duration::from_secs(12); pub const QUEUED_ATTESTATION_DELAY: Duration = Duration::from_secs(12);
@ -83,12 +76,12 @@ pub const BACKFILL_SCHEDULE_IN_SLOT: [(u32, u32); 3] = [
/// Messages that the scheduler can receive. /// Messages that the scheduler can receive.
#[derive(AsRefStr)] #[derive(AsRefStr)]
pub enum ReprocessQueueMessage<T: BeaconChainTypes> { pub enum ReprocessQueueMessage {
/// A block that has been received early and we should queue for later processing. /// A block that has been received early and we should queue for later processing.
EarlyBlock(QueuedGossipBlock<T>), EarlyBlock(QueuedGossipBlock),
/// A gossip block for hash `X` is being imported, we should queue the rpc block for the same /// A gossip block for hash `X` is being imported, we should queue the rpc block for the same
/// hash until the gossip block is imported. /// hash until the gossip block is imported.
RpcBlock(QueuedRpcBlock<T::EthSpec>), RpcBlock(QueuedRpcBlock),
/// A block that was successfully processed. We use this to handle attestations and light client updates /// A block that was successfully processed. We use this to handle attestations and light client updates
/// for unknown blocks. /// for unknown blocks.
BlockImported { BlockImported {
@ -96,139 +89,127 @@ pub enum ReprocessQueueMessage<T: BeaconChainTypes> {
parent_root: Hash256, parent_root: Hash256,
}, },
/// An unaggregated attestation that references an unknown block. /// An unaggregated attestation that references an unknown block.
UnknownBlockUnaggregate(QueuedUnaggregate<T::EthSpec>), UnknownBlockUnaggregate(QueuedUnaggregate),
/// An aggregated attestation that references an unknown block. /// An aggregated attestation that references an unknown block.
UnknownBlockAggregate(QueuedAggregate<T::EthSpec>), UnknownBlockAggregate(QueuedAggregate),
/// A light client optimistic update that references a parent root that has not been seen as a parent. /// A light client optimistic update that references a parent root that has not been seen as a parent.
UnknownLightClientOptimisticUpdate(QueuedLightClientUpdate<T::EthSpec>), UnknownLightClientOptimisticUpdate(QueuedLightClientUpdate),
/// A new backfill batch that needs to be scheduled for processing. /// A new backfill batch that needs to be scheduled for processing.
BackfillSync(QueuedBackfillBatch<T::EthSpec>), BackfillSync(QueuedBackfillBatch),
} }
/// Events sent by the scheduler once they are ready for re-processing. /// Events sent by the scheduler once they are ready for re-processing.
pub enum ReadyWork<T: BeaconChainTypes> { pub enum ReadyWork {
GossipBlock(QueuedGossipBlock<T>), Block(QueuedGossipBlock),
RpcBlock(QueuedRpcBlock<T::EthSpec>), RpcBlock(QueuedRpcBlock),
Unaggregate(QueuedUnaggregate<T::EthSpec>), IgnoredRpcBlock(IgnoredRpcBlock),
Aggregate(QueuedAggregate<T::EthSpec>), Unaggregate(QueuedUnaggregate),
LightClientUpdate(QueuedLightClientUpdate<T::EthSpec>), Aggregate(QueuedAggregate),
BackfillSync(QueuedBackfillBatch<T::EthSpec>), LightClientUpdate(QueuedLightClientUpdate),
BackfillSync(QueuedBackfillBatch),
} }
/// An Attestation for which the corresponding block was not seen while processing, queued for /// An Attestation for which the corresponding block was not seen while processing, queued for
/// later. /// later.
pub struct QueuedUnaggregate<T: EthSpec> { pub struct QueuedUnaggregate {
pub peer_id: PeerId, pub beacon_block_root: Hash256,
pub message_id: MessageId, pub process_fn: BlockingFn,
pub attestation: Box<Attestation<T>>,
pub subnet_id: SubnetId,
pub should_import: bool,
pub seen_timestamp: Duration,
} }
/// An aggregated attestation for which the corresponding block was not seen while processing, queued for /// An aggregated attestation for which the corresponding block was not seen while processing, queued for
/// later. /// later.
pub struct QueuedAggregate<T: EthSpec> { pub struct QueuedAggregate {
pub peer_id: PeerId, pub beacon_block_root: Hash256,
pub message_id: MessageId, pub process_fn: BlockingFn,
pub attestation: Box<SignedAggregateAndProof<T>>,
pub seen_timestamp: Duration,
} }
/// A light client update for which the corresponding parent block was not seen while processing, /// A light client update for which the corresponding parent block was not seen while processing,
/// queued for later. /// queued for later.
pub struct QueuedLightClientUpdate<T: EthSpec> { pub struct QueuedLightClientUpdate {
pub peer_id: PeerId,
pub message_id: MessageId,
pub light_client_optimistic_update: Box<LightClientOptimisticUpdate<T>>,
pub parent_root: Hash256, pub parent_root: Hash256,
pub seen_timestamp: Duration, pub process_fn: BlockingFn,
} }
/// A block that arrived early and has been queued for later import. /// A block that arrived early and has been queued for later import.
pub struct QueuedGossipBlock<T: BeaconChainTypes> { pub struct QueuedGossipBlock {
pub peer_id: PeerId, pub beacon_block_slot: Slot,
pub block: Box<GossipVerifiedBlock<T>>, pub beacon_block_root: Hash256,
pub seen_timestamp: Duration, pub process_fn: AsyncFn,
} }
/// A block that arrived for processing when the same block was being imported over gossip. /// A block that arrived for processing when the same block was being imported over gossip.
/// It is queued for later import. /// It is queued for later import.
pub struct QueuedRpcBlock<T: EthSpec> { pub struct QueuedRpcBlock {
pub block_root: Hash256, pub beacon_block_root: Hash256,
pub block: BlockWrapper<T>, /// Processes/imports the block.
pub process_type: BlockProcessType, pub process_fn: AsyncFn,
pub seen_timestamp: Duration, /// Ignores the block.
/// Indicates if the beacon chain should process this block or not. pub ignore_fn: BlockingFn,
/// We use this to ignore block processing when rpc block queues are full. }
pub should_process: bool,
/// A block that arrived for processing when the same block was being imported over gossip.
/// It is queued for later import.
pub struct IgnoredRpcBlock {
pub process_fn: BlockingFn,
} }
/// A backfill batch work that has been queued for processing later. /// A backfill batch work that has been queued for processing later.
#[derive(Clone)] pub struct QueuedBackfillBatch(pub AsyncFn);
pub struct QueuedBackfillBatch<E: EthSpec> {
pub process_id: ChainSegmentProcessId,
pub blocks: Vec<BlockWrapper<E>>,
}
impl<T: BeaconChainTypes> TryFrom<WorkEvent<T>> for QueuedBackfillBatch<T::EthSpec> { impl<T: EthSpec> TryFrom<WorkEvent<T>> for QueuedBackfillBatch {
type Error = WorkEvent<T>; type Error = WorkEvent<T>;
fn try_from(event: WorkEvent<T>) -> Result<Self, WorkEvent<T>> { fn try_from(event: WorkEvent<T>) -> Result<Self, WorkEvent<T>> {
match event { match event {
WorkEvent { WorkEvent {
work: work: Work::ChainSegmentBackfill(process_fn),
Work::ChainSegment {
process_id: process_id @ ChainSegmentProcessId::BackSyncBatchId(_),
blocks,
},
.. ..
} => Ok(QueuedBackfillBatch { process_id, blocks }), } => Ok(QueuedBackfillBatch(process_fn)),
_ => Err(event), _ => Err(event),
} }
} }
} }
impl<T: BeaconChainTypes> From<QueuedBackfillBatch<T::EthSpec>> for WorkEvent<T> { impl<T: EthSpec> From<QueuedBackfillBatch> for WorkEvent<T> {
fn from(queued_backfill_batch: QueuedBackfillBatch<T::EthSpec>) -> WorkEvent<T> { fn from(queued_backfill_batch: QueuedBackfillBatch) -> WorkEvent<T> {
WorkEvent::chain_segment( WorkEvent {
queued_backfill_batch.process_id, drop_during_sync: false,
queued_backfill_batch.blocks, work: Work::ChainSegmentBackfill(queued_backfill_batch.0),
) }
} }
} }
/// Unifies the different messages processed by the block delay queue. /// Unifies the different messages processed by the block delay queue.
enum InboundEvent<T: BeaconChainTypes> { enum InboundEvent {
/// A gossip block that was queued for later processing and is ready for import. /// A gossip block that was queued for later processing and is ready for import.
ReadyGossipBlock(QueuedGossipBlock<T>), ReadyGossipBlock(QueuedGossipBlock),
/// A rpc block that was queued because the same gossip block was being imported /// A rpc block that was queued because the same gossip block was being imported
/// will now be retried for import. /// will now be retried for import.
ReadyRpcBlock(QueuedRpcBlock<T::EthSpec>), ReadyRpcBlock(QueuedRpcBlock),
/// An aggregated or unaggregated attestation is ready for re-processing. /// An aggregated or unaggregated attestation is ready for re-processing.
ReadyAttestation(QueuedAttestationId), ReadyAttestation(QueuedAttestationId),
/// A light client update that is ready for re-processing. /// A light client update that is ready for re-processing.
ReadyLightClientUpdate(QueuedLightClientUpdateId), ReadyLightClientUpdate(QueuedLightClientUpdateId),
/// A backfill batch that was queued is ready for processing. /// A backfill batch that was queued is ready for processing.
ReadyBackfillSync(QueuedBackfillBatch<T::EthSpec>), ReadyBackfillSync(QueuedBackfillBatch),
/// A `DelayQueue` returned an error. /// A `DelayQueue` returned an error.
DelayQueueError(TimeError, &'static str), DelayQueueError(TimeError, &'static str),
/// A message sent to the `ReprocessQueue` /// A message sent to the `ReprocessQueue`
Msg(ReprocessQueueMessage<T>), Msg(ReprocessQueueMessage),
} }
/// Manages scheduling works that need to be later re-processed. /// Manages scheduling works that need to be later re-processed.
struct ReprocessQueue<T: BeaconChainTypes> { struct ReprocessQueue<S> {
/// Receiver of messages relevant to schedule works for reprocessing. /// Receiver of messages relevant to schedule works for reprocessing.
work_reprocessing_rx: Receiver<ReprocessQueueMessage<T>>, work_reprocessing_rx: Receiver<ReprocessQueueMessage>,
/// Sender of works once they become ready /// Sender of works once they become ready
ready_work_tx: Sender<ReadyWork<T>>, ready_work_tx: Sender<ReadyWork>,
/* Queues */ /* Queues */
/// Queue to manage scheduled early blocks. /// Queue to manage scheduled early blocks.
gossip_block_delay_queue: DelayQueue<QueuedGossipBlock<T>>, gossip_block_delay_queue: DelayQueue<QueuedGossipBlock>,
/// Queue to manage scheduled early blocks. /// Queue to manage scheduled early blocks.
rpc_block_delay_queue: DelayQueue<QueuedRpcBlock<T::EthSpec>>, rpc_block_delay_queue: DelayQueue<QueuedRpcBlock>,
/// Queue to manage scheduled attestations. /// Queue to manage scheduled attestations.
attestations_delay_queue: DelayQueue<QueuedAttestationId>, attestations_delay_queue: DelayQueue<QueuedAttestationId>,
/// Queue to manage scheduled light client updates. /// Queue to manage scheduled light client updates.
@ -238,17 +219,17 @@ struct ReprocessQueue<T: BeaconChainTypes> {
/// Queued blocks. /// Queued blocks.
queued_gossip_block_roots: HashSet<Hash256>, queued_gossip_block_roots: HashSet<Hash256>,
/// Queued aggregated attestations. /// Queued aggregated attestations.
queued_aggregates: FnvHashMap<usize, (QueuedAggregate<T::EthSpec>, DelayKey)>, queued_aggregates: FnvHashMap<usize, (QueuedAggregate, DelayKey)>,
/// Queued attestations. /// Queued attestations.
queued_unaggregates: FnvHashMap<usize, (QueuedUnaggregate<T::EthSpec>, DelayKey)>, queued_unaggregates: FnvHashMap<usize, (QueuedUnaggregate, DelayKey)>,
/// Attestations (aggregated and unaggregated) per root. /// Attestations (aggregated and unaggregated) per root.
awaiting_attestations_per_root: HashMap<Hash256, Vec<QueuedAttestationId>>, awaiting_attestations_per_root: HashMap<Hash256, Vec<QueuedAttestationId>>,
/// Queued Light Client Updates. /// Queued Light Client Updates.
queued_lc_updates: FnvHashMap<usize, (QueuedLightClientUpdate<T::EthSpec>, DelayKey)>, queued_lc_updates: FnvHashMap<usize, (QueuedLightClientUpdate, DelayKey)>,
/// Light Client Updates per parent_root. /// Light Client Updates per parent_root.
awaiting_lc_updates_per_parent_root: HashMap<Hash256, Vec<QueuedLightClientUpdateId>>, awaiting_lc_updates_per_parent_root: HashMap<Hash256, Vec<QueuedLightClientUpdateId>>,
/// Queued backfill batches /// Queued backfill batches
queued_backfill_batches: Vec<QueuedBackfillBatch<T::EthSpec>>, queued_backfill_batches: Vec<QueuedBackfillBatch>,
/* Aux */ /* Aux */
/// Next attestation id, used for both aggregated and unaggregated attestations /// Next attestation id, used for both aggregated and unaggregated attestations
@ -259,7 +240,7 @@ struct ReprocessQueue<T: BeaconChainTypes> {
attestation_delay_debounce: TimeLatch, attestation_delay_debounce: TimeLatch,
lc_update_delay_debounce: TimeLatch, lc_update_delay_debounce: TimeLatch,
next_backfill_batch_event: Option<Pin<Box<tokio::time::Sleep>>>, next_backfill_batch_event: Option<Pin<Box<tokio::time::Sleep>>>,
slot_clock: Pin<Box<T::SlotClock>>, slot_clock: Pin<Box<S>>,
} }
pub type QueuedLightClientUpdateId = usize; pub type QueuedLightClientUpdateId = usize;
@ -270,20 +251,20 @@ enum QueuedAttestationId {
Unaggregate(usize), Unaggregate(usize),
} }
impl<T: EthSpec> QueuedAggregate<T> { impl QueuedAggregate {
pub fn beacon_block_root(&self) -> &Hash256 { pub fn beacon_block_root(&self) -> &Hash256 {
&self.attestation.message.aggregate.data.beacon_block_root &self.beacon_block_root
} }
} }
impl<T: EthSpec> QueuedUnaggregate<T> { impl QueuedUnaggregate {
pub fn beacon_block_root(&self) -> &Hash256 { pub fn beacon_block_root(&self) -> &Hash256 {
&self.attestation.data.beacon_block_root &self.beacon_block_root
} }
} }
impl<T: BeaconChainTypes> Stream for ReprocessQueue<T> { impl<S: SlotClock> Stream for ReprocessQueue<S> {
type Item = InboundEvent<T>; type Item = InboundEvent;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
// NOTE: implementing `Stream` is not necessary but allows to maintain the future selection // NOTE: implementing `Stream` is not necessary but allows to maintain the future selection
@ -374,16 +355,13 @@ impl<T: BeaconChainTypes> Stream for ReprocessQueue<T> {
/// Starts the job that manages scheduling works that need re-processing. The returned `Sender` /// Starts the job that manages scheduling works that need re-processing. The returned `Sender`
/// gives the communicating channel to receive those works. Once a work is ready, it is sent back /// gives the communicating channel to receive those works. Once a work is ready, it is sent back
/// via `ready_work_tx`. /// via `ready_work_tx`.
pub fn spawn_reprocess_scheduler<T: BeaconChainTypes>( pub fn spawn_reprocess_scheduler<S: SlotClock + 'static>(
ready_work_tx: Sender<ReadyWork<T>>, ready_work_tx: Sender<ReadyWork>,
work_reprocessing_rx: Receiver<ReprocessQueueMessage>,
executor: &TaskExecutor, executor: &TaskExecutor,
slot_clock: T::SlotClock, slot_clock: S,
log: Logger, log: Logger,
) -> Sender<ReprocessQueueMessage<T>> { ) {
let (work_reprocessing_tx, work_reprocessing_rx) = mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN);
// Basic sanity check.
assert!(ADDITIONAL_QUEUED_BLOCK_DELAY < MAXIMUM_GOSSIP_CLOCK_DISPARITY);
let mut queue = ReprocessQueue { let mut queue = ReprocessQueue {
work_reprocessing_rx, work_reprocessing_rx,
ready_work_tx, ready_work_tx,
@ -422,19 +400,17 @@ pub fn spawn_reprocess_scheduler<T: BeaconChainTypes>(
}, },
TASK_NAME, TASK_NAME,
); );
work_reprocessing_tx
} }
impl<T: BeaconChainTypes> ReprocessQueue<T> { impl<S: SlotClock> ReprocessQueue<S> {
fn handle_message(&mut self, msg: InboundEvent<T>, slot_clock: &T::SlotClock, log: &Logger) { fn handle_message(&mut self, msg: InboundEvent, slot_clock: &S, log: &Logger) {
use ReprocessQueueMessage::*; use ReprocessQueueMessage::*;
match msg { match msg {
// Some block has been indicated as "early" and should be processed when the // Some block has been indicated as "early" and should be processed when the
// appropriate slot arrives. // appropriate slot arrives.
InboundEvent::Msg(EarlyBlock(early_block)) => { InboundEvent::Msg(EarlyBlock(early_block)) => {
let block_slot = early_block.block.block.slot(); let block_slot = early_block.beacon_block_slot;
let block_root = early_block.block.block_root; let block_root = early_block.beacon_block_root;
// Don't add the same block to the queue twice. This prevents DoS attacks. // Don't add the same block to the queue twice. This prevents DoS attacks.
if self.queued_gossip_block_roots.contains(&block_root) { if self.queued_gossip_block_roots.contains(&block_root) {
@ -478,7 +454,7 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> {
if block_slot <= now if block_slot <= now
&& self && self
.ready_work_tx .ready_work_tx
.try_send(ReadyWork::GossipBlock(early_block)) .try_send(ReadyWork::Block(early_block))
.is_err() .is_err()
{ {
error!( error!(
@ -493,7 +469,7 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> {
// for the same block hash is being imported. We wait for `QUEUED_RPC_BLOCK_DELAY` // for the same block hash is being imported. We wait for `QUEUED_RPC_BLOCK_DELAY`
// and then send the rpc block back for processing assuming the gossip import // and then send the rpc block back for processing assuming the gossip import
// has completed by then. // has completed by then.
InboundEvent::Msg(RpcBlock(mut rpc_block)) => { InboundEvent::Msg(RpcBlock(rpc_block)) => {
// Check to ensure this won't over-fill the queue. // Check to ensure this won't over-fill the queue.
if self.rpc_block_delay_queue.len() >= MAXIMUM_QUEUED_BLOCKS { if self.rpc_block_delay_queue.len() >= MAXIMUM_QUEUED_BLOCKS {
if self.rpc_block_debounce.elapsed() { if self.rpc_block_debounce.elapsed() {
@ -506,10 +482,11 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> {
} }
// Return the block to the beacon processor signalling to // Return the block to the beacon processor signalling to
// ignore processing for this block // ignore processing for this block
rpc_block.should_process = false;
if self if self
.ready_work_tx .ready_work_tx
.try_send(ReadyWork::RpcBlock(rpc_block)) .try_send(ReadyWork::IgnoredRpcBlock(IgnoredRpcBlock {
process_fn: rpc_block.ignore_fn,
}))
.is_err() .is_err()
{ {
error!( error!(
@ -528,7 +505,7 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> {
debug!( debug!(
log, log,
"Sending rpc block for reprocessing"; "Sending rpc block for reprocessing";
"block_root" => %queued_rpc_block.block_root "block_root" => %queued_rpc_block.beacon_block_root
); );
if self if self
.ready_work_tx .ready_work_tx
@ -766,7 +743,7 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> {
} }
// A block that was queued for later processing is now ready to be processed. // A block that was queued for later processing is now ready to be processed.
InboundEvent::ReadyGossipBlock(ready_block) => { InboundEvent::ReadyGossipBlock(ready_block) => {
let block_root = ready_block.block.block_root; let block_root = ready_block.beacon_block_root;
if !self.queued_gossip_block_roots.remove(&block_root) { if !self.queued_gossip_block_roots.remove(&block_root) {
// Log an error to alert that we've made a bad assumption about how this // Log an error to alert that we've made a bad assumption about how this
@ -780,7 +757,7 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> {
if self if self
.ready_work_tx .ready_work_tx
.try_send(ReadyWork::GossipBlock(ready_block)) .try_send(ReadyWork::Block(ready_block))
.is_err() .is_err()
{ {
error!( error!(
@ -884,18 +861,28 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> {
"millis_from_slot_start" => millis_from_slot_start "millis_from_slot_start" => millis_from_slot_start
); );
if self match self
.ready_work_tx .ready_work_tx
.try_send(ReadyWork::BackfillSync(queued_backfill_batch.clone())) .try_send(ReadyWork::BackfillSync(queued_backfill_batch))
.is_err()
{ {
error!( // The message was sent successfully.
Ok(()) => (),
// The message was not sent, recover it from the returned `Err`.
Err(mpsc::error::TrySendError::Full(ReadyWork::BackfillSync(batch)))
| Err(mpsc::error::TrySendError::Closed(ReadyWork::BackfillSync(batch))) => {
error!(
log,
"Failed to send scheduled backfill work";
"info" => "sending work back to queue"
);
self.queued_backfill_batches.insert(0, batch)
}
// The message was not sent and we didn't get the correct
// return result. This is a logic error.
_ => crit!(
log, log,
"Failed to send scheduled backfill work"; "Unexpected return from try_send error";
"info" => "sending work back to queue" ),
);
self.queued_backfill_batches
.insert(0, queued_backfill_batch);
} }
} }
} }
@ -926,7 +913,7 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> {
// only recompute the `next_backfill_batch_event` if there are backfill batches in the queue // only recompute the `next_backfill_batch_event` if there are backfill batches in the queue
if !self.queued_backfill_batches.is_empty() { if !self.queued_backfill_batches.is_empty() {
self.next_backfill_batch_event = Some(Box::pin(tokio::time::sleep( self.next_backfill_batch_event = Some(Box::pin(tokio::time::sleep(
ReprocessQueue::<T>::duration_until_next_backfill_batch_event(&self.slot_clock), ReprocessQueue::<S>::duration_until_next_backfill_batch_event(&self.slot_clock),
))); )));
} else { } else {
self.next_backfill_batch_event = None self.next_backfill_batch_event = None
@ -935,7 +922,7 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> {
/// Returns duration until the next scheduled processing time. The schedule ensure that backfill /// Returns duration until the next scheduled processing time. The schedule ensure that backfill
/// processing is done in windows of time that aren't critical /// processing is done in windows of time that aren't critical
fn duration_until_next_backfill_batch_event(slot_clock: &T::SlotClock) -> Duration { fn duration_until_next_backfill_batch_event(slot_clock: &S) -> Duration {
let slot_duration = slot_clock.slot_duration(); let slot_duration = slot_clock.slot_duration();
slot_clock slot_clock
.millis_from_current_slot_start() .millis_from_current_slot_start()
@ -965,16 +952,9 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use beacon_chain::builder::Witness;
use beacon_chain::eth1_chain::CachingEth1Backend;
use slot_clock::TestingSlotClock; use slot_clock::TestingSlotClock;
use store::MemoryStore;
use types::MainnetEthSpec as E;
use types::Slot; use types::Slot;
type TestBeaconChainType =
Witness<TestingSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>;
#[test] #[test]
fn backfill_processing_schedule_calculation() { fn backfill_processing_schedule_calculation() {
let slot_duration = Duration::from_secs(12); let slot_duration = Duration::from_secs(12);
@ -987,7 +967,7 @@ mod tests {
for &event_duration_from_slot_start in event_times.iter() { for &event_duration_from_slot_start in event_times.iter() {
let duration_to_next_event = let duration_to_next_event =
ReprocessQueue::<TestBeaconChainType>::duration_until_next_backfill_batch_event( ReprocessQueue::<TestingSlotClock>::duration_until_next_backfill_batch_event(
&slot_clock, &slot_clock,
); );
@ -1004,7 +984,7 @@ mod tests {
// check for next event beyond the current slot // check for next event beyond the current slot
let duration_to_next_slot = slot_clock.duration_to_next_slot().unwrap(); let duration_to_next_slot = slot_clock.duration_to_next_slot().unwrap();
let duration_to_next_event = let duration_to_next_event =
ReprocessQueue::<TestBeaconChainType>::duration_until_next_backfill_batch_event( ReprocessQueue::<TestingSlotClock>::duration_until_next_backfill_batch_event(
&slot_clock, &slot_clock,
); );
assert_eq!( assert_eq!(

View File

@ -1,7 +1,7 @@
use eth2::types::builder_bid::SignedBuilderBid; use eth2::types::builder_bid::SignedBuilderBid;
use eth2::types::{ use eth2::types::{
AbstractExecPayload, BlindedPayload, EthSpec, ExecutionBlockHash, ExecutionPayload, AbstractExecPayload, BlindedPayload, EthSpec, ExecutionBlockHash, ExecutionPayload,
ForkVersionedResponse, PublicKeyBytes, SignedBeaconBlock, SignedValidatorRegistrationData, ForkVersionedResponse, PublicKeyBytes, SignedBlockContents, SignedValidatorRegistrationData,
Slot, Slot,
}; };
pub use eth2::Error; pub use eth2::Error;
@ -140,7 +140,7 @@ impl BuilderHttpClient {
/// `POST /eth/v1/builder/blinded_blocks` /// `POST /eth/v1/builder/blinded_blocks`
pub async fn post_builder_blinded_blocks<E: EthSpec>( pub async fn post_builder_blinded_blocks<E: EthSpec>(
&self, &self,
blinded_block: &SignedBeaconBlock<E, BlindedPayload<E>>, blinded_block: &SignedBlockContents<E, BlindedPayload<E>>,
) -> Result<ForkVersionedResponse<ExecutionPayload<E>>, Error> { ) -> Result<ForkVersionedResponse<ExecutionPayload<E>>, Error> {
let mut path = self.server.full.clone(); let mut path = self.server.full.clone();

View File

@ -43,3 +43,5 @@ slasher = { path = "../../slasher" }
slasher_service = { path = "../../slasher/service" } slasher_service = { path = "../../slasher/service" }
monitoring_api = {path = "../../common/monitoring_api"} monitoring_api = {path = "../../common/monitoring_api"}
execution_layer = { path = "../execution_layer" } execution_layer = { path = "../execution_layer" }
beacon_processor = { path = "../beacon_processor" }
num_cpus = "1.13.0"

View File

@ -12,7 +12,11 @@ use beacon_chain::{
slot_clock::{SlotClock, SystemTimeSlotClock}, slot_clock::{SlotClock, SystemTimeSlotClock},
state_advance_timer::spawn_state_advance_timer, state_advance_timer::spawn_state_advance_timer,
store::{HotColdDB, ItemStore, LevelDB, StoreConfig}, store::{HotColdDB, ItemStore, LevelDB, StoreConfig},
BeaconChain, BeaconChainTypes, Eth1ChainBackend, ServerSentEventHandler, BeaconChain, BeaconChainTypes, Eth1ChainBackend, MigratorConfig, ServerSentEventHandler,
};
use beacon_processor::{
work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessor, BeaconProcessorSend,
WorkEvent, MAX_SCHEDULED_WORK_QUEUE_LEN, MAX_WORK_EVENT_QUEUE_LEN,
}; };
use environment::RuntimeContext; use environment::RuntimeContext;
use eth1::{Config as Eth1Config, Service as Eth1Service}; use eth1::{Config as Eth1Config, Service as Eth1Service};
@ -29,12 +33,13 @@ use slasher::Slasher;
use slasher_service::SlasherService; use slasher_service::SlasherService;
use slog::{debug, info, warn, Logger}; use slog::{debug, info, warn, Logger};
use state_processing::per_slot_processing; use state_processing::per_slot_processing;
use std::cmp;
use std::net::TcpListener; use std::net::TcpListener;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use timer::spawn_timer; use timer::spawn_timer;
use tokio::sync::oneshot; use tokio::sync::{mpsc, oneshot};
use types::{ use types::{
test_utils::generate_deterministic_keypairs, BeaconState, ChainSpec, EthSpec, test_utils::generate_deterministic_keypairs, BeaconState, ChainSpec, EthSpec,
ExecutionBlockHash, Hash256, SignedBeaconBlock, ExecutionBlockHash, Hash256, SignedBeaconBlock,
@ -75,6 +80,10 @@ pub struct ClientBuilder<T: BeaconChainTypes> {
http_metrics_config: http_metrics::Config, http_metrics_config: http_metrics::Config,
slasher: Option<Arc<Slasher<T::EthSpec>>>, slasher: Option<Arc<Slasher<T::EthSpec>>>,
eth_spec_instance: T::EthSpec, eth_spec_instance: T::EthSpec,
beacon_processor_send: BeaconProcessorSend<T::EthSpec>,
beacon_processor_receive: mpsc::Receiver<WorkEvent<T::EthSpec>>,
work_reprocessing_tx: mpsc::Sender<ReprocessQueueMessage>,
work_reprocessing_rx: mpsc::Receiver<ReprocessQueueMessage>,
} }
impl<TSlotClock, TEth1Backend, TEthSpec, THotStore, TColdStore> impl<TSlotClock, TEth1Backend, TEthSpec, THotStore, TColdStore>
@ -90,6 +99,10 @@ where
/// ///
/// The `eth_spec_instance` parameter is used to concretize `TEthSpec`. /// The `eth_spec_instance` parameter is used to concretize `TEthSpec`.
pub fn new(eth_spec_instance: TEthSpec) -> Self { pub fn new(eth_spec_instance: TEthSpec) -> Self {
let (beacon_processor_send, beacon_processor_receive) =
mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN);
let (work_reprocessing_tx, work_reprocessing_rx) =
mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN);
Self { Self {
slot_clock: None, slot_clock: None,
store: None, store: None,
@ -108,6 +121,10 @@ where
http_metrics_config: <_>::default(), http_metrics_config: <_>::default(),
slasher: None, slasher: None,
eth_spec_instance, eth_spec_instance,
beacon_processor_send: BeaconProcessorSend(beacon_processor_send),
beacon_processor_receive,
work_reprocessing_tx,
work_reprocessing_rx,
} }
} }
@ -171,6 +188,9 @@ where
.store(store) .store(store)
.task_executor(context.executor.clone()) .task_executor(context.executor.clone())
.custom_spec(spec.clone()) .custom_spec(spec.clone())
.store_migrator_config(
MigratorConfig::default().epochs_per_migration(chain_config.epochs_per_migration),
)
.chain_config(chain_config) .chain_config(chain_config)
.graffiti(graffiti) .graffiti(graffiti)
.event_handler(event_handler) .event_handler(event_handler)
@ -551,6 +571,8 @@ where
gossipsub_registry gossipsub_registry
.as_mut() .as_mut()
.map(|registry| registry.sub_registry_with_prefix("gossipsub")), .map(|registry| registry.sub_registry_with_prefix("gossipsub")),
self.beacon_processor_send.clone(),
self.work_reprocessing_tx.clone(),
) )
.await .await
.map_err(|e| format!("Failed to start network: {:?}", e))?; .map_err(|e| format!("Failed to start network: {:?}", e))?;
@ -738,6 +760,27 @@ where
} }
if let Some(beacon_chain) = self.beacon_chain.as_ref() { if let Some(beacon_chain) = self.beacon_chain.as_ref() {
if let Some(network_globals) = &self.network_globals {
let beacon_processor_context = runtime_context.service_context("bproc".into());
BeaconProcessor {
network_globals: network_globals.clone(),
executor: beacon_processor_context.executor.clone(),
max_workers: cmp::max(1, num_cpus::get()),
current_workers: 0,
enable_backfill_rate_limiting: beacon_chain
.config
.enable_backfill_rate_limiting,
log: beacon_processor_context.log().clone(),
}
.spawn_manager(
self.beacon_processor_receive,
self.work_reprocessing_tx,
self.work_reprocessing_rx,
None,
beacon_chain.slot_clock.clone(),
);
}
let state_advance_context = runtime_context.service_context("state_advance".into()); let state_advance_context = runtime_context.service_context("state_advance".into());
let state_advance_log = state_advance_context.log().clone(); let state_advance_log = state_advance_context.log().clone();
spawn_state_advance_timer( spawn_state_advance_timer(

View File

@ -42,9 +42,9 @@ lazy_static = "1.4.0"
ethers-core = "1.0.2" ethers-core = "1.0.2"
builder_client = { path = "../builder_client" } builder_client = { path = "../builder_client" }
fork_choice = { path = "../../consensus/fork_choice" } fork_choice = { path = "../../consensus/fork_choice" }
mev-rs = { git = "https://github.com/ralexstokes/mev-rs" } mev-rs = { git = "https://github.com/ralexstokes/mev-rs", rev = "216657016d5c0889b505857c89ae42c7aa2764af" }
ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus" } ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus", rev = "e380108" }
ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs" } ssz_rs = "0.9.0"
tokio-stream = { version = "0.1.9", features = [ "sync" ] } tokio-stream = { version = "0.1.9", features = [ "sync" ] }
strum = "0.24.0" strum = "0.24.0"
keccak-hash = "0.10.0" keccak-hash = "0.10.0"

View File

@ -13,6 +13,7 @@ pub use engine_api::*;
pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc};
use engines::{Engine, EngineError}; use engines::{Engine, EngineError};
pub use engines::{EngineState, ForkchoiceState}; pub use engines::{EngineState, ForkchoiceState};
use eth2::types::SignedBlockContents;
use eth2::types::{builder_bid::SignedBuilderBid, ForkVersionedResponse}; use eth2::types::{builder_bid::SignedBuilderBid, ForkVersionedResponse};
use ethers_core::abi::ethereum_types::FromStrRadixErr; use ethers_core::abi::ethereum_types::FromStrRadixErr;
use ethers_core::types::Transaction as EthersTransaction; use ethers_core::types::Transaction as EthersTransaction;
@ -41,16 +42,14 @@ use tokio_stream::wrappers::WatchStream;
use tree_hash::TreeHash; use tree_hash::TreeHash;
use types::beacon_block_body::KzgCommitments; use types::beacon_block_body::KzgCommitments;
use types::blob_sidecar::Blobs; use types::blob_sidecar::Blobs;
use types::{AbstractExecPayload, BeaconStateError, ExecPayload, VersionedHash}; use types::KzgProofs;
use types::{ use types::{
BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ExecutionPayload, AbstractExecPayload, BeaconStateError, ExecPayload, ExecutionPayloadDeneb, VersionedHash,
ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadMerge, ForkName,
}; };
use types::{KzgProofs, Withdrawals};
use types::{ use types::{
ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, Slot, Transaction, BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionPayloadCapella, ExecutionPayloadMerge,
Uint256,
}; };
use types::{ProposerPreparationData, PublicKeyBytes, Signature, Slot, Transaction};
mod block_hash; mod block_hash;
mod engine_api; mod engine_api;
@ -1876,7 +1875,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
pub async fn propose_blinded_beacon_block( pub async fn propose_blinded_beacon_block(
&self, &self,
block_root: Hash256, block_root: Hash256,
block: &SignedBeaconBlock<T, BlindedPayload<T>>, block: &SignedBlockContents<T, BlindedPayload<T>>,
) -> Result<ExecutionPayload<T>, Error> { ) -> Result<ExecutionPayload<T>, Error> {
debug!( debug!(
self.log(), self.log(),
@ -1924,6 +1923,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
"relay_response_ms" => duration.as_millis(), "relay_response_ms" => duration.as_millis(),
"block_root" => ?block_root, "block_root" => ?block_root,
"parent_hash" => ?block "parent_hash" => ?block
.signed_block()
.message() .message()
.execution_payload() .execution_payload()
.map(|payload| format!("{}", payload.parent_hash())) .map(|payload| format!("{}", payload.parent_hash()))
@ -2126,6 +2126,15 @@ async fn timed_future<F: Future<Output = T>, T>(metric: &str, future: F) -> (T,
(result, duration) (result, duration)
} }
#[cfg(test)]
/// Returns the duration since the unix epoch.
fn timestamp_now() -> u64 {
SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap_or_else(|_| Duration::from_secs(0))
.as_secs()
}
#[derive(Debug)] #[derive(Debug)]
pub enum BlobTxConversionError { pub enum BlobTxConversionError {
/// The transaction type was not set. /// The transaction type was not set.
@ -2356,12 +2365,3 @@ mod test {
.await; .await;
} }
} }
#[cfg(test)]
/// Returns the duration since the unix epoch.
fn timestamp_now() -> u64 {
SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap_or_else(|_| Duration::from_secs(0))
.as_secs()
}

View File

@ -11,11 +11,17 @@ use ethereum_consensus::{
}; };
use fork_choice::ForkchoiceUpdateParameters; use fork_choice::ForkchoiceUpdateParameters;
use mev_rs::{ use mev_rs::{
bellatrix::{BuilderBid as BuilderBidBellatrix, SignedBuilderBid as SignedBuilderBidBellatrix}, blinded_block_provider::Server as BlindedBlockProviderServer,
capella::{BuilderBid as BuilderBidCapella, SignedBuilderBid as SignedBuilderBidCapella}, signing::{sign_builder_message, verify_signed_builder_message},
sign_builder_message, verify_signed_builder_message, BidRequest, BlindedBlockProviderError, types::{
BlindedBlockProviderServer, BuilderBid, ExecutionPayload as ServerPayload, bellatrix::{
SignedBlindedBeaconBlock, SignedBuilderBid, SignedValidatorRegistration, BuilderBid as BuilderBidBellatrix, SignedBuilderBid as SignedBuilderBidBellatrix,
},
capella::{BuilderBid as BuilderBidCapella, SignedBuilderBid as SignedBuilderBidCapella},
BidRequest, BuilderBid, ExecutionPayload as ServerPayload, SignedBlindedBeaconBlock,
SignedBuilderBid, SignedValidatorRegistration,
},
Error as MevError,
}; };
use parking_lot::RwLock; use parking_lot::RwLock;
use sensitive_url::SensitiveUrl; use sensitive_url::SensitiveUrl;
@ -47,7 +53,7 @@ pub enum Operation {
} }
impl Operation { impl Operation {
fn apply<B: BidStuff>(self, bid: &mut B) -> Result<(), BlindedBlockProviderError> { fn apply<B: BidStuff>(self, bid: &mut B) -> Result<(), MevError> {
match self { match self {
Operation::FeeRecipient(fee_recipient) => { Operation::FeeRecipient(fee_recipient) => {
*bid.fee_recipient_mut() = to_ssz_rs(&fee_recipient)? *bid.fee_recipient_mut() = to_ssz_rs(&fee_recipient)?
@ -73,7 +79,7 @@ pub trait BidStuff {
fn prev_randao_mut(&mut self) -> &mut Hash32; fn prev_randao_mut(&mut self) -> &mut Hash32;
fn block_number_mut(&mut self) -> &mut u64; fn block_number_mut(&mut self) -> &mut u64;
fn timestamp_mut(&mut self) -> &mut u64; fn timestamp_mut(&mut self) -> &mut u64;
fn withdrawals_root_mut(&mut self) -> Result<&mut Root, BlindedBlockProviderError>; fn withdrawals_root_mut(&mut self) -> Result<&mut Root, MevError>;
fn sign_builder_message( fn sign_builder_message(
&mut self, &mut self,
@ -134,11 +140,9 @@ impl BidStuff for BuilderBid {
} }
} }
fn withdrawals_root_mut(&mut self) -> Result<&mut Root, BlindedBlockProviderError> { fn withdrawals_root_mut(&mut self) -> Result<&mut Root, MevError> {
match self { match self {
Self::Bellatrix(_) => Err(BlindedBlockProviderError::Custom( Self::Bellatrix(_) => Err(MevError::InvalidFork),
"withdrawals_root called on bellatrix bid".to_string(),
)),
Self::Capella(bid) => Ok(&mut bid.header.withdrawals_root), Self::Capella(bid) => Ok(&mut bid.header.withdrawals_root),
} }
} }
@ -274,7 +278,7 @@ impl<E: EthSpec> MockBuilder<E> {
*self.invalidate_signatures.write() = false; *self.invalidate_signatures.write() = false;
} }
fn apply_operations<B: BidStuff>(&self, bid: &mut B) -> Result<(), BlindedBlockProviderError> { fn apply_operations<B: BidStuff>(&self, bid: &mut B) -> Result<(), MevError> {
let mut guard = self.operations.write(); let mut guard = self.operations.write();
while let Some(op) = guard.pop() { while let Some(op) = guard.pop() {
op.apply(bid)?; op.apply(bid)?;
@ -288,7 +292,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> {
async fn register_validators( async fn register_validators(
&self, &self,
registrations: &mut [SignedValidatorRegistration], registrations: &mut [SignedValidatorRegistration],
) -> Result<(), BlindedBlockProviderError> { ) -> Result<(), MevError> {
for registration in registrations { for registration in registrations {
let pubkey = registration.message.public_key.clone(); let pubkey = registration.message.public_key.clone();
let message = &mut registration.message; let message = &mut registration.message;
@ -307,10 +311,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> {
Ok(()) Ok(())
} }
async fn fetch_best_bid( async fn fetch_best_bid(&self, bid_request: &BidRequest) -> Result<SignedBuilderBid, MevError> {
&self,
bid_request: &BidRequest,
) -> Result<SignedBuilderBid, BlindedBlockProviderError> {
let slot = Slot::new(bid_request.slot); let slot = Slot::new(bid_request.slot);
let fork = self.spec.fork_name_at_slot::<E>(slot); let fork = self.spec.fork_name_at_slot::<E>(slot);
let signed_cached_data = self let signed_cached_data = self
@ -336,7 +337,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> {
.map_err(convert_err)? .map_err(convert_err)?
.block_hash(); .block_hash();
if head_execution_hash != from_ssz_rs(&bid_request.parent_hash)? { if head_execution_hash != from_ssz_rs(&bid_request.parent_hash)? {
return Err(BlindedBlockProviderError::Custom(format!( return Err(custom_err(format!(
"head mismatch: {} {}", "head mismatch: {} {}",
head_execution_hash, bid_request.parent_hash head_execution_hash, bid_request.parent_hash
))); )));
@ -396,7 +397,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> {
.get_debug_beacon_states(StateId::Head) .get_debug_beacon_states(StateId::Head)
.await .await
.map_err(convert_err)? .map_err(convert_err)?
.ok_or_else(|| BlindedBlockProviderError::Custom("missing head state".to_string()))? .ok_or_else(|| custom_err("missing head state".to_string()))?
.data; .data;
let prev_randao = head_state let prev_randao = head_state
.get_randao_mix(head_state.current_epoch()) .get_randao_mix(head_state.current_epoch())
@ -409,10 +410,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> {
PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, Some(vec![])) PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, Some(vec![]))
} }
ForkName::Base | ForkName::Altair => { ForkName::Base | ForkName::Altair => {
return Err(BlindedBlockProviderError::Custom(format!( return Err(MevError::InvalidFork);
"Unsupported fork: {}",
fork
)));
} }
}; };
@ -453,10 +451,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> {
public_key: self.builder_sk.public_key(), public_key: self.builder_sk.public_key(),
}), }),
ForkName::Base | ForkName::Altair | ForkName::Deneb => { ForkName::Base | ForkName::Altair | ForkName::Deneb => {
return Err(BlindedBlockProviderError::Custom(format!( return Err(MevError::InvalidFork)
"Unsupported fork: {}",
fork
)))
} }
}; };
*message.gas_limit_mut() = cached_data.gas_limit; *message.gas_limit_mut() = cached_data.gas_limit;
@ -475,7 +470,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> {
async fn open_bid( async fn open_bid(
&self, &self,
signed_block: &mut SignedBlindedBeaconBlock, signed_block: &mut SignedBlindedBeaconBlock,
) -> Result<ServerPayload, BlindedBlockProviderError> { ) -> Result<ServerPayload, MevError> {
let node = match signed_block { let node = match signed_block {
SignedBlindedBeaconBlock::Bellatrix(block) => { SignedBlindedBeaconBlock::Bellatrix(block) => {
block.message.body.execution_payload_header.hash_tree_root() block.message.body.execution_payload_header.hash_tree_root()
@ -496,9 +491,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> {
} }
} }
pub fn from_ssz_rs<T: SimpleSerialize, U: Decode>( pub fn from_ssz_rs<T: SimpleSerialize, U: Decode>(ssz_rs_data: &T) -> Result<U, MevError> {
ssz_rs_data: &T,
) -> Result<U, BlindedBlockProviderError> {
U::from_ssz_bytes( U::from_ssz_bytes(
ssz_rs::serialize(ssz_rs_data) ssz_rs::serialize(ssz_rs_data)
.map_err(convert_err)? .map_err(convert_err)?
@ -507,12 +500,17 @@ pub fn from_ssz_rs<T: SimpleSerialize, U: Decode>(
.map_err(convert_err) .map_err(convert_err)
} }
pub fn to_ssz_rs<T: Encode, U: SimpleSerialize>( pub fn to_ssz_rs<T: Encode, U: SimpleSerialize>(ssz_data: &T) -> Result<U, MevError> {
ssz_data: &T,
) -> Result<U, BlindedBlockProviderError> {
ssz_rs::deserialize::<U>(&ssz_data.as_ssz_bytes()).map_err(convert_err) ssz_rs::deserialize::<U>(&ssz_data.as_ssz_bytes()).map_err(convert_err)
} }
fn convert_err<E: Debug>(e: E) -> BlindedBlockProviderError { fn convert_err<E: Debug>(e: E) -> MevError {
BlindedBlockProviderError::Custom(format!("{e:?}")) custom_err(format!("{e:?}"))
}
// This is a bit of a hack since the `Custom` variant was removed from `mev_rs::Error`.
fn custom_err(s: String) -> MevError {
MevError::Consensus(ethereum_consensus::state_transition::Error::Io(
std::io::Error::new(std::io::ErrorKind::Other, s),
))
} }

View File

@ -75,7 +75,7 @@ impl<T: EthSpec> PackingEfficiencyHandler<T> {
available_attestations: HashSet::new(), available_attestations: HashSet::new(),
included_attestations: HashMap::new(), included_attestations: HashMap::new(),
committee_store: CommitteeStore::new(), committee_store: CommitteeStore::new(),
_phantom: PhantomData::default(), _phantom: PhantomData,
}; };
handler.compute_epoch(start_epoch, &starting_state, spec)?; handler.compute_epoch(start_epoch, &starting_state, spec)?;

View File

@ -49,7 +49,7 @@ pub fn get_block_rewards<T: BeaconChainTypes>(
.map_err(beacon_chain_error)?; .map_err(beacon_chain_error)?;
state state
.build_all_caches(&chain.spec) .build_caches(&chain.spec)
.map_err(beacon_state_error)?; .map_err(beacon_state_error)?;
let mut reward_cache = Default::default(); let mut reward_cache = Default::default();

View File

@ -32,8 +32,8 @@ use beacon_chain::{
pub use block_id::BlockId; pub use block_id::BlockId;
use directory::DEFAULT_ROOT_DIR; use directory::DEFAULT_ROOT_DIR;
use eth2::types::{ use eth2::types::{
self as api_types, EndpointVersion, ForkChoice, ForkChoiceNode, SignedBlockContents, self as api_types, BroadcastValidation, EndpointVersion, ForkChoice, ForkChoiceNode,
SkipRandaoVerification, ValidatorId, ValidatorStatus, SignedBlockContents, SkipRandaoVerification, ValidatorId, ValidatorStatus,
}; };
use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage};
use lighthouse_version::version_with_platform; use lighthouse_version::version_with_platform;
@ -41,7 +41,9 @@ use logging::SSELoggingComponents;
use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage}; use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage};
use operation_pool::ReceivedPreCapella; use operation_pool::ReceivedPreCapella;
use parking_lot::RwLock; use parking_lot::RwLock;
use publish_blocks::ProvenancedBlock; pub use publish_blocks::{
publish_blinded_block, publish_block, reconstruct_block, ProvenancedBlock,
};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use slog::{crit, debug, error, info, warn, Logger}; use slog::{crit, debug, error, info, warn, Logger};
use slot_clock::SlotClock; use slot_clock::SlotClock;
@ -61,9 +63,8 @@ use types::{
Attestation, AttestationData, AttestationShufflingId, AttesterSlashing, BeaconStateError, Attestation, AttestationData, AttestationShufflingId, AttesterSlashing, BeaconStateError,
BlindedPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload, BlindedPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload,
ProposerPreparationData, ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, ProposerPreparationData, ProposerSlashing, RelativeEpoch, SignedAggregateAndProof,
SignedBeaconBlock, SignedBlsToExecutionChange, SignedContributionAndProof, SignedBlsToExecutionChange, SignedContributionAndProof, SignedValidatorRegistrationData,
SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncCommitteeMessage, SignedVoluntaryExit, Slot, SyncCommitteeMessage, SyncContributionData,
SyncContributionData,
}; };
use version::{ use version::{
add_consensus_version_header, execution_optimistic_finalized_fork_versioned_response, add_consensus_version_header, execution_optimistic_finalized_fork_versioned_response,
@ -325,6 +326,7 @@ pub fn serve<T: BeaconChainTypes>(
}; };
let eth_v1 = single_version(V1); let eth_v1 = single_version(V1);
let eth_v2 = single_version(V2);
// Create a `warp` filter that provides access to the network globals. // Create a `warp` filter that provides access to the network globals.
let inner_network_globals = ctx.network_globals.clone(); let inner_network_globals = ctx.network_globals.clone();
@ -1223,16 +1225,59 @@ pub fn serve<T: BeaconChainTypes>(
log: Logger| async move { log: Logger| async move {
publish_blocks::publish_block( publish_blocks::publish_block(
None, None,
ProvenancedBlock::Local(block_contents), ProvenancedBlock::local(block_contents),
chain, chain,
&network_tx, &network_tx,
log, log,
BroadcastValidation::default(),
) )
.await .await
.map(|()| warp::reply().into_response()) .map(|()| warp::reply().into_response())
}, },
); );
let post_beacon_blocks_v2 = eth_v2
.and(warp::path("beacon"))
.and(warp::path("blocks"))
.and(warp::query::<api_types::BroadcastValidationQuery>())
.and(warp::path::end())
.and(warp::body::json())
.and(chain_filter.clone())
.and(network_tx_filter.clone())
.and(log_filter.clone())
.then(
|validation_level: api_types::BroadcastValidationQuery,
block_contents: SignedBlockContents<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| async move {
match publish_blocks::publish_block(
None,
ProvenancedBlock::local(block_contents),
chain,
&network_tx,
log,
validation_level.broadcast_validation,
)
.await
{
Ok(()) => warp::reply().into_response(),
Err(e) => match warp_utils::reject::handle_rejection(e).await {
Ok(reply) => reply.into_response(),
Err(_) => warp::reply::with_status(
StatusCode::INTERNAL_SERVER_ERROR,
eth2::StatusCode::INTERNAL_SERVER_ERROR,
)
.into_response(),
},
}
},
);
/*
* beacon/blocks
*/
// POST beacon/blinded_blocks // POST beacon/blinded_blocks
let post_beacon_blinded_blocks = eth_v1 let post_beacon_blinded_blocks = eth_v1
.and(warp::path("beacon")) .and(warp::path("beacon"))
@ -1243,13 +1288,56 @@ pub fn serve<T: BeaconChainTypes>(
.and(network_tx_filter.clone()) .and(network_tx_filter.clone())
.and(log_filter.clone()) .and(log_filter.clone())
.and_then( .and_then(
|block: SignedBeaconBlock<T::EthSpec, BlindedPayload<_>>, |block: SignedBlockContents<T::EthSpec, BlindedPayload<_>>,
chain: Arc<BeaconChain<T>>, chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>, network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| async move { log: Logger| async move {
publish_blocks::publish_blinded_block(block, chain, &network_tx, log) publish_blocks::publish_blinded_block(
.await block,
.map(|()| warp::reply().into_response()) chain,
&network_tx,
log,
BroadcastValidation::default(),
)
.await
.map(|()| warp::reply().into_response())
},
);
let post_beacon_blinded_blocks_v2 = eth_v2
.and(warp::path("beacon"))
.and(warp::path("blinded_blocks"))
.and(warp::query::<api_types::BroadcastValidationQuery>())
.and(warp::path::end())
.and(warp::body::json())
.and(chain_filter.clone())
.and(network_tx_filter.clone())
.and(log_filter.clone())
.then(
|validation_level: api_types::BroadcastValidationQuery,
block_contents: SignedBlockContents<T::EthSpec, BlindedPayload<_>>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| async move {
match publish_blocks::publish_blinded_block(
block_contents,
chain,
&network_tx,
log,
validation_level.broadcast_validation,
)
.await
{
Ok(()) => warp::reply().into_response(),
Err(e) => match warp_utils::reject::handle_rejection(e).await {
Ok(reply) => reply.into_response(),
Err(_) => warp::reply::with_status(
StatusCode::INTERNAL_SERVER_ERROR,
eth2::StatusCode::INTERNAL_SERVER_ERROR,
)
.into_response(),
},
}
}, },
); );
@ -2369,24 +2457,41 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::path("health")) .and(warp::path("health"))
.and(warp::path::end()) .and(warp::path::end())
.and(network_globals.clone()) .and(network_globals.clone())
.and_then(|network_globals: Arc<NetworkGlobals<T::EthSpec>>| { .and(chain_filter.clone())
blocking_response_task(move || match *network_globals.sync_state.read() { .and_then(
SyncState::SyncingFinalized { .. } |network_globals: Arc<NetworkGlobals<T::EthSpec>>, chain: Arc<BeaconChain<T>>| {
| SyncState::SyncingHead { .. } async move {
| SyncState::SyncTransition let el_offline = if let Some(el) = &chain.execution_layer {
| SyncState::BackFillSyncing { .. } => Ok(warp::reply::with_status( el.is_offline_or_erroring().await
warp::reply(), } else {
warp::http::StatusCode::PARTIAL_CONTENT, true
)), };
SyncState::Synced => Ok(warp::reply::with_status(
warp::reply(), blocking_response_task(move || {
warp::http::StatusCode::OK, let is_optimistic = chain
)), .is_optimistic_or_invalid_head()
SyncState::Stalled => Err(warp_utils::reject::not_synced( .map_err(warp_utils::reject::beacon_chain_error)?;
"sync stalled, beacon chain may not yet be initialized.".to_string(),
)), let is_syncing = !network_globals.sync_state.read().is_synced();
})
}); if el_offline {
Err(warp_utils::reject::not_synced("execution layer is offline".to_string()))
} else if is_syncing || is_optimistic {
Ok(warp::reply::with_status(
warp::reply(),
warp::http::StatusCode::PARTIAL_CONTENT,
))
} else {
Ok(warp::reply::with_status(
warp::reply(),
warp::http::StatusCode::OK,
))
}
})
.await
}
},
);
// GET node/peers/{peer_id} // GET node/peers/{peer_id}
let get_node_peers_by_id = eth_v1 let get_node_peers_by_id = eth_v1
@ -3866,6 +3971,8 @@ pub fn serve<T: BeaconChainTypes>(
warp::post().and( warp::post().and(
post_beacon_blocks post_beacon_blocks
.uor(post_beacon_blinded_blocks) .uor(post_beacon_blinded_blocks)
.uor(post_beacon_blocks_v2)
.uor(post_beacon_blinded_blocks_v2)
.uor(post_beacon_pool_attestations) .uor(post_beacon_pool_attestations)
.uor(post_beacon_pool_attester_slashings) .uor(post_beacon_pool_attester_slashings)
.uor(post_beacon_pool_proposer_slashings) .uor(post_beacon_pool_proposer_slashings)

View File

@ -1,100 +1,197 @@
use crate::metrics; use crate::metrics;
use beacon_chain::blob_verification::{AsBlock, BlockWrapper}; use beacon_chain::blob_verification::AsBlock;
use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now};
use beacon_chain::{AvailabilityProcessingStatus, NotifyExecutionLayer}; use beacon_chain::{
use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError}; AvailabilityProcessingStatus, BeaconChain, BeaconChainError, BeaconChainTypes, BlockError,
IntoGossipVerifiedBlockContents, NotifyExecutionLayer,
};
use eth2::types::BroadcastValidation;
use eth2::types::SignedBlockContents; use eth2::types::SignedBlockContents;
use execution_layer::ProvenancedPayload; use execution_layer::ProvenancedPayload;
use lighthouse_network::PubsubMessage; use lighthouse_network::PubsubMessage;
use network::NetworkMessage; use network::NetworkMessage;
use slog::{debug, error, info, warn, Logger}; use slog::{debug, error, info, warn, Logger};
use slot_clock::SlotClock; use slot_clock::SlotClock;
use std::marker::PhantomData;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use store::FixedVector;
use tokio::sync::mpsc::UnboundedSender; use tokio::sync::mpsc::UnboundedSender;
use tree_hash::TreeHash; use tree_hash::TreeHash;
use types::{ use types::{
AbstractExecPayload, BeaconBlockRef, BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, AbstractExecPayload, BeaconBlockRef, BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash,
FullPayload, Hash256, SignedBeaconBlock, FullPayload, Hash256, SignedBeaconBlock, SignedBlobSidecarList,
}; };
use warp::Rejection; use warp::Rejection;
pub enum ProvenancedBlock<T: EthSpec> { pub enum ProvenancedBlock<T: BeaconChainTypes, B: IntoGossipVerifiedBlockContents<T>> {
/// The payload was built using a local EE. /// The payload was built using a local EE.
Local(SignedBlockContents<T, FullPayload<T>>), Local(B, PhantomData<T>),
/// The payload was build using a remote builder (e.g., via a mev-boost /// The payload was build using a remote builder (e.g., via a mev-boost
/// compatible relay). /// compatible relay).
Builder(SignedBlockContents<T, FullPayload<T>>), Builder(B, PhantomData<T>),
}
impl<T: BeaconChainTypes, B: IntoGossipVerifiedBlockContents<T>> ProvenancedBlock<T, B> {
pub fn local(block: B) -> Self {
Self::Local(block, PhantomData)
}
pub fn builder(block: B) -> Self {
Self::Builder(block, PhantomData)
}
} }
/// Handles a request from the HTTP API for full blocks. /// Handles a request from the HTTP API for full blocks.
pub async fn publish_block<T: BeaconChainTypes>( pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlockContents<T>>(
block_root: Option<Hash256>, block_root: Option<Hash256>,
provenanced_block: ProvenancedBlock<T::EthSpec>, provenanced_block: ProvenancedBlock<T, B>,
chain: Arc<BeaconChain<T>>, chain: Arc<BeaconChain<T>>,
network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>, network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger, log: Logger,
validation_level: BroadcastValidation,
) -> Result<(), Rejection> { ) -> Result<(), Rejection> {
let seen_timestamp = timestamp_now(); let seen_timestamp = timestamp_now();
let (block, maybe_blobs, is_locally_built_block) = match provenanced_block {
ProvenancedBlock::Local(block_contents) => { let (block_contents, is_locally_built_block) = match provenanced_block {
let (block, maybe_blobs) = block_contents.deconstruct(); ProvenancedBlock::Local(block_contents, _) => (block_contents, true),
(Arc::new(block), maybe_blobs, true) ProvenancedBlock::Builder(block_contents, _) => (block_contents, false),
}
ProvenancedBlock::Builder(block_contents) => {
let (block, maybe_blobs) = block_contents.deconstruct();
(Arc::new(block), maybe_blobs, false)
}
}; };
let block = block_contents.inner_block();
let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock);
debug!(log, "Signed block received in HTTP API"; "slot" => block.slot());
//FIXME(sean) have to move this to prior to publishing because it's included in the blobs sidecar message. /* actually publish a block */
//this may skew metrics let publish_block = move |block: Arc<SignedBeaconBlock<T::EthSpec>>,
let block_root = block_root.unwrap_or_else(|| block.canonical_root()); blobs_opt: Option<SignedBlobSidecarList<T::EthSpec>>,
debug!( sender,
log, log,
"Signed block published to HTTP API"; seen_timestamp| {
"slot" => block.slot() let publish_timestamp = timestamp_now();
); let publish_delay = publish_timestamp
.checked_sub(seen_timestamp)
.unwrap_or_else(|| Duration::from_secs(0));
// Send the block, regardless of whether or not it is valid. The API info!(log, "Signed block published to network via HTTP API"; "slot" => block.slot(), "publish_delay" => ?publish_delay);
// specification is very clear that this is the desired behaviour. // Send the block, regardless of whether or not it is valid. The API
let wrapped_block: BlockWrapper<T::EthSpec> = match block.as_ref() { // specification is very clear that this is the desired behaviour.
SignedBeaconBlock::Base(_) match block.as_ref() {
| SignedBeaconBlock::Altair(_) SignedBeaconBlock::Base(_)
| SignedBeaconBlock::Merge(_) | SignedBeaconBlock::Altair(_)
| SignedBeaconBlock::Capella(_) => { | SignedBeaconBlock::Merge(_)
crate::publish_pubsub_message(network_tx, PubsubMessage::BeaconBlock(block.clone()))?; | SignedBeaconBlock::Capella(_) => {
block.into() crate::publish_pubsub_message(&sender, PubsubMessage::BeaconBlock(block.clone()))
} .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))?;
SignedBeaconBlock::Deneb(_) => { }
crate::publish_pubsub_message(network_tx, PubsubMessage::BeaconBlock(block.clone()))?; SignedBeaconBlock::Deneb(_) => {
if let Some(signed_blobs) = maybe_blobs { crate::publish_pubsub_message(&sender, PubsubMessage::BeaconBlock(block.clone()))
for (blob_index, blob) in signed_blobs.clone().into_iter().enumerate() { .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))?;
crate::publish_pubsub_message( if let Some(signed_blobs) = blobs_opt {
network_tx, for (blob_index, blob) in signed_blobs.into_iter().enumerate() {
PubsubMessage::BlobSidecar(Box::new((blob_index as u64, blob))), crate::publish_pubsub_message(
)?; &sender,
PubsubMessage::BlobSidecar(Box::new((blob_index as u64, blob))),
)
.map_err(|_| {
BlockError::BeaconChainError(BeaconChainError::UnableToPublish)
})?;
}
} }
let blobs = signed_blobs }
.into_iter() };
.map(|blob| Some(blob.message)) Ok(())
.collect::<Vec<_>>(); };
BlockWrapper::BlockAndBlobs(block, FixedVector::from(blobs))
/* only publish if gossip- and consensus-valid and equivocation-free */
let chain_clone = chain.clone();
let slot = block.message().slot();
let proposer_index = block.message().proposer_index();
let sender_clone = network_tx.clone();
let log_clone = log.clone();
// We can clone this because the blobs are `Arc`'d in `BlockContents`, but the block is not,
// so we avoid cloning the block at this point.
let blobs_opt = block_contents.inner_blobs();
/* if we can form a `GossipVerifiedBlock`, we've passed our basic gossip checks */
let (gossip_verified_block, gossip_verified_blobs) = block_contents
.into_gossip_verified_block(&chain)
.map_err(|e| {
warn!(log, "Not publishing block, not gossip verified"; "slot" => slot, "error" => ?e);
warp_utils::reject::custom_bad_request(e.to_string())
})?;
// Clone here, so we can take advantage of the `Arc`. The block in `BlockContents` is not,
// `Arc`'d but blobs are.
let block = gossip_verified_block.block.block_cloned();
let block_root = block_root.unwrap_or(gossip_verified_block.block_root);
if let BroadcastValidation::Gossip = validation_level {
publish_block(
block.clone(),
blobs_opt.clone(),
sender_clone.clone(),
log.clone(),
seen_timestamp,
)
.map_err(|_| warp_utils::reject::custom_server_error("unable to publish".into()))?;
}
let block_clone = block.clone();
let publish_fn = move || match validation_level {
BroadcastValidation::Gossip => Ok(()),
BroadcastValidation::Consensus => publish_block(
block_clone,
blobs_opt,
sender_clone,
log_clone,
seen_timestamp,
),
BroadcastValidation::ConsensusAndEquivocation => {
if chain_clone
.observed_block_producers
.read()
.proposer_has_been_observed(block_clone.message(), block_root)
.map_err(|e| BlockError::BeaconChainError(e.into()))?
.is_slashable()
{
warn!(
log_clone,
"Not publishing equivocating block";
"slot" => block_clone.slot()
);
Err(BlockError::Slashable)
} else { } else {
block.into() publish_block(
block_clone,
blobs_opt,
sender_clone,
log_clone,
seen_timestamp,
)
} }
} }
}; };
// Determine the delay after the start of the slot, register it with metrics.
let block_clone = wrapped_block.block_cloned(); if let Some(gossip_verified_blobs) = gossip_verified_blobs {
let slot = block_clone.message().slot(); for blob in gossip_verified_blobs {
let proposer_index = block_clone.message().proposer_index(); if let Err(e) = chain.process_blob(blob).await {
return Err(warp_utils::reject::custom_bad_request(format!(
"Invalid blob: {e}"
)));
}
}
}
match chain match chain
.process_block(block_root, wrapped_block, NotifyExecutionLayer::Yes) .process_block(
block_root,
gossip_verified_block,
NotifyExecutionLayer::Yes,
publish_fn,
)
.await .await
{ {
Ok(AvailabilityProcessingStatus::Imported(root)) => { Ok(AvailabilityProcessingStatus::Imported(root)) => {
@ -110,7 +207,7 @@ pub async fn publish_block<T: BeaconChainTypes>(
// Notify the validator monitor. // Notify the validator monitor.
chain.validator_monitor.read().register_api_block( chain.validator_monitor.read().register_api_block(
seen_timestamp, seen_timestamp,
block_clone.message(), block.message(),
root, root,
&chain.slot_clock, &chain.slot_clock,
); );
@ -123,14 +220,7 @@ pub async fn publish_block<T: BeaconChainTypes>(
// blocks built with builders we consider the broadcast time to be // blocks built with builders we consider the broadcast time to be
// when the blinded block is published to the builder. // when the blinded block is published to the builder.
if is_locally_built_block { if is_locally_built_block {
late_block_logging( late_block_logging(&chain, seen_timestamp, block.message(), root, "local", &log)
&chain,
seen_timestamp,
block_clone.message(),
root,
"local",
&log,
)
} }
Ok(()) Ok(())
@ -144,35 +234,32 @@ pub async fn publish_block<T: BeaconChainTypes>(
); );
Err(warp_utils::reject::broadcast_without_import(msg)) Err(warp_utils::reject::broadcast_without_import(msg))
} }
Err(BlockError::BlockIsAlreadyKnown) => { Err(BlockError::BeaconChainError(BeaconChainError::UnableToPublish)) => {
info!( Err(warp_utils::reject::custom_server_error(
log, "unable to publish to network channel".to_string(),
"Block from HTTP API already known"; ))
"block" => ?block_root,
"slot" => slot,
);
Ok(())
} }
Err(BlockError::RepeatProposal { proposer, slot }) => { Err(BlockError::Slashable) => Err(warp_utils::reject::custom_bad_request(
warn!( "proposal for this slot and proposer has already been seen".to_string(),
log, )),
"Block ignored due to repeat proposal"; Err(BlockError::BlockIsAlreadyKnown) => {
"msg" => "this can happen when a VC uses fallback BNs. \ info!(log, "Block from HTTP API already known"; "block" => ?block_root);
whilst this is not necessarily an error, it can indicate issues with a BN \
or between the VC and BN.",
"slot" => slot,
"proposer" => proposer,
);
Ok(()) Ok(())
} }
Err(e) => { Err(e) => {
let msg = format!("{:?}", e); if let BroadcastValidation::Gossip = validation_level {
error!( Err(warp_utils::reject::broadcast_without_import(format!("{e}")))
log, } else {
"Invalid block provided to HTTP API"; let msg = format!("{:?}", e);
"reason" => &msg error!(
); log,
Err(warp_utils::reject::broadcast_without_import(msg)) "Invalid block provided to HTTP API";
"reason" => &msg
);
Err(warp_utils::reject::custom_bad_request(format!(
"Invalid block: {e}"
)))
}
} }
} }
} }
@ -180,26 +267,38 @@ pub async fn publish_block<T: BeaconChainTypes>(
/// Handles a request from the HTTP API for blinded blocks. This converts blinded blocks into full /// Handles a request from the HTTP API for blinded blocks. This converts blinded blocks into full
/// blocks before publishing. /// blocks before publishing.
pub async fn publish_blinded_block<T: BeaconChainTypes>( pub async fn publish_blinded_block<T: BeaconChainTypes>(
block: SignedBeaconBlock<T::EthSpec, BlindedPayload<T::EthSpec>>, block: SignedBlockContents<T::EthSpec, BlindedPayload<T::EthSpec>>,
chain: Arc<BeaconChain<T>>, chain: Arc<BeaconChain<T>>,
network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>, network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger, log: Logger,
validation_level: BroadcastValidation,
) -> Result<(), Rejection> { ) -> Result<(), Rejection> {
let block_root = block.canonical_root(); let block_root = block.signed_block().canonical_root();
let full_block = reconstruct_block(chain.clone(), block_root, block, log.clone()).await?; let full_block: ProvenancedBlock<T, SignedBlockContents<T::EthSpec>> =
publish_block::<T>(Some(block_root), full_block, chain, network_tx, log).await reconstruct_block(chain.clone(), block_root, block, log.clone()).await?;
publish_block::<T, _>(
Some(block_root),
full_block,
chain,
network_tx,
log,
validation_level,
)
.await
} }
/// Deconstruct the given blinded block, and construct a full block. This attempts to use the /// Deconstruct the given blinded block, and construct a full block. This attempts to use the
/// execution layer's payload cache, and if that misses, attempts a blind block proposal to retrieve /// execution layer's payload cache, and if that misses, attempts a blind block proposal to retrieve
/// the full payload. /// the full payload.
async fn reconstruct_block<T: BeaconChainTypes>( pub async fn reconstruct_block<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>, chain: Arc<BeaconChain<T>>,
block_root: Hash256, block_root: Hash256,
block: SignedBeaconBlock<T::EthSpec, BlindedPayload<T::EthSpec>>, block: SignedBlockContents<T::EthSpec, BlindedPayload<T::EthSpec>>,
log: Logger, log: Logger,
) -> Result<ProvenancedBlock<T::EthSpec>, Rejection> { ) -> Result<ProvenancedBlock<T, SignedBlockContents<T::EthSpec>>, Rejection> {
let full_payload_opt = if let Ok(payload_header) = block.message().body().execution_payload() { let full_payload_opt = if let Ok(payload_header) =
block.signed_block().message().body().execution_payload()
{
let el = chain.execution_layer.as_ref().ok_or_else(|| { let el = chain.execution_layer.as_ref().ok_or_else(|| {
warp_utils::reject::custom_server_error("Missing execution layer".to_string()) warp_utils::reject::custom_server_error("Missing execution layer".to_string())
})?; })?;
@ -207,9 +306,12 @@ async fn reconstruct_block<T: BeaconChainTypes>(
// If the execution block hash is zero, use an empty payload. // If the execution block hash is zero, use an empty payload.
let full_payload = if payload_header.block_hash() == ExecutionBlockHash::zero() { let full_payload = if payload_header.block_hash() == ExecutionBlockHash::zero() {
let payload = FullPayload::default_at_fork( let payload = FullPayload::default_at_fork(
chain chain.spec.fork_name_at_epoch(
.spec block
.fork_name_at_epoch(block.slot().epoch(T::EthSpec::slots_per_epoch())), .signed_block()
.slot()
.epoch(T::EthSpec::slots_per_epoch()),
),
) )
.map_err(|e| { .map_err(|e| {
warp_utils::reject::custom_server_error(format!( warp_utils::reject::custom_server_error(format!(
@ -234,7 +336,7 @@ async fn reconstruct_block<T: BeaconChainTypes>(
late_block_logging( late_block_logging(
&chain, &chain,
timestamp_now(), timestamp_now(),
block.message(), block.signed_block().message(),
block_root, block_root,
"builder", "builder",
&log, &log,
@ -262,17 +364,23 @@ async fn reconstruct_block<T: BeaconChainTypes>(
// A block without a payload is pre-merge and we consider it locally // A block without a payload is pre-merge and we consider it locally
// built. // built.
None => block None => block
.deconstruct()
.0
.try_into_full_block(None) .try_into_full_block(None)
.map(SignedBlockContents::Block) .map(SignedBlockContents::Block)
.map(ProvenancedBlock::Local), .map(ProvenancedBlock::local),
Some(ProvenancedPayload::Local(full_payload)) => block Some(ProvenancedPayload::Local(full_payload)) => block
.deconstruct()
.0
.try_into_full_block(Some(full_payload)) .try_into_full_block(Some(full_payload))
.map(SignedBlockContents::Block) .map(SignedBlockContents::Block)
.map(ProvenancedBlock::Local), .map(ProvenancedBlock::local),
Some(ProvenancedPayload::Builder(full_payload)) => block Some(ProvenancedPayload::Builder(full_payload)) => block
.deconstruct()
.0
.try_into_full_block(Some(full_payload)) .try_into_full_block(Some(full_payload))
.map(SignedBlockContents::Block) .map(SignedBlockContents::Block)
.map(ProvenancedBlock::Builder), .map(ProvenancedBlock::builder),
} }
.ok_or_else(|| { .ok_or_else(|| {
warp_utils::reject::custom_server_error("Unable to add payload to block".to_string()) warp_utils::reject::custom_server_error("Unable to add payload to block".to_string())

View File

@ -70,15 +70,32 @@ impl StateId {
.map_err(BeaconChainError::DBError) .map_err(BeaconChainError::DBError)
.map_err(warp_utils::reject::beacon_chain_error)? .map_err(warp_utils::reject::beacon_chain_error)?
{ {
let execution_optimistic = chain let finalization_status = chain
.canonical_head .state_finalization_and_canonicity(root, hot_summary.slot)
.fork_choice_read_lock()
.is_optimistic_or_invalid_block_no_fallback(&hot_summary.latest_block_root)
.map_err(BeaconChainError::ForkChoiceError)
.map_err(warp_utils::reject::beacon_chain_error)?;
let finalized = chain
.is_finalized_state(root, hot_summary.slot)
.map_err(warp_utils::reject::beacon_chain_error)?; .map_err(warp_utils::reject::beacon_chain_error)?;
let finalized = finalization_status.is_finalized();
let fork_choice = chain.canonical_head.fork_choice_read_lock();
let execution_optimistic = if finalization_status.slot_is_finalized
&& !finalization_status.canonical
{
// This block is permanently orphaned and has likely been pruned from fork
// choice. If it isn't found in fork choice, mark it optimistic to be on the
// safe side.
fork_choice
.is_optimistic_or_invalid_block_no_fallback(
&hot_summary.latest_block_root,
)
.unwrap_or(true)
} else {
// This block is either old and finalized, or recent and unfinalized, so
// it's safe to fallback to the optimistic status of the finalized block.
chain
.canonical_head
.fork_choice_read_lock()
.is_optimistic_or_invalid_block(&hot_summary.latest_block_root)
.map_err(BeaconChainError::ForkChoiceError)
.map_err(warp_utils::reject::beacon_chain_error)?
};
return Ok((*root, execution_optimistic, finalized)); return Ok((*root, execution_optimistic, finalized));
} else if let Some(_cold_state_slot) = chain } else if let Some(_cold_state_slot) = chain
.store .store

File diff suppressed because it is too large Load Diff

View File

@ -327,11 +327,8 @@ async fn sync_committee_indices_across_fork() {
/// Assert that an HTTP API error has the given status code and indexed errors for the given indices. /// Assert that an HTTP API error has the given status code and indexed errors for the given indices.
fn assert_server_indexed_error(error: eth2::Error, status_code: u16, indices: Vec<usize>) { fn assert_server_indexed_error(error: eth2::Error, status_code: u16, indices: Vec<usize>) {
let eth2::Error::ServerIndexedMessage(IndexedErrorMessage { let eth2::Error::ServerIndexedMessage(IndexedErrorMessage { code, failures, .. }) = error
code, else {
failures,
..
}) = error else {
panic!("wrong error, expected ServerIndexedMessage, got: {error:?}") panic!("wrong error, expected ServerIndexedMessage, got: {error:?}")
}; };
assert_eq!(code, status_code); assert_eq!(code, status_code);

View File

@ -2,8 +2,9 @@
use beacon_chain::{ use beacon_chain::{
chain_config::{DisallowedReOrgOffsets, ReOrgThreshold}, chain_config::{DisallowedReOrgOffsets, ReOrgThreshold},
test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy},
ChainConfig,
}; };
use eth2::types::DepositContractData; use eth2::types::{DepositContractData, StateId};
use execution_layer::{ForkchoiceState, PayloadAttributes}; use execution_layer::{ForkchoiceState, PayloadAttributes};
use http_api::test_utils::InteractiveTester; use http_api::test_utils::InteractiveTester;
use parking_lot::Mutex; use parking_lot::Mutex;
@ -17,7 +18,7 @@ use std::time::Duration;
use tree_hash::TreeHash; use tree_hash::TreeHash;
use types::{ use types::{
Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, FullPayload, Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, FullPayload,
MainnetEthSpec, ProposerPreparationData, Slot, MainnetEthSpec, MinimalEthSpec, ProposerPreparationData, Slot,
}; };
type E = MainnetEthSpec; type E = MainnetEthSpec;
@ -48,6 +49,76 @@ async fn deposit_contract_custom_network() {
assert_eq!(result, expected); assert_eq!(result, expected);
} }
// Test that state lookups by root function correctly for states that are finalized but still
// present in the hot database, and have had their block pruned from fork choice.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn state_by_root_pruned_from_fork_choice() {
type E = MinimalEthSpec;
let validator_count = 24;
let spec = ForkName::latest().make_genesis_spec(E::default_spec());
let tester = InteractiveTester::<E>::new_with_initializer_and_mutator(
Some(spec.clone()),
validator_count,
Some(Box::new(move |builder| {
builder
.deterministic_keypairs(validator_count)
.fresh_ephemeral_store()
.chain_config(ChainConfig {
epochs_per_migration: 1024,
..ChainConfig::default()
})
})),
None,
)
.await;
let client = &tester.client;
let harness = &tester.harness;
// Create some chain depth and finalize beyond fork choice's pruning depth.
let num_epochs = 8_u64;
let num_initial = num_epochs * E::slots_per_epoch();
harness.advance_slot();
harness
.extend_chain_with_sync(
num_initial as usize,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
SyncCommitteeStrategy::NoValidators,
)
.await;
// Should now be finalized.
let finalized_epoch = harness.finalized_checkpoint().epoch;
assert_eq!(finalized_epoch, num_epochs - 2);
// The split slot should still be at 0.
assert_eq!(harness.chain.store.get_split_slot(), 0);
// States that are between the split and the finalized slot should be able to be looked up by
// state root.
for slot in 0..finalized_epoch.start_slot(E::slots_per_epoch()).as_u64() {
let state_root = harness
.chain
.state_root_at_slot(Slot::new(slot))
.unwrap()
.unwrap();
let response = client
.get_debug_beacon_states::<E>(StateId::Root(state_root))
.await
.unwrap()
.unwrap();
assert!(response.finalized.unwrap());
assert!(!response.execution_optimistic.unwrap());
let mut state = response.data;
assert_eq!(state.update_tree_hash_cache().unwrap(), state_root);
}
}
/// Data structure for tracking fork choice updates received by the mock execution layer. /// Data structure for tracking fork choice updates received by the mock execution layer.
#[derive(Debug, Default)] #[derive(Debug, Default)]
struct ForkChoiceUpdates { struct ForkChoiceUpdates {

View File

@ -1,5 +1,6 @@
#![cfg(not(debug_assertions))] // Tests are too slow in debug. #![cfg(not(debug_assertions))] // Tests are too slow in debug.
pub mod broadcast_validation_tests;
pub mod fork_tests; pub mod fork_tests;
pub mod interactive_tests; pub mod interactive_tests;
pub mod status_tests; pub mod status_tests;

View File

@ -3,6 +3,7 @@ use beacon_chain::{
test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy},
BlockError, BlockError,
}; };
use eth2::StatusCode;
use execution_layer::{PayloadStatusV1, PayloadStatusV1Status}; use execution_layer::{PayloadStatusV1, PayloadStatusV1Status};
use http_api::test_utils::InteractiveTester; use http_api::test_utils::InteractiveTester;
use types::{EthSpec, ExecPayload, ForkName, MinimalEthSpec, Slot}; use types::{EthSpec, ExecPayload, ForkName, MinimalEthSpec, Slot};
@ -149,3 +150,82 @@ async fn el_error_on_new_payload() {
assert_eq!(api_response.is_optimistic, Some(false)); assert_eq!(api_response.is_optimistic, Some(false));
assert_eq!(api_response.is_syncing, false); assert_eq!(api_response.is_syncing, false);
} }
/// Check `node health` endpoint when the EL is offline.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn node_health_el_offline() {
let num_blocks = E::slots_per_epoch() / 2;
let num_validators = E::slots_per_epoch();
let tester = post_merge_tester(num_blocks, num_validators).await;
let harness = &tester.harness;
let mock_el = harness.mock_execution_layer.as_ref().unwrap();
// EL offline
mock_el.server.set_syncing_response(Err("offline".into()));
mock_el.el.upcheck().await;
let status = tester.client.get_node_health().await;
match status {
Ok(_) => {
panic!("should return 503 error status code");
}
Err(e) => {
assert_eq!(e.status().unwrap(), 503);
}
}
}
/// Check `node health` endpoint when the EL is online and synced.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn node_health_el_online_and_synced() {
let num_blocks = E::slots_per_epoch() / 2;
let num_validators = E::slots_per_epoch();
let tester = post_merge_tester(num_blocks, num_validators).await;
let harness = &tester.harness;
let mock_el = harness.mock_execution_layer.as_ref().unwrap();
// EL synced
mock_el.server.set_syncing_response(Ok(false));
mock_el.el.upcheck().await;
let status = tester.client.get_node_health().await;
match status {
Ok(response) => {
assert_eq!(response, StatusCode::OK);
}
Err(_) => {
panic!("should return 200 status code");
}
}
}
/// Check `node health` endpoint when the EL is online but not synced.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn node_health_el_online_and_not_synced() {
let num_blocks = E::slots_per_epoch() / 2;
let num_validators = E::slots_per_epoch();
let tester = post_merge_tester(num_blocks, num_validators).await;
let harness = &tester.harness;
let mock_el = harness.mock_execution_layer.as_ref().unwrap();
// EL not synced
harness.advance_slot();
mock_el.server.all_payloads_syncing(true);
harness
.extend_chain(
1,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
)
.await;
let status = tester.client.get_node_health().await;
match status {
Ok(response) => {
assert_eq!(response, StatusCode::PARTIAL_CONTENT);
}
Err(_) => {
panic!("should return 206 status code");
}
}
}

View File

@ -8,7 +8,7 @@ use eth2::{
mixin::{RequestAccept, ResponseForkName, ResponseOptional}, mixin::{RequestAccept, ResponseForkName, ResponseOptional},
reqwest::RequestBuilder, reqwest::RequestBuilder,
types::{BlockId as CoreBlockId, ForkChoiceNode, StateId as CoreStateId, *}, types::{BlockId as CoreBlockId, ForkChoiceNode, StateId as CoreStateId, *},
BeaconNodeHttpClient, Error, StatusCode, Timeouts, BeaconNodeHttpClient, Error, Timeouts,
}; };
use execution_layer::test_utils::TestingBuilder; use execution_layer::test_utils::TestingBuilder;
use execution_layer::test_utils::DEFAULT_BUILDER_THRESHOLD_WEI; use execution_layer::test_utils::DEFAULT_BUILDER_THRESHOLD_WEI;
@ -160,7 +160,7 @@ impl ApiTester {
// `make_block` adds random graffiti, so this will produce an alternate block // `make_block` adds random graffiti, so this will produce an alternate block
let (reorg_block, _reorg_state) = harness let (reorg_block, _reorg_state) = harness
.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap() + 1)
.await; .await;
let reorg_block = SignedBlockContents::from(reorg_block); let reorg_block = SignedBlockContents::from(reorg_block);
@ -1252,18 +1252,27 @@ impl ApiTester {
} }
pub async fn test_post_beacon_blocks_invalid(mut self) -> Self { pub async fn test_post_beacon_blocks_invalid(mut self) -> Self {
let mut next_block = self.next_block.clone().deconstruct().0; let block = self
*next_block.message_mut().proposer_index_mut() += 1; .harness
.make_block_with_modifier(
self.harness.get_current_state(),
self.harness.get_current_slot(),
|b| {
*b.state_root_mut() = Hash256::zero();
},
)
.await
.0;
assert!(self assert!(self
.client .client
.post_beacon_blocks(&SignedBlockContents::from(next_block)) .post_beacon_blocks(&SignedBlockContents::from(block))
.await .await
.is_err()); .is_err());
assert!( assert!(
self.network_rx.network_recv.recv().await.is_some(), self.network_rx.network_recv.recv().await.is_some(),
"invalid blocks should be sent to network" "gossip valid blocks should be sent to network"
); );
self self
@ -1761,9 +1770,15 @@ impl ApiTester {
} }
pub async fn test_get_node_health(self) -> Self { pub async fn test_get_node_health(self) -> Self {
let status = self.client.get_node_health().await.unwrap(); let status = self.client.get_node_health().await;
assert_eq!(status, StatusCode::OK); match status {
Ok(_) => {
panic!("should return 503 error status code");
}
Err(e) => {
assert_eq!(e.status().unwrap(), 503);
}
}
self self
} }
@ -4142,7 +4157,7 @@ impl ApiTester {
.unwrap(); .unwrap();
let expected_reorg = EventKind::ChainReorg(SseChainReorg { let expected_reorg = EventKind::ChainReorg(SseChainReorg {
slot: self.next_block.signed_block().slot(), slot: self.reorg_block.signed_block().slot(),
depth: 1, depth: 1,
old_head_block: self.next_block.signed_block().canonical_root(), old_head_block: self.next_block.signed_block().canonical_root(),
old_head_state: self.next_block.signed_block().state_root(), old_head_state: self.next_block.signed_block().state_root(),
@ -4156,6 +4171,8 @@ impl ApiTester {
execution_optimistic: false, execution_optimistic: false,
}); });
self.harness.advance_slot();
self.client self.client
.post_beacon_blocks(&self.reorg_block) .post_beacon_blocks(&self.reorg_block)
.await .await

View File

@ -1,7 +1,10 @@
//! ENR extension trait to support libp2p integration. //! ENR extension trait to support libp2p integration.
use crate::{Enr, Multiaddr, PeerId}; use crate::{Enr, Multiaddr, PeerId};
use discv5::enr::{CombinedKey, CombinedPublicKey}; use discv5::enr::{CombinedKey, CombinedPublicKey};
use libp2p::core::{identity::Keypair, identity::PublicKey, multiaddr::Protocol}; use libp2p::{
core::{identity::Keypair, identity::PublicKey, multiaddr::Protocol},
identity::secp256k1,
};
use tiny_keccak::{Hasher, Keccak}; use tiny_keccak::{Hasher, Keccak};
/// Extend ENR for libp2p types. /// Extend ENR for libp2p types.
@ -36,6 +39,8 @@ pub trait CombinedKeyPublicExt {
pub trait CombinedKeyExt { pub trait CombinedKeyExt {
/// Converts a libp2p key into an ENR combined key. /// Converts a libp2p key into an ENR combined key.
fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result<CombinedKey, &'static str>; fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result<CombinedKey, &'static str>;
/// Converts a [`secp256k1::Keypair`] into and Enr [`CombinedKey`].
fn from_secp256k1(key: &secp256k1::Keypair) -> CombinedKey;
} }
impl EnrExt for Enr { impl EnrExt for Enr {
@ -220,12 +225,7 @@ impl CombinedKeyPublicExt for CombinedPublicKey {
impl CombinedKeyExt for CombinedKey { impl CombinedKeyExt for CombinedKey {
fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result<CombinedKey, &'static str> { fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result<CombinedKey, &'static str> {
match key { match key {
Keypair::Secp256k1(key) => { Keypair::Secp256k1(key) => Ok(CombinedKey::from_secp256k1(key)),
let secret =
discv5::enr::k256::ecdsa::SigningKey::from_slice(&key.secret().to_bytes())
.expect("libp2p key must be valid");
Ok(CombinedKey::Secp256k1(secret))
}
Keypair::Ed25519(key) => { Keypair::Ed25519(key) => {
let ed_keypair = discv5::enr::ed25519_dalek::SigningKey::from_bytes( let ed_keypair = discv5::enr::ed25519_dalek::SigningKey::from_bytes(
&(key.encode()[..32]) &(key.encode()[..32])
@ -237,6 +237,11 @@ impl CombinedKeyExt for CombinedKey {
Keypair::Ecdsa(_) => Err("Ecdsa keypairs not supported"), Keypair::Ecdsa(_) => Err("Ecdsa keypairs not supported"),
} }
} }
fn from_secp256k1(key: &secp256k1::Keypair) -> Self {
let secret = discv5::enr::k256::ecdsa::SigningKey::from_slice(&key.secret().to_bytes())
.expect("libp2p key must be valid");
CombinedKey::Secp256k1(secret)
}
} }
// helper function to convert a peer_id to a node_id. This is only possible for secp256k1/ed25519 libp2p // helper function to convert a peer_id to a node_id. This is only possible for secp256k1/ed25519 libp2p

View File

@ -1101,6 +1101,7 @@ mod tests {
use super::*; use super::*;
use crate::rpc::methods::{MetaData, MetaDataV2}; use crate::rpc::methods::{MetaData, MetaDataV2};
use enr::EnrBuilder; use enr::EnrBuilder;
use libp2p::identity::secp256k1;
use slog::{o, Drain}; use slog::{o, Drain};
use types::{BitVector, MinimalEthSpec, SubnetId}; use types::{BitVector, MinimalEthSpec, SubnetId};
@ -1119,10 +1120,10 @@ mod tests {
} }
async fn build_discovery() -> Discovery<E> { async fn build_discovery() -> Discovery<E> {
let keypair = libp2p::identity::Keypair::generate_secp256k1(); let keypair = secp256k1::Keypair::generate();
let mut config = NetworkConfig::default(); let mut config = NetworkConfig::default();
config.set_listening_addr(crate::ListenAddress::unused_v4_ports()); config.set_listening_addr(crate::ListenAddress::unused_v4_ports());
let enr_key: CombinedKey = CombinedKey::from_libp2p(&keypair).unwrap(); let enr_key: CombinedKey = CombinedKey::from_secp256k1(&keypair);
let enr: Enr = build_enr::<E>(&enr_key, &config, &EnrForkId::default()).unwrap(); let enr: Enr = build_enr::<E>(&enr_key, &config, &EnrForkId::default()).unwrap();
let log = build_log(slog::Level::Debug, false); let log = build_log(slog::Level::Debug, false);
let globals = NetworkGlobals::new( let globals = NetworkGlobals::new(
@ -1138,6 +1139,7 @@ mod tests {
false, false,
&log, &log,
); );
let keypair = Keypair::Secp256k1(keypair);
Discovery::new(&keypair, &config, Arc::new(globals), &log) Discovery::new(&keypair, &config, Arc::new(globals), &log)
.await .await
.unwrap() .unwrap()
@ -1184,8 +1186,8 @@ mod tests {
fn make_enr(subnet_ids: Vec<usize>) -> Enr { fn make_enr(subnet_ids: Vec<usize>) -> Enr {
let mut builder = EnrBuilder::new("v4"); let mut builder = EnrBuilder::new("v4");
let keypair = libp2p::identity::Keypair::generate_secp256k1(); let keypair = secp256k1::Keypair::generate();
let enr_key: CombinedKey = CombinedKey::from_libp2p(&keypair).unwrap(); let enr_key: CombinedKey = CombinedKey::from_secp256k1(&keypair);
// set the "attnets" field on our ENR // set the "attnets" field on our ENR
let mut bitfield = BitVector::<ssz_types::typenum::U64>::new(); let mut bitfield = BitVector::<ssz_types::typenum::U64>::new();

View File

@ -134,9 +134,8 @@ impl<TSpec: EthSpec> NetworkGlobals<TSpec> {
log: &slog::Logger, log: &slog::Logger,
) -> NetworkGlobals<TSpec> { ) -> NetworkGlobals<TSpec> {
use crate::CombinedKeyExt; use crate::CombinedKeyExt;
let keypair = libp2p::identity::Keypair::generate_secp256k1(); let keypair = libp2p::identity::secp256k1::Keypair::generate();
let enr_key: discv5::enr::CombinedKey = let enr_key: discv5::enr::CombinedKey = discv5::enr::CombinedKey::from_secp256k1(&keypair);
discv5::enr::CombinedKey::from_libp2p(&keypair).unwrap();
let enr = discv5::enr::EnrBuilder::new("v4").build(&enr_key).unwrap(); let enr = discv5::enr::EnrBuilder::new("v4").build(&enr_key).unwrap();
NetworkGlobals::new( NetworkGlobals::new(
enr, enr,

View File

@ -11,7 +11,6 @@ matches = "0.1.8"
exit-future = "0.2.0" exit-future = "0.2.0"
slog-term = "2.6.0" slog-term = "2.6.0"
slog-async = "2.5.0" slog-async = "2.5.0"
environment = { path = "../../lighthouse/environment" }
[dependencies] [dependencies]
beacon_chain = { path = "../beacon_chain" } beacon_chain = { path = "../beacon_chain" }
@ -47,6 +46,9 @@ delay_map = "0.3.0"
ethereum-types = { version = "0.14.1", optional = true } ethereum-types = { version = "0.14.1", optional = true }
operation_pool = { path = "../operation_pool" } operation_pool = { path = "../operation_pool" }
execution_layer = { path = "../execution_layer" } execution_layer = { path = "../execution_layer" }
beacon_processor = { path = "../beacon_processor" }
parking_lot = "0.12.0"
environment = { path = "../../lighthouse/environment" }
[features] [features]
fork_from_env = ["beacon_chain/fork_from_env"] fork_from_env = ["beacon_chain/fork_from_env"]

View File

@ -1,51 +0,0 @@
use super::work_reprocessing_queue::ReprocessQueueMessage;
use crate::{service::NetworkMessage, sync::SyncMessage};
use beacon_chain::{BeaconChain, BeaconChainTypes};
use slog::{debug, Logger};
use std::sync::Arc;
use tokio::sync::mpsc;
mod gossip_methods;
mod rpc_methods;
mod sync_methods;
pub use gossip_methods::{GossipAggregatePackage, GossipAttestationPackage};
pub use sync_methods::ChainSegmentProcessId;
pub(crate) const FUTURE_SLOT_TOLERANCE: u64 = 1;
/// Contains the context necessary to import blocks, attestations, etc to the beacon chain.
pub struct Worker<T: BeaconChainTypes> {
pub chain: Arc<BeaconChain<T>>,
pub network_tx: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
pub sync_tx: mpsc::UnboundedSender<SyncMessage<T::EthSpec>>,
pub log: Logger,
}
impl<T: BeaconChainTypes> Worker<T> {
/// Send a message to `sync_tx`.
///
/// Creates a log if there is an internal error.
fn send_sync_message(&self, message: SyncMessage<T::EthSpec>) {
self.sync_tx.send(message).unwrap_or_else(|e| {
debug!(self.log, "Could not send message to the sync service";
"error" => %e)
});
}
/// Send a message to `network_tx`.
///
/// Creates a log if there is an internal error.
fn send_network_message(&self, message: NetworkMessage<T::EthSpec>) {
self.network_tx.send(message).unwrap_or_else(|e| {
debug!(self.log, "Could not send message to the network service. Likely shutdown";
"error" => %e)
});
}
}
/// Contains the necessary items for a worker to do their job.
pub struct Toolbox<T: BeaconChainTypes> {
pub idle_tx: mpsc::Sender<()>,
pub work_reprocessing_tx: mpsc::Sender<ReprocessQueueMessage<T>>,
}

View File

@ -6,10 +6,10 @@ pub mod error;
#[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy
pub mod service; pub mod service;
mod beacon_processor;
#[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy
mod metrics; mod metrics;
mod nat; mod nat;
mod network_beacon_processor;
mod persisted_dht; mod persisted_dht;
mod router; mod router;
mod status; mod status;

View File

@ -49,47 +49,8 @@ lazy_static! {
/* /*
* Gossip processor * Gossip processor
*/ */
pub static ref BEACON_PROCESSOR_WORK_EVENTS_RX_COUNT: Result<IntCounterVec> = try_create_int_counter_vec(
"beacon_processor_work_events_rx_count",
"Count of work events received (but not necessarily processed)",
&["type"]
);
pub static ref BEACON_PROCESSOR_WORK_EVENTS_IGNORED_COUNT: Result<IntCounterVec> = try_create_int_counter_vec(
"beacon_processor_work_events_ignored_count",
"Count of work events purposefully ignored",
&["type"]
);
pub static ref BEACON_PROCESSOR_WORK_EVENTS_STARTED_COUNT: Result<IntCounterVec> = try_create_int_counter_vec(
"beacon_processor_work_events_started_count",
"Count of work events which have been started by a worker",
&["type"]
);
pub static ref BEACON_PROCESSOR_WORKER_TIME: Result<HistogramVec> = try_create_histogram_vec(
"beacon_processor_worker_time",
"Time taken for a worker to fully process some parcel of work.",
&["type"]
);
pub static ref BEACON_PROCESSOR_WORKERS_SPAWNED_TOTAL: Result<IntCounter> = try_create_int_counter(
"beacon_processor_workers_spawned_total",
"The number of workers ever spawned by the gossip processing pool."
);
pub static ref BEACON_PROCESSOR_WORKERS_ACTIVE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_workers_active_total",
"Count of active workers in the gossip processing pool."
);
pub static ref BEACON_PROCESSOR_IDLE_EVENTS_TOTAL: Result<IntCounter> = try_create_int_counter(
"beacon_processor_idle_events_total",
"Count of idle events processed by the gossip processor manager."
);
pub static ref BEACON_PROCESSOR_EVENT_HANDLING_SECONDS: Result<Histogram> = try_create_histogram(
"beacon_processor_event_handling_seconds",
"Time spent handling a new message and allocating it to a queue or worker."
);
// Gossip blocks. // Gossip blocks.
pub static ref BEACON_PROCESSOR_GOSSIP_BLOCK_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_gossip_block_queue_total",
"Count of blocks from gossip waiting to be verified."
);
pub static ref BEACON_PROCESSOR_GOSSIP_BLOCK_VERIFIED_TOTAL: Result<IntCounter> = try_create_int_counter( pub static ref BEACON_PROCESSOR_GOSSIP_BLOCK_VERIFIED_TOTAL: Result<IntCounter> = try_create_int_counter(
"beacon_processor_gossip_block_verified_total", "beacon_processor_gossip_block_verified_total",
"Total number of gossip blocks verified for propagation." "Total number of gossip blocks verified for propagation."
@ -106,11 +67,6 @@ lazy_static! {
"beacon_processor_gossip_block_early_seconds", "beacon_processor_gossip_block_early_seconds",
"Whenever a gossip block is received early this metrics is set to how early that block was." "Whenever a gossip block is received early this metrics is set to how early that block was."
); );
// Gossip blobs.
pub static ref BEACON_PROCESSOR_GOSSIP_BLOB_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_gossip_blob_queue_total",
"Count of blocks from gossip waiting to be verified."
);
pub static ref BEACON_PROCESSOR_GOSSIP_BLOB_VERIFIED_TOTAL: Result<IntCounter> = try_create_int_counter( pub static ref BEACON_PROCESSOR_GOSSIP_BLOB_VERIFIED_TOTAL: Result<IntCounter> = try_create_int_counter(
"beacon_processor_gossip_blob_verified_total", "beacon_processor_gossip_blob_verified_total",
"Total number of gossip blob verified for propagation." "Total number of gossip blob verified for propagation."
@ -120,10 +76,6 @@ lazy_static! {
"Total number of gossip blobs imported to fork choice, etc." "Total number of gossip blobs imported to fork choice, etc."
); );
// Gossip Exits. // Gossip Exits.
pub static ref BEACON_PROCESSOR_EXIT_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_exit_queue_total",
"Count of exits from gossip waiting to be verified."
);
pub static ref BEACON_PROCESSOR_EXIT_VERIFIED_TOTAL: Result<IntCounter> = try_create_int_counter( pub static ref BEACON_PROCESSOR_EXIT_VERIFIED_TOTAL: Result<IntCounter> = try_create_int_counter(
"beacon_processor_exit_verified_total", "beacon_processor_exit_verified_total",
"Total number of voluntary exits verified for propagation." "Total number of voluntary exits verified for propagation."
@ -133,10 +85,6 @@ lazy_static! {
"Total number of voluntary exits imported to the op pool." "Total number of voluntary exits imported to the op pool."
); );
// Gossip proposer slashings. // Gossip proposer slashings.
pub static ref BEACON_PROCESSOR_PROPOSER_SLASHING_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_proposer_slashing_queue_total",
"Count of proposer slashings from gossip waiting to be verified."
);
pub static ref BEACON_PROCESSOR_PROPOSER_SLASHING_VERIFIED_TOTAL: Result<IntCounter> = try_create_int_counter( pub static ref BEACON_PROCESSOR_PROPOSER_SLASHING_VERIFIED_TOTAL: Result<IntCounter> = try_create_int_counter(
"beacon_processor_proposer_slashing_verified_total", "beacon_processor_proposer_slashing_verified_total",
"Total number of proposer slashings verified for propagation." "Total number of proposer slashings verified for propagation."
@ -146,10 +94,6 @@ lazy_static! {
"Total number of proposer slashings imported to the op pool." "Total number of proposer slashings imported to the op pool."
); );
// Gossip attester slashings. // Gossip attester slashings.
pub static ref BEACON_PROCESSOR_ATTESTER_SLASHING_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_attester_slashing_queue_total",
"Count of attester slashings from gossip waiting to be verified."
);
pub static ref BEACON_PROCESSOR_ATTESTER_SLASHING_VERIFIED_TOTAL: Result<IntCounter> = try_create_int_counter( pub static ref BEACON_PROCESSOR_ATTESTER_SLASHING_VERIFIED_TOTAL: Result<IntCounter> = try_create_int_counter(
"beacon_processor_attester_slashing_verified_total", "beacon_processor_attester_slashing_verified_total",
"Total number of attester slashings verified for propagation." "Total number of attester slashings verified for propagation."
@ -159,10 +103,6 @@ lazy_static! {
"Total number of attester slashings imported to the op pool." "Total number of attester slashings imported to the op pool."
); );
// Gossip BLS to execution changes. // Gossip BLS to execution changes.
pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_bls_to_execution_change_queue_total",
"Count of address changes from gossip waiting to be verified."
);
pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_VERIFIED_TOTAL: Result<IntCounter> = try_create_int_counter( pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_VERIFIED_TOTAL: Result<IntCounter> = try_create_int_counter(
"beacon_processor_bls_to_execution_change_verified_total", "beacon_processor_bls_to_execution_change_verified_total",
"Total number of address changes verified for propagation." "Total number of address changes verified for propagation."
@ -171,33 +111,20 @@ lazy_static! {
"beacon_processor_bls_to_execution_change_imported_total", "beacon_processor_bls_to_execution_change_imported_total",
"Total number of address changes imported to the op pool." "Total number of address changes imported to the op pool."
); );
}
// Need to split up this `lazy_static!` due to recursion limits.
lazy_static! {
// Rpc blocks. // Rpc blocks.
pub static ref BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_rpc_block_queue_total",
"Count of blocks from the rpc waiting to be verified."
);
pub static ref BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL: Result<IntCounter> = try_create_int_counter( pub static ref BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL: Result<IntCounter> = try_create_int_counter(
"beacon_processor_rpc_block_imported_total", "beacon_processor_rpc_block_imported_total",
"Total number of gossip blocks imported to fork choice, etc." "Total number of gossip blocks imported to fork choice, etc."
); );
// Rpc blobs.
pub static ref BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_rpc_blob_queue_total",
"Count of blobs from the rpc waiting to be verified."
);
pub static ref BEACON_PROCESSOR_RPC_BLOB_IMPORTED_TOTAL: Result<IntCounter> = try_create_int_counter( pub static ref BEACON_PROCESSOR_RPC_BLOB_IMPORTED_TOTAL: Result<IntCounter> = try_create_int_counter(
"beacon_processor_rpc_blob_imported_total", "beacon_processor_rpc_blob_imported_total",
"Total number of gossip blobs imported." "Total number of gossip blobs imported."
); );
// Chain segments. // Chain segments.
pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_chain_segment_queue_total",
"Count of chain segments from the rpc waiting to be verified."
);
pub static ref BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_backfill_chain_segment_queue_total",
"Count of backfill chain segments from the rpc waiting to be verified."
);
pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_SUCCESS_TOTAL: Result<IntCounter> = try_create_int_counter( pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_SUCCESS_TOTAL: Result<IntCounter> = try_create_int_counter(
"beacon_processor_chain_segment_success_total", "beacon_processor_chain_segment_success_total",
"Total number of chain segments successfully processed." "Total number of chain segments successfully processed."
@ -215,10 +142,6 @@ lazy_static! {
"Total number of backfill chain segments that failed processing." "Total number of backfill chain segments that failed processing."
); );
// Unaggregated attestations. // Unaggregated attestations.
pub static ref BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_unaggregated_attestation_queue_total",
"Count of unagg. attestations waiting to be processed."
);
pub static ref BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_VERIFIED_TOTAL: Result<IntCounter> = try_create_int_counter( pub static ref BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_VERIFIED_TOTAL: Result<IntCounter> = try_create_int_counter(
"beacon_processor_unaggregated_attestation_verified_total", "beacon_processor_unaggregated_attestation_verified_total",
"Total number of unaggregated attestations verified for gossip." "Total number of unaggregated attestations verified for gossip."
@ -232,10 +155,6 @@ lazy_static! {
"Total number of unaggregated attestations that referenced an unknown block and were re-queued." "Total number of unaggregated attestations that referenced an unknown block and were re-queued."
); );
// Aggregated attestations. // Aggregated attestations.
pub static ref BEACON_PROCESSOR_AGGREGATED_ATTESTATION_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_aggregated_attestation_queue_total",
"Count of agg. attestations waiting to be processed."
);
pub static ref BEACON_PROCESSOR_AGGREGATED_ATTESTATION_VERIFIED_TOTAL: Result<IntCounter> = try_create_int_counter( pub static ref BEACON_PROCESSOR_AGGREGATED_ATTESTATION_VERIFIED_TOTAL: Result<IntCounter> = try_create_int_counter(
"beacon_processor_aggregated_attestation_verified_total", "beacon_processor_aggregated_attestation_verified_total",
"Total number of aggregated attestations verified for gossip." "Total number of aggregated attestations verified for gossip."
@ -249,10 +168,6 @@ lazy_static! {
"Total number of aggregated attestations that referenced an unknown block and were re-queued." "Total number of aggregated attestations that referenced an unknown block and were re-queued."
); );
// Sync committee messages. // Sync committee messages.
pub static ref BEACON_PROCESSOR_SYNC_MESSAGE_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_sync_message_queue_total",
"Count of sync committee messages waiting to be processed."
);
pub static ref BEACON_PROCESSOR_SYNC_MESSAGE_VERIFIED_TOTAL: Result<IntCounter> = try_create_int_counter( pub static ref BEACON_PROCESSOR_SYNC_MESSAGE_VERIFIED_TOTAL: Result<IntCounter> = try_create_int_counter(
"beacon_processor_sync_message_verified_total", "beacon_processor_sync_message_verified_total",
"Total number of sync committee messages verified for gossip." "Total number of sync committee messages verified for gossip."
@ -262,10 +177,6 @@ lazy_static! {
"Total number of sync committee messages imported to fork choice, etc." "Total number of sync committee messages imported to fork choice, etc."
); );
// Sync contribution. // Sync contribution.
pub static ref BEACON_PROCESSOR_SYNC_CONTRIBUTION_QUEUE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_processor_sync_contribution_queue_total",
"Count of sync committee contributions waiting to be processed."
);
pub static ref BEACON_PROCESSOR_SYNC_CONTRIBUTION_VERIFIED_TOTAL: Result<IntCounter> = try_create_int_counter( pub static ref BEACON_PROCESSOR_SYNC_CONTRIBUTION_VERIFIED_TOTAL: Result<IntCounter> = try_create_int_counter(
"beacon_processor_sync_contribution_verified_total", "beacon_processor_sync_contribution_verified_total",
"Total number of sync committee contributions verified for gossip." "Total number of sync committee contributions verified for gossip."
@ -416,35 +327,9 @@ lazy_static! {
"Count of times when a gossip blob arrived from the network later than the attestation deadline.", "Count of times when a gossip blob arrived from the network later than the attestation deadline.",
); );
/*
* Attestation reprocessing queue metrics.
*/
pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL: Result<IntGaugeVec> =
try_create_int_gauge_vec(
"beacon_processor_reprocessing_queue_total",
"Count of items in a reprocessing queue.",
&["type"]
);
pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_ATTESTATIONS: Result<IntCounter> = try_create_int_counter(
"beacon_processor_reprocessing_queue_expired_attestations",
"Number of queued attestations which have expired before a matching block has been found."
);
pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_ATTESTATIONS: Result<IntCounter> = try_create_int_counter(
"beacon_processor_reprocessing_queue_matched_attestations",
"Number of queued attestations where as matching block has been imported."
);
/* /*
* Light client update reprocessing queue metrics. * Light client update reprocessing queue metrics.
*/ */
pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_OPTIMISTIC_UPDATES: Result<IntCounter> = try_create_int_counter(
"beacon_processor_reprocessing_queue_expired_optimistic_updates",
"Number of queued light client optimistic updates which have expired before a matching block has been found."
);
pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_OPTIMISTIC_UPDATES: Result<IntCounter> = try_create_int_counter(
"beacon_processor_reprocessing_queue_matched_optimistic_updates",
"Number of queued light client optimistic updates where as matching block has been imported."
);
pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_SENT_OPTIMISTIC_UPDATES: Result<IntCounter> = try_create_int_counter( pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_SENT_OPTIMISTIC_UPDATES: Result<IntCounter> = try_create_int_counter(
"beacon_processor_reprocessing_queue_sent_optimistic_updates", "beacon_processor_reprocessing_queue_sent_optimistic_updates",
"Number of queued light client optimistic updates where as matching block has been imported." "Number of queued light client optimistic updates where as matching block has been imported."

View File

@ -1,6 +1,12 @@
use crate::{metrics, service::NetworkMessage, sync::SyncMessage}; use crate::{
metrics,
network_beacon_processor::{InvalidBlockStorage, NetworkBeaconProcessor},
service::NetworkMessage,
sync::SyncMessage,
};
use beacon_chain::blob_verification::{AsBlock, BlobError, BlockWrapper, GossipVerifiedBlob}; use beacon_chain::blob_verification::AsBlock;
use beacon_chain::blob_verification::{BlobError, GossipVerifiedBlob};
use beacon_chain::store::Error; use beacon_chain::store::Error;
use beacon_chain::{ use beacon_chain::{
attestation_verification::{self, Error as AttnError, VerifiedAttestation}, attestation_verification::{self, Error as AttnError, VerifiedAttestation},
@ -20,6 +26,7 @@ use ssz::Encode;
use std::fs; use std::fs;
use std::io::Write; use std::io::Write;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH}; use std::time::{Duration, SystemTime, UNIX_EPOCH};
use store::hot_cold_store::HotColdDBError; use store::hot_cold_store::HotColdDBError;
use tokio::sync::mpsc; use tokio::sync::mpsc;
@ -30,14 +37,13 @@ use types::{
Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId,
}; };
use super::{ use beacon_processor::{
super::work_reprocessing_queue::{ work_reprocessing_queue::{
QueuedAggregate, QueuedGossipBlock, QueuedLightClientUpdate, QueuedUnaggregate, QueuedAggregate, QueuedGossipBlock, QueuedLightClientUpdate, QueuedUnaggregate,
ReprocessQueueMessage, ReprocessQueueMessage,
}, },
Worker, DuplicateCache, GossipAggregatePackage, GossipAttestationPackage,
}; };
use crate::beacon_processor::{DuplicateCache, InvalidBlockStorage};
/// Set to `true` to introduce stricter penalties for peers who send some types of late consensus /// Set to `true` to introduce stricter penalties for peers who send some types of late consensus
/// messages. /// messages.
@ -144,65 +150,7 @@ impl<T: EthSpec> FailedAtt<T> {
} }
} }
/// Items required to verify a batch of unaggregated gossip attestations. impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
#[derive(Debug)]
pub struct GossipAttestationPackage<E: EthSpec> {
message_id: MessageId,
peer_id: PeerId,
attestation: Box<Attestation<E>>,
subnet_id: SubnetId,
should_import: bool,
seen_timestamp: Duration,
}
impl<E: EthSpec> GossipAttestationPackage<E> {
pub fn new(
message_id: MessageId,
peer_id: PeerId,
attestation: Box<Attestation<E>>,
subnet_id: SubnetId,
should_import: bool,
seen_timestamp: Duration,
) -> Self {
Self {
message_id,
peer_id,
attestation,
subnet_id,
should_import,
seen_timestamp,
}
}
}
/// Items required to verify a batch of aggregated gossip attestations.
#[derive(Debug)]
pub struct GossipAggregatePackage<E: EthSpec> {
message_id: MessageId,
peer_id: PeerId,
aggregate: Box<SignedAggregateAndProof<E>>,
beacon_block_root: Hash256,
seen_timestamp: Duration,
}
impl<E: EthSpec> GossipAggregatePackage<E> {
pub fn new(
message_id: MessageId,
peer_id: PeerId,
aggregate: Box<SignedAggregateAndProof<E>>,
seen_timestamp: Duration,
) -> Self {
Self {
message_id,
peer_id,
beacon_block_root: aggregate.message.aggregate.data.beacon_block_root,
aggregate,
seen_timestamp,
}
}
}
impl<T: BeaconChainTypes> Worker<T> {
/* Auxiliary functions */ /* Auxiliary functions */
/// Penalizes a peer for misbehaviour. /// Penalizes a peer for misbehaviour.
@ -245,13 +193,13 @@ impl<T: BeaconChainTypes> Worker<T> {
/// Raises a log if there are errors. /// Raises a log if there are errors.
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
pub fn process_gossip_attestation( pub fn process_gossip_attestation(
self, self: Arc<Self>,
message_id: MessageId, message_id: MessageId,
peer_id: PeerId, peer_id: PeerId,
attestation: Box<Attestation<T::EthSpec>>, attestation: Box<Attestation<T::EthSpec>>,
subnet_id: SubnetId, subnet_id: SubnetId,
should_import: bool, should_import: bool,
reprocess_tx: Option<mpsc::Sender<ReprocessQueueMessage<T>>>, reprocess_tx: Option<mpsc::Sender<ReprocessQueueMessage>>,
seen_timestamp: Duration, seen_timestamp: Duration,
) { ) {
let result = match self let result = match self
@ -277,9 +225,9 @@ impl<T: BeaconChainTypes> Worker<T> {
} }
pub fn process_gossip_attestation_batch( pub fn process_gossip_attestation_batch(
self, self: Arc<Self>,
packages: Vec<GossipAttestationPackage<T::EthSpec>>, packages: Vec<GossipAttestationPackage<T::EthSpec>>,
reprocess_tx: Option<mpsc::Sender<ReprocessQueueMessage<T>>>, reprocess_tx: Option<mpsc::Sender<ReprocessQueueMessage>>,
) { ) {
let attestations_and_subnets = packages let attestations_and_subnets = packages
.iter() .iter()
@ -348,12 +296,12 @@ impl<T: BeaconChainTypes> Worker<T> {
// cant' be mixed-up) and creating a struct would result in more complexity. // cant' be mixed-up) and creating a struct would result in more complexity.
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
fn process_gossip_attestation_result( fn process_gossip_attestation_result(
&self, self: &Arc<Self>,
result: Result<VerifiedUnaggregate<T>, RejectedUnaggregate<T::EthSpec>>, result: Result<VerifiedUnaggregate<T>, RejectedUnaggregate<T::EthSpec>>,
message_id: MessageId, message_id: MessageId,
peer_id: PeerId, peer_id: PeerId,
subnet_id: SubnetId, subnet_id: SubnetId,
reprocess_tx: Option<mpsc::Sender<ReprocessQueueMessage<T>>>, reprocess_tx: Option<mpsc::Sender<ReprocessQueueMessage>>,
should_import: bool, should_import: bool,
seen_timestamp: Duration, seen_timestamp: Duration,
) { ) {
@ -456,11 +404,11 @@ impl<T: BeaconChainTypes> Worker<T> {
/// ///
/// Raises a log if there are errors. /// Raises a log if there are errors.
pub fn process_gossip_aggregate( pub fn process_gossip_aggregate(
self, self: Arc<Self>,
message_id: MessageId, message_id: MessageId,
peer_id: PeerId, peer_id: PeerId,
aggregate: Box<SignedAggregateAndProof<T::EthSpec>>, aggregate: Box<SignedAggregateAndProof<T::EthSpec>>,
reprocess_tx: Option<mpsc::Sender<ReprocessQueueMessage<T>>>, reprocess_tx: Option<mpsc::Sender<ReprocessQueueMessage>>,
seen_timestamp: Duration, seen_timestamp: Duration,
) { ) {
let beacon_block_root = aggregate.message.aggregate.data.beacon_block_root; let beacon_block_root = aggregate.message.aggregate.data.beacon_block_root;
@ -490,9 +438,9 @@ impl<T: BeaconChainTypes> Worker<T> {
} }
pub fn process_gossip_aggregate_batch( pub fn process_gossip_aggregate_batch(
self, self: Arc<Self>,
packages: Vec<GossipAggregatePackage<T::EthSpec>>, packages: Vec<GossipAggregatePackage<T::EthSpec>>,
reprocess_tx: Option<mpsc::Sender<ReprocessQueueMessage<T>>>, reprocess_tx: Option<mpsc::Sender<ReprocessQueueMessage>>,
) { ) {
let aggregates = packages.iter().map(|package| package.aggregate.as_ref()); let aggregates = packages.iter().map(|package| package.aggregate.as_ref());
@ -555,12 +503,12 @@ impl<T: BeaconChainTypes> Worker<T> {
} }
fn process_gossip_aggregate_result( fn process_gossip_aggregate_result(
&self, self: &Arc<Self>,
result: Result<VerifiedAggregate<T>, RejectedAggregate<T::EthSpec>>, result: Result<VerifiedAggregate<T>, RejectedAggregate<T::EthSpec>>,
beacon_block_root: Hash256, beacon_block_root: Hash256,
message_id: MessageId, message_id: MessageId,
peer_id: PeerId, peer_id: PeerId,
reprocess_tx: Option<mpsc::Sender<ReprocessQueueMessage<T>>>, reprocess_tx: Option<mpsc::Sender<ReprocessQueueMessage>>,
seen_timestamp: Duration, seen_timestamp: Duration,
) { ) {
match result { match result {
@ -653,7 +601,7 @@ impl<T: BeaconChainTypes> Worker<T> {
// TODO: docs // TODO: docs
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
pub async fn process_gossip_blob( pub async fn process_gossip_blob(
self, self: &Arc<Self>,
message_id: MessageId, message_id: MessageId,
peer_id: PeerId, peer_id: PeerId,
_peer_client: Client, _peer_client: Client,
@ -752,15 +700,15 @@ impl<T: BeaconChainTypes> Worker<T> {
} }
pub async fn process_gossip_verified_blob( pub async fn process_gossip_verified_blob(
self, self: &Arc<Self>,
peer_id: PeerId, peer_id: PeerId,
verified_blob: GossipVerifiedBlob<T::EthSpec>, verified_blob: GossipVerifiedBlob<T>,
// This value is not used presently, but it might come in handy for debugging. // This value is not used presently, but it might come in handy for debugging.
_seen_duration: Duration, _seen_duration: Duration,
) { ) {
let blob_root = verified_blob.block_root(); let blob_root = verified_blob.block_root();
let blob_slot = verified_blob.slot(); let blob_slot = verified_blob.slot();
let blob_clone = verified_blob.clone().to_blob(); let blob_index = verified_blob.id().index;
match self.chain.process_blob(verified_blob).await { match self.chain.process_blob(verified_blob).await {
Ok(AvailabilityProcessingStatus::Imported(_hash)) => { Ok(AvailabilityProcessingStatus::Imported(_hash)) => {
//TODO(sean) add metrics and logging //TODO(sean) add metrics and logging
@ -778,7 +726,7 @@ impl<T: BeaconChainTypes> Worker<T> {
"outcome" => ?err, "outcome" => ?err,
"block root" => ?blob_root, "block root" => ?blob_root,
"block slot" => blob_slot, "block slot" => blob_slot,
"blob index" => blob_clone.index, "blob index" => blob_index,
); );
self.gossip_penalize_peer( self.gossip_penalize_peer(
peer_id, peer_id,
@ -788,7 +736,6 @@ impl<T: BeaconChainTypes> Worker<T> {
trace!( trace!(
self.log, self.log,
"Invalid gossip blob ssz"; "Invalid gossip blob ssz";
"ssz" => format_args!("0x{}", hex::encode(blob_clone.as_ssz_bytes())),
); );
} }
} }
@ -803,12 +750,12 @@ impl<T: BeaconChainTypes> Worker<T> {
/// Raises a log if there are errors. /// Raises a log if there are errors.
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
pub async fn process_gossip_block( pub async fn process_gossip_block(
self, self: Arc<Self>,
message_id: MessageId, message_id: MessageId,
peer_id: PeerId, peer_id: PeerId,
peer_client: Client, peer_client: Client,
block: BlockWrapper<T::EthSpec>, block: Arc<SignedBeaconBlock<T::EthSpec>>,
reprocess_tx: mpsc::Sender<ReprocessQueueMessage<T>>, reprocess_tx: mpsc::Sender<ReprocessQueueMessage>,
duplicate_cache: DuplicateCache, duplicate_cache: DuplicateCache,
invalid_block_storage: InvalidBlockStorage, invalid_block_storage: InvalidBlockStorage,
seen_duration: Duration, seen_duration: Duration,
@ -852,12 +799,12 @@ impl<T: BeaconChainTypes> Worker<T> {
/// ///
/// Returns the `GossipVerifiedBlock` if verification passes and raises a log if there are errors. /// Returns the `GossipVerifiedBlock` if verification passes and raises a log if there are errors.
pub async fn process_gossip_unverified_block( pub async fn process_gossip_unverified_block(
&self, self: &Arc<Self>,
message_id: MessageId, message_id: MessageId,
peer_id: PeerId, peer_id: PeerId,
peer_client: Client, peer_client: Client,
block: BlockWrapper<T::EthSpec>, block: Arc<SignedBeaconBlock<T::EthSpec>>,
reprocess_tx: mpsc::Sender<ReprocessQueueMessage<T>>, reprocess_tx: mpsc::Sender<ReprocessQueueMessage>,
seen_duration: Duration, seen_duration: Duration,
) -> Option<GossipVerifiedBlock<T>> { ) -> Option<GossipVerifiedBlock<T>> {
let block_delay = let block_delay =
@ -881,7 +828,7 @@ impl<T: BeaconChainTypes> Worker<T> {
let block_root = if let Ok(verified_block) = &verification_result { let block_root = if let Ok(verified_block) = &verification_result {
verified_block.block_root verified_block.block_root
} else { } else {
block.as_block().canonical_root() block.canonical_root()
}; };
// Write the time the block was observed into delay cache. // Write the time the block was observed into delay cache.
@ -929,6 +876,20 @@ impl<T: BeaconChainTypes> Worker<T> {
verified_block verified_block
} }
Err(e @ BlockError::Slashable) => {
warn!(
self.log,
"Received equivocating block from peer";
"error" => ?e
);
/* punish peer for submitting an equivocation, but not too harshly as honest peers may conceivably forward equivocating blocks to us from time to time */
self.gossip_penalize_peer(
peer_id,
PeerAction::MidToleranceError,
"gossip_block_mid",
);
return None;
}
Err(BlockError::ParentUnknown(block)) => { Err(BlockError::ParentUnknown(block)) => {
debug!( debug!(
self.log, self.log,
@ -950,7 +911,6 @@ impl<T: BeaconChainTypes> Worker<T> {
Err(e @ BlockError::FutureSlot { .. }) Err(e @ BlockError::FutureSlot { .. })
| Err(e @ BlockError::WouldRevertFinalizedSlot { .. }) | Err(e @ BlockError::WouldRevertFinalizedSlot { .. })
| Err(e @ BlockError::BlockIsAlreadyKnown) | Err(e @ BlockError::BlockIsAlreadyKnown)
| Err(e @ BlockError::RepeatProposal { .. })
| Err(e @ BlockError::NotFinalizedDescendant { .. }) => { | Err(e @ BlockError::NotFinalizedDescendant { .. }) => {
debug!(self.log, "Could not verify block for gossip. Ignoring the block"; debug!(self.log, "Could not verify block for gossip. Ignoring the block";
"error" => %e); "error" => %e);
@ -1053,11 +1013,25 @@ impl<T: BeaconChainTypes> Worker<T> {
metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_REQUEUED_TOTAL); metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_REQUEUED_TOTAL);
let inner_self = self.clone();
let process_fn = Box::pin(async move {
let reprocess_tx = inner_self.reprocess_tx.clone();
let invalid_block_storage = inner_self.invalid_block_storage.clone();
inner_self
.process_gossip_verified_block(
peer_id,
verified_block,
reprocess_tx,
invalid_block_storage,
seen_duration,
)
.await;
});
if reprocess_tx if reprocess_tx
.try_send(ReprocessQueueMessage::EarlyBlock(QueuedGossipBlock { .try_send(ReprocessQueueMessage::EarlyBlock(QueuedGossipBlock {
peer_id, beacon_block_slot: block_slot,
block: Box::new(verified_block), beacon_block_root: block_root,
seen_timestamp: seen_duration, process_fn,
})) }))
.is_err() .is_err()
{ {
@ -1090,10 +1064,10 @@ impl<T: BeaconChainTypes> Worker<T> {
/// ///
/// Raises a log if there are errors. /// Raises a log if there are errors.
pub async fn process_gossip_verified_block( pub async fn process_gossip_verified_block(
self, self: Arc<Self>,
peer_id: PeerId, peer_id: PeerId,
verified_block: GossipVerifiedBlock<T>, verified_block: GossipVerifiedBlock<T>,
reprocess_tx: mpsc::Sender<ReprocessQueueMessage<T>>, reprocess_tx: mpsc::Sender<ReprocessQueueMessage>,
invalid_block_storage: InvalidBlockStorage, invalid_block_storage: InvalidBlockStorage,
// This value is not used presently, but it might come in handy for debugging. // This value is not used presently, but it might come in handy for debugging.
_seen_duration: Duration, _seen_duration: Duration,
@ -1103,7 +1077,12 @@ impl<T: BeaconChainTypes> Worker<T> {
let result = self let result = self
.chain .chain
.process_block(block_root, verified_block, NotifyExecutionLayer::Yes) .process_block(
block_root,
verified_block,
NotifyExecutionLayer::Yes,
|| Ok(()),
)
.await; .await;
match &result { match &result {
@ -1196,7 +1175,7 @@ impl<T: BeaconChainTypes> Worker<T> {
} }
pub fn process_gossip_voluntary_exit( pub fn process_gossip_voluntary_exit(
self, self: &Arc<Self>,
message_id: MessageId, message_id: MessageId,
peer_id: PeerId, peer_id: PeerId,
voluntary_exit: SignedVoluntaryExit, voluntary_exit: SignedVoluntaryExit,
@ -1254,7 +1233,7 @@ impl<T: BeaconChainTypes> Worker<T> {
} }
pub fn process_gossip_proposer_slashing( pub fn process_gossip_proposer_slashing(
self, self: &Arc<Self>,
message_id: MessageId, message_id: MessageId,
peer_id: PeerId, peer_id: PeerId,
proposer_slashing: ProposerSlashing, proposer_slashing: ProposerSlashing,
@ -1316,7 +1295,7 @@ impl<T: BeaconChainTypes> Worker<T> {
} }
pub fn process_gossip_attester_slashing( pub fn process_gossip_attester_slashing(
self, self: &Arc<Self>,
message_id: MessageId, message_id: MessageId,
peer_id: PeerId, peer_id: PeerId,
attester_slashing: AttesterSlashing<T::EthSpec>, attester_slashing: AttesterSlashing<T::EthSpec>,
@ -1370,7 +1349,7 @@ impl<T: BeaconChainTypes> Worker<T> {
} }
pub fn process_gossip_bls_to_execution_change( pub fn process_gossip_bls_to_execution_change(
self, self: &Arc<Self>,
message_id: MessageId, message_id: MessageId,
peer_id: PeerId, peer_id: PeerId,
bls_to_execution_change: SignedBlsToExecutionChange, bls_to_execution_change: SignedBlsToExecutionChange,
@ -1453,7 +1432,7 @@ impl<T: BeaconChainTypes> Worker<T> {
/// ///
/// Raises a log if there are errors. /// Raises a log if there are errors.
pub fn process_gossip_sync_committee_signature( pub fn process_gossip_sync_committee_signature(
self, self: &Arc<Self>,
message_id: MessageId, message_id: MessageId,
peer_id: PeerId, peer_id: PeerId,
sync_signature: SyncCommitteeMessage, sync_signature: SyncCommitteeMessage,
@ -1516,7 +1495,7 @@ impl<T: BeaconChainTypes> Worker<T> {
/// ///
/// Raises a log if there are errors. /// Raises a log if there are errors.
pub fn process_sync_committee_contribution( pub fn process_sync_committee_contribution(
self, self: &Arc<Self>,
message_id: MessageId, message_id: MessageId,
peer_id: PeerId, peer_id: PeerId,
sync_contribution: SignedContributionAndProof<T::EthSpec>, sync_contribution: SignedContributionAndProof<T::EthSpec>,
@ -1571,7 +1550,7 @@ impl<T: BeaconChainTypes> Worker<T> {
} }
pub fn process_gossip_finality_update( pub fn process_gossip_finality_update(
self, self: &Arc<Self>,
message_id: MessageId, message_id: MessageId,
peer_id: PeerId, peer_id: PeerId,
light_client_finality_update: LightClientFinalityUpdate<T::EthSpec>, light_client_finality_update: LightClientFinalityUpdate<T::EthSpec>,
@ -1637,11 +1616,11 @@ impl<T: BeaconChainTypes> Worker<T> {
} }
pub fn process_gossip_optimistic_update( pub fn process_gossip_optimistic_update(
self, self: &Arc<Self>,
message_id: MessageId, message_id: MessageId,
peer_id: PeerId, peer_id: PeerId,
light_client_optimistic_update: LightClientOptimisticUpdate<T::EthSpec>, light_client_optimistic_update: LightClientOptimisticUpdate<T::EthSpec>,
reprocess_tx: Option<mpsc::Sender<ReprocessQueueMessage<T>>>, reprocess_tx: Option<mpsc::Sender<ReprocessQueueMessage>>,
seen_timestamp: Duration, seen_timestamp: Duration,
) { ) {
match self.chain.verify_optimistic_update_for_gossip( match self.chain.verify_optimistic_update_for_gossip(
@ -1672,15 +1651,19 @@ impl<T: BeaconChainTypes> Worker<T> {
); );
if let Some(sender) = reprocess_tx { if let Some(sender) = reprocess_tx {
let processor = self.clone();
let msg = ReprocessQueueMessage::UnknownLightClientOptimisticUpdate( let msg = ReprocessQueueMessage::UnknownLightClientOptimisticUpdate(
QueuedLightClientUpdate { QueuedLightClientUpdate {
peer_id,
message_id,
light_client_optimistic_update: Box::new(
light_client_optimistic_update,
),
parent_root, parent_root,
seen_timestamp, process_fn: Box::new(move || {
processor.process_gossip_optimistic_update(
message_id,
peer_id,
light_client_optimistic_update,
None, // Do not reprocess this message again.
seen_timestamp,
)
}),
}, },
); );
@ -1769,11 +1752,11 @@ impl<T: BeaconChainTypes> Worker<T> {
/// Handle an error whilst verifying an `Attestation` or `SignedAggregateAndProof` from the /// Handle an error whilst verifying an `Attestation` or `SignedAggregateAndProof` from the
/// network. /// network.
fn handle_attestation_verification_failure( fn handle_attestation_verification_failure(
&self, self: &Arc<Self>,
peer_id: PeerId, peer_id: PeerId,
message_id: MessageId, message_id: MessageId,
failed_att: FailedAtt<T::EthSpec>, failed_att: FailedAtt<T::EthSpec>,
reprocess_tx: Option<mpsc::Sender<ReprocessQueueMessage<T>>>, reprocess_tx: Option<mpsc::Sender<ReprocessQueueMessage>>,
error: AttnError, error: AttnError,
seen_timestamp: Duration, seen_timestamp: Duration,
) { ) {
@ -2008,11 +1991,18 @@ impl<T: BeaconChainTypes> Worker<T> {
metrics::inc_counter( metrics::inc_counter(
&metrics::BEACON_PROCESSOR_AGGREGATED_ATTESTATION_REQUEUED_TOTAL, &metrics::BEACON_PROCESSOR_AGGREGATED_ATTESTATION_REQUEUED_TOTAL,
); );
let processor = self.clone();
ReprocessQueueMessage::UnknownBlockAggregate(QueuedAggregate { ReprocessQueueMessage::UnknownBlockAggregate(QueuedAggregate {
peer_id, beacon_block_root: *beacon_block_root,
message_id, process_fn: Box::new(move || {
attestation, processor.process_gossip_aggregate(
seen_timestamp, message_id,
peer_id,
attestation,
None, // Do not allow this attestation to be re-processed beyond this point.
seen_timestamp,
)
}),
}) })
} }
FailedAtt::Unaggregate { FailedAtt::Unaggregate {
@ -2024,13 +2014,20 @@ impl<T: BeaconChainTypes> Worker<T> {
metrics::inc_counter( metrics::inc_counter(
&metrics::BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_REQUEUED_TOTAL, &metrics::BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_REQUEUED_TOTAL,
); );
let processor = self.clone();
ReprocessQueueMessage::UnknownBlockUnaggregate(QueuedUnaggregate { ReprocessQueueMessage::UnknownBlockUnaggregate(QueuedUnaggregate {
peer_id, beacon_block_root: *beacon_block_root,
message_id, process_fn: Box::new(move || {
attestation, processor.process_gossip_attestation(
subnet_id, message_id,
should_import, peer_id,
seen_timestamp, attestation,
subnet_id,
should_import,
None, // Do not allow this attestation to be re-processed beyond this point.
seen_timestamp,
)
}),
}) })
} }
}; };

View File

@ -0,0 +1,680 @@
use crate::{
service::NetworkMessage,
sync::{manager::BlockProcessType, SyncMessage},
};
use beacon_chain::blob_verification::BlockWrapper;
use beacon_chain::{
builder::Witness, eth1_chain::CachingEth1Backend, test_utils::BeaconChainHarness, BeaconChain,
};
use beacon_chain::{BeaconChainTypes, NotifyExecutionLayer};
use beacon_processor::{
work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorSend, DuplicateCache,
GossipAggregatePackage, GossipAttestationPackage, Work, WorkEvent as BeaconWorkEvent,
MAX_SCHEDULED_WORK_QUEUE_LEN, MAX_WORK_EVENT_QUEUE_LEN,
};
use environment::null_logger;
use lighthouse_network::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest};
use lighthouse_network::{
rpc::{BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, StatusMessage},
Client, MessageId, NetworkGlobals, PeerId, PeerRequestId,
};
use slog::{debug, Logger};
use slot_clock::ManualSlotClock;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use store::MemoryStore;
use task_executor::test_utils::TestRuntime;
use task_executor::TaskExecutor;
use tokio::sync::mpsc::{self, error::TrySendError};
use types::*;
pub use sync_methods::ChainSegmentProcessId;
use types::blob_sidecar::FixedBlobSidecarList;
pub type Error<T> = TrySendError<BeaconWorkEvent<T>>;
mod gossip_methods;
mod rpc_methods;
mod sync_methods;
mod tests;
// Tolerance (in slots) applied when deciding whether a message references a
// future slot. NOTE(review): usage lives in the `gossip_methods`/`rpc_methods`
// submodules — confirm exact semantics there before relying on this value.
pub(crate) const FUTURE_SLOT_TOLERANCE: u64 = 1;
/// Defines if and where we will store the SSZ files of invalid blocks.
#[derive(Clone)]
pub enum InvalidBlockStorage {
    /// Persist invalid-block SSZ files inside the given directory.
    Enabled(PathBuf),
    /// Do not persist invalid blocks to disk.
    Disabled,
}
/// Provides an interface to a `BeaconProcessor` running in some other thread.
/// The wider `networking` crate should use this struct to interface with the
/// beacon processor.
pub struct NetworkBeaconProcessor<T: BeaconChainTypes> {
    /// Channel on which `WorkEvent`s are queued onto the beacon processor.
    pub beacon_processor_send: BeaconProcessorSend<T::EthSpec>,
    /// Cache handed to block-processing work to de-duplicate concurrent imports
    /// of the same block (cloned into `process_gossip_block`).
    pub duplicate_cache: DuplicateCache,
    /// Handle to the beacon chain used by the processing methods.
    pub chain: Arc<BeaconChain<T>>,
    /// Channel for sending messages to the network service.
    pub network_tx: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
    /// Channel for sending messages to the sync service.
    pub sync_tx: mpsc::UnboundedSender<SyncMessage<T::EthSpec>>,
    /// Channel into the re-processing queue, used to requeue work that arrived
    /// too early (e.g. attestations for unknown blocks).
    pub reprocess_tx: mpsc::Sender<ReprocessQueueMessage>,
    /// Shared network state (peers, sync state, etc.).
    pub network_globals: Arc<NetworkGlobals<T::EthSpec>>,
    /// Controls whether invalid blocks are written to disk for debugging.
    pub invalid_block_storage: InvalidBlockStorage,
    /// Executor used to spawn tasks for long-running RPC responses.
    pub executor: TaskExecutor,
    pub log: Logger,
}
impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
    /// Pushes `event` onto the beacon processor's queue, converting any channel
    /// error (e.g. full or closed queue) into this module's `Error` type.
    fn try_send(&self, event: BeaconWorkEvent<T::EthSpec>) -> Result<(), Error<T::EthSpec>> {
        self.beacon_processor_send
            .try_send(event)
            .map_err(Into::into)
    }

    /// Create a new `Work` event for some unaggregated attestation.
    ///
    /// The event carries both an individual-processing closure and a
    /// batch-processing closure; the beacon processor decides which to run.
    pub fn send_unaggregated_attestation(
        self: &Arc<Self>,
        message_id: MessageId,
        peer_id: PeerId,
        attestation: Attestation<T::EthSpec>,
        subnet_id: SubnetId,
        should_import: bool,
        seen_timestamp: Duration,
    ) -> Result<(), Error<T::EthSpec>> {
        // Define a closure for processing individual attestations.
        let processor = self.clone();
        let process_individual = move |package: GossipAttestationPackage<T::EthSpec>| {
            let reprocess_tx = processor.reprocess_tx.clone();
            processor.process_gossip_attestation(
                package.message_id,
                package.peer_id,
                package.attestation,
                package.subnet_id,
                package.should_import,
                Some(reprocess_tx),
                package.seen_timestamp,
            )
        };

        // Define a closure for processing batches of attestations.
        let processor = self.clone();
        let process_batch = move |attestations| {
            let reprocess_tx = processor.reprocess_tx.clone();
            processor.process_gossip_attestation_batch(attestations, Some(reprocess_tx))
        };

        // `drop_during_sync: true` — this event may be discarded while syncing.
        self.try_send(BeaconWorkEvent {
            drop_during_sync: true,
            work: Work::GossipAttestation {
                attestation: GossipAttestationPackage {
                    message_id,
                    peer_id,
                    attestation: Box::new(attestation),
                    subnet_id,
                    should_import,
                    seen_timestamp,
                },
                process_individual: Box::new(process_individual),
                process_batch: Box::new(process_batch),
            },
        })
    }

    /// Create a new `Work` event for some aggregated attestation.
    pub fn send_aggregated_attestation(
        self: &Arc<Self>,
        message_id: MessageId,
        peer_id: PeerId,
        aggregate: SignedAggregateAndProof<T::EthSpec>,
        seen_timestamp: Duration,
    ) -> Result<(), Error<T::EthSpec>> {
        // Define a closure for processing individual attestations.
        let processor = self.clone();
        let process_individual = move |package: GossipAggregatePackage<T::EthSpec>| {
            let reprocess_tx = processor.reprocess_tx.clone();
            processor.process_gossip_aggregate(
                package.message_id,
                package.peer_id,
                package.aggregate,
                Some(reprocess_tx),
                package.seen_timestamp,
            )
        };

        // Define a closure for processing batches of attestations.
        let processor = self.clone();
        let process_batch = move |aggregates| {
            let reprocess_tx = processor.reprocess_tx.clone();
            processor.process_gossip_aggregate_batch(aggregates, Some(reprocess_tx))
        };

        // The block root is extracted before `aggregate` is moved into the package.
        let beacon_block_root = aggregate.message.aggregate.data.beacon_block_root;
        self.try_send(BeaconWorkEvent {
            drop_during_sync: true,
            work: Work::GossipAggregate {
                aggregate: GossipAggregatePackage {
                    message_id,
                    peer_id,
                    aggregate: Box::new(aggregate),
                    beacon_block_root,
                    seen_timestamp,
                },
                process_individual: Box::new(process_individual),
                process_batch: Box::new(process_batch),
            },
        })
    }

    /// Create a new `Work` event for some block.
    pub fn send_gossip_beacon_block(
        self: &Arc<Self>,
        message_id: MessageId,
        peer_id: PeerId,
        peer_client: Client,
        block: Arc<SignedBeaconBlock<T::EthSpec>>,
        seen_timestamp: Duration,
    ) -> Result<(), Error<T::EthSpec>> {
        let processor = self.clone();
        let process_fn = async move {
            let reprocess_tx = processor.reprocess_tx.clone();
            let invalid_block_storage = processor.invalid_block_storage.clone();
            let duplicate_cache = processor.duplicate_cache.clone();
            processor
                .process_gossip_block(
                    message_id,
                    peer_id,
                    peer_client,
                    block,
                    reprocess_tx,
                    duplicate_cache,
                    invalid_block_storage,
                    seen_timestamp,
                )
                .await
        };

        // Blocks are never dropped during sync.
        self.try_send(BeaconWorkEvent {
            drop_during_sync: false,
            work: Work::GossipBlock(Box::pin(process_fn)),
        })
    }

    /// Create a new `Work` event for some blob sidecar.
    pub fn send_gossip_blob_sidecar(
        self: &Arc<Self>,
        message_id: MessageId,
        peer_id: PeerId,
        peer_client: Client,
        blob_index: u64,
        blob: SignedBlobSidecar<T::EthSpec>,
        seen_timestamp: Duration,
    ) -> Result<(), Error<T::EthSpec>> {
        let processor = self.clone();
        let process_fn = async move {
            processor
                .process_gossip_blob(
                    message_id,
                    peer_id,
                    peer_client,
                    blob_index,
                    blob,
                    seen_timestamp,
                )
                .await
        };

        self.try_send(BeaconWorkEvent {
            drop_during_sync: false,
            work: Work::GossipSignedBlobSidecar(Box::pin(process_fn)),
        })
    }

    /// Create a new `Work` event for some sync committee signature.
    pub fn send_gossip_sync_signature(
        self: &Arc<Self>,
        message_id: MessageId,
        peer_id: PeerId,
        sync_signature: SyncCommitteeMessage,
        subnet_id: SyncSubnetId,
        seen_timestamp: Duration,
    ) -> Result<(), Error<T::EthSpec>> {
        let processor = self.clone();
        let process_fn = move || {
            processor.process_gossip_sync_committee_signature(
                message_id,
                peer_id,
                sync_signature,
                subnet_id,
                seen_timestamp,
            )
        };

        self.try_send(BeaconWorkEvent {
            drop_during_sync: true,
            work: Work::GossipSyncSignature(Box::new(process_fn)),
        })
    }

    /// Create a new `Work` event for some sync committee contribution.
    pub fn send_gossip_sync_contribution(
        self: &Arc<Self>,
        message_id: MessageId,
        peer_id: PeerId,
        sync_contribution: SignedContributionAndProof<T::EthSpec>,
        seen_timestamp: Duration,
    ) -> Result<(), Error<T::EthSpec>> {
        let processor = self.clone();
        let process_fn = move || {
            processor.process_sync_committee_contribution(
                message_id,
                peer_id,
                sync_contribution,
                seen_timestamp,
            )
        };

        self.try_send(BeaconWorkEvent {
            drop_during_sync: true,
            work: Work::GossipSyncContribution(Box::new(process_fn)),
        })
    }

    /// Create a new `Work` event for some exit.
    pub fn send_gossip_voluntary_exit(
        self: &Arc<Self>,
        message_id: MessageId,
        peer_id: PeerId,
        voluntary_exit: Box<SignedVoluntaryExit>,
    ) -> Result<(), Error<T::EthSpec>> {
        let processor = self.clone();
        let process_fn =
            move || processor.process_gossip_voluntary_exit(message_id, peer_id, *voluntary_exit);

        self.try_send(BeaconWorkEvent {
            drop_during_sync: false,
            work: Work::GossipVoluntaryExit(Box::new(process_fn)),
        })
    }

    /// Create a new `Work` event for some proposer slashing.
    pub fn send_gossip_proposer_slashing(
        self: &Arc<Self>,
        message_id: MessageId,
        peer_id: PeerId,
        proposer_slashing: Box<ProposerSlashing>,
    ) -> Result<(), Error<T::EthSpec>> {
        let processor = self.clone();
        let process_fn = move || {
            processor.process_gossip_proposer_slashing(message_id, peer_id, *proposer_slashing)
        };

        self.try_send(BeaconWorkEvent {
            drop_during_sync: false,
            work: Work::GossipProposerSlashing(Box::new(process_fn)),
        })
    }

    /// Create a new `Work` event for some light client finality update.
    pub fn send_gossip_light_client_finality_update(
        self: &Arc<Self>,
        message_id: MessageId,
        peer_id: PeerId,
        light_client_finality_update: LightClientFinalityUpdate<T::EthSpec>,
        seen_timestamp: Duration,
    ) -> Result<(), Error<T::EthSpec>> {
        let processor = self.clone();
        let process_fn = move || {
            processor.process_gossip_finality_update(
                message_id,
                peer_id,
                light_client_finality_update,
                seen_timestamp,
            )
        };

        self.try_send(BeaconWorkEvent {
            drop_during_sync: true,
            work: Work::GossipLightClientFinalityUpdate(Box::new(process_fn)),
        })
    }

    /// Create a new `Work` event for some light client optimistic update.
    pub fn send_gossip_light_client_optimistic_update(
        self: &Arc<Self>,
        message_id: MessageId,
        peer_id: PeerId,
        light_client_optimistic_update: LightClientOptimisticUpdate<T::EthSpec>,
        seen_timestamp: Duration,
    ) -> Result<(), Error<T::EthSpec>> {
        let processor = self.clone();
        let process_fn = move || {
            // A reprocess channel is passed so the update can be requeued if
            // its parent block is not yet known.
            let reprocess_tx = processor.reprocess_tx.clone();
            processor.process_gossip_optimistic_update(
                message_id,
                peer_id,
                light_client_optimistic_update,
                Some(reprocess_tx),
                seen_timestamp,
            )
        };

        self.try_send(BeaconWorkEvent {
            drop_during_sync: true,
            work: Work::GossipLightClientOptimisticUpdate(Box::new(process_fn)),
        })
    }

    /// Create a new `Work` event for some attester slashing.
    pub fn send_gossip_attester_slashing(
        self: &Arc<Self>,
        message_id: MessageId,
        peer_id: PeerId,
        attester_slashing: Box<AttesterSlashing<T::EthSpec>>,
    ) -> Result<(), Error<T::EthSpec>> {
        let processor = self.clone();
        let process_fn = move || {
            processor.process_gossip_attester_slashing(message_id, peer_id, *attester_slashing)
        };

        self.try_send(BeaconWorkEvent {
            drop_during_sync: false,
            work: Work::GossipAttesterSlashing(Box::new(process_fn)),
        })
    }

    /// Create a new `Work` event for some BLS to execution change.
    pub fn send_gossip_bls_to_execution_change(
        self: &Arc<Self>,
        message_id: MessageId,
        peer_id: PeerId,
        bls_to_execution_change: Box<SignedBlsToExecutionChange>,
    ) -> Result<(), Error<T::EthSpec>> {
        let processor = self.clone();
        let process_fn = move || {
            processor.process_gossip_bls_to_execution_change(
                message_id,
                peer_id,
                *bls_to_execution_change,
            )
        };

        self.try_send(BeaconWorkEvent {
            drop_during_sync: false,
            work: Work::GossipBlsToExecutionChange(Box::new(process_fn)),
        })
    }

    /// Create a new `Work` event for some block, where the result from computation (if any) is
    /// sent to the other side of `result_tx`.
    pub fn send_rpc_beacon_block(
        self: &Arc<Self>,
        block_root: Hash256,
        block: BlockWrapper<T::EthSpec>,
        seen_timestamp: Duration,
        process_type: BlockProcessType,
    ) -> Result<(), Error<T::EthSpec>> {
        let process_fn = self.clone().generate_rpc_beacon_block_process_fn(
            block_root,
            block,
            seen_timestamp,
            process_type,
        );
        self.try_send(BeaconWorkEvent {
            drop_during_sync: false,
            work: Work::RpcBlock { process_fn },
        })
    }

    /// Create a new `Work` event for some blobs, where the result from computation (if any) is
    /// sent to the other side of `result_tx`.
    pub fn send_rpc_blobs(
        self: &Arc<Self>,
        block_root: Hash256,
        blobs: FixedBlobSidecarList<T::EthSpec>,
        seen_timestamp: Duration,
        process_type: BlockProcessType,
    ) -> Result<(), Error<T::EthSpec>> {
        let process_fn = self.clone().generate_rpc_blobs_process_fn(
            block_root,
            blobs,
            seen_timestamp,
            process_type,
        );
        self.try_send(BeaconWorkEvent {
            drop_during_sync: false,
            work: Work::RpcBlobs { process_fn },
        })
    }

    /// Create a new work event to import `blocks` as a beacon chain segment.
    pub fn send_chain_segment(
        self: &Arc<Self>,
        process_id: ChainSegmentProcessId,
        blocks: Vec<BlockWrapper<T::EthSpec>>,
    ) -> Result<(), Error<T::EthSpec>> {
        let is_backfill = matches!(&process_id, ChainSegmentProcessId::BackSyncBatchId { .. });
        let processor = self.clone();
        let process_fn = async move {
            // Skip execution-layer notification while finalized sync is in
            // progress; the sync state is read at processing time, not send time.
            let notify_execution_layer = if processor
                .network_globals
                .sync_state
                .read()
                .is_syncing_finalized()
            {
                NotifyExecutionLayer::No
            } else {
                NotifyExecutionLayer::Yes
            };
            processor
                .process_chain_segment(process_id, blocks, notify_execution_layer)
                .await;
        };
        let process_fn = Box::pin(process_fn);

        // Back-sync batches are dispatched with a different `Work` variant so
        // they can be rate-limited.
        let work = if is_backfill {
            Work::ChainSegmentBackfill(process_fn)
        } else {
            Work::ChainSegment(process_fn)
        };

        self.try_send(BeaconWorkEvent {
            drop_during_sync: false,
            work,
        })
    }

    /// Create a new work event to process `StatusMessage`s from the RPC network.
    pub fn send_status_message(
        self: &Arc<Self>,
        peer_id: PeerId,
        message: StatusMessage,
    ) -> Result<(), Error<T::EthSpec>> {
        let processor = self.clone();
        let process_fn = move || processor.process_status(peer_id, message);
        self.try_send(BeaconWorkEvent {
            drop_during_sync: false,
            work: Work::Status(Box::new(process_fn)),
        })
    }

    /// Create a new work event to process `BlocksByRangeRequest`s from the RPC network.
    pub fn send_blocks_by_range_request(
        self: &Arc<Self>,
        peer_id: PeerId,
        request_id: PeerRequestId,
        request: BlocksByRangeRequest,
    ) -> Result<(), Error<T::EthSpec>> {
        let processor = self.clone();
        // The closure receives a `send_idle_on_drop` guard from the beacon processor.
        let process_fn = move |send_idle_on_drop| {
            let executor = processor.executor.clone();
            processor.handle_blocks_by_range_request(
                executor,
                send_idle_on_drop,
                peer_id,
                request_id,
                request,
            )
        };

        self.try_send(BeaconWorkEvent {
            drop_during_sync: false,
            work: Work::BlocksByRangeRequest(Box::new(process_fn)),
        })
    }

    /// Create a new work event to process `BlocksByRootRequest`s from the RPC network.
    pub fn send_blocks_by_roots_request(
        self: &Arc<Self>,
        peer_id: PeerId,
        request_id: PeerRequestId,
        request: BlocksByRootRequest,
    ) -> Result<(), Error<T::EthSpec>> {
        let processor = self.clone();
        let process_fn = move |send_idle_on_drop| {
            let executor = processor.executor.clone();
            processor.handle_blocks_by_root_request(
                executor,
                send_idle_on_drop,
                peer_id,
                request_id,
                request,
            )
        };

        self.try_send(BeaconWorkEvent {
            drop_during_sync: false,
            work: Work::BlocksByRootsRequest(Box::new(process_fn)),
        })
    }

    /// Create a new work event to process `BlobsByRangeRequest`s from the RPC network.
    pub fn send_blobs_by_range_request(
        self: &Arc<Self>,
        peer_id: PeerId,
        request_id: PeerRequestId,
        request: BlobsByRangeRequest,
    ) -> Result<(), Error<T::EthSpec>> {
        let processor = self.clone();
        let process_fn = move |send_idle_on_drop| {
            processor.handle_blobs_by_range_request(send_idle_on_drop, peer_id, request_id, request)
        };

        self.try_send(BeaconWorkEvent {
            drop_during_sync: false,
            work: Work::BlobsByRangeRequest(Box::new(process_fn)),
        })
    }

    /// Create a new work event to process `BlobsByRootRequest`s from the RPC network.
    pub fn send_blobs_by_roots_request(
        self: &Arc<Self>,
        peer_id: PeerId,
        request_id: PeerRequestId,
        request: BlobsByRootRequest,
    ) -> Result<(), Error<T::EthSpec>> {
        let processor = self.clone();
        let process_fn = move |send_idle_on_drop| {
            processor.handle_blobs_by_root_request(send_idle_on_drop, peer_id, request_id, request)
        };

        self.try_send(BeaconWorkEvent {
            drop_during_sync: false,
            work: Work::BlobsByRootsRequest(Box::new(process_fn)),
        })
    }

    /// Create a new work event to process `LightClientBootstrap`s from the RPC network.
    pub fn send_lightclient_bootstrap_request(
        self: &Arc<Self>,
        peer_id: PeerId,
        request_id: PeerRequestId,
        request: LightClientBootstrapRequest,
    ) -> Result<(), Error<T::EthSpec>> {
        let processor = self.clone();
        let process_fn =
            move || processor.handle_light_client_bootstrap(peer_id, request_id, request);

        self.try_send(BeaconWorkEvent {
            drop_during_sync: true,
            work: Work::LightClientBootstrapRequest(Box::new(process_fn)),
        })
    }

    /// Send a message to `sync_tx`.
    ///
    /// Creates a log if there is an internal error.
    fn send_sync_message(&self, message: SyncMessage<T::EthSpec>) {
        self.sync_tx.send(message).unwrap_or_else(|e| {
            debug!(self.log, "Could not send message to the sync service";
                "error" => %e)
        });
    }

    /// Send a message to `network_tx`.
    ///
    /// Creates a log if there is an internal error.
    fn send_network_message(&self, message: NetworkMessage<T::EthSpec>) {
        self.network_tx.send(message).unwrap_or_else(|e| {
            debug!(self.log, "Could not send message to the network service. Likely shutdown";
                "error" => %e)
        });
    }
}
/// Beacon chain type used only for testing: manual slot clock, caching eth1
/// backend and in-memory hot/cold stores.
type TestBeaconChainType<E> =
    Witness<ManualSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>;
impl<E: EthSpec> NetworkBeaconProcessor<TestBeaconChainType<E>> {
    // Builds a mostly non-functional processor for tests, returning the
    // receiver end of the beacon-processor channel so callers can observe
    // which `WorkEvent`s were enqueued. All other channels are created but
    // their receiving halves are dropped immediately.
    pub fn null_for_testing(
        network_globals: Arc<NetworkGlobals<E>>,
    ) -> (Self, mpsc::Receiver<BeaconWorkEvent<E>>) {
        let log = null_logger().unwrap();

        // Channels: only the work-event receiver is kept alive.
        let (work_tx, work_rx) = mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN);
        let (reprocess_tx, _reprocess_rx) = mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN);
        let (network_tx, _network_rx) = mpsc::unbounded_channel();
        let (sync_tx, _sync_rx) = mpsc::unbounded_channel();

        // A minimal in-memory chain backing the processor.
        let harness: BeaconChainHarness<TestBeaconChainType<E>> =
            BeaconChainHarness::builder(E::default())
                .spec(E::default_spec())
                .deterministic_keypairs(8)
                .logger(log.clone())
                .fresh_ephemeral_store()
                .mock_execution_layer()
                .build();

        let runtime = TestRuntime::default();

        let processor = Self {
            beacon_processor_send: BeaconProcessorSend(work_tx),
            duplicate_cache: DuplicateCache::default(),
            chain: harness.chain,
            network_tx,
            sync_tx,
            reprocess_tx,
            network_globals,
            invalid_block_storage: InvalidBlockStorage::Disabled,
            executor: runtime.task_executor.clone(),
            log,
        };

        (processor, work_rx)
    }
}
#[cfg(test)]
mod test {
    #[test]
    fn queued_block_delay_is_sane() {
        // Sanity check: the extra delay applied to requeued blocks must be
        // strictly smaller than the maximum tolerated gossip clock disparity,
        // otherwise a requeued block could be delayed past the tolerance window.
        assert!(
            beacon_processor::work_reprocessing_queue::ADDITIONAL_QUEUED_BLOCK_DELAY
                < beacon_chain::MAXIMUM_GOSSIP_CLOCK_DISPARITY
        );
    }
}

View File

@ -1,8 +1,9 @@
use crate::beacon_processor::{worker::FUTURE_SLOT_TOLERANCE, SendOnDrop}; use crate::network_beacon_processor::{NetworkBeaconProcessor, FUTURE_SLOT_TOLERANCE};
use crate::service::NetworkMessage; use crate::service::NetworkMessage;
use crate::status::ToStatusMessage; use crate::status::ToStatusMessage;
use crate::sync::SyncMessage; use crate::sync::SyncMessage;
use beacon_chain::{BeaconChainError, BeaconChainTypes, HistoricalBlockError, WhenSlotSkipped}; use beacon_chain::{BeaconChainError, BeaconChainTypes, HistoricalBlockError, WhenSlotSkipped};
use beacon_processor::SendOnDrop;
use itertools::process_results; use itertools::process_results;
use lighthouse_network::rpc::methods::{ use lighthouse_network::rpc::methods::{
BlobsByRangeRequest, BlobsByRootRequest, MAX_REQUEST_BLOB_SIDECARS, MAX_REQUEST_BLOCKS_DENEB, BlobsByRangeRequest, BlobsByRootRequest, MAX_REQUEST_BLOB_SIDECARS, MAX_REQUEST_BLOCKS_DENEB,
@ -13,14 +14,13 @@ use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo
use slog::{debug, error, trace, warn}; use slog::{debug, error, trace, warn};
use slot_clock::SlotClock; use slot_clock::SlotClock;
use std::collections::{hash_map::Entry, HashMap}; use std::collections::{hash_map::Entry, HashMap};
use std::sync::Arc;
use task_executor::TaskExecutor; use task_executor::TaskExecutor;
use tokio_stream::StreamExt; use tokio_stream::StreamExt;
use types::blob_sidecar::BlobIdentifier; use types::blob_sidecar::BlobIdentifier;
use types::{light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, Hash256, Slot}; use types::{light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, Hash256, Slot};
use super::Worker; impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
impl<T: BeaconChainTypes> Worker<T> {
/* Auxiliary functions */ /* Auxiliary functions */
/// Disconnects and ban's a peer, sending a Goodbye request with the associated reason. /// Disconnects and ban's a peer, sending a Goodbye request with the associated reason.
@ -132,7 +132,7 @@ impl<T: BeaconChainTypes> Worker<T> {
/// Handle a `BlocksByRoot` request from the peer. /// Handle a `BlocksByRoot` request from the peer.
pub fn handle_blocks_by_root_request( pub fn handle_blocks_by_root_request(
self, self: Arc<Self>,
executor: TaskExecutor, executor: TaskExecutor,
send_on_drop: SendOnDrop, send_on_drop: SendOnDrop,
peer_id: PeerId, peer_id: PeerId,
@ -217,7 +217,7 @@ impl<T: BeaconChainTypes> Worker<T> {
} }
/// Handle a `BlobsByRoot` request from the peer. /// Handle a `BlobsByRoot` request from the peer.
pub fn handle_blobs_by_root_request( pub fn handle_blobs_by_root_request(
self, self: Arc<Self>,
send_on_drop: SendOnDrop, send_on_drop: SendOnDrop,
peer_id: PeerId, peer_id: PeerId,
request_id: PeerRequestId, request_id: PeerRequestId,
@ -297,7 +297,7 @@ impl<T: BeaconChainTypes> Worker<T> {
/// Handle a `BlocksByRoot` request from the peer. /// Handle a `BlocksByRoot` request from the peer.
pub fn handle_light_client_bootstrap( pub fn handle_light_client_bootstrap(
self, self: &Arc<Self>,
peer_id: PeerId, peer_id: PeerId,
request_id: PeerRequestId, request_id: PeerRequestId,
request: LightClientBootstrapRequest, request: LightClientBootstrapRequest,
@ -370,7 +370,7 @@ impl<T: BeaconChainTypes> Worker<T> {
/// Handle a `BlocksByRange` request from the peer. /// Handle a `BlocksByRange` request from the peer.
pub fn handle_blocks_by_range_request( pub fn handle_blocks_by_range_request(
self, self: Arc<Self>,
executor: TaskExecutor, executor: TaskExecutor,
send_on_drop: SendOnDrop, send_on_drop: SendOnDrop,
peer_id: PeerId, peer_id: PeerId,
@ -616,7 +616,7 @@ impl<T: BeaconChainTypes> Worker<T> {
/// Handle a `BlobsByRange` request from the peer. /// Handle a `BlobsByRange` request from the peer.
pub fn handle_blobs_by_range_request( pub fn handle_blobs_by_range_request(
self, self: Arc<Self>,
send_on_drop: SendOnDrop, send_on_drop: SendOnDrop,
peer_id: PeerId, peer_id: PeerId,
request_id: PeerRequestId, request_id: PeerRequestId,

View File

@ -1,24 +1,27 @@
use std::time::Duration;
use super::{super::work_reprocessing_queue::ReprocessQueueMessage, Worker};
use crate::beacon_processor::work_reprocessing_queue::QueuedRpcBlock;
use crate::beacon_processor::worker::FUTURE_SLOT_TOLERANCE;
use crate::beacon_processor::DuplicateCache;
use crate::metrics; use crate::metrics;
use crate::sync::manager::{BlockProcessType, ResponseType, SyncMessage}; use crate::network_beacon_processor::{NetworkBeaconProcessor, FUTURE_SLOT_TOLERANCE};
use crate::sync::{BatchProcessResult, ChainId}; use crate::sync::manager::ResponseType;
use beacon_chain::blob_verification::BlockWrapper; use crate::sync::BatchProcessResult;
use beacon_chain::blob_verification::{AsBlock, MaybeAvailableBlock}; use crate::sync::{
manager::{BlockProcessType, SyncMessage},
ChainId,
};
use beacon_chain::blob_verification::{AsBlock, BlockWrapper, MaybeAvailableBlock};
use beacon_chain::data_availability_checker::AvailabilityCheckError; use beacon_chain::data_availability_checker::AvailabilityCheckError;
use beacon_chain::AvailabilityProcessingStatus;
use beacon_chain::{ use beacon_chain::{
observed_block_producers::Error as ObserveError, validator_monitor::get_block_delay_ms, observed_block_producers::Error as ObserveError, validator_monitor::get_block_delay_ms,
BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError,
NotifyExecutionLayer, ChainSegmentResult, HistoricalBlockError, NotifyExecutionLayer,
};
use beacon_processor::{
work_reprocessing_queue::{QueuedRpcBlock, ReprocessQueueMessage},
AsyncFn, BlockingFn, DuplicateCache,
}; };
use lighthouse_network::PeerAction; use lighthouse_network::PeerAction;
use slog::{debug, error, info, warn}; use slog::{debug, error, info, warn};
use slot_clock::SlotClock; use slot_clock::SlotClock;
use std::sync::Arc;
use std::time::Duration;
use std::time::{SystemTime, UNIX_EPOCH}; use std::time::{SystemTime, UNIX_EPOCH};
use tokio::sync::mpsc; use tokio::sync::mpsc;
use types::blob_sidecar::FixedBlobSidecarList; use types::blob_sidecar::FixedBlobSidecarList;
@ -43,28 +46,72 @@ struct ChainSegmentFailed {
peer_action: Option<PeerAction>, peer_action: Option<PeerAction>,
} }
impl<T: BeaconChainTypes> Worker<T> { impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
/// Attempt to process a block received from a direct RPC request. /// Returns an async closure which processes a beacon block received via RPC.
#[allow(clippy::too_many_arguments)] ///
pub async fn process_rpc_block( /// This separate function was required to prevent a cycle during compiler
self, /// type checking.
pub fn generate_rpc_beacon_block_process_fn(
self: Arc<Self>,
block_root: Hash256, block_root: Hash256,
block: BlockWrapper<T::EthSpec>, block: BlockWrapper<T::EthSpec>,
seen_timestamp: Duration, seen_timestamp: Duration,
process_type: BlockProcessType, process_type: BlockProcessType,
reprocess_tx: mpsc::Sender<ReprocessQueueMessage<T>>, ) -> AsyncFn {
duplicate_cache: DuplicateCache, let process_fn = async move {
should_process: bool, let reprocess_tx = self.reprocess_tx.clone();
) { let duplicate_cache = self.duplicate_cache.clone();
if !should_process { self.process_rpc_block(
block_root,
block,
seen_timestamp,
process_type,
reprocess_tx,
duplicate_cache,
)
.await;
};
Box::pin(process_fn)
}
/// Returns the `process_fn` and `ignore_fn` required when requeuing an RPC block.
pub fn generate_rpc_beacon_block_fns(
self: Arc<Self>,
block_root: Hash256,
block: BlockWrapper<T::EthSpec>,
seen_timestamp: Duration,
process_type: BlockProcessType,
) -> (AsyncFn, BlockingFn) {
// An async closure which will import the block.
let process_fn = self.clone().generate_rpc_beacon_block_process_fn(
block_root,
block,
seen_timestamp,
process_type.clone(),
);
// A closure which will ignore the block.
let ignore_fn = move || {
// Sync handles these results // Sync handles these results
self.send_sync_message(SyncMessage::BlockComponentProcessed { self.send_sync_message(SyncMessage::BlockComponentProcessed {
process_type, process_type,
result: crate::sync::manager::BlockProcessingResult::Ignored, result: crate::sync::manager::BlockProcessingResult::Ignored,
response_type: crate::sync::manager::ResponseType::Block, response_type: crate::sync::manager::ResponseType::Block,
}); });
return; };
} (process_fn, Box::new(ignore_fn))
}
/// Attempt to process a block received from a direct RPC request.
#[allow(clippy::too_many_arguments)]
pub async fn process_rpc_block(
self: Arc<NetworkBeaconProcessor<T>>,
block_root: Hash256,
block: BlockWrapper<T::EthSpec>,
seen_timestamp: Duration,
process_type: BlockProcessType,
reprocess_tx: mpsc::Sender<ReprocessQueueMessage>,
duplicate_cache: DuplicateCache,
) {
// Check if the block is already being imported through another source // Check if the block is already being imported through another source
let handle = match duplicate_cache.check_and_insert(block_root) { let handle = match duplicate_cache.check_and_insert(block_root) {
Some(handle) => handle, Some(handle) => handle,
@ -75,13 +122,18 @@ impl<T: BeaconChainTypes> Worker<T> {
"action" => "sending rpc block to reprocessing queue", "action" => "sending rpc block to reprocessing queue",
"block_root" => %block_root, "block_root" => %block_root,
); );
// Send message to work reprocess queue to retry the block // Send message to work reprocess queue to retry the block
let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock { let (process_fn, ignore_fn) = self.clone().generate_rpc_beacon_block_fns(
block_root, block_root,
block: block.clone(), block,
process_type,
seen_timestamp, seen_timestamp,
should_process: true, process_type,
);
let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock {
beacon_block_root: block_root,
process_fn,
ignore_fn,
}); });
if reprocess_tx.try_send(reprocess_msg).is_err() { if reprocess_tx.try_send(reprocess_msg).is_err() {
@ -103,33 +155,21 @@ impl<T: BeaconChainTypes> Worker<T> {
}); });
// Checks if a block from this proposer is already known. // Checks if a block from this proposer is already known.
let proposal_already_known = || { let block_equivocates = || {
match self match self
.chain .chain
.observed_block_producers .observed_block_producers
.read() .read()
.proposer_has_been_observed(block.message()) .proposer_has_been_observed(block.message(), block.canonical_root())
{ {
Ok(is_observed) => is_observed, Ok(seen_status) => seen_status.is_slashable(),
// Both of these blocks will be rejected, so reject them now rather //Both of these blocks will be rejected, so reject them now rather
// than re-queuing them. // than re-queuing them.
Err(ObserveError::FinalizedBlock { .. }) Err(ObserveError::FinalizedBlock { .. })
| Err(ObserveError::ValidatorIndexTooHigh { .. }) => false, | Err(ObserveError::ValidatorIndexTooHigh { .. }) => false,
} }
}; };
// Returns `true` if the block is already known to fork choice. Notably,
// this will return `false` for blocks that we've already imported but
// ancestors of the finalized checkpoint. That should not be an issue
// for our use here since finalized blocks will always be late and won't
// be requeued anyway.
let block_is_already_known = || {
self.chain
.canonical_head
.fork_choice_read_lock()
.contains_block(&block_root)
};
// If we've already seen a block from this proposer *and* the block // If we've already seen a block from this proposer *and* the block
// arrived before the attestation deadline, requeue it to ensure it is // arrived before the attestation deadline, requeue it to ensure it is
// imported late enough that it won't receive a proposer boost. // imported late enough that it won't receive a proposer boost.
@ -137,7 +177,7 @@ impl<T: BeaconChainTypes> Worker<T> {
// Don't requeue blocks if they're already known to fork choice, just // Don't requeue blocks if they're already known to fork choice, just
// push them through to block processing so they can be handled through // push them through to block processing so they can be handled through
// the normal channels. // the normal channels.
if !block_is_late && proposal_already_known() && !block_is_already_known() { if !block_is_late && block_equivocates() {
debug!( debug!(
self.log, self.log,
"Delaying processing of duplicate RPC block"; "Delaying processing of duplicate RPC block";
@ -147,12 +187,16 @@ impl<T: BeaconChainTypes> Worker<T> {
); );
// Send message to work reprocess queue to retry the block // Send message to work reprocess queue to retry the block
let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock { let (process_fn, ignore_fn) = self.clone().generate_rpc_beacon_block_fns(
block_root, block_root,
block: block.clone(), block,
process_type,
seen_timestamp, seen_timestamp,
should_process: true, process_type,
);
let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock {
beacon_block_root: block_root,
process_fn,
ignore_fn,
}); });
if reprocess_tx.try_send(reprocess_msg).is_err() { if reprocess_tx.try_send(reprocess_msg).is_err() {
@ -171,7 +215,7 @@ impl<T: BeaconChainTypes> Worker<T> {
let result = self let result = self
.chain .chain
.process_block(block_root, block, NotifyExecutionLayer::Yes) .process_block(block_root, block, NotifyExecutionLayer::Yes, || Ok(()))
.await; .await;
metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL);
@ -213,8 +257,27 @@ impl<T: BeaconChainTypes> Worker<T> {
drop(handle); drop(handle);
} }
/// Returns an async closure which processes a list of blobs received via RPC.
///
/// This separate function was required to prevent a cycle during compiler
/// type checking.
pub fn generate_rpc_blobs_process_fn(
self: Arc<Self>,
block_root: Hash256,
block: FixedBlobSidecarList<T::EthSpec>,
seen_timestamp: Duration,
process_type: BlockProcessType,
) -> AsyncFn {
let process_fn = async move {
self.clone()
.process_rpc_blobs(block_root, block, seen_timestamp, process_type)
.await;
};
Box::pin(process_fn)
}
pub async fn process_rpc_blobs( pub async fn process_rpc_blobs(
self, self: Arc<NetworkBeaconProcessor<T>>,
block_root: Hash256, block_root: Hash256,
blobs: FixedBlobSidecarList<T::EthSpec>, blobs: FixedBlobSidecarList<T::EthSpec>,
_seen_timestamp: Duration, _seen_timestamp: Duration,
@ -243,6 +306,10 @@ impl<T: BeaconChainTypes> Worker<T> {
}); });
} }
pub fn send_delayed_lookup(&self, block_root: Hash256) {
self.send_sync_message(SyncMessage::MissingGossipBlockComponentsDelayed(block_root))
}
/// Attempt to import the chain segment (`blocks`) to the beacon chain, informing the sync /// Attempt to import the chain segment (`blocks`) to the beacon chain, informing the sync
/// thread if more blocks are needed to process it. /// thread if more blocks are needed to process it.
pub async fn process_chain_segment( pub async fn process_chain_segment(

View File

@ -1,21 +1,25 @@
#![cfg(test)] #![cfg(test)]
use crate::beacon_processor::work_reprocessing_queue::{ use crate::{
QUEUED_ATTESTATION_DELAY, QUEUED_RPC_BLOCK_DELAY, network_beacon_processor::{
ChainSegmentProcessId, DuplicateCache, InvalidBlockStorage, NetworkBeaconProcessor,
},
service::NetworkMessage,
sync::{manager::BlockProcessType, SyncMessage},
}; };
use crate::beacon_processor::*;
use crate::{service::NetworkMessage, sync::SyncMessage};
use beacon_chain::test_utils::{ use beacon_chain::test_utils::{
test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
}; };
use beacon_chain::{BeaconChain, ChainConfig, WhenSlotSkipped, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; use beacon_chain::{BeaconChain, ChainConfig, WhenSlotSkipped, MAXIMUM_GOSSIP_CLOCK_DISPARITY};
use beacon_processor::{work_reprocessing_queue::*, *};
use lighthouse_network::discovery::ConnectionId; use lighthouse_network::discovery::ConnectionId;
use lighthouse_network::rpc::methods::BlobsByRangeRequest;
use lighthouse_network::rpc::SubstreamId; use lighthouse_network::rpc::SubstreamId;
use lighthouse_network::{ use lighthouse_network::{
discv5::enr::{CombinedKey, EnrBuilder}, discv5::enr::{CombinedKey, EnrBuilder},
rpc::methods::{MetaData, MetaDataV2}, rpc::methods::{MetaData, MetaDataV2},
types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}, types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield},
MessageId, NetworkGlobals, PeerId, Response, Client, MessageId, NetworkGlobals, PeerId, Response,
}; };
use slot_clock::SlotClock; use slot_clock::SlotClock;
use std::cmp; use std::cmp;
@ -23,9 +27,11 @@ use std::iter::Iterator;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use tokio::sync::mpsc; use tokio::sync::mpsc;
use types::blob_sidecar::FixedBlobSidecarList;
use types::{ use types::{
Attestation, AttesterSlashing, Epoch, MainnetEthSpec, ProposerSlashing, SignedBeaconBlock, Attestation, AttesterSlashing, Epoch, Hash256, MainnetEthSpec, ProposerSlashing,
SignedBlobSidecarList, SignedVoluntaryExit, Slot, SubnetId, SignedAggregateAndProof, SignedBeaconBlock, SignedBlobSidecarList, SignedVoluntaryExit, Slot,
SubnetId,
}; };
type E = MainnetEthSpec; type E = MainnetEthSpec;
@ -53,11 +59,12 @@ struct TestRig {
attester_slashing: AttesterSlashing<E>, attester_slashing: AttesterSlashing<E>,
proposer_slashing: ProposerSlashing, proposer_slashing: ProposerSlashing,
voluntary_exit: SignedVoluntaryExit, voluntary_exit: SignedVoluntaryExit,
beacon_processor_tx: mpsc::Sender<WorkEvent<T>>, beacon_processor_tx: BeaconProcessorSend<E>,
work_journal_rx: mpsc::Receiver<&'static str>, work_journal_rx: mpsc::Receiver<&'static str>,
_network_rx: mpsc::UnboundedReceiver<NetworkMessage<E>>, _network_rx: mpsc::UnboundedReceiver<NetworkMessage<E>>,
_sync_rx: mpsc::UnboundedReceiver<SyncMessage<E>>, _sync_rx: mpsc::UnboundedReceiver<SyncMessage<E>>,
duplicate_cache: DuplicateCache, duplicate_cache: DuplicateCache,
network_beacon_processor: Arc<NetworkBeaconProcessor<T>>,
_harness: BeaconChainHarness<T>, _harness: BeaconChainHarness<T>,
} }
@ -66,7 +73,7 @@ struct TestRig {
impl Drop for TestRig { impl Drop for TestRig {
fn drop(&mut self) { fn drop(&mut self) {
// Causes the beacon processor to shutdown. // Causes the beacon processor to shutdown.
self.beacon_processor_tx = mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN).0; self.beacon_processor_tx = BeaconProcessorSend(mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN).0);
} }
} }
@ -172,6 +179,7 @@ impl TestRig {
let log = harness.logger().clone(); let log = harness.logger().clone();
let (beacon_processor_tx, beacon_processor_rx) = mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN); let (beacon_processor_tx, beacon_processor_rx) = mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN);
let beacon_processor_tx = BeaconProcessorSend(beacon_processor_tx);
let (sync_tx, _sync_rx) = mpsc::unbounded_channel(); let (sync_tx, _sync_rx) = mpsc::unbounded_channel();
// Default metadata // Default metadata
@ -194,22 +202,40 @@ impl TestRig {
let executor = harness.runtime.task_executor.clone(); let executor = harness.runtime.task_executor.clone();
let (work_reprocessing_tx, work_reprocessing_rx) =
mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN);
let (work_journal_tx, work_journal_rx) = mpsc::channel(16_364); let (work_journal_tx, work_journal_rx) = mpsc::channel(16_364);
let duplicate_cache = DuplicateCache::default(); let duplicate_cache = DuplicateCache::default();
BeaconProcessor { let network_beacon_processor = NetworkBeaconProcessor {
beacon_chain: Arc::downgrade(&chain), beacon_processor_send: beacon_processor_tx.clone(),
duplicate_cache: duplicate_cache.clone(),
chain: harness.chain.clone(),
network_tx, network_tx,
sync_tx, sync_tx,
reprocess_tx: work_reprocessing_tx.clone(),
network_globals: network_globals.clone(),
invalid_block_storage: InvalidBlockStorage::Disabled,
executor: executor.clone(),
log: log.clone(),
};
let network_beacon_processor = Arc::new(network_beacon_processor);
BeaconProcessor {
network_globals, network_globals,
executor, executor,
max_workers: cmp::max(1, num_cpus::get()), max_workers: cmp::max(1, num_cpus::get()),
current_workers: 0, current_workers: 0,
importing_blocks: duplicate_cache.clone(), enable_backfill_rate_limiting: harness.chain.config.enable_backfill_rate_limiting,
invalid_block_storage: InvalidBlockStorage::Disabled,
log: log.clone(), log: log.clone(),
} }
.spawn_manager(beacon_processor_rx, Some(work_journal_tx)); .spawn_manager(
beacon_processor_rx,
work_reprocessing_tx,
work_reprocessing_rx,
Some(work_journal_tx),
harness.chain.slot_clock.clone(),
);
Self { Self {
chain, chain,
@ -226,6 +252,7 @@ impl TestRig {
_network_rx, _network_rx,
_sync_rx, _sync_rx,
duplicate_cache, duplicate_cache,
network_beacon_processor,
_harness: harness, _harness: harness,
} }
} }
@ -239,55 +266,56 @@ impl TestRig {
} }
pub fn enqueue_gossip_block(&self) { pub fn enqueue_gossip_block(&self) {
self.beacon_processor_tx self.network_beacon_processor
.try_send(WorkEvent::gossip_beacon_block( .send_gossip_beacon_block(
junk_message_id(), junk_message_id(),
junk_peer_id(), junk_peer_id(),
Client::default(), Client::default(),
self.next_block.clone(), self.next_block.clone(),
Duration::from_secs(0), Duration::from_secs(0),
)) )
.unwrap(); .unwrap();
} }
pub fn enqueue_gossip_blob(&self, blob_index: usize) { pub fn enqueue_gossip_blob(&self, blob_index: usize) {
if let Some(blobs) = self.next_blobs.as_ref() { if let Some(blobs) = self.next_blobs.as_ref() {
let blob = blobs.get(blob_index).unwrap(); let blob = blobs.get(blob_index).unwrap();
self.beacon_processor_tx self.network_beacon_processor
.try_send(WorkEvent::gossip_signed_blob_sidecar( .send_gossip_blob_sidecar(
junk_message_id(), junk_message_id(),
junk_peer_id(), junk_peer_id(),
Client::default(), Client::default(),
blob_index as u64, blob.message.index,
blob.clone(), blob.clone(),
Duration::from_secs(0), Duration::from_secs(0),
)) )
.unwrap(); .unwrap();
} }
} }
pub fn enqueue_rpc_block(&self) { pub fn enqueue_rpc_block(&self) {
let event = WorkEvent::rpc_beacon_block( self.network_beacon_processor
self.next_block.canonical_root(), .send_rpc_beacon_block(
self.next_block.clone().into(), self.next_block.canonical_root(),
std::time::Duration::default(), self.next_block.clone().into(),
BlockProcessType::ParentLookup { std::time::Duration::default(),
chain_hash: Hash256::random(), BlockProcessType::ParentLookup {
}, chain_hash: Hash256::random(),
); },
self.beacon_processor_tx.try_send(event).unwrap(); )
.unwrap();
} }
pub fn enqueue_single_lookup_rpc_block(&self) { pub fn enqueue_single_lookup_rpc_block(&self) {
let event = WorkEvent::rpc_beacon_block( self.network_beacon_processor
self.next_block.canonical_root(), .send_rpc_beacon_block(
self.next_block.clone().into(), self.next_block.canonical_root(),
std::time::Duration::default(), self.next_block.clone().into(),
BlockProcessType::SingleBlock { id: 1 }, std::time::Duration::default(),
); BlockProcessType::SingleBlock { id: 1 },
self.beacon_processor_tx.try_send(event).unwrap(); )
.unwrap();
} }
pub fn enqueue_single_lookup_rpc_blobs(&self) { pub fn enqueue_single_lookup_rpc_blobs(&self) {
if let Some(blobs) = self.next_blobs.clone() { if let Some(blobs) = self.next_blobs.clone() {
let blobs = FixedBlobSidecarList::from( let blobs = FixedBlobSidecarList::from(
@ -296,91 +324,94 @@ impl TestRig {
.map(|b| Some(b.message)) .map(|b| Some(b.message))
.collect::<Vec<_>>(), .collect::<Vec<_>>(),
); );
let event = WorkEvent::rpc_blobs( self.network_beacon_processor
self.next_block.canonical_root(), .send_rpc_blobs(
blobs, self.next_block.canonical_root(),
std::time::Duration::default(), blobs,
BlockProcessType::SingleBlock { id: 1 }, std::time::Duration::default(),
); BlockProcessType::SingleBlock { id: 1 },
self.beacon_processor_tx.try_send(event).unwrap(); )
.unwrap();
} }
} }
pub fn enqueue_blobs_by_range_request(&self, count: u64) { pub fn enqueue_blobs_by_range_request(&self, count: u64) {
let event = WorkEvent::blobs_by_range_request( self.network_beacon_processor
PeerId::random(), .send_blobs_by_range_request(
(ConnectionId::new(42), SubstreamId::new(24)), PeerId::random(),
BlobsByRangeRequest { (ConnectionId::new(42), SubstreamId::new(24)),
start_slot: 0, BlobsByRangeRequest {
count, start_slot: 0,
}, count,
); },
self.beacon_processor_tx.try_send(event).unwrap(); )
.unwrap();
} }
pub fn enqueue_backfill_batch(&self) { pub fn enqueue_backfill_batch(&self) {
let event = WorkEvent::chain_segment( self.network_beacon_processor
ChainSegmentProcessId::BackSyncBatchId(Epoch::default()), .send_chain_segment(
Vec::default(), ChainSegmentProcessId::BackSyncBatchId(Epoch::default()),
); Vec::default(),
self.beacon_processor_tx.try_send(event).unwrap(); )
.unwrap();
} }
pub fn enqueue_unaggregated_attestation(&self) { pub fn enqueue_unaggregated_attestation(&self) {
let (attestation, subnet_id) = self.attestations.first().unwrap().clone(); let (attestation, subnet_id) = self.attestations.first().unwrap().clone();
self.beacon_processor_tx self.network_beacon_processor
.try_send(WorkEvent::unaggregated_attestation( .send_unaggregated_attestation(
junk_message_id(), junk_message_id(),
junk_peer_id(), junk_peer_id(),
attestation, attestation,
subnet_id, subnet_id,
true, true,
Duration::from_secs(0), Duration::from_secs(0),
)) )
.unwrap(); .unwrap();
} }
pub fn enqueue_gossip_attester_slashing(&self) { pub fn enqueue_gossip_attester_slashing(&self) {
self.beacon_processor_tx self.network_beacon_processor
.try_send(WorkEvent::gossip_attester_slashing( .send_gossip_attester_slashing(
junk_message_id(), junk_message_id(),
junk_peer_id(), junk_peer_id(),
Box::new(self.attester_slashing.clone()), Box::new(self.attester_slashing.clone()),
)) )
.unwrap(); .unwrap();
} }
pub fn enqueue_gossip_proposer_slashing(&self) { pub fn enqueue_gossip_proposer_slashing(&self) {
self.beacon_processor_tx self.network_beacon_processor
.try_send(WorkEvent::gossip_proposer_slashing( .send_gossip_proposer_slashing(
junk_message_id(), junk_message_id(),
junk_peer_id(), junk_peer_id(),
Box::new(self.proposer_slashing.clone()), Box::new(self.proposer_slashing.clone()),
)) )
.unwrap(); .unwrap();
} }
pub fn enqueue_gossip_voluntary_exit(&self) { pub fn enqueue_gossip_voluntary_exit(&self) {
self.beacon_processor_tx self.network_beacon_processor
.try_send(WorkEvent::gossip_voluntary_exit( .send_gossip_voluntary_exit(
junk_message_id(), junk_message_id(),
junk_peer_id(), junk_peer_id(),
Box::new(self.voluntary_exit.clone()), Box::new(self.voluntary_exit.clone()),
)) )
.unwrap(); .unwrap();
} }
pub fn enqueue_next_block_unaggregated_attestation(&self) { pub fn enqueue_next_block_unaggregated_attestation(&self) {
let (attestation, subnet_id) = self.next_block_attestations.first().unwrap().clone(); let (attestation, subnet_id) = self.next_block_attestations.first().unwrap().clone();
self.beacon_processor_tx self.network_beacon_processor
.try_send(WorkEvent::unaggregated_attestation( .send_unaggregated_attestation(
junk_message_id(), junk_message_id(),
junk_peer_id(), junk_peer_id(),
attestation, attestation,
subnet_id, subnet_id,
true, true,
Duration::from_secs(0), Duration::from_secs(0),
)) )
.unwrap(); .unwrap();
} }
@ -390,13 +421,13 @@ impl TestRig {
.first() .first()
.unwrap() .unwrap()
.clone(); .clone();
self.beacon_processor_tx self.network_beacon_processor
.try_send(WorkEvent::aggregated_attestation( .send_aggregated_attestation(
junk_message_id(), junk_message_id(),
junk_peer_id(), junk_peer_id(),
aggregate, aggregate,
Duration::from_secs(0), Duration::from_secs(0),
)) )
.unwrap(); .unwrap();
} }
@ -709,7 +740,7 @@ async fn attestation_to_unknown_block_processed(import_method: BlockImportMethod
events.push(RPC_BLOCK); events.push(RPC_BLOCK);
if num_blobs > 0 { if num_blobs > 0 {
rig.enqueue_single_lookup_rpc_blobs(); rig.enqueue_single_lookup_rpc_blobs();
events.push(RPC_BLOB); events.push(RPC_BLOBS);
} }
} }
}; };
@ -792,7 +823,7 @@ async fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod
events.push(RPC_BLOCK); events.push(RPC_BLOCK);
if num_blobs > 0 { if num_blobs > 0 {
rig.enqueue_single_lookup_rpc_blobs(); rig.enqueue_single_lookup_rpc_blobs();
events.push(RPC_BLOB); events.push(RPC_BLOBS);
} }
} }
}; };
@ -972,7 +1003,7 @@ async fn test_rpc_block_reprocessing() {
rig.enqueue_single_lookup_rpc_blobs(); rig.enqueue_single_lookup_rpc_blobs();
if rig.next_blobs.as_ref().map(|b| b.len()).unwrap_or(0) > 0 { if rig.next_blobs.as_ref().map(|b| b.len()).unwrap_or(0) > 0 {
rig.assert_event_journal(&[RPC_BLOB, WORKER_FREED, NOTHING_TO_DO]) rig.assert_event_journal(&[RPC_BLOBS, WORKER_FREED, NOTHING_TO_DO])
.await; .await;
} }

View File

@ -5,21 +5,24 @@
//! syncing-related responses to the Sync manager. //! syncing-related responses to the Sync manager.
#![allow(clippy::unit_arg)] #![allow(clippy::unit_arg)]
use crate::beacon_processor::{
BeaconProcessor, InvalidBlockStorage, WorkEvent as BeaconWorkEvent, MAX_WORK_EVENT_QUEUE_LEN,
};
use crate::error; use crate::error;
use crate::network_beacon_processor::{InvalidBlockStorage, NetworkBeaconProcessor};
use crate::service::{NetworkMessage, RequestId}; use crate::service::{NetworkMessage, RequestId};
use crate::status::status_message; use crate::status::status_message;
use crate::sync::manager::RequestId as SyncId; use crate::sync::manager::RequestId as SyncId;
use crate::sync::SyncMessage; use crate::sync::SyncMessage;
use beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_chain::{BeaconChain, BeaconChainTypes};
use futures::{future, StreamExt}; use beacon_processor::{
work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorSend, DuplicateCache,
use lighthouse_network::{rpc::*, PubsubMessage}; };
use lighthouse_network::{MessageId, NetworkGlobals, PeerId, PeerRequestId, Request, Response}; use futures::prelude::*;
use slog::{debug, error, o, trace, warn}; use lighthouse_network::rpc::*;
use std::cmp; use lighthouse_network::{
MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, Request, Response,
};
use logging::TimeLatch;
use slog::{debug, o, trace};
use slog::{error, warn};
use std::sync::Arc; use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH}; use std::time::{Duration, SystemTime, UNIX_EPOCH};
use tokio::sync::mpsc; use tokio::sync::mpsc;
@ -37,9 +40,11 @@ pub struct Router<T: BeaconChainTypes> {
/// A network context to return and handle RPC requests. /// A network context to return and handle RPC requests.
network: HandlerNetworkContext<T::EthSpec>, network: HandlerNetworkContext<T::EthSpec>,
/// A multi-threaded, non-blocking processor for applying messages to the beacon chain. /// A multi-threaded, non-blocking processor for applying messages to the beacon chain.
beacon_processor_send: mpsc::Sender<BeaconWorkEvent<T>>, network_beacon_processor: Arc<NetworkBeaconProcessor<T>>,
/// The `Router` logger. /// The `Router` logger.
log: slog::Logger, log: slog::Logger,
/// Provides de-bounce functionality for logging.
logger_debounce: TimeLatch,
} }
/// Types of messages the router can receive. /// Types of messages the router can receive.
@ -75,12 +80,15 @@ pub enum RouterMessage<T: EthSpec> {
impl<T: BeaconChainTypes> Router<T> { impl<T: BeaconChainTypes> Router<T> {
/// Initializes and runs the Router. /// Initializes and runs the Router.
#[allow(clippy::too_many_arguments)]
pub fn spawn( pub fn spawn(
beacon_chain: Arc<BeaconChain<T>>, beacon_chain: Arc<BeaconChain<T>>,
network_globals: Arc<NetworkGlobals<T::EthSpec>>, network_globals: Arc<NetworkGlobals<T::EthSpec>>,
network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>, network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
executor: task_executor::TaskExecutor, executor: task_executor::TaskExecutor,
invalid_block_storage: InvalidBlockStorage, invalid_block_storage: InvalidBlockStorage,
beacon_processor_send: BeaconProcessorSend<T::EthSpec>,
beacon_processor_reprocess_tx: mpsc::Sender<ReprocessQueueMessage>,
log: slog::Logger, log: slog::Logger,
) -> error::Result<mpsc::UnboundedSender<RouterMessage<T::EthSpec>>> { ) -> error::Result<mpsc::UnboundedSender<RouterMessage<T::EthSpec>>> {
let message_handler_log = log.new(o!("service"=> "router")); let message_handler_log = log.new(o!("service"=> "router"));
@ -88,34 +96,33 @@ impl<T: BeaconChainTypes> Router<T> {
let (handler_send, handler_recv) = mpsc::unbounded_channel(); let (handler_send, handler_recv) = mpsc::unbounded_channel();
let (beacon_processor_send, beacon_processor_receive) =
mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN);
let sync_logger = log.new(o!("service"=> "sync")); let sync_logger = log.new(o!("service"=> "sync"));
// generate the message channel
let (sync_send, sync_recv) = mpsc::unbounded_channel::<SyncMessage<T::EthSpec>>();
// spawn the sync thread let network_beacon_processor = NetworkBeaconProcessor {
let sync_send = crate::sync::manager::spawn( beacon_processor_send,
executor.clone(), duplicate_cache: DuplicateCache::default(),
beacon_chain.clone(), chain: beacon_chain.clone(),
network_globals.clone(),
network_send.clone(),
beacon_processor_send.clone(),
sync_logger,
);
BeaconProcessor {
beacon_chain: Arc::downgrade(&beacon_chain),
network_tx: network_send.clone(), network_tx: network_send.clone(),
sync_tx: sync_send.clone(), sync_tx: sync_send.clone(),
reprocess_tx: beacon_processor_reprocess_tx,
network_globals: network_globals.clone(), network_globals: network_globals.clone(),
executor: executor.clone(),
max_workers: cmp::max(1, num_cpus::get()),
current_workers: 0,
importing_blocks: Default::default(),
invalid_block_storage, invalid_block_storage,
executor: executor.clone(),
log: log.clone(), log: log.clone(),
} };
.spawn_manager(beacon_processor_receive, None); let network_beacon_processor = Arc::new(network_beacon_processor);
// spawn the sync thread
crate::sync::manager::spawn(
executor.clone(),
beacon_chain.clone(),
network_send.clone(),
network_beacon_processor.clone(),
sync_recv,
sync_logger,
);
// generate the Message handler // generate the Message handler
let mut handler = Router { let mut handler = Router {
@ -123,8 +130,9 @@ impl<T: BeaconChainTypes> Router<T> {
chain: beacon_chain, chain: beacon_chain,
sync_send, sync_send,
network: HandlerNetworkContext::new(network_send, log.clone()), network: HandlerNetworkContext::new(network_send, log.clone()),
beacon_processor_send, network_beacon_processor,
log: message_handler_log, log: message_handler_log,
logger_debounce: TimeLatch::default(),
}; };
// spawn handler task and move the message handler instance into the spawned thread // spawn handler task and move the message handler instance into the spawned thread
@ -192,20 +200,25 @@ impl<T: BeaconChainTypes> Router<T> {
Request::Status(status_message) => { Request::Status(status_message) => {
self.on_status_request(peer_id, request_id, status_message) self.on_status_request(peer_id, request_id, status_message)
} }
Request::BlocksByRange(request) => self.send_beacon_processor_work( Request::BlocksByRange(request) => self.handle_beacon_processor_send_result(
BeaconWorkEvent::blocks_by_range_request(peer_id, request_id, request), self.network_beacon_processor
.send_blocks_by_range_request(peer_id, request_id, request),
), ),
Request::BlocksByRoot(request) => self.send_beacon_processor_work( Request::BlocksByRoot(request) => self.handle_beacon_processor_send_result(
BeaconWorkEvent::blocks_by_roots_request(peer_id, request_id, request), self.network_beacon_processor
.send_blocks_by_roots_request(peer_id, request_id, request),
), ),
Request::BlobsByRange(request) => self.send_beacon_processor_work( Request::BlobsByRange(request) => self.handle_beacon_processor_send_result(
BeaconWorkEvent::blobs_by_range_request(peer_id, request_id, request), self.network_beacon_processor
.send_blobs_by_range_request(peer_id, request_id, request),
), ),
Request::BlobsByRoot(request) => self.send_beacon_processor_work( Request::BlobsByRoot(request) => self.handle_beacon_processor_send_result(
BeaconWorkEvent::blobs_by_root_request(peer_id, request_id, request), self.network_beacon_processor
.send_blobs_by_roots_request(peer_id, request_id, request),
), ),
Request::LightClientBootstrap(request) => self.send_beacon_processor_work( Request::LightClientBootstrap(request) => self.handle_beacon_processor_send_result(
BeaconWorkEvent::lightclient_bootstrap_request(peer_id, request_id, request), self.network_beacon_processor
.send_lightclient_bootstrap_request(peer_id, request_id, request),
), ),
} }
} }
@ -220,10 +233,10 @@ impl<T: BeaconChainTypes> Router<T> {
match response { match response {
Response::Status(status_message) => { Response::Status(status_message) => {
debug!(self.log, "Received Status Response"; "peer_id" => %peer_id, &status_message); debug!(self.log, "Received Status Response"; "peer_id" => %peer_id, &status_message);
self.send_beacon_processor_work(BeaconWorkEvent::status_message( self.handle_beacon_processor_send_result(
peer_id, self.network_beacon_processor
status_message, .send_status_message(peer_id, status_message),
)) )
} }
Response::BlocksByRange(beacon_block) => { Response::BlocksByRange(beacon_block) => {
self.on_blocks_by_range_response(peer_id, request_id, beacon_block); self.on_blocks_by_range_response(peer_id, request_id, beacon_block);
@ -254,48 +267,53 @@ impl<T: BeaconChainTypes> Router<T> {
) { ) {
match gossip_message { match gossip_message {
PubsubMessage::AggregateAndProofAttestation(aggregate_and_proof) => self PubsubMessage::AggregateAndProofAttestation(aggregate_and_proof) => self
.send_beacon_processor_work(BeaconWorkEvent::aggregated_attestation( .handle_beacon_processor_send_result(
message_id, self.network_beacon_processor.send_aggregated_attestation(
peer_id, message_id,
*aggregate_and_proof, peer_id,
timestamp_now(), *aggregate_and_proof,
)), timestamp_now(),
PubsubMessage::Attestation(subnet_attestation) => { ),
self.send_beacon_processor_work(BeaconWorkEvent::unaggregated_attestation( ),
message_id, PubsubMessage::Attestation(subnet_attestation) => self
peer_id, .handle_beacon_processor_send_result(
subnet_attestation.1, self.network_beacon_processor.send_unaggregated_attestation(
subnet_attestation.0, message_id,
should_process, peer_id,
timestamp_now(), subnet_attestation.1,
)) subnet_attestation.0,
} should_process,
PubsubMessage::BeaconBlock(block) => { timestamp_now(),
self.send_beacon_processor_work(BeaconWorkEvent::gossip_beacon_block( ),
),
PubsubMessage::BeaconBlock(block) => self.handle_beacon_processor_send_result(
self.network_beacon_processor.send_gossip_beacon_block(
message_id, message_id,
peer_id, peer_id,
self.network_globals.client(&peer_id), self.network_globals.client(&peer_id),
block, block,
timestamp_now(), timestamp_now(),
)) ),
} ),
PubsubMessage::BlobSidecar(data) => { PubsubMessage::BlobSidecar(data) => {
let (blob_index, signed_blob) = *data; let (blob_index, signed_blob) = *data;
let peer_client = self.network_globals.client(&peer_id); self.handle_beacon_processor_send_result(
self.send_beacon_processor_work(BeaconWorkEvent::gossip_signed_blob_sidecar( self.network_beacon_processor.send_gossip_blob_sidecar(
message_id, message_id,
peer_id, peer_id,
peer_client, self.network_globals.client(&peer_id),
blob_index, blob_index,
signed_blob, signed_blob,
timestamp_now(), timestamp_now(),
)) ),
)
} }
PubsubMessage::VoluntaryExit(exit) => { PubsubMessage::VoluntaryExit(exit) => {
debug!(self.log, "Received a voluntary exit"; "peer_id" => %peer_id); debug!(self.log, "Received a voluntary exit"; "peer_id" => %peer_id);
self.send_beacon_processor_work(BeaconWorkEvent::gossip_voluntary_exit( self.handle_beacon_processor_send_result(
message_id, peer_id, exit, self.network_beacon_processor
)) .send_gossip_voluntary_exit(message_id, peer_id, exit),
)
} }
PubsubMessage::ProposerSlashing(proposer_slashing) => { PubsubMessage::ProposerSlashing(proposer_slashing) => {
debug!( debug!(
@ -303,11 +321,13 @@ impl<T: BeaconChainTypes> Router<T> {
"Received a proposer slashing"; "Received a proposer slashing";
"peer_id" => %peer_id "peer_id" => %peer_id
); );
self.send_beacon_processor_work(BeaconWorkEvent::gossip_proposer_slashing( self.handle_beacon_processor_send_result(
message_id, self.network_beacon_processor.send_gossip_proposer_slashing(
peer_id, message_id,
proposer_slashing, peer_id,
)) proposer_slashing,
),
)
} }
PubsubMessage::AttesterSlashing(attester_slashing) => { PubsubMessage::AttesterSlashing(attester_slashing) => {
debug!( debug!(
@ -315,11 +335,13 @@ impl<T: BeaconChainTypes> Router<T> {
"Received a attester slashing"; "Received a attester slashing";
"peer_id" => %peer_id "peer_id" => %peer_id
); );
self.send_beacon_processor_work(BeaconWorkEvent::gossip_attester_slashing( self.handle_beacon_processor_send_result(
message_id, self.network_beacon_processor.send_gossip_attester_slashing(
peer_id, message_id,
attester_slashing, peer_id,
)) attester_slashing,
),
)
} }
PubsubMessage::SignedContributionAndProof(contribution_and_proof) => { PubsubMessage::SignedContributionAndProof(contribution_and_proof) => {
trace!( trace!(
@ -327,12 +349,14 @@ impl<T: BeaconChainTypes> Router<T> {
"Received sync committee aggregate"; "Received sync committee aggregate";
"peer_id" => %peer_id "peer_id" => %peer_id
); );
self.send_beacon_processor_work(BeaconWorkEvent::gossip_sync_contribution( self.handle_beacon_processor_send_result(
message_id, self.network_beacon_processor.send_gossip_sync_contribution(
peer_id, message_id,
*contribution_and_proof, peer_id,
timestamp_now(), *contribution_and_proof,
)) timestamp_now(),
),
)
} }
PubsubMessage::SyncCommitteeMessage(sync_committtee_msg) => { PubsubMessage::SyncCommitteeMessage(sync_committtee_msg) => {
trace!( trace!(
@ -340,13 +364,15 @@ impl<T: BeaconChainTypes> Router<T> {
"Received sync committee signature"; "Received sync committee signature";
"peer_id" => %peer_id "peer_id" => %peer_id
); );
self.send_beacon_processor_work(BeaconWorkEvent::gossip_sync_signature( self.handle_beacon_processor_send_result(
message_id, self.network_beacon_processor.send_gossip_sync_signature(
peer_id, message_id,
sync_committtee_msg.1, peer_id,
sync_committtee_msg.0, sync_committtee_msg.1,
timestamp_now(), sync_committtee_msg.0,
)) timestamp_now(),
),
)
} }
PubsubMessage::LightClientFinalityUpdate(light_client_finality_update) => { PubsubMessage::LightClientFinalityUpdate(light_client_finality_update) => {
trace!( trace!(
@ -354,13 +380,14 @@ impl<T: BeaconChainTypes> Router<T> {
"Received light client finality update"; "Received light client finality update";
"peer_id" => %peer_id "peer_id" => %peer_id
); );
self.send_beacon_processor_work( self.handle_beacon_processor_send_result(
BeaconWorkEvent::gossip_light_client_finality_update( self.network_beacon_processor
message_id, .send_gossip_light_client_finality_update(
peer_id, message_id,
light_client_finality_update, peer_id,
timestamp_now(), *light_client_finality_update,
), timestamp_now(),
),
) )
} }
PubsubMessage::LightClientOptimisticUpdate(light_client_optimistic_update) => { PubsubMessage::LightClientOptimisticUpdate(light_client_optimistic_update) => {
@ -369,21 +396,25 @@ impl<T: BeaconChainTypes> Router<T> {
"Received light client optimistic update"; "Received light client optimistic update";
"peer_id" => %peer_id "peer_id" => %peer_id
); );
self.send_beacon_processor_work( self.handle_beacon_processor_send_result(
BeaconWorkEvent::gossip_light_client_optimistic_update( self.network_beacon_processor
message_id, .send_gossip_light_client_optimistic_update(
peer_id, message_id,
light_client_optimistic_update, peer_id,
timestamp_now(), *light_client_optimistic_update,
), timestamp_now(),
),
) )
} }
PubsubMessage::BlsToExecutionChange(bls_to_execution_change) => self PubsubMessage::BlsToExecutionChange(bls_to_execution_change) => self
.send_beacon_processor_work(BeaconWorkEvent::gossip_bls_to_execution_change( .handle_beacon_processor_send_result(
message_id, self.network_beacon_processor
peer_id, .send_gossip_bls_to_execution_change(
bls_to_execution_change, message_id,
)), peer_id,
bls_to_execution_change,
),
),
} }
} }
@ -435,7 +466,10 @@ impl<T: BeaconChainTypes> Router<T> {
request_id, request_id,
); );
self.send_beacon_processor_work(BeaconWorkEvent::status_message(peer_id, status)) self.handle_beacon_processor_send_result(
self.network_beacon_processor
.send_status_message(peer_id, status),
)
} }
/// Handle a `BlocksByRange` response from the peer. /// Handle a `BlocksByRange` response from the peer.
@ -566,17 +600,22 @@ impl<T: BeaconChainTypes> Router<T> {
}); });
} }
fn send_beacon_processor_work(&mut self, work: BeaconWorkEvent<T>) { fn handle_beacon_processor_send_result(
self.beacon_processor_send &mut self,
.try_send(work) result: Result<(), crate::network_beacon_processor::Error<T::EthSpec>>,
.unwrap_or_else(|e| { ) {
let work_type = match &e { if let Err(e) = result {
mpsc::error::TrySendError::Closed(work) let work_type = match &e {
| mpsc::error::TrySendError::Full(work) => work.work_type(), mpsc::error::TrySendError::Closed(work) | mpsc::error::TrySendError::Full(work) => {
}; work.work_type()
}
};
if self.logger_debounce.elapsed() {
error!(&self.log, "Unable to send message to the beacon processor"; error!(&self.log, "Unable to send message to the beacon processor";
"error" => %e, "type" => work_type) "error" => %e, "type" => work_type)
}) }
}
} }
} }

View File

@ -1,5 +1,5 @@
use super::sync::manager::RequestId as SyncId; use super::sync::manager::RequestId as SyncId;
use crate::beacon_processor::InvalidBlockStorage; use crate::network_beacon_processor::InvalidBlockStorage;
use crate::persisted_dht::{clear_dht, load_dht, persist_dht}; use crate::persisted_dht::{clear_dht, load_dht, persist_dht};
use crate::router::{Router, RouterMessage}; use crate::router::{Router, RouterMessage};
use crate::subnet_service::SyncCommitteeService; use crate::subnet_service::SyncCommitteeService;
@ -9,6 +9,7 @@ use crate::{
NetworkConfig, NetworkConfig,
}; };
use beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_chain::{BeaconChain, BeaconChainTypes};
use beacon_processor::{work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorSend};
use futures::channel::mpsc::Sender; use futures::channel::mpsc::Sender;
use futures::future::OptionFuture; use futures::future::OptionFuture;
use futures::prelude::*; use futures::prelude::*;
@ -224,6 +225,8 @@ impl<T: BeaconChainTypes> NetworkService<T> {
config: &NetworkConfig, config: &NetworkConfig,
executor: task_executor::TaskExecutor, executor: task_executor::TaskExecutor,
gossipsub_registry: Option<&'_ mut Registry>, gossipsub_registry: Option<&'_ mut Registry>,
beacon_processor_send: BeaconProcessorSend<T::EthSpec>,
beacon_processor_reprocess_tx: mpsc::Sender<ReprocessQueueMessage>,
) -> error::Result<(Arc<NetworkGlobals<T::EthSpec>>, NetworkSenders<T::EthSpec>)> { ) -> error::Result<(Arc<NetworkGlobals<T::EthSpec>>, NetworkSenders<T::EthSpec>)> {
let network_log = executor.log().clone(); let network_log = executor.log().clone();
// build the channels for external comms // build the channels for external comms
@ -311,6 +314,8 @@ impl<T: BeaconChainTypes> NetworkService<T> {
network_senders.network_send(), network_senders.network_send(),
executor.clone(), executor.clone(),
invalid_block_storage, invalid_block_storage,
beacon_processor_send,
beacon_processor_reprocess_tx,
network_log.clone(), network_log.clone(),
)?; )?;

View File

@ -4,12 +4,15 @@ mod tests {
use crate::persisted_dht::load_dht; use crate::persisted_dht::load_dht;
use crate::{NetworkConfig, NetworkService}; use crate::{NetworkConfig, NetworkService};
use beacon_chain::test_utils::EphemeralHarnessType; use beacon_chain::test_utils::EphemeralHarnessType;
use beacon_processor::{
BeaconProcessorSend, MAX_SCHEDULED_WORK_QUEUE_LEN, MAX_WORK_EVENT_QUEUE_LEN,
};
use lighthouse_network::Enr; use lighthouse_network::Enr;
use slog::{o, Drain, Level, Logger}; use slog::{o, Drain, Level, Logger};
use sloggers::{null::NullLoggerBuilder, Build}; use sloggers::{null::NullLoggerBuilder, Build};
use std::str::FromStr; use std::str::FromStr;
use std::sync::Arc; use std::sync::Arc;
use tokio::runtime::Runtime; use tokio::{runtime::Runtime, sync::mpsc};
use types::MinimalEthSpec as E; use types::MinimalEthSpec as E;
type BeaconChainHarness = beacon_chain::test_utils::BeaconChainHarness<EphemeralHarnessType<E>>; type BeaconChainHarness = beacon_chain::test_utils::BeaconChainHarness<EphemeralHarnessType<E>>;
@ -69,10 +72,20 @@ mod tests {
// Create a new network service which implicitly gets dropped at the // Create a new network service which implicitly gets dropped at the
// end of the block. // end of the block.
let _network_service = let (beacon_processor_send, _beacon_processor_receive) =
NetworkService::start(beacon_chain.clone(), &config, executor, None) mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN);
.await let (beacon_processor_reprocess_tx, _beacon_processor_reprocess_rx) =
.unwrap(); mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN);
let _network_service = NetworkService::start(
beacon_chain.clone(),
&config,
executor,
None,
BeaconProcessorSend(beacon_processor_send),
beacon_processor_reprocess_tx,
)
.await
.unwrap();
drop(signal); drop(signal);
}); });

View File

@ -8,7 +8,7 @@
//! If a batch fails, the backfill sync cannot progress. In this scenario, we mark the backfill //! If a batch fails, the backfill sync cannot progress. In this scenario, we mark the backfill
//! sync as failed, log an error and attempt to retry once a new peer joins the node. //! sync as failed, log an error and attempt to retry once a new peer joins the node.
use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent}; use crate::network_beacon_processor::ChainSegmentProcessId;
use crate::sync::manager::{BatchProcessResult, Id}; use crate::sync::manager::{BatchProcessResult, Id};
use crate::sync::network_context::SyncNetworkContext; use crate::sync::network_context::SyncNetworkContext;
use crate::sync::range_sync::{ use crate::sync::range_sync::{
@ -537,8 +537,10 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
let process_id = ChainSegmentProcessId::BackSyncBatchId(batch_id); let process_id = ChainSegmentProcessId::BackSyncBatchId(batch_id);
self.current_processing_batch = Some(batch_id); self.current_processing_batch = Some(batch_id);
let work_event = BeaconWorkEvent::chain_segment(process_id, blocks); if let Err(e) = network
if let Err(e) = network.processor_channel().try_send(work_event) { .beacon_processor()
.send_chain_segment(process_id, blocks)
{
crit!(self.log, "Failed to send backfill segment to processor."; "msg" => "process_batch", crit!(self.log, "Failed to send backfill segment to processor."; "msg" => "process_batch",
"error" => %e, "batch" => self.processing_target); "error" => %e, "batch" => self.processing_target);
// This is unlikely to happen but it would stall syncing since the batch now has no // This is unlikely to happen but it would stall syncing since the batch now has no
@ -1097,12 +1099,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
match self.batches.entry(batch_id) { match self.batches.entry(batch_id) {
Entry::Occupied(_) => { Entry::Occupied(_) => {
// this batch doesn't need downloading, let this same function decide the next batch // this batch doesn't need downloading, let this same function decide the next batch
if batch_id if self.would_complete(batch_id) {
== self
.beacon_chain
.genesis_backfill_slot
.epoch(T::EthSpec::slots_per_epoch())
{
self.last_batch_downloaded = true; self.last_batch_downloaded = true;
} }
@ -1118,12 +1115,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
BACKFILL_EPOCHS_PER_BATCH, BACKFILL_EPOCHS_PER_BATCH,
batch_type, batch_type,
)); ));
if batch_id if self.would_complete(batch_id) {
== self
.beacon_chain
.genesis_backfill_slot
.epoch(T::EthSpec::slots_per_epoch())
{
self.last_batch_downloaded = true; self.last_batch_downloaded = true;
} }
self.to_be_downloaded = self self.to_be_downloaded = self
@ -1155,14 +1147,8 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
/// Checks with the beacon chain if backfill sync has completed. /// Checks with the beacon chain if backfill sync has completed.
fn check_completed(&mut self) -> bool { fn check_completed(&mut self) -> bool {
if self.current_start if self.would_complete(self.current_start) {
== self
.beacon_chain
.genesis_backfill_slot
.epoch(T::EthSpec::slots_per_epoch())
{
// Check that the beacon chain agrees // Check that the beacon chain agrees
if let Some(anchor_info) = self.beacon_chain.store.get_anchor_info() { if let Some(anchor_info) = self.beacon_chain.store.get_anchor_info() {
// Conditions that we have completed a backfill sync // Conditions that we have completed a backfill sync
if anchor_info.block_backfill_complete(self.beacon_chain.genesis_backfill_slot) { if anchor_info.block_backfill_complete(self.beacon_chain.genesis_backfill_slot) {
@ -1175,6 +1161,15 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
false false
} }
/// Checks if backfill would complete by syncing to `start_epoch`.
fn would_complete(&self, start_epoch: Epoch) -> bool {
start_epoch
<= self
.beacon_chain
.genesis_backfill_slot
.epoch(T::EthSpec::slots_per_epoch())
}
/// Updates the global network state indicating the current state of a backfill sync. /// Updates the global network state indicating the current state of a backfill sync.
fn set_state(&self, state: BackFillState) { fn set_state(&self, state: BackFillState) {
*self.network_globals.backfill_state.write() = state; *self.network_globals.backfill_state.write() = state;

View File

@ -1,6 +1,6 @@
use crate::sync::SyncMessage; use crate::network_beacon_processor::NetworkBeaconProcessor;
use beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_chain::{BeaconChain, BeaconChainTypes};
use slog::{crit, warn}; use slog::crit;
use slot_clock::SlotClock; use slot_clock::SlotClock;
use std::sync::Arc; use std::sync::Arc;
use tokio::sync::mpsc; use tokio::sync::mpsc;
@ -35,7 +35,7 @@ pub fn spawn_delayed_lookup_service<T: BeaconChainTypes>(
executor: &task_executor::TaskExecutor, executor: &task_executor::TaskExecutor,
beacon_chain: Arc<BeaconChain<T>>, beacon_chain: Arc<BeaconChain<T>>,
mut delayed_lookups_recv: mpsc::Receiver<DelayedLookupMessage>, mut delayed_lookups_recv: mpsc::Receiver<DelayedLookupMessage>,
sync_send: mpsc::UnboundedSender<SyncMessage<T::EthSpec>>, beacon_processor: Arc<NetworkBeaconProcessor<T>>,
log: slog::Logger, log: slog::Logger,
) { ) {
executor.spawn( executor.spawn(
@ -52,8 +52,8 @@ pub fn spawn_delayed_lookup_service<T: BeaconChainTypes>(
} else { } else {
delay - seconds_from_current_slot_start delay - seconds_from_current_slot_start
}; };
tokio::time::Instant::now() + duration_until_start Instant::now() + duration_until_start
} }
_ => { _ => {
crit!(log, crit!(log,
"Failed to read slot clock, delayed lookup service timing will be inaccurate.\ "Failed to read slot clock, delayed lookup service timing will be inaccurate.\
@ -69,11 +69,8 @@ pub fn spawn_delayed_lookup_service<T: BeaconChainTypes>(
while let Ok(msg) = delayed_lookups_recv.try_recv() { while let Ok(msg) = delayed_lookups_recv.try_recv() {
match msg { match msg {
DelayedLookupMessage::MissingComponents(block_root) => { DelayedLookupMessage::MissingComponents(block_root) => {
if let Err(e) = sync_send beacon_processor
.send(SyncMessage::MissingGossipBlockComponentsDelayed(block_root)) .send_delayed_lookup(block_root)
{
warn!(log, "Failed to send delayed lookup message"; "error" => ?e);
}
} }
} }
} }

View File

@ -1,20 +1,3 @@
use beacon_chain::blob_verification::{AsBlock, BlockWrapper};
use beacon_chain::data_availability_checker::{AvailabilityCheckError, DataAvailabilityChecker};
use beacon_chain::{AvailabilityProcessingStatus, BeaconChainTypes, BlockError};
use lighthouse_network::rpc::RPCError;
use lighthouse_network::{PeerAction, PeerId};
use lru_cache::LRUTimeCache;
use slog::{debug, error, trace, warn, Logger};
use smallvec::SmallVec;
use std::collections::HashMap;
use std::fmt::Debug;
use std::sync::Arc;
use std::time::Duration;
use store::Hash256;
use strum::Display;
use types::blob_sidecar::FixedBlobSidecarList;
use types::{BlobSidecar, SignedBeaconBlock, Slot};
use self::parent_lookup::PARENT_FAIL_TOLERANCE; use self::parent_lookup::PARENT_FAIL_TOLERANCE;
use self::parent_lookup::{ParentLookup, ParentVerifyError}; use self::parent_lookup::{ParentLookup, ParentVerifyError};
use self::single_block_lookup::{LookupVerifyError, SingleBlockLookup}; use self::single_block_lookup::{LookupVerifyError, SingleBlockLookup};
@ -24,10 +7,26 @@ use super::{
manager::{BlockProcessType, Id}, manager::{BlockProcessType, Id},
network_context::SyncNetworkContext, network_context::SyncNetworkContext,
}; };
use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent};
use crate::metrics; use crate::metrics;
use crate::network_beacon_processor::ChainSegmentProcessId;
use crate::sync::block_lookups::single_block_lookup::LookupId; use crate::sync::block_lookups::single_block_lookup::LookupId;
use beacon_chain::blob_verification::{AsBlock, BlockWrapper};
use beacon_chain::data_availability_checker::{AvailabilityCheckError, DataAvailabilityChecker};
use beacon_chain::{AvailabilityProcessingStatus, BeaconChainTypes, BlockError};
use lighthouse_network::rpc::RPCError;
use lighthouse_network::{PeerAction, PeerId};
use lru_cache::LRUTimeCache;
pub use single_block_lookup::UnknownParentComponents; pub use single_block_lookup::UnknownParentComponents;
use slog::{debug, error, trace, warn, Logger};
use smallvec::SmallVec;
use std::collections::HashMap;
use std::fmt::Debug;
use std::sync::Arc;
use std::time::Duration;
use store::{Hash256, SignedBeaconBlock};
use strum::Display;
use types::blob_sidecar::FixedBlobSidecarList;
use types::{BlobSidecar, Slot};
pub(crate) mod delayed_lookup; pub(crate) mod delayed_lookup;
mod parent_lookup; mod parent_lookup;
@ -380,13 +379,13 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
}; };
if !has_pending_parent_request { if !has_pending_parent_request {
let block_wrapper = request_ref let rpc_block = request_ref
.get_downloaded_block() .get_downloaded_block()
.unwrap_or(BlockWrapper::Block(block)); .unwrap_or(BlockWrapper::Block(block));
// This is the correct block, send it for processing // This is the correct block, send it for processing
match self.send_block_for_processing( match self.send_block_for_processing(
block_root, block_root,
block_wrapper, rpc_block,
seen_timestamp, seen_timestamp,
BlockProcessType::SingleBlock { id }, BlockProcessType::SingleBlock { id },
cx, cx,
@ -562,14 +561,13 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
match parent_lookup.verify_block(block, &mut self.failed_chains) { match parent_lookup.verify_block(block, &mut self.failed_chains) {
Ok(Some((block_root, block))) => { Ok(Some((block_root, block))) => {
parent_lookup.add_current_request_block(block); parent_lookup.add_current_request_block(block);
if let Some(block_wrapper) = if let Some(rpc_block) = parent_lookup.current_parent_request.get_downloaded_block()
parent_lookup.current_parent_request.get_downloaded_block()
{ {
let chain_hash = parent_lookup.chain_hash(); let chain_hash = parent_lookup.chain_hash();
if self if self
.send_block_for_processing( .send_block_for_processing(
block_root, block_root,
block_wrapper, rpc_block,
seen_timestamp, seen_timestamp,
BlockProcessType::ParentLookup { chain_hash }, BlockProcessType::ParentLookup { chain_hash },
cx, cx,
@ -643,13 +641,12 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
Ok(Some((block_root, blobs))) => { Ok(Some((block_root, blobs))) => {
parent_lookup.add_current_request_blobs(blobs); parent_lookup.add_current_request_blobs(blobs);
let chain_hash = parent_lookup.chain_hash(); let chain_hash = parent_lookup.chain_hash();
if let Some(block_wrapper) = if let Some(rpc_block) = parent_lookup.current_parent_request.get_downloaded_block()
parent_lookup.current_parent_request.get_downloaded_block()
{ {
if self if self
.send_block_for_processing( .send_block_for_processing(
block_root, block_root,
block_wrapper, rpc_block,
seen_timestamp, seen_timestamp,
BlockProcessType::ParentLookup { chain_hash }, BlockProcessType::ParentLookup { chain_hash },
cx, cx,
@ -914,10 +911,10 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
let slot = block.slot(); let slot = block.slot();
let parent_root = block.parent_root(); let parent_root = block.parent_root();
let (block, blobs) = block.deconstruct(); let (block, blobs) = block.deconstruct();
request_ref.add_unknown_parent_block(block); request_ref.add_unknown_parent_components(UnknownParentComponents::new(
if let Some(blobs) = blobs { Some(block),
request_ref.add_unknown_parent_blobs(blobs); blobs,
} ));
self.search_parent(slot, root, parent_root, peer_id.to_peer_id(), cx); self.search_parent(slot, root, parent_root, peer_id.to_peer_id(), cx);
ShouldRemoveLookup::False ShouldRemoveLookup::False
} }
@ -1054,8 +1051,8 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(_)) BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(_))
| BlockProcessingResult::Err(BlockError::BlockIsAlreadyKnown { .. }) => { | BlockProcessingResult::Err(BlockError::BlockIsAlreadyKnown { .. }) => {
// Check if the beacon processor is available // Check if the beacon processor is available
let beacon_processor_send = match cx.processor_channel_if_enabled() { let beacon_processor = match cx.beacon_processor_if_enabled() {
Some(channel) => channel, Some(beacon_processor) => beacon_processor,
None => { None => {
return trace!( return trace!(
self.log, self.log,
@ -1076,9 +1073,8 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
blocks.push(child_block); blocks.push(child_block);
}; };
let process_id = ChainSegmentProcessId::ParentLookup(chain_hash); let process_id = ChainSegmentProcessId::ParentLookup(chain_hash);
let work = WorkEvent::chain_segment(process_id, blocks);
match beacon_processor_send.try_send(work) { match beacon_processor.send_chain_segment(process_id, blocks) {
Ok(_) => { Ok(_) => {
self.processing_parent_lookups self.processing_parent_lookups
.insert(chain_hash, (hashes, block_request)); .insert(chain_hash, (hashes, block_request));
@ -1170,7 +1166,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
.enumerate() .enumerate()
.find(|(_, req)| req.block_request_state.requested_block_root == chain_hash) .find(|(_, req)| req.block_request_state.requested_block_root == chain_hash)
{ {
if let Some((lookup_id, block_wrapper)) = if let Some((lookup_id, rpc_block)) =
self.single_block_lookups.get_mut(index).and_then(|lookup| { self.single_block_lookups.get_mut(index).and_then(|lookup| {
lookup lookup
.get_downloaded_block() .get_downloaded_block()
@ -1190,7 +1186,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
if self if self
.send_block_for_processing( .send_block_for_processing(
chain_hash, chain_hash,
block_wrapper, rpc_block,
Duration::from_secs(0), //TODO(sean) pipe this through Duration::from_secs(0), //TODO(sean) pipe this through
BlockProcessType::SingleBlock { id }, BlockProcessType::SingleBlock { id },
cx, cx,
@ -1235,11 +1231,15 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
process_type: BlockProcessType, process_type: BlockProcessType,
cx: &mut SyncNetworkContext<T>, cx: &mut SyncNetworkContext<T>,
) -> Result<(), ()> { ) -> Result<(), ()> {
match cx.processor_channel_if_enabled() { match cx.beacon_processor_if_enabled() {
Some(beacon_processor_send) => { Some(beacon_processor) => {
trace!(self.log, "Sending block for processing"; "block" => ?block_root, "process" => ?process_type); trace!(self.log, "Sending block for processing"; "block" => ?block_root, "process" => ?process_type);
let event = WorkEvent::rpc_beacon_block(block_root, block, duration, process_type); if let Err(e) = beacon_processor.send_rpc_beacon_block(
if let Err(e) = beacon_processor_send.try_send(event) { block_root,
block,
duration,
process_type,
) {
error!( error!(
self.log, self.log,
"Failed to send sync block to processor"; "Failed to send sync block to processor";
@ -1269,11 +1269,12 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
if blob_count == 0 { if blob_count == 0 {
return Ok(()); return Ok(());
} }
match cx.processor_channel_if_enabled() { match cx.beacon_processor_if_enabled() {
Some(beacon_processor_send) => { Some(beacon_processor) => {
trace!(self.log, "Sending blobs for processing"; "block" => ?block_root, "process_type" => ?process_type); trace!(self.log, "Sending blobs for processing"; "block" => ?block_root, "process_type" => ?process_type);
let event = WorkEvent::rpc_blobs(block_root, blobs, duration, process_type); if let Err(e) =
if let Err(e) = beacon_processor_send.try_send(event) { beacon_processor.send_rpc_blobs(block_root, blobs, duration, process_type)
{
error!( error!(
self.log, self.log,
"Failed to send sync blobs to processor"; "Failed to send sync blobs to processor";

View File

@ -1,22 +1,20 @@
use std::sync::Arc; use crate::network_beacon_processor::NetworkBeaconProcessor;
use crate::service::RequestId; use crate::service::RequestId;
use crate::sync::manager::RequestId as SyncId; use crate::sync::manager::RequestId as SyncId;
use crate::NetworkMessage; use crate::NetworkMessage;
use std::sync::Arc;
use super::*; use super::*;
use beacon_chain::{ use beacon_chain::builder::Witness;
builder::Witness, use beacon_chain::eth1_chain::CachingEth1Backend;
eth1_chain::CachingEth1Backend, use beacon_chain::test_utils::{build_log, BeaconChainHarness, EphemeralHarnessType};
test_utils::{build_log, BeaconChainHarness, EphemeralHarnessType}, use beacon_processor::WorkEvent;
};
use execution_layer::BlobsBundleV1; use execution_layer::BlobsBundleV1;
pub use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH};
use lighthouse_network::rpc::RPCResponseErrorCode; use lighthouse_network::rpc::RPCResponseErrorCode;
use lighthouse_network::{NetworkGlobals, Request}; use lighthouse_network::{NetworkGlobals, Request};
use slot_clock::{SlotClock, TestingSlotClock}; use slot_clock::{ManualSlotClock, SlotClock, TestingSlotClock};
use std::time::Duration;
use store::MemoryStore; use store::MemoryStore;
use tokio::sync::mpsc; use tokio::sync::mpsc;
use types::{ use types::{
@ -25,10 +23,10 @@ use types::{
BeaconBlock, EthSpec, ForkName, FullPayloadDeneb, MinimalEthSpec as E, SignedBeaconBlock, BeaconBlock, EthSpec, ForkName, FullPayloadDeneb, MinimalEthSpec as E, SignedBeaconBlock,
}; };
type T = Witness<TestingSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>; type T = Witness<ManualSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>;
struct TestRig { struct TestRig {
beacon_processor_rx: mpsc::Receiver<WorkEvent<T>>, beacon_processor_rx: mpsc::Receiver<WorkEvent<E>>,
network_rx: mpsc::UnboundedReceiver<NetworkMessage<E>>, network_rx: mpsc::UnboundedReceiver<NetworkMessage<E>>,
rng: XorShiftRng, rng: XorShiftRng,
harness: BeaconChainHarness<T>, harness: BeaconChainHarness<T>,
@ -46,7 +44,7 @@ impl TestRig {
let log = build_log(slog::Level::Debug, enable_log); let log = build_log(slog::Level::Debug, enable_log);
// Initialise a new beacon chain // Initialise a new beacon chain
let harness = BeaconChainHarness::<EphemeralHarnessType<E>>::builder(E::default()) let harness = BeaconChainHarness::<EphemeralHarnessType<E>>::builder(E)
.default_spec() .default_spec()
.logger(log.clone()) .logger(log.clone())
.deterministic_keypairs(1) .deterministic_keypairs(1)
@ -60,8 +58,10 @@ impl TestRig {
let chain = harness.chain.clone(); let chain = harness.chain.clone();
let (beacon_processor_tx, beacon_processor_rx) = mpsc::channel(100);
let (network_tx, network_rx) = mpsc::unbounded_channel(); let (network_tx, network_rx) = mpsc::unbounded_channel();
let globals = Arc::new(NetworkGlobals::new_test_globals(Vec::new(), &log));
let (network_beacon_processor, beacon_processor_rx) =
NetworkBeaconProcessor::null_for_testing(globals);
let rng = XorShiftRng::from_seed([42; 16]); let rng = XorShiftRng::from_seed([42; 16]);
let rig = TestRig { let rig = TestRig {
beacon_processor_rx, beacon_processor_rx,
@ -75,11 +75,9 @@ impl TestRig {
log.new(slog::o!("component" => "block_lookups")), log.new(slog::o!("component" => "block_lookups")),
); );
let cx = { let cx = {
let globals = Arc::new(NetworkGlobals::new_test_globals(Vec::new(), &log));
SyncNetworkContext::new( SyncNetworkContext::new(
network_tx, network_tx,
globals, Arc::new(network_beacon_processor),
beacon_processor_tx,
chain, chain,
log.new(slog::o!("component" => "network_context")), log.new(slog::o!("component" => "network_context")),
) )
@ -103,7 +101,7 @@ impl TestRig {
let mut blob_sidecars = vec![]; let mut blob_sidecars = vec![];
if let Ok(message) = block.message_deneb_mut() { if let Ok(message) = block.message_deneb_mut() {
// get random number between 0 and Max Blobs // get random number between 0 and Max Blobs
let mut payload: &mut FullPayloadDeneb<E> = &mut message.body.execution_payload; let payload: &mut FullPayloadDeneb<E> = &mut message.body.execution_payload;
let num_blobs = match num_blobs { let num_blobs = match num_blobs {
NumBlobs::Random => { NumBlobs::Random => {
let mut num_blobs = rand::random::<usize>() % E::max_blobs_per_block(); let mut num_blobs = rand::random::<usize>() % E::max_blobs_per_block();
@ -209,13 +207,13 @@ impl TestRig {
match response_type { match response_type {
ResponseType::Block => match self.beacon_processor_rx.try_recv() { ResponseType::Block => match self.beacon_processor_rx.try_recv() {
Ok(work) => { Ok(work) => {
assert_eq!(work.work_type(), crate::beacon_processor::RPC_BLOCK); assert_eq!(work.work_type(), beacon_processor::RPC_BLOCK);
} }
other => panic!("Expected block process, found {:?}", other), other => panic!("Expected block process, found {:?}", other),
}, },
ResponseType::Blob => match self.beacon_processor_rx.try_recv() { ResponseType::Blob => match self.beacon_processor_rx.try_recv() {
Ok(work) => { Ok(work) => {
assert_eq!(work.work_type(), crate::beacon_processor::RPC_BLOB); assert_eq!(work.work_type(), beacon_processor::RPC_BLOBS);
} }
other => panic!("Expected blob process, found {:?}", other), other => panic!("Expected blob process, found {:?}", other),
}, },
@ -226,7 +224,7 @@ impl TestRig {
fn expect_parent_chain_process(&mut self) { fn expect_parent_chain_process(&mut self) {
match self.beacon_processor_rx.try_recv() { match self.beacon_processor_rx.try_recv() {
Ok(work) => { Ok(work) => {
assert_eq!(work.work_type(), crate::beacon_processor::CHAIN_SEGMENT); assert_eq!(work.work_type(), beacon_processor::CHAIN_SEGMENT);
} }
other => panic!("Expected chain segment process, found {:?}", other), other => panic!("Expected chain segment process, found {:?}", other),
} }

View File

@ -38,7 +38,7 @@ use super::block_lookups::{BlockLookups, PeerShouldHave};
use super::network_context::{BlockOrBlob, SyncNetworkContext}; use super::network_context::{BlockOrBlob, SyncNetworkContext};
use super::peer_sync_info::{remote_sync_type, PeerSyncType}; use super::peer_sync_info::{remote_sync_type, PeerSyncType};
use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH};
use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent}; use crate::network_beacon_processor::{ChainSegmentProcessId, NetworkBeaconProcessor};
use crate::service::NetworkMessage; use crate::service::NetworkMessage;
use crate::status::ToStatusMessage; use crate::status::ToStatusMessage;
use crate::sync::block_lookups::delayed_lookup; use crate::sync::block_lookups::delayed_lookup;
@ -207,9 +207,6 @@ pub struct SyncManager<T: BeaconChainTypes> {
/// A reference to the underlying beacon chain. /// A reference to the underlying beacon chain.
chain: Arc<BeaconChain<T>>, chain: Arc<BeaconChain<T>>,
/// A reference to the network globals and peer-db.
network_globals: Arc<NetworkGlobals<T::EthSpec>>,
/// A receiving channel sent by the message processor thread. /// A receiving channel sent by the message processor thread.
input_channel: mpsc::UnboundedReceiver<SyncMessage<T::EthSpec>>, input_channel: mpsc::UnboundedReceiver<SyncMessage<T::EthSpec>>,
@ -236,29 +233,26 @@ pub struct SyncManager<T: BeaconChainTypes> {
pub fn spawn<T: BeaconChainTypes>( pub fn spawn<T: BeaconChainTypes>(
executor: task_executor::TaskExecutor, executor: task_executor::TaskExecutor,
beacon_chain: Arc<BeaconChain<T>>, beacon_chain: Arc<BeaconChain<T>>,
network_globals: Arc<NetworkGlobals<T::EthSpec>>,
network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>, network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
beacon_processor_send: mpsc::Sender<BeaconWorkEvent<T>>, beacon_processor: Arc<NetworkBeaconProcessor<T>>,
sync_recv: mpsc::UnboundedReceiver<SyncMessage<T::EthSpec>>,
log: slog::Logger, log: slog::Logger,
) -> mpsc::UnboundedSender<SyncMessage<T::EthSpec>> { ) {
assert!( assert!(
MAX_REQUEST_BLOCKS >= T::EthSpec::slots_per_epoch() * EPOCHS_PER_BATCH, MAX_REQUEST_BLOCKS >= T::EthSpec::slots_per_epoch() * EPOCHS_PER_BATCH,
"Max blocks that can be requested in a single batch greater than max allowed blocks in a single request" "Max blocks that can be requested in a single batch greater than max allowed blocks in a single request"
); );
// generate the message channel
let (sync_send, sync_recv) = mpsc::unbounded_channel::<SyncMessage<T::EthSpec>>();
let (delayed_lookups_send, delayed_lookups_recv) = let (delayed_lookups_send, delayed_lookups_recv) =
mpsc::channel::<DelayedLookupMessage>(DELAY_QUEUE_CHANNEL_SIZE); mpsc::channel::<DelayedLookupMessage>(DELAY_QUEUE_CHANNEL_SIZE);
// create an instance of the SyncManager // create an instance of the SyncManager
let network_globals = beacon_processor.network_globals.clone();
let mut sync_manager = SyncManager { let mut sync_manager = SyncManager {
chain: beacon_chain.clone(), chain: beacon_chain.clone(),
network_globals: network_globals.clone(),
input_channel: sync_recv, input_channel: sync_recv,
network: SyncNetworkContext::new( network: SyncNetworkContext::new(
network_send, network_send,
network_globals.clone(), beacon_processor.clone(),
beacon_processor_send,
beacon_chain.clone(), beacon_chain.clone(),
log.clone(), log.clone(),
), ),
@ -273,22 +267,24 @@ pub fn spawn<T: BeaconChainTypes>(
}; };
let log_clone = log.clone(); let log_clone = log.clone();
let sync_send_clone = sync_send.clone();
delayed_lookup::spawn_delayed_lookup_service( delayed_lookup::spawn_delayed_lookup_service(
&executor, &executor,
beacon_chain, beacon_chain,
delayed_lookups_recv, delayed_lookups_recv,
sync_send, beacon_processor,
log, log,
); );
// spawn the sync manager thread // spawn the sync manager thread
debug!(log_clone, "Sync Manager started"); debug!(log_clone, "Sync Manager started");
executor.spawn(async move { Box::pin(sync_manager.main()).await }, "sync"); executor.spawn(async move { Box::pin(sync_manager.main()).await }, "sync");
sync_send_clone
} }
impl<T: BeaconChainTypes> SyncManager<T> { impl<T: BeaconChainTypes> SyncManager<T> {
fn network_globals(&self) -> &NetworkGlobals<T::EthSpec> {
self.network.network_globals()
}
/* Input Handling Functions */ /* Input Handling Functions */
/// A peer has connected which has blocks that are unknown to us. /// A peer has connected which has blocks that are unknown to us.
@ -429,12 +425,12 @@ impl<T: BeaconChainTypes> SyncManager<T> {
let rpr = new_state.as_str(); let rpr = new_state.as_str();
// Drop the write lock // Drop the write lock
let update_sync_status = self let update_sync_status = self
.network_globals .network_globals()
.peers .peers
.write() .write()
.update_sync_status(peer_id, new_state.clone()); .update_sync_status(peer_id, new_state.clone());
if let Some(was_updated) = update_sync_status { if let Some(was_updated) = update_sync_status {
let is_connected = self.network_globals.peers.read().is_connected(peer_id); let is_connected = self.network_globals().peers.read().is_connected(peer_id);
if was_updated { if was_updated {
debug!( debug!(
self.log, self.log,
@ -490,7 +486,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
let head = self.chain.best_slot(); let head = self.chain.best_slot();
let current_slot = self.chain.slot().unwrap_or_else(|_| Slot::new(0)); let current_slot = self.chain.slot().unwrap_or_else(|_| Slot::new(0));
let peers = self.network_globals.peers.read(); let peers = self.network_globals().peers.read();
if current_slot >= head if current_slot >= head
&& current_slot.sub(head) <= (SLOT_IMPORT_TOLERANCE as u64) && current_slot.sub(head) <= (SLOT_IMPORT_TOLERANCE as u64)
&& head > 0 && head > 0
@ -552,8 +548,8 @@ impl<T: BeaconChainTypes> SyncManager<T> {
}, },
}; };
let old_state = self.network_globals.set_sync_state(new_state); let old_state = self.network_globals().set_sync_state(new_state);
let new_state = self.network_globals.sync_state.read(); let new_state = self.network_globals().sync_state.read().clone();
if !new_state.eq(&old_state) { if !new_state.eq(&old_state) {
info!(self.log, "Sync state updated"; "old_state" => %old_state, "new_state" => %new_state); info!(self.log, "Sync state updated"; "old_state" => %old_state, "new_state" => %new_state);
// If we have become synced - Subscribe to all the core subnet topics // If we have become synced - Subscribe to all the core subnet topics
@ -802,7 +798,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
} }
fn should_search_for_block(&mut self, block_slot: Slot, peer_id: &PeerId) -> bool { fn should_search_for_block(&mut self, block_slot: Slot, peer_id: &PeerId) -> bool {
if !self.network_globals.sync_state.read().is_synced() { if !self.network_globals().sync_state.read().is_synced() {
let head_slot = self.chain.canonical_head.cached_head().head_slot(); let head_slot = self.chain.canonical_head.cached_head().head_slot();
// if the block is far in the future, ignore it. If its within the slot tolerance of // if the block is far in the future, ignore it. If its within the slot tolerance of
@ -816,13 +812,13 @@ impl<T: BeaconChainTypes> SyncManager<T> {
} }
} }
self.network_globals.peers.read().is_connected(peer_id) self.network_globals().peers.read().is_connected(peer_id)
&& self.network.is_execution_engine_online() && self.network.is_execution_engine_online()
} }
fn synced_and_connected(&mut self, peer_id: &PeerId) -> bool { fn synced_and_connected(&mut self, peer_id: &PeerId) -> bool {
self.network_globals.sync_state.read().is_synced() self.network_globals().sync_state.read().is_synced()
&& self.network_globals.peers.read().is_connected(peer_id) && self.network_globals().peers.read().is_connected(peer_id)
&& self.network.is_execution_engine_online() && self.network.is_execution_engine_online()
} }

View File

@ -4,7 +4,7 @@
use super::block_sidecar_coupling::BlocksAndBlobsRequestInfo; use super::block_sidecar_coupling::BlocksAndBlobsRequestInfo;
use super::manager::{Id, RequestId as SyncRequestId}; use super::manager::{Id, RequestId as SyncRequestId};
use super::range_sync::{BatchId, ByRangeRequestType, ChainId}; use super::range_sync::{BatchId, ByRangeRequestType, ChainId};
use crate::beacon_processor::WorkEvent; use crate::network_beacon_processor::NetworkBeaconProcessor;
use crate::service::{NetworkMessage, RequestId}; use crate::service::{NetworkMessage, RequestId};
use crate::status::ToStatusMessage; use crate::status::ToStatusMessage;
use crate::sync::block_lookups::{BlobRequestId, BlockRequestId}; use crate::sync::block_lookups::{BlobRequestId, BlockRequestId};
@ -36,9 +36,6 @@ pub struct SyncNetworkContext<T: BeaconChainTypes> {
/// The network channel to relay messages to the Network service. /// The network channel to relay messages to the Network service.
network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>, network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
/// Access to the network global vars.
network_globals: Arc<NetworkGlobals<T::EthSpec>>,
/// A sequential ID for all RPC requests. /// A sequential ID for all RPC requests.
request_id: Id, request_id: Id,
@ -59,8 +56,8 @@ pub struct SyncNetworkContext<T: BeaconChainTypes> {
/// `beacon_processor_send`. /// `beacon_processor_send`.
execution_engine_state: EngineState, execution_engine_state: EngineState,
/// Channel to send work to the beacon processor. /// Sends work to the beacon processor via a channel.
beacon_processor_send: mpsc::Sender<WorkEvent<T>>, network_beacon_processor: Arc<NetworkBeaconProcessor<T>>,
pub chain: Arc<BeaconChain<T>>, pub chain: Arc<BeaconChain<T>>,
@ -89,29 +86,31 @@ impl<T: EthSpec> From<Option<Arc<BlobSidecar<T>>>> for BlockOrBlob<T> {
impl<T: BeaconChainTypes> SyncNetworkContext<T> { impl<T: BeaconChainTypes> SyncNetworkContext<T> {
pub fn new( pub fn new(
network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>, network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
network_globals: Arc<NetworkGlobals<T::EthSpec>>, network_beacon_processor: Arc<NetworkBeaconProcessor<T>>,
beacon_processor_send: mpsc::Sender<WorkEvent<T>>,
chain: Arc<BeaconChain<T>>, chain: Arc<BeaconChain<T>>,
log: slog::Logger, log: slog::Logger,
) -> Self { ) -> Self {
SyncNetworkContext { SyncNetworkContext {
network_send, network_send,
network_globals,
request_id: 1,
range_requests: Default::default(),
backfill_requests: Default::default(),
range_blocks_and_blobs_requests: Default::default(),
backfill_blocks_and_blobs_requests: Default::default(),
execution_engine_state: EngineState::Online, // always assume `Online` at the start execution_engine_state: EngineState::Online, // always assume `Online` at the start
beacon_processor_send, request_id: 1,
range_requests: FnvHashMap::default(),
backfill_requests: FnvHashMap::default(),
range_blocks_and_blobs_requests: FnvHashMap::default(),
backfill_blocks_and_blobs_requests: FnvHashMap::default(),
network_beacon_processor,
chain, chain,
log, log,
} }
} }
pub fn network_globals(&self) -> &NetworkGlobals<T::EthSpec> {
&self.network_beacon_processor.network_globals
}
/// Returns the Client type of the peer if known /// Returns the Client type of the peer if known
pub fn client_type(&self, peer_id: &PeerId) -> Client { pub fn client_type(&self, peer_id: &PeerId) -> Client {
self.network_globals self.network_globals()
.peers .peers
.read() .read()
.peer_info(peer_id) .peer_info(peer_id)
@ -564,13 +563,13 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
}) })
} }
pub fn processor_channel_if_enabled(&self) -> Option<&mpsc::Sender<WorkEvent<T>>> { pub fn beacon_processor_if_enabled(&self) -> Option<&Arc<NetworkBeaconProcessor<T>>> {
self.is_execution_engine_online() self.is_execution_engine_online()
.then_some(&self.beacon_processor_send) .then_some(&self.network_beacon_processor)
} }
pub fn processor_channel(&self) -> &mpsc::Sender<WorkEvent<T>> { pub fn beacon_processor(&self) -> &Arc<NetworkBeaconProcessor<T>> {
&self.beacon_processor_send &self.network_beacon_processor
} }
fn next_id(&mut self) -> Id { fn next_id(&mut self) -> Id {

View File

@ -1,5 +1,5 @@
use super::batch::{BatchInfo, BatchProcessingResult, BatchState}; use super::batch::{BatchInfo, BatchProcessingResult, BatchState};
use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent}; use crate::network_beacon_processor::ChainSegmentProcessId;
use crate::sync::{ use crate::sync::{
manager::Id, network_context::SyncNetworkContext, BatchOperationOutcome, BatchProcessResult, manager::Id, network_context::SyncNetworkContext, BatchOperationOutcome, BatchProcessResult,
}; };
@ -294,8 +294,8 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
return Ok(KeepChain); return Ok(KeepChain);
} }
let beacon_processor_send = match network.processor_channel_if_enabled() { let beacon_processor = match network.beacon_processor_if_enabled() {
Some(channel) => channel, Some(beacon_processor) => beacon_processor,
None => return Ok(KeepChain), None => return Ok(KeepChain),
}; };
@ -317,9 +317,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
let process_id = ChainSegmentProcessId::RangeBatchId(self.id, batch_id); let process_id = ChainSegmentProcessId::RangeBatchId(self.id, batch_id);
self.current_processing_batch = Some(batch_id); self.current_processing_batch = Some(batch_id);
let work_event = BeaconWorkEvent::chain_segment(process_id, blocks); if let Err(e) = beacon_processor.send_chain_segment(process_id, blocks) {
if let Err(e) = beacon_processor_send.try_send(work_event) {
crit!(self.log, "Failed to send chain segment to processor."; "msg" => "process_batch", crit!(self.log, "Failed to send chain segment to processor."; "msg" => "process_batch",
"error" => %e, "batch" => self.processing_target); "error" => %e, "batch" => self.processing_target);
// This is unlikely to happen but it would stall syncing since the batch now has no // This is unlikely to happen but it would stall syncing since the batch now has no

View File

@ -379,27 +379,27 @@ where
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use crate::network_beacon_processor::NetworkBeaconProcessor;
use crate::beacon_processor::WorkEvent as BeaconWorkEvent;
use crate::service::RequestId; use crate::service::RequestId;
use crate::NetworkMessage; use crate::NetworkMessage;
use beacon_chain::{
builder::Witness, use super::*;
eth1_chain::CachingEth1Backend, use beacon_chain::builder::Witness;
parking_lot::RwLock, use beacon_chain::eth1_chain::CachingEth1Backend;
test_utils::{build_log, BeaconChainHarness, EphemeralHarnessType}, use beacon_chain::parking_lot::RwLock;
EngineState, use beacon_chain::EngineState;
}; use beacon_processor::WorkEvent as BeaconWorkEvent;
use lighthouse_network::{ use lighthouse_network::rpc::BlocksByRangeRequest;
rpc::{BlocksByRangeRequest, StatusMessage}, use lighthouse_network::Request;
NetworkGlobals, Request, use lighthouse_network::{rpc::StatusMessage, NetworkGlobals};
}; use slog::{o, Drain};
use slog::o;
use slot_clock::TestingSlotClock;
use std::{collections::HashSet, sync::Arc};
use store::MemoryStore;
use tokio::sync::mpsc; use tokio::sync::mpsc;
use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType};
use slot_clock::TestingSlotClock;
use std::collections::HashSet;
use std::sync::Arc;
use store::MemoryStore;
use types::{Hash256, MinimalEthSpec as E}; use types::{Hash256, MinimalEthSpec as E};
#[derive(Debug)] #[derive(Debug)]
@ -449,11 +449,23 @@ mod tests {
type TestBeaconChainType = type TestBeaconChainType =
Witness<TestingSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>; Witness<TestingSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>;
fn build_log(level: slog::Level, enabled: bool) -> slog::Logger {
let decorator = slog_term::TermDecorator::new().build();
let drain = slog_term::FullFormat::new(decorator).build().fuse();
let drain = slog_async::Async::new(drain).build().fuse();
if enabled {
slog::Logger::root(drain.filter_level(level).fuse(), o!())
} else {
slog::Logger::root(drain.filter(|_| false).fuse(), o!())
}
}
#[allow(unused)] #[allow(unused)]
struct TestRig { struct TestRig {
log: slog::Logger, log: slog::Logger,
/// To check what does sync send to the beacon processor. /// To check what does sync send to the beacon processor.
beacon_processor_rx: mpsc::Receiver<BeaconWorkEvent<TestBeaconChainType>>, beacon_processor_rx: mpsc::Receiver<BeaconWorkEvent<E>>,
/// To set up different scenarios where sync is told about known/unkown blocks. /// To set up different scenarios where sync is told about known/unkown blocks.
chain: Arc<FakeStorage>, chain: Arc<FakeStorage>,
/// Needed by range to handle communication with the network. /// Needed by range to handle communication with the network.
@ -581,7 +593,7 @@ mod tests {
fn expect_chain_segment(&mut self) { fn expect_chain_segment(&mut self) {
match self.beacon_processor_rx.try_recv() { match self.beacon_processor_rx.try_recv() {
Ok(work) => { Ok(work) => {
assert_eq!(work.work_type(), crate::beacon_processor::CHAIN_SEGMENT); assert_eq!(work.work_type(), beacon_processor::CHAIN_SEGMENT);
} }
other => panic!("Expected chain segment process, found {:?}", other), other => panic!("Expected chain segment process, found {:?}", other),
} }
@ -591,7 +603,7 @@ mod tests {
fn range(log_enabled: bool) -> (TestRig, RangeSync<TestBeaconChainType, FakeStorage>) { fn range(log_enabled: bool) -> (TestRig, RangeSync<TestBeaconChainType, FakeStorage>) {
let log = build_log(slog::Level::Trace, log_enabled); let log = build_log(slog::Level::Trace, log_enabled);
// Initialise a new beacon chain // Initialise a new beacon chain
let harness = BeaconChainHarness::<EphemeralHarnessType<E>>::builder(E::default()) let harness = BeaconChainHarness::<EphemeralHarnessType<E>>::builder(E)
.default_spec() .default_spec()
.logger(log.clone()) .logger(log.clone())
.deterministic_keypairs(1) .deterministic_keypairs(1)
@ -600,17 +612,17 @@ mod tests {
let chain = harness.chain; let chain = harness.chain;
let fake_store = Arc::new(FakeStorage::default()); let fake_store = Arc::new(FakeStorage::default());
let (beacon_processor_tx, beacon_processor_rx) = mpsc::channel(10);
let range_sync = RangeSync::<TestBeaconChainType, FakeStorage>::new( let range_sync = RangeSync::<TestBeaconChainType, FakeStorage>::new(
fake_store.clone(), fake_store.clone(),
log.new(o!("component" => "range")), log.new(o!("component" => "range")),
); );
let (network_tx, network_rx) = mpsc::unbounded_channel(); let (network_tx, network_rx) = mpsc::unbounded_channel();
let globals = Arc::new(NetworkGlobals::new_test_globals(Vec::new(), &log)); let globals = Arc::new(NetworkGlobals::new_test_globals(Vec::new(), &log));
let (network_beacon_processor, beacon_processor_rx) =
NetworkBeaconProcessor::null_for_testing(globals.clone());
let cx = SyncNetworkContext::new( let cx = SyncNetworkContext::new(
network_tx, network_tx,
globals.clone(), Arc::new(network_beacon_processor),
beacon_processor_tx,
chain, chain,
log.new(o!("component" => "network_context")), log.new(o!("component" => "network_context")),
); );

View File

@ -1,5 +1,6 @@
use clap::{App, Arg}; use clap::{App, Arg};
use strum::VariantNames; use strum::VariantNames;
use types::ProgressiveBalancesMode;
pub fn cli_app<'a, 'b>() -> App<'a, 'b> { pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
App::new("beacon_node") App::new("beacon_node")
@ -539,6 +540,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
[default: 8192 (mainnet) or 64 (minimal)]") [default: 8192 (mainnet) or 64 (minimal)]")
.takes_value(true) .takes_value(true)
) )
.arg(
Arg::with_name("epochs-per-migration")
.long("epochs-per-migration")
.value_name("N")
.help("The number of epochs to wait between running the migration of data from the \
hot DB to the cold DB. Less frequent runs can be useful for minimizing disk \
writes")
.default_value("1")
.takes_value(true)
)
.arg( .arg(
Arg::with_name("block-cache-size") Arg::with_name("block-cache-size")
.long("block-cache-size") .long("block-cache-size")
@ -889,7 +900,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
.help("Set the timeout for checkpoint sync calls to remote beacon node HTTP endpoint.") .help("Set the timeout for checkpoint sync calls to remote beacon node HTTP endpoint.")
.value_name("SECONDS") .value_name("SECONDS")
.takes_value(true) .takes_value(true)
.default_value("60") .default_value("180")
) )
.arg( .arg(
Arg::with_name("reconstruct-historic-states") Arg::with_name("reconstruct-historic-states")
@ -1159,4 +1170,17 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
developers. This directory is not pruned, users should be careful to avoid \ developers. This directory is not pruned, users should be careful to avoid \
filling up their disks.") filling up their disks.")
) )
.arg(
Arg::with_name("progressive-balances")
.long("progressive-balances")
.value_name("MODE")
.help("Options to enable or disable the progressive balances cache for \
unrealized FFG progression calculation. The default `checked` mode compares \
the progressive balances from the cache against results from the existing \
method. If there is a mismatch, it falls back to the existing method. The \
optimized mode (`fast`) is faster but is still experimental, and is \
not recommended for mainnet usage at this time.")
.takes_value(true)
.possible_values(ProgressiveBalancesMode::VARIANTS)
)
} }

View File

@ -421,6 +421,12 @@ pub fn get_config<E: EthSpec>(
client_config.store.prune_payloads = prune_payloads; client_config.store.prune_payloads = prune_payloads;
} }
if let Some(epochs_per_migration) =
clap_utils::parse_optional(cli_args, "epochs-per-migration")?
{
client_config.chain.epochs_per_migration = epochs_per_migration;
}
if let Some(prune_blobs) = clap_utils::parse_optional(cli_args, "prune-blobs")? { if let Some(prune_blobs) = clap_utils::parse_optional(cli_args, "prune-blobs")? {
client_config.store.prune_blobs = prune_blobs; client_config.store.prune_blobs = prune_blobs;
} }
@ -837,6 +843,12 @@ pub fn get_config<E: EthSpec>(
client_config.network.invalid_block_storage = Some(path); client_config.network.invalid_block_storage = Some(path);
} }
if let Some(progressive_balances_mode) =
clap_utils::parse_optional(cli_args, "progressive-balances")?
{
client_config.chain.progressive_balances_mode = progressive_balances_mode;
}
Ok(client_config) Ok(client_config)
} }

View File

@ -43,7 +43,6 @@ pub use metrics::scrape_for_metrics;
use parking_lot::MutexGuard; use parking_lot::MutexGuard;
use std::sync::Arc; use std::sync::Arc;
use strum::{EnumString, IntoStaticStr}; use strum::{EnumString, IntoStaticStr};
use types::blob_sidecar::BlobSidecarList;
pub use types::*; pub use types::*;
pub type ColumnIter<'a> = Box<dyn Iterator<Item = Result<(Hash256, Vec<u8>), Error>> + 'a>; pub type ColumnIter<'a> = Box<dyn Iterator<Item = Result<(Hash256, Vec<u8>), Error>> + 'a>;

View File

@ -395,6 +395,7 @@ macro_rules! impl_try_into_beacon_state {
// Caching // Caching
total_active_balance: <_>::default(), total_active_balance: <_>::default(),
progressive_balances_cache: <_>::default(),
committee_caches: <_>::default(), committee_caches: <_>::default(),
pubkey_cache: <_>::default(), pubkey_cache: <_>::default(),
exit_cache: <_>::default(), exit_cache: <_>::default(),

View File

@ -63,7 +63,7 @@ where
.load_cold_state_by_slot(lower_limit_slot)? .load_cold_state_by_slot(lower_limit_slot)?
.ok_or(HotColdDBError::MissingLowerLimitState(lower_limit_slot))?; .ok_or(HotColdDBError::MissingLowerLimitState(lower_limit_slot))?;
state.build_all_caches(&self.spec)?; state.build_caches(&self.spec)?;
process_results(block_root_iter, |iter| -> Result<(), Error> { process_results(block_root_iter, |iter| -> Result<(), Error> {
let mut io_batch = vec![]; let mut io_batch = vec![];

View File

@ -48,6 +48,17 @@ The Ethereum community provides various [public endpoints](https://eth-clients.g
lighthouse bn --checkpoint-sync-url https://example.com/ ... lighthouse bn --checkpoint-sync-url https://example.com/ ...
``` ```
### Adjusting the timeout
If the beacon node fails to start due to a timeout from the checkpoint sync server, you can try
running it again with a longer timeout by adding the flag `--checkpoint-sync-url-timeout`.
```
lighthouse bn --checkpoint-sync-url-timeout 300 --checkpoint-sync-url https://example.com/ ...
```
The flag takes a value in seconds. For more information see `lighthouse bn --help`.
## Backfilling Blocks ## Backfilling Blocks
Once forwards sync completes, Lighthouse will commence a "backfill sync" to download the blocks Once forwards sync completes, Lighthouse will commence a "backfill sync" to download the blocks

View File

@ -1,6 +1,6 @@
[package] [package]
name = "boot_node" name = "boot_node"
version = "4.2.0" version = "4.3.0"
authors = ["Sigma Prime <contact@sigmaprime.io>"] authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2021" edition = "2021"

View File

@ -322,6 +322,26 @@ impl BeaconNodeHttpClient {
ok_or_error(response).await ok_or_error(response).await
} }
/// Generic POST function supporting arbitrary responses and timeouts.
async fn post_generic_with_consensus_version<T: Serialize, U: IntoUrl>(
&self,
url: U,
body: &T,
timeout: Option<Duration>,
fork: ForkName,
) -> Result<Response, Error> {
let mut builder = self.client.post(url);
if let Some(timeout) = timeout {
builder = builder.timeout(timeout);
}
let response = builder
.header(CONSENSUS_VERSION_HEADER, fork.to_string())
.json(body)
.send()
.await?;
ok_or_error(response).await
}
/// `GET beacon/genesis` /// `GET beacon/genesis`
/// ///
/// ## Errors /// ## Errors
@ -654,6 +674,77 @@ impl BeaconNodeHttpClient {
Ok(()) Ok(())
} }
pub fn post_beacon_blocks_v2_path(
&self,
validation_level: Option<BroadcastValidation>,
) -> Result<Url, Error> {
let mut path = self.eth_path(V2)?;
path.path_segments_mut()
.map_err(|_| Error::InvalidUrl(self.server.clone()))?
.extend(&["beacon", "blocks"]);
path.set_query(
validation_level
.map(|v| format!("broadcast_validation={}", v))
.as_deref(),
);
Ok(path)
}
pub fn post_beacon_blinded_blocks_v2_path(
&self,
validation_level: Option<BroadcastValidation>,
) -> Result<Url, Error> {
let mut path = self.eth_path(V2)?;
path.path_segments_mut()
.map_err(|_| Error::InvalidUrl(self.server.clone()))?
.extend(&["beacon", "blinded_blocks"]);
path.set_query(
validation_level
.map(|v| format!("broadcast_validation={}", v))
.as_deref(),
);
Ok(path)
}
/// `POST v2/beacon/blocks`
pub async fn post_beacon_blocks_v2<T: EthSpec, Payload: AbstractExecPayload<T>>(
&self,
block_contents: &SignedBlockContents<T, Payload>,
validation_level: Option<BroadcastValidation>,
) -> Result<(), Error> {
self.post_generic_with_consensus_version(
self.post_beacon_blocks_v2_path(validation_level)?,
block_contents,
Some(self.timeouts.proposal),
block_contents.signed_block().message().body().fork_name(),
)
.await?;
Ok(())
}
/// `POST v2/beacon/blinded_blocks`
//TODO(sean) update this along with builder updates
pub async fn post_beacon_blinded_blocks_v2<T: EthSpec>(
&self,
block: &SignedBlindedBeaconBlock<T>,
validation_level: Option<BroadcastValidation>,
) -> Result<(), Error> {
self.post_generic_with_consensus_version(
self.post_beacon_blinded_blocks_v2_path(validation_level)?,
block,
Some(self.timeouts.proposal),
block.message().body().fork_name(),
)
.await?;
Ok(())
}
/// Path for `v2/beacon/blocks` /// Path for `v2/beacon/blocks`
pub fn get_beacon_blocks_path(&self, block_id: BlockId) -> Result<Url, Error> { pub fn get_beacon_blocks_path(&self, block_id: BlockId) -> Result<Url, Error> {
let mut path = self.eth_path(V2)?; let mut path = self.eth_path(V2)?;

View File

@ -7,7 +7,7 @@ use mediatype::{names, MediaType, MediaTypeList};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use ssz_derive::Encode; use ssz_derive::Encode;
use std::convert::TryFrom; use std::convert::TryFrom;
use std::fmt; use std::fmt::{self, Display};
use std::str::{from_utf8, FromStr}; use std::str::{from_utf8, FromStr};
use std::time::Duration; use std::time::Duration;
pub use types::*; pub use types::*;
@ -1261,6 +1261,50 @@ pub struct ForkChoiceNode {
pub execution_block_hash: Option<Hash256>, pub execution_block_hash: Option<Hash256>,
} }
#[derive(Copy, Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum BroadcastValidation {
Gossip,
Consensus,
ConsensusAndEquivocation,
}
impl Default for BroadcastValidation {
fn default() -> Self {
Self::Gossip
}
}
impl Display for BroadcastValidation {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Gossip => write!(f, "gossip"),
Self::Consensus => write!(f, "consensus"),
Self::ConsensusAndEquivocation => write!(f, "consensus_and_equivocation"),
}
}
}
impl FromStr for BroadcastValidation {
type Err = &'static str;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"gossip" => Ok(Self::Gossip),
"consensus" => Ok(Self::Consensus),
"consensus_and_equivocation" => Ok(Self::ConsensusAndEquivocation),
_ => Err("Invalid broadcast validation level"),
}
}
}
#[derive(Default, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub struct BroadcastValidationQuery {
#[serde(default)]
pub broadcast_validation: BroadcastValidation,
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
@ -1375,6 +1419,20 @@ pub enum SignedBlockContents<T: EthSpec, Payload: AbstractExecPayload<T> = FullP
} }
impl<T: EthSpec, Payload: AbstractExecPayload<T>> SignedBlockContents<T, Payload> { impl<T: EthSpec, Payload: AbstractExecPayload<T>> SignedBlockContents<T, Payload> {
pub fn new(
block: SignedBeaconBlock<T, Payload>,
blobs: Option<SignedBlobSidecarList<T>>,
) -> Self {
if let Some(blobs) = blobs {
Self::BlockAndBlobSidecars(SignedBeaconBlockAndBlobSidecars {
signed_block: block,
signed_blob_sidecars: blobs,
})
} else {
Self::Block(block)
}
}
pub fn signed_block(&self) -> &SignedBeaconBlock<T, Payload> { pub fn signed_block(&self) -> &SignedBeaconBlock<T, Payload> {
match self { match self {
SignedBlockContents::BlockAndBlobSidecars(block_and_sidecars) => { SignedBlockContents::BlockAndBlobSidecars(block_and_sidecars) => {
@ -1384,6 +1442,15 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> SignedBlockContents<T, Payload
} }
} }
pub fn blobs_cloned(&self) -> Option<SignedBlobSidecarList<T>> {
match self {
SignedBlockContents::BlockAndBlobSidecars(block_and_sidecars) => {
Some(block_and_sidecars.signed_blob_sidecars.clone())
}
SignedBlockContents::Block(_block) => None,
}
}
pub fn deconstruct(self) -> BlockContentsTuple<T, Payload> { pub fn deconstruct(self) -> BlockContentsTuple<T, Payload> {
match self { match self {
SignedBlockContents::BlockAndBlobSidecars(block_and_sidecars) => ( SignedBlockContents::BlockAndBlobSidecars(block_and_sidecars) => (

View File

@ -1,6 +1,8 @@
# Lighthouse Team (Sigma Prime) # Lighthouse Team (Sigma Prime)
- enr:-Jq4QItoFUuug_n_qbYbU0OY04-np2wT8rUCauOOXNi0H3BWbDj-zbfZb7otA7jZ6flbBpx1LNZK2TDebZ9dEKx84LYBhGV0aDKQtTA_KgEAAAD__________4JpZIJ2NIJpcISsaa0ZiXNlY3AyNTZrMaEDHAD2JKYevx89W0CcFJFiskdcEzkH_Wdv9iW42qLK79ODdWRwgiMo - enr:-Le4QPUXJS2BTORXxyx2Ia-9ae4YqA_JWX3ssj4E_J-3z1A-HmFGrU8BpvpqhNabayXeOZ2Nq_sbeDgtzMJpLLnXFgAChGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISsaa0Zg2lwNpAkAIkHAAAAAPA8kv_-awoTiXNlY3AyNTZrMaEDHAD2JKYevx89W0CcFJFiskdcEzkH_Wdv9iW42qLK79ODdWRwgiMohHVkcDaCI4I
- enr:-Jq4QN_YBsUOqQsty1OGvYv48PMaiEt1AzGD1NkYQHaxZoTyVGqMYXg0K9c0LPNWC9pkXmggApp8nygYLsQwScwAgfgBhGV0aDKQtTA_KgEAAAD__________4JpZIJ2NIJpcISLosQxiXNlY3AyNTZrMaEDBJj7_dLFACaxBfaI8KZTh_SSJUjhyAyfshimvSqo22WDdWRwgiMo - enr:-Le4QLHZDSvkLfqgEo8IWGG96h6mxwe_PsggC20CL3neLBjfXLGAQFOPSltZ7oP6ol54OvaNqO02Rnvb8YmDR274uq8ChGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISLosQxg2lwNpAqAX4AAAAAAPA8kv_-ax65iXNlY3AyNTZrMaEDBJj7_dLFACaxBfaI8KZTh_SSJUjhyAyfshimvSqo22WDdWRwgiMohHVkcDaCI4I
- enr:-Le4QH6LQrusDbAHPjU_HcKOuMeXfdEB5NJyXgHWFadfHgiySqeDyusQMvfphdYWOzuSZO9Uq2AMRJR5O4ip7OvVma8BhGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISLY9ncg2lwNpAkAh8AgQIBAAAAAAAAAAmXiXNlY3AyNTZrMaECDYCZTZEksF-kmgPholqgVt8IXr-8L7Nu7YrZ7HUpgxmDdWRwgiMohHVkcDaCI4I
- enr:-Le4QIqLuWybHNONr933Lk0dcMmAB5WgvGKRyDihy1wHDIVlNuuztX62W51voT4I8qD34GcTEOTmag1bcdZ_8aaT4NUBhGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISLY04ng2lwNpAkAh8AgAIBAAAAAAAAAA-fiXNlY3AyNTZrMaEDscnRV6n1m-D9ID5UsURk0jsoKNXt1TIrj8uKOGW6iluDdWRwgiMohHVkcDaCI4I
# EF Team # EF Team
- enr:-Ku4QHqVeJ8PPICcWk1vSn_XcSkjOkNiTg6Fmii5j6vUQgvzMc9L1goFnLKgXqBJspJjIsB91LTOleFmyWWrFVATGngBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAMRHkWJc2VjcDI1NmsxoQKLVXFOhp2uX6jeT0DvvDpPcU8FWMjQdR4wMuORMhpX24N1ZHCCIyg - enr:-Ku4QHqVeJ8PPICcWk1vSn_XcSkjOkNiTg6Fmii5j6vUQgvzMc9L1goFnLKgXqBJspJjIsB91LTOleFmyWWrFVATGngBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAMRHkWJc2VjcDI1NmsxoQKLVXFOhp2uX6jeT0DvvDpPcU8FWMjQdR4wMuORMhpX24N1ZHCCIyg
- enr:-Ku4QG-2_Md3sZIAUebGYT6g0SMskIml77l6yR-M_JXc-UdNHCmHQeOiMLbylPejyJsdAPsTHJyjJB2sYGDLe0dn8uYBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhBLY-NyJc2VjcDI1NmsxoQORcM6e19T1T9gi7jxEZjk_sjVLGFscUNqAY9obgZaxbIN1ZHCCIyg - enr:-Ku4QG-2_Md3sZIAUebGYT6g0SMskIml77l6yR-M_JXc-UdNHCmHQeOiMLbylPejyJsdAPsTHJyjJB2sYGDLe0dn8uYBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhBLY-NyJc2VjcDI1NmsxoQORcM6e19T1T9gi7jxEZjk_sjVLGFscUNqAY9obgZaxbIN1ZHCCIyg

View File

@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!(
// NOTE: using --match instead of --exclude for compatibility with old Git // NOTE: using --match instead of --exclude for compatibility with old Git
"--match=thiswillnevermatchlol" "--match=thiswillnevermatchlol"
], ],
prefix = "Lighthouse/v4.2.0-", prefix = "Lighthouse/v4.3.0-",
fallback = "Lighthouse/v4.2.0" fallback = "Lighthouse/v4.3.0"
); );
/// Returns `VERSION`, but with platform information appended to the end. /// Returns `VERSION`, but with platform information appended to the end.

View File

@ -1,10 +1,15 @@
use crate::{ForkChoiceStore, InvalidationOperation}; use crate::{ForkChoiceStore, InvalidationOperation};
use per_epoch_processing::altair::participation_cache::Error as ParticipationCacheError;
use proto_array::{ use proto_array::{
Block as ProtoBlock, DisallowedReOrgOffsets, ExecutionStatus, ProposerHeadError, Block as ProtoBlock, DisallowedReOrgOffsets, ExecutionStatus, ProposerHeadError,
ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold,
}; };
use slog::{crit, debug, warn, Logger}; use slog::{crit, debug, error, warn, Logger};
use ssz_derive::{Decode, Encode}; use ssz_derive::{Decode, Encode};
use state_processing::per_epoch_processing::altair::ParticipationCache;
use state_processing::per_epoch_processing::{
weigh_justification_and_finalization, JustificationAndFinalizationState,
};
use state_processing::{ use state_processing::{
per_block_processing::errors::AttesterSlashingValidationError, per_epoch_processing, per_block_processing::errors::AttesterSlashingValidationError, per_epoch_processing,
}; };
@ -18,6 +23,7 @@ use types::{
EthSpec, ExecPayload, ExecutionBlockHash, Hash256, IndexedAttestation, RelativeEpoch, EthSpec, ExecPayload, ExecutionBlockHash, Hash256, IndexedAttestation, RelativeEpoch,
SignedBeaconBlock, Slot, SignedBeaconBlock, Slot,
}; };
use types::{ProgressiveBalancesCache, ProgressiveBalancesMode};
#[derive(Debug)] #[derive(Debug)]
pub enum Error<T> { pub enum Error<T> {
@ -72,7 +78,9 @@ pub enum Error<T> {
}, },
UnrealizedVoteProcessing(state_processing::EpochProcessingError), UnrealizedVoteProcessing(state_processing::EpochProcessingError),
ParticipationCacheBuild(BeaconStateError), ParticipationCacheBuild(BeaconStateError),
ParticipationCacheError(ParticipationCacheError),
ValidatorStatuses(BeaconStateError), ValidatorStatuses(BeaconStateError),
ProgressiveBalancesCacheCheckFailed(String),
} }
impl<T> From<InvalidAttestation> for Error<T> { impl<T> From<InvalidAttestation> for Error<T> {
@ -93,6 +101,18 @@ impl<T> From<state_processing::EpochProcessingError> for Error<T> {
} }
} }
impl<T> From<BeaconStateError> for Error<T> {
fn from(e: BeaconStateError) -> Self {
Error::BeaconStateError(e)
}
}
impl<T> From<ParticipationCacheError> for Error<T> {
fn from(e: ParticipationCacheError) -> Self {
Error::ParticipationCacheError(e)
}
}
#[derive(Debug, Clone, Copy)] #[derive(Debug, Clone, Copy)]
/// Controls how fork choice should behave when restoring from a persisted fork choice. /// Controls how fork choice should behave when restoring from a persisted fork choice.
pub enum ResetPayloadStatuses { pub enum ResetPayloadStatuses {
@ -645,7 +665,9 @@ where
block_delay: Duration, block_delay: Duration,
state: &BeaconState<E>, state: &BeaconState<E>,
payload_verification_status: PayloadVerificationStatus, payload_verification_status: PayloadVerificationStatus,
progressive_balances_mode: ProgressiveBalancesMode,
spec: &ChainSpec, spec: &ChainSpec,
log: &Logger,
) -> Result<(), Error<T::Error>> { ) -> Result<(), Error<T::Error>> {
// If this block has already been processed we do not need to reprocess it. // If this block has already been processed we do not need to reprocess it.
// We check this immediately in case re-processing the block mutates some property of the // We check this immediately in case re-processing the block mutates some property of the
@ -739,46 +761,85 @@ where
parent_justified.epoch == block_epoch && parent_finalized.epoch + 1 >= block_epoch parent_justified.epoch == block_epoch && parent_finalized.epoch + 1 >= block_epoch
}); });
let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = if let Some((
if let Some((parent_justified, parent_finalized)) = parent_checkpoints { parent_justified,
(parent_justified, parent_finalized) parent_finalized,
} else { )) =
let justification_and_finalization_state = match block { parent_checkpoints
// TODO(deneb): Ensure that the final specification {
// does not substantially modify per epoch processing. (parent_justified, parent_finalized)
BeaconBlockRef::Deneb(_) } else {
| BeaconBlockRef::Capella(_) let justification_and_finalization_state = match block {
| BeaconBlockRef::Merge(_) BeaconBlockRef::Deneb(_)
| BeaconBlockRef::Altair(_) => { | BeaconBlockRef::Capella(_)
let participation_cache = | BeaconBlockRef::Merge(_)
per_epoch_processing::altair::ParticipationCache::new(state, spec) | BeaconBlockRef::Altair(_) => match progressive_balances_mode {
.map_err(Error::ParticipationCacheBuild)?; ProgressiveBalancesMode::Disabled => {
let participation_cache = ParticipationCache::new(state, spec)
.map_err(Error::ParticipationCacheBuild)?;
per_epoch_processing::altair::process_justification_and_finalization( per_epoch_processing::altair::process_justification_and_finalization(
state, state,
&participation_cache, &participation_cache,
)? )?
} }
BeaconBlockRef::Base(_) => { ProgressiveBalancesMode::Fast
let mut validator_statuses = | ProgressiveBalancesMode::Checked
per_epoch_processing::base::ValidatorStatuses::new(state, spec) | ProgressiveBalancesMode::Strict => {
.map_err(Error::ValidatorStatuses)?; let maybe_participation_cache = progressive_balances_mode
validator_statuses .perform_comparative_checks()
.process_attestations(state) .then(|| {
.map_err(Error::ValidatorStatuses)?; ParticipationCache::new(state, spec)
per_epoch_processing::base::process_justification_and_finalization( .map_err(Error::ParticipationCacheBuild)
state, })
&validator_statuses.total_balances, .transpose()?;
spec,
)?
}
};
( process_justification_and_finalization_from_progressive_cache::<E, T>(
justification_and_finalization_state.current_justified_checkpoint(), state,
justification_and_finalization_state.finalized_checkpoint(), maybe_participation_cache.as_ref(),
) )
.or_else(|e| {
if progressive_balances_mode != ProgressiveBalancesMode::Strict {
error!(
log,
"Processing with progressive balances cache failed";
"info" => "falling back to the non-optimized processing method",
"error" => ?e,
);
let participation_cache = maybe_participation_cache
.map(Ok)
.unwrap_or_else(|| ParticipationCache::new(state, spec))
.map_err(Error::ParticipationCacheBuild)?;
per_epoch_processing::altair::process_justification_and_finalization(
state,
&participation_cache,
).map_err(Error::from)
} else {
Err(e)
}
})?
}
},
BeaconBlockRef::Base(_) => {
let mut validator_statuses =
per_epoch_processing::base::ValidatorStatuses::new(state, spec)
.map_err(Error::ValidatorStatuses)?;
validator_statuses
.process_attestations(state)
.map_err(Error::ValidatorStatuses)?;
per_epoch_processing::base::process_justification_and_finalization(
state,
&validator_statuses.total_balances,
spec,
)?
}
}; };
(
justification_and_finalization_state.current_justified_checkpoint(),
justification_and_finalization_state.finalized_checkpoint(),
)
};
// Update best known unrealized justified & finalized checkpoints // Update best known unrealized justified & finalized checkpoints
if unrealized_justified_checkpoint.epoch if unrealized_justified_checkpoint.epoch
> self.fc_store.unrealized_justified_checkpoint().epoch > self.fc_store.unrealized_justified_checkpoint().epoch
@ -1504,6 +1565,92 @@ where
} }
} }
/// Process justification and finalization using progressive cache. Also performs a comparative
/// check against the `ParticipationCache` if it is supplied.
///
/// Returns an error if the cache is not initialized or if there is a mismatch on the comparative check.
fn process_justification_and_finalization_from_progressive_cache<E, T>(
state: &BeaconState<E>,
maybe_participation_cache: Option<&ParticipationCache>,
) -> Result<JustificationAndFinalizationState<E>, Error<T::Error>>
where
E: EthSpec,
T: ForkChoiceStore<E>,
{
let justification_and_finalization_state = JustificationAndFinalizationState::new(state);
if state.current_epoch() <= E::genesis_epoch() + 1 {
return Ok(justification_and_finalization_state);
}
// Load cached balances
let progressive_balances_cache: &ProgressiveBalancesCache = state.progressive_balances_cache();
let previous_target_balance =
progressive_balances_cache.previous_epoch_target_attesting_balance()?;
let current_target_balance =
progressive_balances_cache.current_epoch_target_attesting_balance()?;
let total_active_balance = state.get_total_active_balance()?;
if let Some(participation_cache) = maybe_participation_cache {
check_progressive_balances::<E, T>(
state,
participation_cache,
previous_target_balance,
current_target_balance,
total_active_balance,
)?;
}
weigh_justification_and_finalization(
justification_and_finalization_state,
total_active_balance,
previous_target_balance,
current_target_balance,
)
.map_err(Error::from)
}
/// Perform comparative checks against `ParticipationCache`, will return error if there's a mismatch.
fn check_progressive_balances<E, T>(
state: &BeaconState<E>,
participation_cache: &ParticipationCache,
cached_previous_target_balance: u64,
cached_current_target_balance: u64,
cached_total_active_balance: u64,
) -> Result<(), Error<T::Error>>
where
E: EthSpec,
T: ForkChoiceStore<E>,
{
let slot = state.slot();
let epoch = state.current_epoch();
// Check previous epoch target balances
let previous_target_balance = participation_cache.previous_epoch_target_attesting_balance()?;
if previous_target_balance != cached_previous_target_balance {
return Err(Error::ProgressiveBalancesCacheCheckFailed(
format!("Previous epoch target attesting balance mismatch, slot: {}, epoch: {}, actual: {}, cached: {}", slot, epoch, previous_target_balance, cached_previous_target_balance)
));
}
// Check current epoch target balances
let current_target_balance = participation_cache.current_epoch_target_attesting_balance()?;
if current_target_balance != cached_current_target_balance {
return Err(Error::ProgressiveBalancesCacheCheckFailed(
format!("Current epoch target attesting balance mismatch, slot: {}, epoch: {}, actual: {}, cached: {}", slot, epoch, current_target_balance, cached_current_target_balance)
));
}
// Check current epoch total balances
let total_active_balance = participation_cache.current_epoch_total_active_balance();
if total_active_balance != cached_total_active_balance {
return Err(Error::ProgressiveBalancesCacheCheckFailed(
format!("Current epoch total active balance mismatch, slot: {}, epoch: {}, actual: {}, cached: {}", slot, epoch, total_active_balance, cached_total_active_balance)
));
}
Ok(())
}
/// Helper struct that is used to encode/decode the state of the `ForkChoice` as SSZ bytes. /// Helper struct that is used to encode/decode the state of the `ForkChoice` as SSZ bytes.
/// ///
/// This is used when persisting the state of the fork choice to disk. /// This is used when persisting the state of the fork choice to disk.

View File

@ -17,12 +17,13 @@ use fork_choice::{
use store::MemoryStore; use store::MemoryStore;
use types::{ use types::{
test_utils::generate_deterministic_keypair, BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, test_utils::generate_deterministic_keypair, BeaconBlockRef, BeaconState, ChainSpec, Checkpoint,
Epoch, EthSpec, Hash256, IndexedAttestation, MainnetEthSpec, SignedBeaconBlock, Slot, SubnetId, Epoch, EthSpec, ForkName, Hash256, IndexedAttestation, MainnetEthSpec, ProgressiveBalancesMode,
RelativeEpoch, SignedBeaconBlock, Slot, SubnetId,
}; };
pub type E = MainnetEthSpec; pub type E = MainnetEthSpec;
pub const VALIDATOR_COUNT: usize = 32; pub const VALIDATOR_COUNT: usize = 64;
/// Defines some delay between when an attestation is created and when it is mutated. /// Defines some delay between when an attestation is created and when it is mutated.
pub enum MutationDelay { pub enum MutationDelay {
@ -68,6 +69,24 @@ impl ForkChoiceTest {
Self { harness } Self { harness }
} }
/// Creates a new tester with the specified `ProgressiveBalancesMode` and genesis from latest fork.
fn new_with_progressive_balances_mode(mode: ProgressiveBalancesMode) -> ForkChoiceTest {
// genesis with latest fork (at least altair required to test the cache)
let spec = ForkName::latest().make_genesis_spec(ChainSpec::default());
let harness = BeaconChainHarness::builder(MainnetEthSpec)
.spec(spec)
.chain_config(ChainConfig {
progressive_balances_mode: mode,
..ChainConfig::default()
})
.deterministic_keypairs(VALIDATOR_COUNT)
.fresh_ephemeral_store()
.mock_execution_layer()
.build();
Self { harness }
}
/// Get a value from the `ForkChoice` instantiation. /// Get a value from the `ForkChoice` instantiation.
fn get<T, U>(&self, func: T) -> U fn get<T, U>(&self, func: T) -> U
where where
@ -212,6 +231,39 @@ impl ForkChoiceTest {
self self
} }
/// Slash a validator from the previous epoch committee.
pub async fn add_previous_epoch_attester_slashing(self) -> Self {
let state = self.harness.get_current_state();
let previous_epoch_shuffling = state.get_shuffling(RelativeEpoch::Previous).unwrap();
let validator_indices = previous_epoch_shuffling
.iter()
.map(|idx| *idx as u64)
.take(1)
.collect();
self.harness
.add_attester_slashing(validator_indices)
.unwrap();
self
}
/// Slash the proposer of a block in the previous epoch.
pub async fn add_previous_epoch_proposer_slashing(self, slots_per_epoch: u64) -> Self {
let previous_epoch_slot = self.harness.get_current_slot() - slots_per_epoch;
let previous_epoch_block = self
.harness
.chain
.block_at_slot(previous_epoch_slot, WhenSlotSkipped::None)
.unwrap()
.unwrap();
let proposer_index: u64 = previous_epoch_block.message().proposer_index();
self.harness.add_proposer_slashing(proposer_index).unwrap();
self
}
/// Apply `count` blocks to the chain (without attestations). /// Apply `count` blocks to the chain (without attestations).
pub async fn apply_blocks_without_new_attestations(self, count: usize) -> Self { pub async fn apply_blocks_without_new_attestations(self, count: usize) -> Self {
self.harness.advance_slot(); self.harness.advance_slot();
@ -286,7 +338,9 @@ impl ForkChoiceTest {
Duration::from_secs(0), Duration::from_secs(0),
&state, &state,
PayloadVerificationStatus::Verified, PayloadVerificationStatus::Verified,
self.harness.chain.config.progressive_balances_mode,
&self.harness.chain.spec, &self.harness.chain.spec,
self.harness.logger(),
) )
.unwrap(); .unwrap();
self self
@ -328,7 +382,9 @@ impl ForkChoiceTest {
Duration::from_secs(0), Duration::from_secs(0),
&state, &state,
PayloadVerificationStatus::Verified, PayloadVerificationStatus::Verified,
self.harness.chain.config.progressive_balances_mode,
&self.harness.chain.spec, &self.harness.chain.spec,
self.harness.logger(),
) )
.err() .err()
.expect("on_block did not return an error"); .expect("on_block did not return an error");
@ -1287,3 +1343,65 @@ async fn weak_subjectivity_check_epoch_boundary_is_skip_slot_failure() {
.assert_finalized_epoch_is_less_than(checkpoint.epoch) .assert_finalized_epoch_is_less_than(checkpoint.epoch)
.assert_shutdown_signal_sent(); .assert_shutdown_signal_sent();
} }
/// Checks that `ProgressiveBalancesCache` is updated correctly after an attester slashing event,
/// where the slashed validator is a target attester in previous / current epoch.
#[tokio::test]
async fn progressive_balances_cache_attester_slashing() {
ForkChoiceTest::new_with_progressive_balances_mode(ProgressiveBalancesMode::Strict)
// first two epochs
.apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0)
.await
.unwrap()
// Note: This test may fail if the shuffling used changes, right now it re-runs with
// deterministic shuffling. A shuffling change my cause the slashed proposer to propose
// again in the next epoch, which results in a block processing failure
// (`HeaderInvalid::ProposerSlashed`). The harness should be re-worked to successfully skip
// the slot in this scenario rather than panic-ing. The same applies to
// `progressive_balances_cache_proposer_slashing`.
.apply_blocks(1)
.await
.add_previous_epoch_attester_slashing()
.await
// expect fork choice to import blocks successfully after a previous epoch attester is
// slashed, i.e. the slashed attester's balance is correctly excluded from
// the previous epoch total balance in `ProgressiveBalancesCache`.
.apply_blocks(1)
.await
// expect fork choice to import another epoch of blocks successfully - the slashed
// attester's balance should be excluded from the current epoch total balance in
// `ProgressiveBalancesCache` as well.
.apply_blocks(MainnetEthSpec::slots_per_epoch() as usize)
.await;
}
/// Checks that `ProgressiveBalancesCache` is updated correctly after a proposer slashing event,
/// where the slashed validator is a target attester in previous / current epoch.
#[tokio::test]
async fn progressive_balances_cache_proposer_slashing() {
ForkChoiceTest::new_with_progressive_balances_mode(ProgressiveBalancesMode::Strict)
// first two epochs
.apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0)
.await
.unwrap()
// Note: This test may fail if the shuffling used changes, right now it re-runs with
// deterministic shuffling. A shuffling change my cause the slashed proposer to propose
// again in the next epoch, which results in a block processing failure
// (`HeaderInvalid::ProposerSlashed`). The harness should be re-worked to successfully skip
// the slot in this scenario rather than panic-ing. The same applies to
// `progressive_balances_cache_attester_slashing`.
.apply_blocks(1)
.await
.add_previous_epoch_proposer_slashing(MainnetEthSpec::slots_per_epoch())
.await
// expect fork choice to import blocks successfully after a previous epoch proposer is
// slashed, i.e. the slashed proposer's balance is correctly excluded from
// the previous epoch total balance in `ProgressiveBalancesCache`.
.apply_blocks(1)
.await
// expect fork choice to import another epoch of blocks successfully - the slashed
// proposer's balance should be excluded from the current epoch total balance in
// `ProgressiveBalancesCache` as well.
.apply_blocks(MainnetEthSpec::slots_per_epoch() as usize)
.await;
}

View File

@ -7,6 +7,7 @@ mod slash_validator;
pub mod altair; pub mod altair;
pub mod base; pub mod base;
pub mod update_progressive_balances_cache;
pub use deposit_data_tree::DepositDataTree; pub use deposit_data_tree::DepositDataTree;
pub use get_attestation_participation::get_attestation_participation_flag_indices; pub use get_attestation_participation::get_attestation_participation_flag_indices;

View File

@ -1,3 +1,4 @@
use crate::common::update_progressive_balances_cache::update_progressive_balances_on_slashing;
use crate::{ use crate::{
common::{decrease_balance, increase_balance, initiate_validator_exit}, common::{decrease_balance, increase_balance, initiate_validator_exit},
per_block_processing::errors::BlockProcessingError, per_block_processing::errors::BlockProcessingError,
@ -43,6 +44,8 @@ pub fn slash_validator<T: EthSpec>(
.safe_div(spec.min_slashing_penalty_quotient_for_state(state))?, .safe_div(spec.min_slashing_penalty_quotient_for_state(state))?,
)?; )?;
update_progressive_balances_on_slashing(state, slashed_index)?;
// Apply proposer and whistleblower rewards // Apply proposer and whistleblower rewards
let proposer_index = ctxt.get_proposer_index(state, spec)? as usize; let proposer_index = ctxt.get_proposer_index(state, spec)? as usize;
let whistleblower_index = opt_whistleblower_index.unwrap_or(proposer_index); let whistleblower_index = opt_whistleblower_index.unwrap_or(proposer_index);

View File

@ -0,0 +1,142 @@
/// A collection of all functions that mutates the `ProgressiveBalancesCache`.
use crate::metrics::{
PARTICIPATION_CURR_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL,
PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL,
};
use crate::per_epoch_processing::altair::ParticipationCache;
use crate::{BlockProcessingError, EpochProcessingError};
use lighthouse_metrics::set_gauge;
use ssz_types::VariableList;
use std::borrow::Cow;
use types::consts::altair::TIMELY_TARGET_FLAG_INDEX;
use types::{
is_progressive_balances_enabled, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec,
ParticipationFlags, ProgressiveBalancesCache,
};
/// Initializes the `ProgressiveBalancesCache` cache using balance values from the
/// `ParticipationCache`. If the optional `&ParticipationCache` is not supplied, it will be computed
/// from the `BeaconState`.
pub fn initialize_progressive_balances_cache<E: EthSpec>(
state: &mut BeaconState<E>,
maybe_participation_cache: Option<&ParticipationCache>,
spec: &ChainSpec,
) -> Result<(), BeaconStateError> {
if !is_progressive_balances_enabled(state)
|| state.progressive_balances_cache().is_initialized()
{
return Ok(());
}
let participation_cache = match maybe_participation_cache {
Some(cache) => Cow::Borrowed(cache),
None => Cow::Owned(ParticipationCache::new(state, spec)?),
};
let previous_epoch_target_attesting_balance = participation_cache
.previous_epoch_target_attesting_balance_raw()
.map_err(|e| BeaconStateError::ParticipationCacheError(format!("{e:?}")))?;
let current_epoch_target_attesting_balance = participation_cache
.current_epoch_target_attesting_balance_raw()
.map_err(|e| BeaconStateError::ParticipationCacheError(format!("{e:?}")))?;
let current_epoch = state.current_epoch();
state.progressive_balances_cache_mut().initialize(
current_epoch,
previous_epoch_target_attesting_balance,
current_epoch_target_attesting_balance,
);
update_progressive_balances_metrics(state.progressive_balances_cache())?;
Ok(())
}
/// Updates the `ProgressiveBalancesCache` when a new target attestation has been processed.
pub fn update_progressive_balances_on_attestation<T: EthSpec>(
state: &mut BeaconState<T>,
epoch: Epoch,
validator_index: usize,
) -> Result<(), BlockProcessingError> {
if is_progressive_balances_enabled(state) {
let validator = state.get_validator(validator_index)?;
if !validator.slashed {
let validator_effective_balance = validator.effective_balance;
state
.progressive_balances_cache_mut()
.on_new_target_attestation(epoch, validator_effective_balance)?;
}
}
Ok(())
}
/// Updates the `ProgressiveBalancesCache` when a target attester has been slashed.
pub fn update_progressive_balances_on_slashing<T: EthSpec>(
state: &mut BeaconState<T>,
validator_index: usize,
) -> Result<(), BlockProcessingError> {
if is_progressive_balances_enabled(state) {
let previous_epoch_participation = state.previous_epoch_participation()?;
let is_previous_epoch_target_attester =
is_target_attester_in_epoch::<T>(previous_epoch_participation, validator_index)?;
let current_epoch_participation = state.current_epoch_participation()?;
let is_current_epoch_target_attester =
is_target_attester_in_epoch::<T>(current_epoch_participation, validator_index)?;
let validator_effective_balance = state.get_effective_balance(validator_index)?;
state.progressive_balances_cache_mut().on_slashing(
is_previous_epoch_target_attester,
is_current_epoch_target_attester,
validator_effective_balance,
)?;
}
Ok(())
}
/// Updates the `ProgressiveBalancesCache` on epoch transition.
pub fn update_progressive_balances_on_epoch_transition<T: EthSpec>(
state: &mut BeaconState<T>,
spec: &ChainSpec,
) -> Result<(), EpochProcessingError> {
if is_progressive_balances_enabled(state) {
state
.progressive_balances_cache_mut()
.on_epoch_transition(spec)?;
update_progressive_balances_metrics(state.progressive_balances_cache())?;
}
Ok(())
}
pub fn update_progressive_balances_metrics(
cache: &ProgressiveBalancesCache,
) -> Result<(), BeaconStateError> {
set_gauge(
&PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL,
cache.previous_epoch_target_attesting_balance()? as i64,
);
set_gauge(
&PARTICIPATION_CURR_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL,
cache.current_epoch_target_attesting_balance()? as i64,
);
Ok(())
}
fn is_target_attester_in_epoch<T: EthSpec>(
epoch_participation: &VariableList<ParticipationFlags, T::ValidatorRegistryLimit>,
validator_index: usize,
) -> Result<bool, BlockProcessingError> {
let participation_flags = epoch_participation
.get(validator_index)
.ok_or(BeaconStateError::UnknownValidator(validator_index))?;
participation_flags
.has_flag(TIMELY_TARGET_FLAG_INDEX)
.map_err(|e| e.into())
}

View File

@ -111,7 +111,7 @@ pub fn initialize_beacon_state_from_eth1<T: EthSpec>(
} }
// Now that we have our validators, initialize the caches (including the committees) // Now that we have our validators, initialize the caches (including the committees)
state.build_all_caches(spec)?; state.build_caches(spec)?;
// Set genesis validators root for domain separation and chain versioning // Set genesis validators root for domain separation and chain versioning
*state.genesis_validators_root_mut() = state.update_validators_tree_hash_cache()?; *state.genesis_validators_root_mut() = state.update_validators_tree_hash_cache()?;
@ -134,7 +134,7 @@ pub fn process_activations<T: EthSpec>(
state: &mut BeaconState<T>, state: &mut BeaconState<T>,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<(), Error> { ) -> Result<(), Error> {
let (validators, balances) = state.validators_and_balances_mut(); let (validators, balances, _) = state.validators_and_balances_and_progressive_balances_mut();
for (index, validator) in validators.iter_mut().enumerate() { for (index, validator) in validators.iter_mut().enumerate() {
let balance = balances let balance = balances
.get(index) .get(index)

View File

@ -2,7 +2,7 @@
#![cfg_attr( #![cfg_attr(
not(test), not(test),
deny( deny(
clippy::integer_arithmetic, clippy::arithmetic_side_effects,
clippy::disallowed_methods, clippy::disallowed_methods,
clippy::indexing_slicing, clippy::indexing_slicing,
clippy::unwrap_used, clippy::unwrap_used,

View File

@ -23,4 +23,15 @@ lazy_static! {
"beacon_participation_prev_epoch_active_gwei_total", "beacon_participation_prev_epoch_active_gwei_total",
"Total effective balance (gwei) of validators active in the previous epoch" "Total effective balance (gwei) of validators active in the previous epoch"
); );
/*
* Participation Metrics (progressive balances)
*/
pub static ref PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_participation_prev_epoch_target_attesting_gwei_progressive_total",
"Progressive total effective balance (gwei) of validators who attested to the target in the previous epoch"
);
pub static ref PARTICIPATION_CURR_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_participation_curr_epoch_target_attesting_gwei_progressive_total",
"Progressive total effective balance (gwei) of validators who attested to the target in the current epoch"
);
} }

View File

@ -42,6 +42,9 @@ mod verify_proposer_slashing;
use crate::common::decrease_balance; use crate::common::decrease_balance;
use crate::StateProcessingStrategy; use crate::StateProcessingStrategy;
use crate::common::update_progressive_balances_cache::{
initialize_progressive_balances_cache, update_progressive_balances_metrics,
};
#[cfg(feature = "arbitrary-fuzz")] #[cfg(feature = "arbitrary-fuzz")]
use arbitrary::Arbitrary; use arbitrary::Arbitrary;
@ -115,6 +118,8 @@ pub fn per_block_processing<T: EthSpec, Payload: AbstractExecPayload<T>>(
.fork_name(spec) .fork_name(spec)
.map_err(BlockProcessingError::InconsistentStateFork)?; .map_err(BlockProcessingError::InconsistentStateFork)?;
initialize_progressive_balances_cache(state, None, spec)?;
let verify_signatures = match block_signature_strategy { let verify_signatures = match block_signature_strategy {
BlockSignatureStrategy::VerifyBulk => { BlockSignatureStrategy::VerifyBulk => {
// Verify all signatures in the block at once. // Verify all signatures in the block at once.
@ -183,6 +188,10 @@ pub fn per_block_processing<T: EthSpec, Payload: AbstractExecPayload<T>>(
)?; )?;
} }
if is_progressive_balances_enabled(state) {
update_progressive_balances_metrics(state.progressive_balances_cache())?;
}
Ok(()) Ok(())
} }

View File

@ -1,4 +1,4 @@
#![allow(clippy::integer_arithmetic)] #![allow(clippy::arithmetic_side_effects)]
use super::signature_sets::{Error as SignatureSetError, *}; use super::signature_sets::{Error as SignatureSetError, *};
use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError}; use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError};

View File

@ -1,6 +1,8 @@
use super::signature_sets::Error as SignatureSetError; use super::signature_sets::Error as SignatureSetError;
use crate::per_epoch_processing::altair::participation_cache;
use crate::ContextError; use crate::ContextError;
use merkle_proof::MerkleTreeError; use merkle_proof::MerkleTreeError;
use participation_cache::Error as ParticipationCacheError;
use safe_arith::ArithError; use safe_arith::ArithError;
use ssz::DecodeError; use ssz::DecodeError;
use types::*; use types::*;
@ -99,6 +101,7 @@ pub enum BlockProcessingError {
length: usize, length: usize,
}, },
WithdrawalCredentialsInvalid, WithdrawalCredentialsInvalid,
ParticipationCacheError(ParticipationCacheError),
} }
impl From<BeaconStateError> for BlockProcessingError { impl From<BeaconStateError> for BlockProcessingError {
@ -156,6 +159,12 @@ impl From<BlockOperationError<HeaderInvalid>> for BlockProcessingError {
} }
} }
impl From<ParticipationCacheError> for BlockProcessingError {
fn from(e: ParticipationCacheError) -> Self {
BlockProcessingError::ParticipationCacheError(e)
}
}
/// A conversion that consumes `self` and adds an `index` variable to resulting struct. /// A conversion that consumes `self` and adds an `index` variable to resulting struct.
/// ///
/// Used here to allow converting an error into an upstream error that points to the object that /// Used here to allow converting an error into an upstream error that points to the object that

View File

@ -97,6 +97,8 @@ pub mod base {
pub mod altair { pub mod altair {
use super::*; use super::*;
use crate::common::update_progressive_balances_cache::update_progressive_balances_on_attestation;
use types::consts::altair::TIMELY_TARGET_FLAG_INDEX;
pub fn process_attestations<T: EthSpec>( pub fn process_attestations<T: EthSpec>(
state: &mut BeaconState<T>, state: &mut BeaconState<T>,
@ -163,6 +165,14 @@ pub mod altair {
get_base_reward(state, index, base_reward_per_increment, spec)? get_base_reward(state, index, base_reward_per_increment, spec)?
.safe_mul(weight)?, .safe_mul(weight)?,
)?; )?;
if flag_index == TIMELY_TARGET_FLAG_INDEX {
update_progressive_balances_on_attestation(
state,
data.target.epoch,
index,
)?;
}
} }
} }
} }
@ -235,6 +245,7 @@ pub fn process_attester_slashings<T: EthSpec>(
Ok(()) Ok(())
} }
/// Wrapper function to handle calling the correct version of `process_attestations` based on /// Wrapper function to handle calling the correct version of `process_attestations` based on
/// the fork. /// the fork.
pub fn process_attestations<T: EthSpec, Payload: AbstractExecPayload<T>>( pub fn process_attestations<T: EthSpec, Payload: AbstractExecPayload<T>>(

View File

@ -63,7 +63,7 @@ async fn valid_block_ok() {
let state = harness.get_current_state(); let state = harness.get_current_state();
let slot = state.slot(); let slot = state.slot();
let (block, mut state) = harness let ((block, _), mut state) = harness
.make_block_return_pre_state(state, slot + Slot::new(1)) .make_block_return_pre_state(state, slot + Slot::new(1))
.await; .await;
@ -89,7 +89,7 @@ async fn invalid_block_header_state_slot() {
let state = harness.get_current_state(); let state = harness.get_current_state();
let slot = state.slot() + Slot::new(1); let slot = state.slot() + Slot::new(1);
let (signed_block, mut state) = harness.make_block_return_pre_state(state, slot).await; let ((signed_block, _), mut state) = harness.make_block_return_pre_state(state, slot).await;
let (mut block, signature) = signed_block.deconstruct(); let (mut block, signature) = signed_block.deconstruct();
*block.slot_mut() = slot + Slot::new(1); *block.slot_mut() = slot + Slot::new(1);
@ -120,7 +120,7 @@ async fn invalid_parent_block_root() {
let state = harness.get_current_state(); let state = harness.get_current_state();
let slot = state.slot(); let slot = state.slot();
let (signed_block, mut state) = harness let ((signed_block, _), mut state) = harness
.make_block_return_pre_state(state, slot + Slot::new(1)) .make_block_return_pre_state(state, slot + Slot::new(1))
.await; .await;
let (mut block, signature) = signed_block.deconstruct(); let (mut block, signature) = signed_block.deconstruct();
@ -155,7 +155,7 @@ async fn invalid_block_signature() {
let state = harness.get_current_state(); let state = harness.get_current_state();
let slot = state.slot(); let slot = state.slot();
let (signed_block, mut state) = harness let ((signed_block, _), mut state) = harness
.make_block_return_pre_state(state, slot + Slot::new(1)) .make_block_return_pre_state(state, slot + Slot::new(1))
.await; .await;
let (block, _) = signed_block.deconstruct(); let (block, _) = signed_block.deconstruct();
@ -188,7 +188,7 @@ async fn invalid_randao_reveal_signature() {
let state = harness.get_current_state(); let state = harness.get_current_state();
let slot = state.slot(); let slot = state.slot();
let (signed_block, mut state) = harness let ((signed_block, _), mut state) = harness
.make_block_with_modifier(state, slot + 1, |block| { .make_block_with_modifier(state, slot + 1, |block| {
*block.body_mut().randao_reveal_mut() = Signature::empty(); *block.body_mut().randao_reveal_mut() = Signature::empty();
}) })
@ -960,7 +960,7 @@ async fn fork_spanning_exit() {
spec.bellatrix_fork_epoch = Some(Epoch::new(4)); spec.bellatrix_fork_epoch = Some(Epoch::new(4));
spec.shard_committee_period = 0; spec.shard_committee_period = 0;
let harness = BeaconChainHarness::builder(MainnetEthSpec::default()) let harness = BeaconChainHarness::builder(MainnetEthSpec)
.spec(spec.clone()) .spec(spec.clone())
.deterministic_keypairs(VALIDATOR_COUNT) .deterministic_keypairs(VALIDATOR_COUNT)
.mock_execution_layer() .mock_execution_layer()

View File

@ -1,4 +1,7 @@
use super::{process_registry_updates, process_slashings, EpochProcessingSummary, Error}; use super::{process_registry_updates, process_slashings, EpochProcessingSummary, Error};
use crate::common::update_progressive_balances_cache::{
initialize_progressive_balances_cache, update_progressive_balances_on_epoch_transition,
};
use crate::per_epoch_processing::{ use crate::per_epoch_processing::{
effective_balance_updates::process_effective_balance_updates, effective_balance_updates::process_effective_balance_updates,
historical_roots_update::process_historical_roots_update, historical_roots_update::process_historical_roots_update,
@ -31,6 +34,7 @@ pub fn process_epoch<T: EthSpec>(
// Pre-compute participating indices and total balances. // Pre-compute participating indices and total balances.
let participation_cache = ParticipationCache::new(state, spec)?; let participation_cache = ParticipationCache::new(state, spec)?;
let sync_committee = state.current_sync_committee()?.clone(); let sync_committee = state.current_sync_committee()?.clone();
initialize_progressive_balances_cache::<T>(state, Some(&participation_cache), spec)?;
// Justification and finalization. // Justification and finalization.
let justification_and_finalization_state = let justification_and_finalization_state =
@ -56,7 +60,7 @@ pub fn process_epoch<T: EthSpec>(
process_eth1_data_reset(state)?; process_eth1_data_reset(state)?;
// Update effective balances with hysteresis (lag). // Update effective balances with hysteresis (lag).
process_effective_balance_updates(state, spec)?; process_effective_balance_updates(state, Some(&participation_cache), spec)?;
// Reset slashings // Reset slashings
process_slashings_reset(state)?; process_slashings_reset(state)?;
@ -75,6 +79,8 @@ pub fn process_epoch<T: EthSpec>(
// Rotate the epoch caches to suit the epoch transition. // Rotate the epoch caches to suit the epoch transition.
state.advance_caches(spec)?; state.advance_caches(spec)?;
update_progressive_balances_on_epoch_transition(state, spec)?;
Ok(EpochProcessingSummary::Altair { Ok(EpochProcessingSummary::Altair {
participation_cache, participation_cache,
sync_committee, sync_committee,

View File

@ -34,7 +34,7 @@ pub fn process_inactivity_updates<T: EthSpec>(
.safe_add_assign(spec.inactivity_score_bias)?; .safe_add_assign(spec.inactivity_score_bias)?;
} }
// Decrease the score of all validators for forgiveness when not during a leak // Decrease the score of all validators for forgiveness when not during a leak
if !state.is_in_inactivity_leak(previous_epoch, spec) { if !state.is_in_inactivity_leak(previous_epoch, spec)? {
let inactivity_score = state.get_inactivity_score_mut(index)?; let inactivity_score = state.get_inactivity_score_mut(index)?;
inactivity_score inactivity_score
.safe_sub_assign(min(spec.inactivity_score_recovery_rate, *inactivity_score))?; .safe_sub_assign(min(spec.inactivity_score_recovery_rate, *inactivity_score))?;

View File

@ -11,49 +11,23 @@
//! Additionally, this cache is returned from the `altair::process_epoch` function and can be used //! Additionally, this cache is returned from the `altair::process_epoch` function and can be used
//! to get useful summaries about the validator participation in an epoch. //! to get useful summaries about the validator participation in an epoch.
use safe_arith::{ArithError, SafeArith};
use types::{ use types::{
consts::altair::{ consts::altair::{
NUM_FLAG_INDICES, TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, NUM_FLAG_INDICES, TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX,
TIMELY_TARGET_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX,
}, },
BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ParticipationFlags, RelativeEpoch, Balance, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ParticipationFlags,
RelativeEpoch,
}; };
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq, Clone)]
pub enum Error { pub enum Error {
InvalidFlagIndex(usize), InvalidFlagIndex(usize),
InvalidValidatorIndex(usize), InvalidValidatorIndex(usize),
} }
/// A balance which will never be below the specified `minimum`.
///
/// This is an effort to ensure the `EFFECTIVE_BALANCE_INCREMENT` minimum is always respected.
#[derive(PartialEq, Debug, Clone, Copy)]
struct Balance {
raw: u64,
minimum: u64,
}
impl Balance {
/// Initialize the balance to `0`, or the given `minimum`.
pub fn zero(minimum: u64) -> Self {
Self { raw: 0, minimum }
}
/// Returns the balance with respect to the initialization `minimum`.
pub fn get(&self) -> u64 {
std::cmp::max(self.raw, self.minimum)
}
/// Add-assign to the balance.
pub fn safe_add_assign(&mut self, other: u64) -> Result<(), ArithError> {
self.raw.safe_add_assign(other)
}
}
/// Caches the participation values for one epoch (either the previous or current). /// Caches the participation values for one epoch (either the previous or current).
#[derive(PartialEq, Debug)] #[derive(PartialEq, Debug, Clone)]
struct SingleEpochParticipationCache { struct SingleEpochParticipationCache {
/// Maps an active validator index to their participation flags. /// Maps an active validator index to their participation flags.
/// ///
@ -95,6 +69,14 @@ impl SingleEpochParticipationCache {
.ok_or(Error::InvalidFlagIndex(flag_index)) .ok_or(Error::InvalidFlagIndex(flag_index))
} }
/// Returns the raw total balance of attesters who have `flag_index` set.
fn total_flag_balance_raw(&self, flag_index: usize) -> Result<Balance, Error> {
self.total_flag_balances
.get(flag_index)
.copied()
.ok_or(Error::InvalidFlagIndex(flag_index))
}
/// Returns `true` if `val_index` is active, unslashed and has `flag_index` set. /// Returns `true` if `val_index` is active, unslashed and has `flag_index` set.
/// ///
/// ## Errors /// ## Errors
@ -173,7 +155,7 @@ impl SingleEpochParticipationCache {
} }
/// Maintains a cache to be used during `altair::process_epoch`. /// Maintains a cache to be used during `altair::process_epoch`.
#[derive(PartialEq, Debug)] #[derive(PartialEq, Debug, Clone)]
pub struct ParticipationCache { pub struct ParticipationCache {
current_epoch: Epoch, current_epoch: Epoch,
/// Caches information about active validators pertaining to `self.current_epoch`. /// Caches information about active validators pertaining to `self.current_epoch`.
@ -291,6 +273,11 @@ impl ParticipationCache {
.total_flag_balance(TIMELY_TARGET_FLAG_INDEX) .total_flag_balance(TIMELY_TARGET_FLAG_INDEX)
} }
pub fn current_epoch_target_attesting_balance_raw(&self) -> Result<Balance, Error> {
self.current_epoch_participation
.total_flag_balance_raw(TIMELY_TARGET_FLAG_INDEX)
}
pub fn previous_epoch_total_active_balance(&self) -> u64 { pub fn previous_epoch_total_active_balance(&self) -> u64 {
self.previous_epoch_participation.total_active_balance.get() self.previous_epoch_participation.total_active_balance.get()
} }
@ -300,6 +287,11 @@ impl ParticipationCache {
.total_flag_balance(TIMELY_TARGET_FLAG_INDEX) .total_flag_balance(TIMELY_TARGET_FLAG_INDEX)
} }
pub fn previous_epoch_target_attesting_balance_raw(&self) -> Result<Balance, Error> {
self.previous_epoch_participation
.total_flag_balance_raw(TIMELY_TARGET_FLAG_INDEX)
}
pub fn previous_epoch_source_attesting_balance(&self) -> Result<u64, Error> { pub fn previous_epoch_source_attesting_balance(&self) -> Result<u64, Error> {
self.previous_epoch_participation self.previous_epoch_participation
.total_flag_balance(TIMELY_SOURCE_FLAG_INDEX) .total_flag_balance(TIMELY_SOURCE_FLAG_INDEX)

View File

@ -77,7 +77,7 @@ pub fn get_flag_index_deltas<T: EthSpec>(
let mut delta = Delta::default(); let mut delta = Delta::default();
if unslashed_participating_indices.contains(index)? { if unslashed_participating_indices.contains(index)? {
if !state.is_in_inactivity_leak(previous_epoch, spec) { if !state.is_in_inactivity_leak(previous_epoch, spec)? {
let reward_numerator = base_reward let reward_numerator = base_reward
.safe_mul(weight)? .safe_mul(weight)?
.safe_mul(unslashed_participating_increments)?; .safe_mul(unslashed_participating_increments)?;

View File

@ -52,7 +52,7 @@ pub fn process_epoch<T: EthSpec>(
process_eth1_data_reset(state)?; process_eth1_data_reset(state)?;
// Update effective balances with hysteresis (lag). // Update effective balances with hysteresis (lag).
process_effective_balance_updates(state, spec)?; process_effective_balance_updates(state, None, spec)?;
// Reset slashings // Reset slashings
process_slashings_reset(state)?; process_slashings_reset(state)?;

View File

@ -11,6 +11,9 @@ use crate::per_epoch_processing::{
}; };
use types::{BeaconState, ChainSpec, EthSpec, RelativeEpoch}; use types::{BeaconState, ChainSpec, EthSpec, RelativeEpoch};
use crate::common::update_progressive_balances_cache::{
initialize_progressive_balances_cache, update_progressive_balances_on_epoch_transition,
};
pub use historical_summaries_update::process_historical_summaries_update; pub use historical_summaries_update::process_historical_summaries_update;
mod historical_summaries_update; mod historical_summaries_update;
@ -27,6 +30,7 @@ pub fn process_epoch<T: EthSpec>(
// Pre-compute participating indices and total balances. // Pre-compute participating indices and total balances.
let participation_cache = ParticipationCache::new(state, spec)?; let participation_cache = ParticipationCache::new(state, spec)?;
let sync_committee = state.current_sync_committee()?.clone(); let sync_committee = state.current_sync_committee()?.clone();
initialize_progressive_balances_cache(state, Some(&participation_cache), spec)?;
// Justification and finalization. // Justification and finalization.
let justification_and_finalization_state = let justification_and_finalization_state =
@ -52,7 +56,7 @@ pub fn process_epoch<T: EthSpec>(
process_eth1_data_reset(state)?; process_eth1_data_reset(state)?;
// Update effective balances with hysteresis (lag). // Update effective balances with hysteresis (lag).
process_effective_balance_updates(state, spec)?; process_effective_balance_updates(state, Some(&participation_cache), spec)?;
// Reset slashings // Reset slashings
process_slashings_reset(state)?; process_slashings_reset(state)?;
@ -71,6 +75,8 @@ pub fn process_epoch<T: EthSpec>(
// Rotate the epoch caches to suit the epoch transition. // Rotate the epoch caches to suit the epoch transition.
state.advance_caches(spec)?; state.advance_caches(spec)?;
update_progressive_balances_on_epoch_transition(state, spec)?;
Ok(EpochProcessingSummary::Altair { Ok(EpochProcessingSummary::Altair {
participation_cache, participation_cache,
sync_committee, sync_committee,

View File

@ -1,11 +1,13 @@
use super::errors::EpochProcessingError; use super::errors::EpochProcessingError;
use crate::per_epoch_processing::altair::ParticipationCache;
use safe_arith::SafeArith; use safe_arith::SafeArith;
use types::beacon_state::BeaconState; use types::beacon_state::BeaconState;
use types::chain_spec::ChainSpec; use types::chain_spec::ChainSpec;
use types::{BeaconStateError, EthSpec}; use types::{BeaconStateError, EthSpec, ProgressiveBalancesCache};
pub fn process_effective_balance_updates<T: EthSpec>( pub fn process_effective_balance_updates<T: EthSpec>(
state: &mut BeaconState<T>, state: &mut BeaconState<T>,
maybe_participation_cache: Option<&ParticipationCache>,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<(), EpochProcessingError> { ) -> Result<(), EpochProcessingError> {
let hysteresis_increment = spec let hysteresis_increment = spec
@ -13,7 +15,8 @@ pub fn process_effective_balance_updates<T: EthSpec>(
.safe_div(spec.hysteresis_quotient)?; .safe_div(spec.hysteresis_quotient)?;
let downward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_downward_multiplier)?; let downward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_downward_multiplier)?;
let upward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_upward_multiplier)?; let upward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_upward_multiplier)?;
let (validators, balances) = state.validators_and_balances_mut(); let (validators, balances, progressive_balances_cache) =
state.validators_and_balances_and_progressive_balances_mut();
for (index, validator) in validators.iter_mut().enumerate() { for (index, validator) in validators.iter_mut().enumerate() {
let balance = balances let balance = balances
.get(index) .get(index)
@ -23,11 +26,43 @@ pub fn process_effective_balance_updates<T: EthSpec>(
if balance.safe_add(downward_threshold)? < validator.effective_balance if balance.safe_add(downward_threshold)? < validator.effective_balance
|| validator.effective_balance.safe_add(upward_threshold)? < balance || validator.effective_balance.safe_add(upward_threshold)? < balance
{ {
validator.effective_balance = std::cmp::min( let old_effective_balance = validator.effective_balance;
let new_effective_balance = std::cmp::min(
balance.safe_sub(balance.safe_rem(spec.effective_balance_increment)?)?, balance.safe_sub(balance.safe_rem(spec.effective_balance_increment)?)?,
spec.max_effective_balance, spec.max_effective_balance,
); );
if let Some(participation_cache) = maybe_participation_cache {
update_progressive_balances(
participation_cache,
progressive_balances_cache,
index,
old_effective_balance,
new_effective_balance,
)?;
}
validator.effective_balance = new_effective_balance;
} }
} }
Ok(()) Ok(())
} }
fn update_progressive_balances(
participation_cache: &ParticipationCache,
progressive_balances_cache: &mut ProgressiveBalancesCache,
index: usize,
old_effective_balance: u64,
new_effective_balance: u64,
) -> Result<(), EpochProcessingError> {
if old_effective_balance != new_effective_balance {
let is_current_epoch_target_attester =
participation_cache.is_current_epoch_timely_target_attester(index)?;
progressive_balances_cache.on_effective_balance_change(
is_current_epoch_target_attester,
old_effective_balance,
new_effective_balance,
)?;
}
Ok(())
}

View File

@ -16,7 +16,7 @@ pub fn process_slashings<T: EthSpec>(
total_balance, total_balance,
); );
let (validators, balances) = state.validators_and_balances_mut(); let (validators, balances, _) = state.validators_and_balances_and_progressive_balances_mut();
for (index, validator) in validators.iter().enumerate() { for (index, validator) in validators.iter().enumerate() {
if validator.slashed if validator.slashed
&& epoch.safe_add(T::EpochsPerSlashingsVector::to_u64().safe_div(2)?)? && epoch.safe_add(T::EpochsPerSlashingsVector::to_u64().safe_div(2)?)?

View File

@ -1,3 +1,4 @@
use crate::common::update_progressive_balances_cache::initialize_progressive_balances_cache;
use crate::common::{get_attestation_participation_flag_indices, get_attesting_indices}; use crate::common::{get_attestation_participation_flag_indices, get_attesting_indices};
use std::mem; use std::mem;
use std::sync::Arc; use std::sync::Arc;
@ -101,6 +102,7 @@ pub fn upgrade_to_altair<E: EthSpec>(
next_sync_committee: temp_sync_committee, // not read next_sync_committee: temp_sync_committee, // not read
// Caches // Caches
total_active_balance: pre.total_active_balance, total_active_balance: pre.total_active_balance,
progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache),
committee_caches: mem::take(&mut pre.committee_caches), committee_caches: mem::take(&mut pre.committee_caches),
pubkey_cache: mem::take(&mut pre.pubkey_cache), pubkey_cache: mem::take(&mut pre.pubkey_cache),
exit_cache: mem::take(&mut pre.exit_cache), exit_cache: mem::take(&mut pre.exit_cache),
@ -110,6 +112,8 @@ pub fn upgrade_to_altair<E: EthSpec>(
// Fill in previous epoch participation from the pre state's pending attestations. // Fill in previous epoch participation from the pre state's pending attestations.
translate_participation(&mut post, &pre.previous_epoch_attestations, spec)?; translate_participation(&mut post, &pre.previous_epoch_attestations, spec)?;
initialize_progressive_balances_cache(&mut post, None, spec)?;
// Fill in sync committees // Fill in sync committees
// Note: A duplicate committee is assigned for the current and next committee at the fork // Note: A duplicate committee is assigned for the current and next committee at the fork
// boundary // boundary

Some files were not shown because too many files have changed in this diff Show More