Merge branch 'eip4844' into deneb-free-blobs
commit b6c0e91c05
.github/workflows/release.yml
@@ -73,7 +73,7 @@ jobs:
     - uses: KyleMayes/install-llvm-action@v1
       if: startsWith(matrix.arch, 'x86_64-windows')
       with:
-        version: "13.0"
+        version: "15.0"
        directory: ${{ runner.temp }}/llvm
     - name: Set LIBCLANG_PATH
       if: startsWith(matrix.arch, 'x86_64-windows')
.github/workflows/test-suite.yml
@@ -14,7 +14,7 @@ env:
   # FIXME: temporarily allow warnings on 4844 branch. Revert to original later: RUSTFLAGS: "-D warnings -C debuginfo=0"
   RUSTFLAGS: "-C debuginfo=0"
   # The Nightly version used for cargo-udeps, might need updating from time to time.
-  PINNED_NIGHTLY: nightly-2022-12-15
+  PINNED_NIGHTLY: nightly-2023-04-16
   # Prevent Github API rate limiting.
   LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
 jobs:
@@ -84,7 +84,7 @@ jobs:
       run: choco install -y make
     - uses: KyleMayes/install-llvm-action@v1
       with:
-        version: "13.0"
+        version: "15.0"
        directory: ${{ runner.temp }}/llvm
     - name: Set LIBCLANG_PATH
       run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV
Cargo.lock
@@ -643,7 +643,7 @@ dependencies = [

 [[package]]
 name = "beacon_node"
-version = "4.0.1"
+version = "4.1.0"
 dependencies = [
  "beacon_chain",
  "clap",
@@ -844,7 +844,7 @@ dependencies = [

 [[package]]
 name = "boot_node"
-version = "4.0.1"
+version = "4.1.0"
 dependencies = [
  "beacon_node",
  "clap",
@@ -889,6 +889,7 @@ name = "builder_client"
 version = "0.1.0"
 dependencies = [
  "eth2",
+ "lighthouse_version",
  "reqwest",
  "sensitive_url",
  "serde",
@@ -3108,9 +3109,9 @@ dependencies = [

 [[package]]
 name = "h2"
-version = "0.3.16"
+version = "0.3.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5be7b54589b581f624f566bf5d8eb2bab1db736c51528720b6bd36b96b55924d"
+checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21"
 dependencies = [
  "bytes",
  "fnv",
@@ -3898,7 +3899,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"

 [[package]]
 name = "lcli"
-version = "4.0.1"
+version = "4.1.0"
 dependencies = [
  "account_utils",
  "beacon_chain",
@@ -4551,7 +4552,7 @@ dependencies = [

 [[package]]
 name = "lighthouse"
-version = "4.0.1"
+version = "4.1.0"
 dependencies = [
  "account_manager",
  "account_utils",
@@ -1,4 +1,4 @@
-FROM rust:1.66.0-bullseye AS builder
+FROM rust:1.68.2-bullseye AS builder
 RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake clang libclang-dev protobuf-compiler
 COPY . lighthouse
 ARG FEATURES
@@ -1,6 +1,6 @@
 [package]
 name = "beacon_node"
-version = "4.0.1"
+version = "4.1.0"
 authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com"]
 edition = "2021"

@@ -118,7 +118,6 @@ use types::beacon_block_body::KzgCommitments;
 use types::beacon_state::CloneConfig;
 use types::blob_sidecar::{BlobIdentifier, BlobSidecarList, Blobs};
 use types::consts::deneb::MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS;
-use types::consts::merge::INTERVALS_PER_SLOT;
 use types::*;

 pub type ForkChoiceError = fork_choice::Error<crate::ForkChoiceStoreError>;
@@ -140,12 +139,6 @@ pub const VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1)
 /// The timeout for the eth1 finalization cache
 pub const ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_millis(200);

-/// The latest delay from the start of the slot at which to attempt a 1-slot re-org.
-fn max_re_org_slot_delay(seconds_per_slot: u64) -> Duration {
-    // Allow at least half of the attestation deadline for the block to propagate.
-    Duration::from_secs(seconds_per_slot) / INTERVALS_PER_SLOT as u32 / 2
-}
-
 // These keys are all zero because they get stored in different columns, see `DBColumn` type.
 pub const BEACON_CHAIN_DB_KEY: Hash256 = Hash256::zero();
 pub const OP_POOL_DB_KEY: Hash256 = Hash256::zero();
@@ -401,7 +394,7 @@ pub struct BeaconChain<T: BeaconChainTypes> {
     /// in recent epochs.
     pub(crate) observed_sync_aggregators: RwLock<ObservedSyncAggregators<T::EthSpec>>,
     /// Maintains a record of which validators have proposed blocks for each slot.
-    pub(crate) observed_block_producers: RwLock<ObservedBlockProducers<T::EthSpec>>,
+    pub observed_block_producers: RwLock<ObservedBlockProducers<T::EthSpec>>,
     /// Maintains a record of blob sidecars seen over the gossip network.
     pub(crate) observed_blob_sidecars: RwLock<ObservedBlobSidecars<T::EthSpec>>,
     /// Maintains a record of which validators have submitted voluntary exits.
@@ -1106,7 +1099,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             .execution_layer
             .as_ref()
             .ok_or(Error::ExecutionLayerMissing)?
-            .get_payload_by_block_hash(exec_block_hash, fork)
+            .get_payload_for_header(&execution_payload_header, fork)
             .await
             .map_err(|e| {
                 Error::ExecutionLayerErrorPayloadReconstruction(exec_block_hash, Box::new(e))
@@ -2298,12 +2291,14 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         &self,
         exit: SignedVoluntaryExit,
     ) -> Result<ObservationOutcome<SignedVoluntaryExit, T::EthSpec>, Error> {
-        // NOTE: this could be more efficient if it avoided cloning the head state
-        let wall_clock_state = self.wall_clock_state()?;
+        let head_snapshot = self.head().snapshot;
+        let head_state = &head_snapshot.beacon_state;
+        let wall_clock_epoch = self.epoch()?;

         Ok(self
             .observed_voluntary_exits
             .lock()
-            .verify_and_observe(exit, &wall_clock_state, &self.spec)
+            .verify_and_observe_at(exit, wall_clock_epoch, head_state, &self.spec)
             .map(|exit| {
                 // this method is called for both API and gossip exits, so this covers all exit events
                 if let Some(event_handler) = self.event_handler.as_ref() {
@@ -3802,7 +3797,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
        let (state, state_root_opt) = self
            .task_executor
            .spawn_blocking_handle(
-                move || chain.load_state_for_block_production::<Payload>(slot),
+                move || chain.load_state_for_block_production(slot),
                "produce_partial_beacon_block",
            )
            .ok_or(BlockProductionError::ShuttingDown)?
@@ -3825,7 +3820,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

     /// Load a beacon state from the database for block production. This is a long-running process
     /// that should not be performed in an `async` context.
-    fn load_state_for_block_production<Payload: ExecPayload<T::EthSpec>>(
+    fn load_state_for_block_production(
         self: &Arc<Self>,
         slot: Slot,
     ) -> Result<(BeaconState<T::EthSpec>, Option<Hash256>), BlockProductionError> {
@@ -3939,7 +3934,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         // 1. It seems we have time to propagate and still receive the proposer boost.
         // 2. The current head block was seen late.
         // 3. The `get_proposer_head` conditions from fork choice pass.
-        let proposing_on_time = slot_delay < max_re_org_slot_delay(self.spec.seconds_per_slot);
+        let proposing_on_time = slot_delay < self.config.re_org_cutoff(self.spec.seconds_per_slot);
         if !proposing_on_time {
             debug!(
                 self.log,
@@ -3969,6 +3964,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 slot,
                 canonical_head,
                 re_org_threshold,
+                &self.config.re_org_disallowed_offsets,
                 self.config.re_org_max_epochs_since_finalization,
             )
             .map_err(|e| match e {
@@ -4247,6 +4243,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             .get_preliminary_proposer_head(
                 head_block_root,
                 re_org_threshold,
+                &self.config.re_org_disallowed_offsets,
                 self.config.re_org_max_epochs_since_finalization,
             )
             .map_err(|e| e.map_inner_error(Error::ProposerHeadForkChoiceError))?;
@@ -4257,7 +4254,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let re_org_block_slot = head_slot + 1;
         let fork_choice_slot = info.current_slot;

-        // If a re-orging proposal isn't made by the `max_re_org_slot_delay` then we give up
+        // If a re-orging proposal isn't made by the `re_org_cutoff` then we give up
         // and allow the fork choice update for the canonical head through so that we may attest
         // correctly.
         let current_slot_ok = if head_slot == fork_choice_slot {
@@ -4268,7 +4265,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 .and_then(|slot_start| {
                     let now = self.slot_clock.now_duration()?;
                     let slot_delay = now.saturating_sub(slot_start);
-                    Some(slot_delay <= max_re_org_slot_delay(self.spec.seconds_per_slot))
+                    Some(slot_delay <= self.config.re_org_cutoff(self.spec.seconds_per_slot))
                 })
                 .unwrap_or(false)
         } else {
@@ -25,7 +25,7 @@ use futures::channel::mpsc::Sender;
 use kzg::{Kzg, TrustedSetup};
 use operation_pool::{OperationPool, PersistedOperationPool};
 use parking_lot::RwLock;
-use proto_array::ReOrgThreshold;
+use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold};
 use slasher::Slasher;
 use slog::{crit, error, info, Logger};
 use slot_clock::{SlotClock, TestingSlotClock};
@@ -180,6 +180,15 @@ where
         self
     }

+    /// Sets the proposer re-org disallowed offsets list.
+    pub fn proposer_re_org_disallowed_offsets(
+        mut self,
+        disallowed_offsets: DisallowedReOrgOffsets,
+    ) -> Self {
+        self.chain_config.re_org_disallowed_offsets = disallowed_offsets;
+        self
+    }
+
     /// Sets the store (database).
     ///
     /// Should generally be called early in the build chain.
@@ -1,10 +1,12 @@
-pub use proto_array::ReOrgThreshold;
+pub use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold};
 use serde_derive::{Deserialize, Serialize};
 use std::time::Duration;
 use types::{Checkpoint, Epoch};

 pub const DEFAULT_RE_ORG_THRESHOLD: ReOrgThreshold = ReOrgThreshold(20);
 pub const DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION: Epoch = Epoch::new(2);
+/// Default to 1/12th of the slot, which is 1 second on mainnet.
+pub const DEFAULT_RE_ORG_CUTOFF_DENOMINATOR: u32 = 12;
 pub const DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT: u64 = 250;

 /// Default fraction of a slot lookahead for payload preparation (12/3 = 4 seconds on mainnet).
@@ -34,6 +36,13 @@ pub struct ChainConfig {
     pub re_org_threshold: Option<ReOrgThreshold>,
     /// Maximum number of epochs since finalization for attempting a proposer re-org.
     pub re_org_max_epochs_since_finalization: Epoch,
+    /// Maximum delay after the start of the slot at which to propose a reorging block.
+    pub re_org_cutoff_millis: Option<u64>,
+    /// Additional epoch offsets at which re-orging block proposals are not permitted.
+    ///
+    /// By default this list is empty, but it can be useful for reacting to network conditions, e.g.
+    /// slow gossip of re-org blocks at slot 1 in the epoch.
+    pub re_org_disallowed_offsets: DisallowedReOrgOffsets,
     /// Number of milliseconds to wait for fork choice before proposing a block.
     ///
     /// If set to 0 then block proposal will not wait for fork choice at all.
@@ -82,6 +91,8 @@ impl Default for ChainConfig {
             max_network_size: 10 * 1_048_576, // 10M
             re_org_threshold: Some(DEFAULT_RE_ORG_THRESHOLD),
             re_org_max_epochs_since_finalization: DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION,
+            re_org_cutoff_millis: None,
+            re_org_disallowed_offsets: DisallowedReOrgOffsets::default(),
             fork_choice_before_proposal_timeout_ms: DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT,
             // Builder fallback configs that are set in `clap` will override these.
             builder_fallback_skips: 3,
@@ -100,3 +111,14 @@ impl Default for ChainConfig {
         }
     }
 }
+
+impl ChainConfig {
+    /// The latest delay from the start of the slot at which to attempt a 1-slot re-org.
+    pub fn re_org_cutoff(&self, seconds_per_slot: u64) -> Duration {
+        self.re_org_cutoff_millis
+            .map(Duration::from_millis)
+            .unwrap_or_else(|| {
+                Duration::from_secs(seconds_per_slot) / DEFAULT_RE_ORG_CUTOFF_DENOMINATOR
+            })
+    }
+}
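For intuition about the new config knob, here is a self-contained sketch of the cutoff resolution (std only; constant and logic mirror the hunk above):

use std::time::Duration;

const DEFAULT_RE_ORG_CUTOFF_DENOMINATOR: u32 = 12;

/// Mirrors `ChainConfig::re_org_cutoff`: an explicit millisecond override
/// wins, otherwise default to 1/12th of the slot.
fn re_org_cutoff(re_org_cutoff_millis: Option<u64>, seconds_per_slot: u64) -> Duration {
    re_org_cutoff_millis
        .map(Duration::from_millis)
        .unwrap_or_else(|| Duration::from_secs(seconds_per_slot) / DEFAULT_RE_ORG_CUTOFF_DENOMINATOR)
}

fn main() {
    // Mainnet slots are 12s, so the default cutoff is 1s.
    assert_eq!(re_org_cutoff(None, 12), Duration::from_secs(1));
    // An explicit override (e.g. --proposer-reorg-cutoff 250) takes precedence.
    assert_eq!(re_org_cutoff(Some(250), 12), Duration::from_millis(250));
}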
@@ -88,7 +88,7 @@ fn get_sync_status<T: EthSpec>(
     let period = T::SlotsPerEth1VotingPeriod::to_u64();
     let voting_period_start_slot = (current_slot / period) * period;

-    let period_start = slot_start_seconds::<T>(
+    let period_start = slot_start_seconds(
         genesis_time,
         spec.seconds_per_slot,
         voting_period_start_slot,
@@ -470,7 +470,7 @@ impl<T: EthSpec> Eth1ChainBackend<T> for CachingEth1Backend<T> {
     fn eth1_data(&self, state: &BeaconState<T>, spec: &ChainSpec) -> Result<Eth1Data, Error> {
         let period = T::SlotsPerEth1VotingPeriod::to_u64();
         let voting_period_start_slot = (state.slot() / period) * period;
-        let voting_period_start_seconds = slot_start_seconds::<T>(
+        let voting_period_start_seconds = slot_start_seconds(
             state.genesis_time(),
             spec.seconds_per_slot,
             voting_period_start_slot,
@@ -658,11 +658,7 @@ fn find_winning_vote(valid_votes: Eth1DataVoteCount) -> Option<Eth1Data> {
 }

 /// Returns the unix-epoch seconds at the start of the given `slot`.
-fn slot_start_seconds<T: EthSpec>(
-    genesis_unix_seconds: u64,
-    seconds_per_slot: u64,
-    slot: Slot,
-) -> u64 {
+fn slot_start_seconds(genesis_unix_seconds: u64, seconds_per_slot: u64, slot: Slot) -> u64 {
     genesis_unix_seconds + slot.as_u64() * seconds_per_slot
 }

@@ -698,7 +694,7 @@ mod test {
     fn get_voting_period_start_seconds(state: &BeaconState<E>, spec: &ChainSpec) -> u64 {
         let period = <E as EthSpec>::SlotsPerEth1VotingPeriod::to_u64();
         let voting_period_start_slot = (state.slot() / period) * period;
-        slot_start_seconds::<E>(
+        slot_start_seconds(
             state.genesis_time(),
             spec.seconds_per_slot,
             voting_period_start_slot,
@@ -708,23 +704,23 @@ mod test {
     #[test]
     fn slot_start_time() {
         let zero_sec = 0;
-        assert_eq!(slot_start_seconds::<E>(100, zero_sec, Slot::new(2)), 100);
+        assert_eq!(slot_start_seconds(100, zero_sec, Slot::new(2)), 100);

         let one_sec = 1;
-        assert_eq!(slot_start_seconds::<E>(100, one_sec, Slot::new(0)), 100);
-        assert_eq!(slot_start_seconds::<E>(100, one_sec, Slot::new(1)), 101);
-        assert_eq!(slot_start_seconds::<E>(100, one_sec, Slot::new(2)), 102);
+        assert_eq!(slot_start_seconds(100, one_sec, Slot::new(0)), 100);
+        assert_eq!(slot_start_seconds(100, one_sec, Slot::new(1)), 101);
+        assert_eq!(slot_start_seconds(100, one_sec, Slot::new(2)), 102);

         let three_sec = 3;
-        assert_eq!(slot_start_seconds::<E>(100, three_sec, Slot::new(0)), 100);
-        assert_eq!(slot_start_seconds::<E>(100, three_sec, Slot::new(1)), 103);
-        assert_eq!(slot_start_seconds::<E>(100, three_sec, Slot::new(2)), 106);
+        assert_eq!(slot_start_seconds(100, three_sec, Slot::new(0)), 100);
+        assert_eq!(slot_start_seconds(100, three_sec, Slot::new(1)), 103);
+        assert_eq!(slot_start_seconds(100, three_sec, Slot::new(2)), 106);

         let five_sec = 5;
-        assert_eq!(slot_start_seconds::<E>(100, five_sec, Slot::new(0)), 100);
-        assert_eq!(slot_start_seconds::<E>(100, five_sec, Slot::new(1)), 105);
-        assert_eq!(slot_start_seconds::<E>(100, five_sec, Slot::new(2)), 110);
-        assert_eq!(slot_start_seconds::<E>(100, five_sec, Slot::new(3)), 115);
+        assert_eq!(slot_start_seconds(100, five_sec, Slot::new(0)), 100);
+        assert_eq!(slot_start_seconds(100, five_sec, Slot::new(1)), 105);
+        assert_eq!(slot_start_seconds(100, five_sec, Slot::new(2)), 110);
+        assert_eq!(slot_start_seconds(100, five_sec, Slot::new(3)), 115);
     }

     fn get_eth1_block(timestamp: u64, number: u64) -> Eth1Block {
|
@ -37,7 +37,7 @@ mod naive_aggregation_pool;
|
|||||||
mod observed_aggregates;
|
mod observed_aggregates;
|
||||||
mod observed_attesters;
|
mod observed_attesters;
|
||||||
mod observed_blob_sidecars;
|
mod observed_blob_sidecars;
|
||||||
mod observed_block_producers;
|
pub mod observed_block_producers;
|
||||||
pub mod observed_operations;
|
pub mod observed_operations;
|
||||||
pub mod otb_verification_service;
|
pub mod otb_verification_service;
|
||||||
mod persisted_beacon_chain;
|
mod persisted_beacon_chain;
|
||||||
|
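(Exporting `observed_block_producers`, together with the `pub observed_block_producers` field above, lets the RPC block worker query `proposer_has_been_observed` directly; see the sync-worker hunk further down.)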
@@ -1,11 +1,11 @@
 use derivative::Derivative;
 use smallvec::{smallvec, SmallVec};
 use ssz::{Decode, Encode};
-use state_processing::{SigVerifiedOp, VerifyOperation};
+use state_processing::{SigVerifiedOp, VerifyOperation, VerifyOperationAt};
 use std::collections::HashSet;
 use std::marker::PhantomData;
 use types::{
-    AttesterSlashing, BeaconState, ChainSpec, EthSpec, ForkName, ProposerSlashing,
+    AttesterSlashing, BeaconState, ChainSpec, Epoch, EthSpec, ForkName, ProposerSlashing,
     SignedBlsToExecutionChange, SignedVoluntaryExit, Slot,
 };

@@ -87,12 +87,16 @@ impl<E: EthSpec> ObservableOperation<E> for SignedBlsToExecutionChange {
 }

 impl<T: ObservableOperation<E>, E: EthSpec> ObservedOperations<T, E> {
-    pub fn verify_and_observe(
+    pub fn verify_and_observe_parametric<F>(
         &mut self,
         op: T,
+        validate: F,
         head_state: &BeaconState<E>,
         spec: &ChainSpec,
-    ) -> Result<ObservationOutcome<T, E>, T::Error> {
+    ) -> Result<ObservationOutcome<T, E>, T::Error>
+    where
+        F: Fn(T) -> Result<SigVerifiedOp<T, E>, T::Error>,
+    {
         self.reset_at_fork_boundary(head_state.slot(), spec);

         let observed_validator_indices = &mut self.observed_validator_indices;
@@ -112,7 +116,7 @@ impl<T: ObservableOperation<E>, E: EthSpec> ObservedOperations<T, E> {
         }

         // Validate the op using operation-specific logic (`verify_attester_slashing`, etc).
-        let verified_op = op.validate(head_state, spec)?;
+        let verified_op = validate(op)?;

         // Add the relevant indices to the set of known indices to prevent processing of duplicates
         // in the future.
@@ -121,6 +125,16 @@ impl<T: ObservableOperation<E>, E: EthSpec> ObservedOperations<T, E> {
         Ok(ObservationOutcome::New(verified_op))
     }

+    pub fn verify_and_observe(
+        &mut self,
+        op: T,
+        head_state: &BeaconState<E>,
+        spec: &ChainSpec,
+    ) -> Result<ObservationOutcome<T, E>, T::Error> {
+        let validate = |op: T| op.validate(head_state, spec);
+        self.verify_and_observe_parametric(op, validate, head_state, spec)
+    }
+
     /// Reset the cache when crossing a fork boundary.
     ///
     /// This prevents an attacker from crafting a self-slashing which is only valid before the fork
@@ -140,3 +154,16 @@ impl<T: ObservableOperation<E>, E: EthSpec> ObservedOperations<T, E> {
         }
     }
 }
+
+impl<T: ObservableOperation<E> + VerifyOperationAt<E>, E: EthSpec> ObservedOperations<T, E> {
+    pub fn verify_and_observe_at(
+        &mut self,
+        op: T,
+        verify_at_epoch: Epoch,
+        head_state: &BeaconState<E>,
+        spec: &ChainSpec,
+    ) -> Result<ObservationOutcome<T, E>, T::Error> {
+        let validate = |op: T| op.validate_at(head_state, verify_at_epoch, spec);
+        self.verify_and_observe_parametric(op, validate, head_state, spec)
+    }
+}
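The refactor above threads validation through a closure, so the same dedup-and-observe flow can verify an op either at the head state's epoch (`verify_and_observe`) or at an explicit epoch (`verify_and_observe_at`). A stripped-down, self-contained sketch of the pattern (toy types, not Lighthouse's):

use std::collections::HashSet;

struct ObservedOps {
    seen_indices: HashSet<u64>,
}

impl ObservedOps {
    /// The closure decides *how* to validate; this method only decides
    /// *whether* to validate (ops from already-seen indices are skipped).
    fn verify_and_observe_parametric<F, E>(&mut self, index: u64, validate: F) -> Result<bool, E>
    where
        F: FnOnce(u64) -> Result<u64, E>,
    {
        if self.seen_indices.contains(&index) {
            return Ok(false); // duplicate: already observed
        }
        let verified_index = validate(index)?;
        self.seen_indices.insert(verified_index);
        Ok(true)
    }
}

fn main() {
    let mut ops = ObservedOps { seen_indices: HashSet::new() };
    // Stand-in for "validate at the current epoch": always succeeds here.
    let validate = |i: u64| Ok::<_, ()>(i);
    assert_eq!(ops.verify_and_observe_parametric(7, validate), Ok(true));
    assert_eq!(ops.verify_and_observe_parametric(7, validate), Ok(false));
}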
@@ -10,3 +10,4 @@ sensitive_url = { path = "../../common/sensitive_url" }
 eth2 = { path = "../../common/eth2" }
 serde = { version = "1.0.116", features = ["derive"] }
 serde_json = "1.0.58"
+lighthouse_version = { path = "../../common/lighthouse_version" }
@@ -17,6 +17,9 @@ pub const DEFAULT_TIMEOUT_MILLIS: u64 = 15000;
 /// This timeout is in accordance with v0.2.0 of the [builder specs](https://github.com/flashbots/mev-boost/pull/20).
 pub const DEFAULT_GET_HEADER_TIMEOUT_MILLIS: u64 = 1000;

+/// Default user agent for HTTP requests.
+pub const DEFAULT_USER_AGENT: &str = lighthouse_version::VERSION;
+
 #[derive(Clone)]
 pub struct Timeouts {
     get_header: Duration,
@@ -41,23 +44,23 @@ pub struct BuilderHttpClient {
     client: reqwest::Client,
     server: SensitiveUrl,
     timeouts: Timeouts,
+    user_agent: String,
 }

 impl BuilderHttpClient {
-    pub fn new(server: SensitiveUrl) -> Result<Self, Error> {
+    pub fn new(server: SensitiveUrl, user_agent: Option<String>) -> Result<Self, Error> {
+        let user_agent = user_agent.unwrap_or(DEFAULT_USER_AGENT.to_string());
+        let client = reqwest::Client::builder().user_agent(&user_agent).build()?;
         Ok(Self {
-            client: reqwest::Client::new(),
+            client,
             server,
             timeouts: Timeouts::default(),
+            user_agent,
         })
     }

-    pub fn new_with_timeouts(server: SensitiveUrl, timeouts: Timeouts) -> Result<Self, Error> {
-        Ok(Self {
-            client: reqwest::Client::new(),
-            server,
-            timeouts,
-        })
+    pub fn get_user_agent(&self) -> &str {
+        &self.user_agent
     }

     async fn get_with_timeout<T: DeserializeOwned, U: IntoUrl>(
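Hypothetical usage of the reworked constructor (a fragment rather than a full program; `BuilderHttpClient::new`, `get_user_agent` and `DEFAULT_USER_AGENT` are from the hunk above, and `SensitiveUrl` from this workspace):

let url = SensitiveUrl::parse("http://localhost:8650").unwrap();

// `None` falls back to DEFAULT_USER_AGENT (the Lighthouse version string).
let client = BuilderHttpClient::new(url.clone(), None).unwrap();
assert_eq!(client.get_user_agent(), DEFAULT_USER_AGENT);

// `Some(..)` takes precedence, e.g. a value from --builder-user-agent.
let client = BuilderHttpClient::new(url, Some("my-agent/0.1".into())).unwrap();
assert_eq!(client.get_user_agent(), "my-agent/0.1");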
@@ -1289,7 +1289,7 @@ mod test {
             transactions,
             ..<_>::default()
         });
-        let json = serde_json::to_value(&ep)?;
+        let json = serde_json::to_value(ep)?;
         Ok(json.get("transactions").unwrap().clone())
     }

@@ -114,6 +114,8 @@ pub enum Error {
         transactions_root: Hash256,
     },
     InvalidJWTSecret(String),
+    InvalidForkForPayload,
+    InvalidPayloadBody(String),
     BeaconStateError(BeaconStateError),
 }

@@ -278,6 +280,8 @@ pub struct Config {
     pub execution_endpoints: Vec<SensitiveUrl>,
     /// Endpoint urls for services providing the builder api.
     pub builder_url: Option<SensitiveUrl>,
+    /// User agent to send with requests to the builder API.
+    pub builder_user_agent: Option<String>,
     /// JWT secrets for the above endpoints running the engine api.
     pub secret_files: Vec<PathBuf>,
     /// The default fee recipient to use on the beacon node if none if provided from
@@ -308,6 +312,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
         let Config {
             execution_endpoints: urls,
             builder_url,
+            builder_user_agent,
             secret_files,
             suggested_fee_recipient,
             jwt_id,
@@ -368,12 +373,17 @@

         let builder = builder_url
             .map(|url| {
-                let builder_client = BuilderHttpClient::new(url.clone()).map_err(Error::Builder);
-                info!(log,
+                let builder_client = BuilderHttpClient::new(url.clone(), builder_user_agent)
+                    .map_err(Error::Builder)?;
+
+                info!(
+                    log,
                     "Connected to external block builder";
                     "builder_url" => ?url,
-                    "builder_profit_threshold" => builder_profit_threshold);
-                builder_client
+                    "builder_profit_threshold" => builder_profit_threshold,
+                    "local_user_agent" => builder_client.get_user_agent(),
+                );
+                Ok::<_, Error>(builder_client)
             })
             .transpose()?;

@@ -1676,14 +1686,60 @@ impl<T: EthSpec> ExecutionLayer<T> {
             .map_err(Error::EngineError)
     }

-    pub async fn get_payload_by_block_hash(
+    /// Fetch a full payload from the execution node.
+    ///
+    /// This will fail if the payload is not from the finalized portion of the chain.
+    pub async fn get_payload_for_header(
+        &self,
+        header: &ExecutionPayloadHeader<T>,
+        fork: ForkName,
+    ) -> Result<Option<ExecutionPayload<T>>, Error> {
+        let hash = header.block_hash();
+        let block_number = header.block_number();
+
+        // Handle default payload body.
+        if header.block_hash() == ExecutionBlockHash::zero() {
+            let payload = match fork {
+                ForkName::Merge => ExecutionPayloadMerge::default().into(),
+                ForkName::Capella => ExecutionPayloadCapella::default().into(),
+                ForkName::Deneb => ExecutionPayloadDeneb::default().into(),
+                ForkName::Base | ForkName::Altair => {
+                    return Err(Error::InvalidForkForPayload);
+                }
+            };
+            return Ok(Some(payload));
+        }
+
+        // Use efficient payload bodies by range method if supported.
+        let capabilities = self.get_engine_capabilities(None).await?;
+        if capabilities.get_payload_bodies_by_range_v1 {
+            let mut payload_bodies = self.get_payload_bodies_by_range(block_number, 1).await?;
+
+            if payload_bodies.len() != 1 {
+                return Ok(None);
+            }
+
+            let opt_payload_body = payload_bodies.pop().flatten();
+            opt_payload_body
+                .map(|body| {
+                    body.to_payload(header.clone())
+                        .map_err(Error::InvalidPayloadBody)
+                })
+                .transpose()
+        } else {
+            // Fall back to eth_blockByHash.
+            self.get_payload_by_hash_legacy(hash, fork).await
+        }
+    }
+
+    pub async fn get_payload_by_hash_legacy(
         &self,
         hash: ExecutionBlockHash,
         fork: ForkName,
     ) -> Result<Option<ExecutionPayload<T>>, Error> {
         self.engine()
             .request(|engine| async move {
-                self.get_payload_by_block_hash_from_engine(engine, hash, fork)
+                self.get_payload_by_hash_from_engine(engine, hash, fork)
                     .await
             })
             .await
@@ -1691,7 +1747,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
             .map_err(Error::EngineError)
     }

-    async fn get_payload_by_block_hash_from_engine(
+    async fn get_payload_by_hash_from_engine(
         &self,
         engine: &Engine,
         hash: ExecutionBlockHash,
@@ -1705,7 +1761,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
             ForkName::Capella => Ok(Some(ExecutionPayloadCapella::default().into())),
             ForkName::Deneb => Ok(Some(ExecutionPayloadDeneb::default().into())),
             ForkName::Base | ForkName::Altair => Err(ApiError::UnsupportedForkVariant(
-                format!("called get_payload_by_block_hash_from_engine with {}", fork),
+                format!("called get_payload_by_hash_from_engine with {}", fork),
             )),
         };
     }
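The new reconstruction path tries three routes in order. A self-contained sketch of just that decision (toy enum; the real fork and engine types are in the hunks above):

#[derive(Debug, PartialEq)]
enum Route {
    DefaultPayload,    // zero block hash: synthesize the per-fork default payload
    BodiesByRange,     // engine_getPayloadBodiesByRangeV1 body + the header we hold
    LegacyBlockByHash, // fall back to the legacy block-by-hash path
}

fn pick_route(block_hash_is_zero: bool, supports_bodies_by_range: bool) -> Route {
    if block_hash_is_zero {
        Route::DefaultPayload
    } else if supports_bodies_by_range {
        Route::BodiesByRange
    } else {
        Route::LegacyBlockByHash
    }
}

fn main() {
    assert_eq!(pick_route(true, true), Route::DefaultPayload);
    assert_eq!(pick_route(false, true), Route::BodiesByRange);
    assert_eq!(pick_route(false, false), Route::LegacyBlockByHash);
}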
@@ -152,6 +152,7 @@ pub async fn create_api_server_on_port<T: BeaconChainTypes>(
         None,
         meta_data,
         vec![],
+        false,
         &log,
     ));

@@ -1,6 +1,6 @@
 //! Generic tests that make use of the (newer) `InteractiveApiTester`
 use beacon_chain::{
-    chain_config::ReOrgThreshold,
+    chain_config::{DisallowedReOrgOffsets, ReOrgThreshold},
     test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy},
 };
 use eth2::types::DepositContractData;
@@ -110,6 +110,8 @@ pub struct ReOrgTest {
     misprediction: bool,
     /// Whether to expect withdrawals to change on epoch boundaries.
     expect_withdrawals_change_on_epoch: bool,
+    /// Epoch offsets to avoid proposing reorg blocks at.
+    disallowed_offsets: Vec<u64>,
 }

 impl Default for ReOrgTest {
@@ -127,6 +129,7 @@ impl Default for ReOrgTest {
             should_re_org: true,
             misprediction: false,
             expect_withdrawals_change_on_epoch: false,
+            disallowed_offsets: vec![],
         }
     }
 }
@@ -238,6 +241,32 @@ pub async fn proposer_boost_re_org_head_distance() {
     .await;
 }

+// Check that a re-org at a disallowed offset fails.
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+pub async fn proposer_boost_re_org_disallowed_offset() {
+    let offset = 4;
+    proposer_boost_re_org_test(ReOrgTest {
+        head_slot: Slot::new(E::slots_per_epoch() + offset - 1),
+        disallowed_offsets: vec![offset],
+        should_re_org: false,
+        ..Default::default()
+    })
+    .await;
+}
+
+// Check that a re-org at the *only* allowed offset succeeds.
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+pub async fn proposer_boost_re_org_disallowed_offset_exact() {
+    let offset = 4;
+    let disallowed_offsets = (0..E::slots_per_epoch()).filter(|o| *o != offset).collect();
+    proposer_boost_re_org_test(ReOrgTest {
+        head_slot: Slot::new(E::slots_per_epoch() + offset - 1),
+        disallowed_offsets,
+        ..Default::default()
+    })
+    .await;
+}
+
 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
 pub async fn proposer_boost_re_org_very_unhealthy() {
     proposer_boost_re_org_test(ReOrgTest {
@@ -286,6 +315,7 @@ pub async fn proposer_boost_re_org_test(
         should_re_org,
         misprediction,
         expect_withdrawals_change_on_epoch,
+        disallowed_offsets,
     }: ReOrgTest,
 ) {
     assert!(head_slot > 0);
@@ -320,6 +350,9 @@ pub async fn proposer_boost_re_org_test(
                 .proposer_re_org_max_epochs_since_finalization(Epoch::new(
                     max_epochs_since_finalization,
                 ))
+                .proposer_re_org_disallowed_offsets(
+                    DisallowedReOrgOffsets::new::<E>(disallowed_offsets).unwrap(),
+                )
             })),
         )
         .await;
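A note on the arithmetic in these tests: the re-orging proposal targets `head_slot + 1` (see the `re_org_block_slot` hunk earlier), so `head_slot = E::slots_per_epoch() + offset - 1` places that proposal exactly at the given epoch offset. A quick self-contained check:

// Offset within the epoch of the slot a re-org proposal would target.
fn reorg_block_offset(head_slot: u64, slots_per_epoch: u64) -> u64 {
    (head_slot + 1) % slots_per_epoch
}

fn main() {
    let slots_per_epoch = 32; // mainnet value
    let offset = 4;
    let head_slot = slots_per_epoch + offset - 1;
    assert_eq!(reorg_block_offset(head_slot, slots_per_epoch), offset);
}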
@@ -1883,21 +1883,6 @@ impl ApiTester {
             .unwrap();
         assert_eq!(result_ssz, expected, "{:?}", state_id);

-        // Check legacy v1 API.
-        let result_v1 = self
-            .client
-            .get_debug_beacon_states_v1(state_id.0)
-            .await
-            .unwrap();
-
-        if let (Some(json), Some(expected)) = (&result_v1, &expected) {
-            assert_eq!(json.version, None);
-            assert_eq!(json.data, *expected, "{:?}", state_id);
-        } else {
-            assert_eq!(result_v1, None);
-            assert_eq!(expected, None);
-        }
-
         // Check that version headers are provided.
         let url = self
             .client
@@ -101,6 +101,9 @@ pub struct Config {
     /// List of trusted libp2p nodes which are not scored.
     pub trusted_peers: Vec<PeerIdSerialized>,

+    /// Disables peer scoring altogether.
+    pub disable_peer_scoring: bool,
+
     /// Client version
     pub client_version: String,

@@ -309,6 +312,7 @@ impl Default for Config {
             boot_nodes_multiaddr: vec![],
             libp2p_nodes: vec![],
             trusted_peers: vec![],
+            disable_peer_scoring: false,
             client_version: lighthouse_version::version_with_platform(),
             disable_discovery: false,
             upnp_enabled: true,
@@ -1162,6 +1162,7 @@ mod tests {
                 syncnets: Default::default(),
             }),
             vec![],
+            false,
             &log,
         );
         Discovery::new(&keypair, &config, Arc::new(globals), &log)
@@ -41,12 +41,14 @@ pub struct PeerDB<TSpec: EthSpec> {
     disconnected_peers: usize,
     /// Counts banned peers in total and per ip
     banned_peers_count: BannedPeersCount,
+    /// Specifies if peer scoring is disabled.
+    disable_peer_scoring: bool,
     /// PeerDB's logger
     log: slog::Logger,
 }

 impl<TSpec: EthSpec> PeerDB<TSpec> {
-    pub fn new(trusted_peers: Vec<PeerId>, log: &slog::Logger) -> Self {
+    pub fn new(trusted_peers: Vec<PeerId>, disable_peer_scoring: bool, log: &slog::Logger) -> Self {
         // Initialize the peers hashmap with trusted peers
         let peers = trusted_peers
             .into_iter()
@@ -56,6 +58,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
             log: log.clone(),
             disconnected_peers: 0,
             banned_peers_count: BannedPeersCount::default(),
+            disable_peer_scoring,
             peers,
         }
     }
@@ -704,7 +707,11 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
                 warn!(log_ref, "Updating state of unknown peer";
                     "peer_id" => %peer_id, "new_state" => ?new_state);
             }
-            PeerInfo::default()
+            if self.disable_peer_scoring {
+                PeerInfo::trusted_peer_info()
+            } else {
+                PeerInfo::default()
+            }
         });

         // Ban the peer if the score is not already low enough.
@@ -1300,7 +1307,7 @@ mod tests {

     fn get_db() -> PeerDB<M> {
         let log = build_log(slog::Level::Debug, false);
-        PeerDB::new(vec![], &log)
+        PeerDB::new(vec![], false, &log)
     }

     #[test]
@@ -1999,7 +2006,7 @@ mod tests {
     fn test_trusted_peers_score() {
         let trusted_peer = PeerId::random();
         let log = build_log(slog::Level::Debug, false);
-        let mut pdb: PeerDB<M> = PeerDB::new(vec![trusted_peer], &log);
+        let mut pdb: PeerDB<M> = PeerDB::new(vec![trusted_peer], false, &log);

         pdb.connect_ingoing(&trusted_peer, "/ip4/0.0.0.0".parse().unwrap(), None);

@@ -2018,4 +2025,28 @@ mod tests {
             Score::max_score().score()
         );
     }
+
+    #[test]
+    fn test_disable_peer_scoring() {
+        let peer = PeerId::random();
+        let log = build_log(slog::Level::Debug, false);
+        let mut pdb: PeerDB<M> = PeerDB::new(vec![], true, &log);
+
+        pdb.connect_ingoing(&peer, "/ip4/0.0.0.0".parse().unwrap(), None);
+
+        // Check trusted status and score
+        assert!(pdb.peer_info(&peer).unwrap().is_trusted());
+        assert_eq!(
+            pdb.peer_info(&peer).unwrap().score().score(),
+            Score::max_score().score()
+        );
+
+        // Adding/Subtracting score should have no effect on a trusted peer
+        add_score(&mut pdb, &peer, -50.0);
+
+        assert_eq!(
+            pdb.peer_info(&peer).unwrap().score().score(),
+            Score::max_score().score()
+        );
+    }
 }
@@ -173,6 +173,7 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
                 .iter()
                 .map(|x| PeerId::from(x.clone()))
                 .collect(),
+            config.disable_peer_scoring,
             &log,
         );
         Arc::new(globals)
@@ -39,6 +39,7 @@ impl<TSpec: EthSpec> NetworkGlobals<TSpec> {
         listen_port_tcp6: Option<u16>,
         local_metadata: MetaData<TSpec>,
         trusted_peers: Vec<PeerId>,
+        disable_peer_scoring: bool,
         log: &slog::Logger,
     ) -> Self {
         NetworkGlobals {
@@ -48,7 +49,7 @@ impl<TSpec: EthSpec> NetworkGlobals<TSpec> {
             listen_port_tcp4,
             listen_port_tcp6,
             local_metadata: RwLock::new(local_metadata),
-            peers: RwLock::new(PeerDB::new(trusted_peers, log)),
+            peers: RwLock::new(PeerDB::new(trusted_peers, disable_peer_scoring, log)),
             gossipsub_subscriptions: RwLock::new(HashSet::new()),
             sync_state: RwLock::new(SyncState::Stalled),
             backfill_state: RwLock::new(BackFillState::NotRequired),
@@ -144,6 +145,7 @@ impl<TSpec: EthSpec> NetworkGlobals<TSpec> {
                 syncnets: Default::default(),
             }),
             vec![],
+            false,
             log,
         )
     }
@@ -185,6 +185,7 @@ impl TestRig {
             None,
             meta_data,
             vec![],
+            false,
             &log,
         ));

@@ -56,7 +56,7 @@ pub const QUEUED_ATTESTATION_DELAY: Duration = Duration::from_secs(12);
 pub const QUEUED_LIGHT_CLIENT_UPDATE_DELAY: Duration = Duration::from_secs(12);

 /// For how long to queue rpc blocks before sending them back for reprocessing.
-pub const QUEUED_RPC_BLOCK_DELAY: Duration = Duration::from_secs(3);
+pub const QUEUED_RPC_BLOCK_DELAY: Duration = Duration::from_secs(4);

 /// Set an arbitrary upper-bound on the number of queued blocks to avoid DoS attacks. The fact that
 /// we signature-verify blocks before putting them in the queue *should* protect against this, but
@@ -521,7 +521,7 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> {
             return;
         }

-        // Queue the block for 1/4th of a slot
+        // Queue the block for 1/3rd of a slot
         self.rpc_block_delay_queue
             .insert(rpc_block, QUEUED_RPC_BLOCK_DELAY);
     }
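The bump from 3s to 4s lines up with the corrected comment ("1/3rd of a slot"): one third of a 12-second mainnet slot is the 4-second attestation deadline that the duplicate-block requeue logic in the next hunks keys off.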
@ -9,14 +9,17 @@ use crate::sync::manager::{BlockProcessType, SyncMessage};
|
|||||||
use crate::sync::{BatchProcessResult, ChainId};
|
use crate::sync::{BatchProcessResult, ChainId};
|
||||||
use beacon_chain::blob_verification::AsBlock;
|
use beacon_chain::blob_verification::AsBlock;
|
||||||
use beacon_chain::blob_verification::BlockWrapper;
|
use beacon_chain::blob_verification::BlockWrapper;
|
||||||
use beacon_chain::{AvailabilityProcessingStatus, CountUnrealized};
|
|
||||||
use beacon_chain::{
|
use beacon_chain::{
|
||||||
|
observed_block_producers::Error as ObserveError, validator_monitor::get_block_delay_ms,
|
||||||
BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError,
|
BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError,
|
||||||
NotifyExecutionLayer,
|
NotifyExecutionLayer,
|
||||||
};
|
};
|
||||||
|
use beacon_chain::{AvailabilityProcessingStatus, CountUnrealized};
|
||||||
use lighthouse_network::PeerAction;
|
use lighthouse_network::PeerAction;
|
||||||
use slog::{debug, error, info, warn};
|
use slog::{debug, error, info, warn};
|
||||||
|
use slot_clock::SlotClock;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
use std::time::{SystemTime, UNIX_EPOCH};
|
||||||
use tokio::sync::mpsc;
|
use tokio::sync::mpsc;
|
||||||
use types::{Epoch, Hash256, SignedBeaconBlock};
|
use types::{Epoch, Hash256, SignedBeaconBlock};
|
||||||
|
|
||||||
@ -85,6 +88,66 @@ impl<T: BeaconChainTypes> Worker<T> {
                 return;
             }
         };

+        // Returns `true` if the time now is after the 4s attestation deadline.
+        let block_is_late = SystemTime::now()
+            .duration_since(UNIX_EPOCH)
+            // If we can't read the system time clock then indicate that the
+            // block is late (and therefore should *not* be requeued). This
+            // avoids infinite loops.
+            .map_or(true, |now| {
+                get_block_delay_ms(now, block.message(), &self.chain.slot_clock)
+                    > self.chain.slot_clock.unagg_attestation_production_delay()
+            });
+
+        // Checks if a block from this proposer is already known.
+        let proposal_already_known = || {
+            match self
+                .chain
+                .observed_block_producers
+                .read()
+                .proposer_has_been_observed(block.message())
+            {
+                Ok(is_observed) => is_observed,
+                // Both of these blocks will be rejected, so reject them now rather
+                // than re-queuing them.
+                Err(ObserveError::FinalizedBlock { .. })
+                | Err(ObserveError::ValidatorIndexTooHigh { .. }) => false,
+            }
+        };
+
+        // If we've already seen a block from this proposer *and* the block
+        // arrived before the attestation deadline, requeue it to ensure it is
+        // imported late enough that it won't receive a proposer boost.
+        if !block_is_late && proposal_already_known() {
+            debug!(
+                self.log,
+                "Delaying processing of duplicate RPC block";
+                "block_root" => ?block_root,
+                "proposer" => block.message().proposer_index(),
+                "slot" => block.slot()
+            );
+
+            // Send message to work reprocess queue to retry the block
+            let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock {
+                block_root,
+                block: block.clone(),
+                process_type,
+                seen_timestamp,
+                should_process: true,
+            });
+
+            if reprocess_tx.try_send(reprocess_msg).is_err() {
+                error!(
+                    self.log,
+                    "Failed to inform block import";
+                    "source" => "rpc",
+                    "block_root" => %block_root
+                );
+            }
+            return;
+        }
+
         let slot = block.slot();
         let parent_root = block.message().parent_root();

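The hunk above gates requeueing on two conditions: the block must still be timely, and the proposer must already be known. As a standalone illustration (a minimal sketch; the 4-second deadline matches the diff's own comment about the mainnet attestation deadline, but the types here are simplified stand-ins, not the actual `BeaconChain` or `SlotClock` APIs):

```rust
use std::time::Duration;

/// Simplified stand-in for the check above: a block is "late" if its delay
/// past the slot start exceeds the unaggregated-attestation deadline
/// (4s into a 12s mainnet slot).
fn block_is_late(block_delay: Duration) -> bool {
    const ATTESTATION_DEADLINE: Duration = Duration::from_secs(4);
    block_delay > ATTESTATION_DEADLINE
}

fn should_requeue(block_delay: Duration, proposal_already_known: bool) -> bool {
    // Mirrors `!block_is_late && proposal_already_known()`: only a timely
    // duplicate is delayed, so it cannot pick up a proposer boost on import.
    !block_is_late(block_delay) && proposal_already_known
}

fn main() {
    assert!(should_requeue(Duration::from_secs(2), true));
    assert!(!should_requeue(Duration::from_secs(5), true)); // late: process now
    assert!(!should_requeue(Duration::from_secs(2), false)); // first sighting
}
```

Requeueing only timely duplicates means a re-broadcast block cannot be imported early enough to attract a proposer boost, while genuinely late or first-seen blocks are still processed immediately.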
@ -497,7 +497,8 @@ impl<T: EthSpec> OperationPool<T> {
             |exit| {
                 filter(exit.as_inner())
                     && exit.signature_is_still_valid(&state.fork())
-                    && verify_exit(state, exit.as_inner(), VerifySignatures::False, spec).is_ok()
+                    && verify_exit(state, None, exit.as_inner(), VerifySignatures::False, spec)
+                        .is_ok()
             },
             |exit| exit.as_inner().clone(),
             T::MaxVoluntaryExits::to_usize(),
@ -240,6 +240,14 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 .help("Disables the discv5 discovery protocol. The node will not search for new peers or participate in the discovery protocol.")
                 .takes_value(false),
         )
+        .arg(
+            Arg::with_name("disable-peer-scoring")
+                .long("disable-peer-scoring")
+                .help("Disables peer scoring in lighthouse. WARNING: This is a dev-only flag meant to be used in local testing scenarios. \
+                    Using this flag on a real network may cause your node to become eclipsed and see a different view of the network.")
+                .takes_value(false)
+                .hidden(true),
+        )
         .arg(
             Arg::with_name("trusted-peers")
                 .long("trusted-peers")
@ -919,6 +927,28 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 allowed. Default: 2")
                 .conflicts_with("disable-proposer-reorgs")
         )
+        .arg(
+            Arg::with_name("proposer-reorg-cutoff")
+                .long("proposer-reorg-cutoff")
+                .value_name("MILLISECONDS")
+                .help("Maximum delay after the start of the slot at which to propose a reorging \
+                    block. Lower values can prevent failed reorgs by ensuring the block has \
+                    ample time to propagate and be processed by the network. The default is \
+                    1/12th of a slot (1 second on mainnet)")
+                .conflicts_with("disable-proposer-reorgs")
+        )
+        .arg(
+            Arg::with_name("proposer-reorg-disallowed-offsets")
+                .long("proposer-reorg-disallowed-offsets")
+                .value_name("N1,N2,...")
+                .help("Comma-separated list of integer offsets which can be used to avoid \
+                    proposing reorging blocks at certain slots. An offset of N means that \
+                    reorging proposals will not be attempted at any slot such that \
+                    `slot % SLOTS_PER_EPOCH == N`. By default only re-orgs at offset 0 will be \
+                    avoided. Any offsets supplied with this flag will impose additional \
+                    restrictions.")
+                .conflicts_with("disable-proposer-reorgs")
+        )
         .arg(
             Arg::with_name("prepare-payload-lookahead")
                 .long("prepare-payload-lookahead")
@ -1012,6 +1042,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 .default_value("0")
                 .takes_value(true)
         )
+        .arg(
+            Arg::with_name("builder-user-agent")
+                .long("builder-user-agent")
+                .value_name("STRING")
+                .help("The HTTP user agent to send alongside requests to the builder URL. The \
+                    default is Lighthouse's version string.")
+                .requires("builder")
+                .takes_value(true)
+        )
         .arg(
             Arg::with_name("count-unrealized")
                 .long("count-unrealized")

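The help text above says the user agent defaults to Lighthouse's version string. A minimal sketch of that fallback pattern (illustrative only; the function name and the hard-coded version string are assumptions, not the actual builder-client wiring):

```rust
/// Illustrative only: use the configured user agent, else a version string.
fn effective_user_agent(configured: Option<String>) -> String {
    configured.unwrap_or_else(|| "Lighthouse/v4.1.0-abcdef".to_string())
}

fn main() {
    assert_eq!(effective_user_agent(None), "Lighthouse/v4.1.0-abcdef");
    assert_eq!(
        effective_user_agent(Some("my-agent/1.0".into())),
        "my-agent/1.0"
    );
}
```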
@ -1,5 +1,5 @@
 use beacon_chain::chain_config::{
-    ReOrgThreshold, DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR,
+    DisallowedReOrgOffsets, ReOrgThreshold, DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR,
     DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_THRESHOLD,
 };
 use beacon_chain::TrustedSetup;
@ -330,6 +330,9 @@ pub fn get_config<E: EthSpec>(
             let payload_builder =
                 parse_only_one_value(endpoint, SensitiveUrl::parse, "--builder", log)?;
             el_config.builder_url = Some(payload_builder);
+
+            el_config.builder_user_agent =
+                clap_utils::parse_optional(cli_args, "builder-user-agent")?;
         }

         // Set config values from parse values.
@ -723,6 +726,23 @@ pub fn get_config<E: EthSpec>(
         client_config.chain.re_org_max_epochs_since_finalization =
             clap_utils::parse_optional(cli_args, "proposer-reorg-epochs-since-finalization")?
                 .unwrap_or(DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION);
+        client_config.chain.re_org_cutoff_millis =
+            clap_utils::parse_optional(cli_args, "proposer-reorg-cutoff")?;
+
+        if let Some(disallowed_offsets_str) =
+            clap_utils::parse_optional::<String>(cli_args, "proposer-reorg-disallowed-offsets")?
+        {
+            let disallowed_offsets = disallowed_offsets_str
+                .split(',')
+                .map(|s| {
+                    s.parse()
+                        .map_err(|e| format!("invalid disallowed-offsets: {e:?}"))
+                })
+                .collect::<Result<Vec<u64>, _>>()?;
+            client_config.chain.re_org_disallowed_offsets =
+                DisallowedReOrgOffsets::new::<E>(disallowed_offsets)
+                    .map_err(|e| format!("invalid disallowed-offsets: {e:?}"))?;
+        }
     }

     // Note: This overrides any previous flags that enable this option.
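The offset parsing above can be exercised in isolation; this sketch reproduces the same `split`/`parse`/`collect` shape outside of clap (`parse_offsets` is a hypothetical helper for illustration, not part of the diff):

```rust
/// Parse a comma-separated list such as "0,8,16" into offsets, mirroring the
/// `get_config` logic above.
fn parse_offsets(s: &str) -> Result<Vec<u64>, String> {
    s.split(',')
        .map(|s| {
            s.parse()
                .map_err(|e| format!("invalid disallowed-offsets: {e:?}"))
        })
        .collect()
}

fn main() {
    assert_eq!(parse_offsets("0,8,16"), Ok(vec![0, 8, 16]));
    // Any non-integer entry fails the whole flag, matching the `?` above.
    assert!(parse_offsets("0,x").is_err());
}
```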
@ -1045,6 +1065,10 @@ pub fn set_network_config(
             .collect::<Result<Vec<Multiaddr>, _>>()?;
     }

+    if cli_args.is_present("disable-peer-scoring") {
+        config.disable_peer_scoring = true;
+    }
+
     if let Some(trusted_peers_str) = cli_args.value_of("trusted-peers") {
         config.trusted_peers = trusted_peers_str
             .split(',')
@ -1,6 +1,6 @@
 //! Implementation of historic state reconstruction (given complete block history).
 use crate::hot_cold_store::{HotColdDB, HotColdDBError};
-use crate::{Error, ItemStore, KeyValueStore};
+use crate::{Error, ItemStore};
 use itertools::{process_results, Itertools};
 use slog::info;
 use state_processing::{
@ -13,8 +13,8 @@ use types::{EthSpec, Hash256};
 impl<E, Hot, Cold> HotColdDB<E, Hot, Cold>
 where
     E: EthSpec,
-    Hot: KeyValueStore<E> + ItemStore<E>,
+    Hot: ItemStore<E>,
-    Cold: KeyValueStore<E> + ItemStore<E>,
+    Cold: ItemStore<E>,
 {
     pub fn reconstruct_historic_states(self: &Arc<Self>) -> Result<(), Error> {
         let mut anchor = if let Some(anchor) = self.get_anchor_info() {
@ -2,7 +2,6 @@

 * [Introduction](./intro.md)
 * [Installation](./installation.md)
-  * [System Requirements](./system-requirements.md)
   * [Pre-Built Binaries](./installation-binaries.md)
   * [Docker](./docker.md)
   * [Build from Source](./installation-source.md)
@ -16,21 +16,18 @@ way to run Lighthouse without building the image yourself.
 Obtain the latest image with:

 ```bash
-$ docker pull sigp/lighthouse
+docker pull sigp/lighthouse
 ```

 Download and test the image with:

 ```bash
-$ docker run sigp/lighthouse lighthouse --version
+docker run sigp/lighthouse lighthouse --version
 ```

 If you can see the latest [Lighthouse release](https://github.com/sigp/lighthouse/releases) version
 (see example below), then you've successfully installed Lighthouse via Docker.

-> Pro tip: try the `latest-modern` image for a 20-30% speed-up! See [Available Docker
-> Images](#available-docker-images) below.
-
 ### Example Version Output

 ```
@ -38,6 +35,9 @@ Lighthouse vx.x.xx-xxxxxxxxx
 BLS Library: xxxx-xxxxxxx
 ```

+> Pro tip: try the `latest-modern` image for a 20-30% speed-up! See [Available Docker
+> Images](#available-docker-images) below.
+
 ### Available Docker Images

 There are several images available on Docker Hub.
@ -47,11 +47,10 @@ Lighthouse with optimizations enabled. If you are running on older hardware then
 `latest` image bundles a _portable_ version of Lighthouse which is slower but with better hardware
 compatibility (see [Portability](./installation-binaries.md#portability)).

-To install a specific tag (in this case `latest-modern`) add the tag name to your `docker` commands
-like so:
+To install a specific tag (in this case `latest-modern`), add the tag name to your `docker` commands:

 ```
-$ docker pull sigp/lighthouse:latest-modern
+docker pull sigp/lighthouse:latest-modern
 ```

 Image tags follow this format:
@ -65,17 +64,17 @@ The `version` is:
 * `vX.Y.Z` for a tagged Lighthouse release, e.g. `v2.1.1`
 * `latest` for the `stable` branch (latest release) or `unstable` branch

-The `stability` is:
-
-* `-unstable` for the `unstable` branch
-* empty for a tagged release or the `stable` branch
-
 The `arch` is:

 * `-amd64` for x86_64, e.g. Intel, AMD
 * `-arm64` for aarch64, e.g. Raspberry Pi 4
 * empty for a multi-arch image (works on either `amd64` or `arm64` platforms)

+The `stability` is:
+
+* `-unstable` for the `unstable` branch
+* empty for a tagged release or the `stable` branch
+
 The `modernity` is:

 * `-modern` for optimized builds
@ -99,13 +98,13 @@ To build the image from source, navigate to
 the root of the repository and run:

 ```bash
-$ docker build . -t lighthouse:local
+docker build . -t lighthouse:local
 ```

 The build will likely take several minutes. Once it's built, test it with:

 ```bash
-$ docker run lighthouse:local lighthouse --help
+docker run lighthouse:local lighthouse --help
 ```

 ## Using the Docker image
@ -113,12 +112,12 @@ $ docker run lighthouse:local lighthouse --help
 You can run a Docker beacon node with the following command:

 ```bash
-$ docker run -p 9000:9000/tcp -p 9000:9000/udp -p 127.0.0.1:5052:5052 -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse --network mainnet beacon --http --http-address 0.0.0.0
+docker run -p 9000:9000/tcp -p 9000:9000/udp -p 127.0.0.1:5052:5052 -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse --network mainnet beacon --http --http-address 0.0.0.0
 ```

-> To join the Prater testnet, use `--network prater` instead.
+> To join the Goerli testnet, use `--network goerli` instead.

-> The `-p` and `-v` and values are described below.
+> The `-v` (Volumes) and `-p` (Ports) values are described below.

 ### Volumes

@ -131,7 +130,7 @@ The following example runs a beacon node with the data directory
 mapped to the user's home directory:

 ```bash
-$ docker run -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse beacon
+docker run -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse beacon
 ```

 ### Ports

@ -140,14 +139,14 @@ In order to be a good peer and serve other peers you should expose port `9000` f
 Use the `-p` flag to do this:

 ```bash
-$ docker run -p 9000:9000/tcp -p 9000:9000/udp sigp/lighthouse lighthouse beacon
+docker run -p 9000:9000/tcp -p 9000:9000/udp sigp/lighthouse lighthouse beacon
 ```

 If you use the `--http` flag you may also want to expose the HTTP port with `-p
 127.0.0.1:5052:5052`.

 ```bash
-$ docker run -p 9000:9000/tcp -p 9000:9000/udp -p 127.0.0.1:5052:5052 sigp/lighthouse lighthouse beacon --http --http-address 0.0.0.0
+docker run -p 9000:9000/tcp -p 9000:9000/udp -p 127.0.0.1:5052:5052 sigp/lighthouse lighthouse beacon --http --http-address 0.0.0.0
 ```

 [docker_hub]: https://hub.docker.com/repository/docker/sigp/lighthouse/
@ -9,6 +9,11 @@
 - [What is "Syncing deposit contract block cache"?](#what-is-syncing-deposit-contract-block-cache)
 - [Can I use redundancy in my staking setup?](#can-i-use-redundancy-in-my-staking-setup)
 - [How can I monitor my validators?](#how-can-i-monitor-my-validators)
+- [I see beacon logs showing `WARN: Execution engine called failed`, what should I do?](#i-see-beacon-logs-showing-warn-execution-engine-called-failed-what-should-i-do)
+- [How do I check or update my withdrawal credentials?](#how-do-i-check-or-update-my-withdrawal-credentials)
+- [I am missing attestations. Why?](#i-am-missing-attestations-why)
+- [Sometimes I miss the attestation head vote, resulting in a penalty. Is this normal?](#sometimes-i-miss-the-attestation-head-vote-resulting-in-a-penalty-is-this-normal)
+- [My beacon node is stuck at downloading historical blocks using checkpoint sync. What can I do?](#my-beacon-node-is-stuck-at-downloading-historical-blocks-using-checkpoint-sync-what-can-i-do)

 ### Why does it take so long for a validator to be activated?

@ -185,4 +190,47 @@ However, there are some components which can be configured with redundancy. See

 Apart from using block explorers, you may use the "Validator Monitor" built into Lighthouse which
 provides logging and Prometheus/Grafana metrics for individual validators. See [Validator
-Monitoring](./validator-monitoring.md) for more information.
+Monitoring](./validator-monitoring.md) for more information. Lighthouse has also developed Lighthouse UI (Siren) to monitor performance; see [Lighthouse UI (Siren)](./lighthouse-ui.md).
+
+### I see beacon logs showing `WARN: Execution engine called failed`, what should I do?
+
+The `WARN Execution engine called failed` log is shown when the beacon node cannot reach the execution engine. When this warning occurs, it will be followed by a detailed message. A frequently encountered example of the error message is:
+
+`error: Reqwest(reqwest::Error { kind: Request, url: Url { scheme: "http", cannot_be_a_base: false, username: "", password: None, host: Some(Ipv4(127.0.0.1)), port: Some(8551), path: "/", query: None, fragment: None }, source: TimedOut }), service: exec`
+
+which says `TimedOut` at the end of the message. This means that the execution engine has not responded in time to the beacon node. There are a few reasons why this can occur:
+1. The execution engine is not synced. Check the log of the execution engine to make sure that it is synced. If it is syncing, wait until it is synced and the error will disappear. You will see the beacon node log `INFO Execution engine online` when it is synced.
+1. The computer is overloaded. Check the CPU and RAM usage to see if it is overloaded. You can use `htop` to check CPU and RAM usage.
+1. Your SSD is slow. Check if your SSD is in "The Bad" list [here](https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038). If your SSD is in "The Bad" list, it cannot keep up with the network and you may want to consider upgrading to a better SSD.
+
+If the error message is caused by no. 1 above, you may want to look further. If the execution engine goes out of sync suddenly, it is usually caused by an ungraceful shutdown. The common causes of ungraceful shutdown are:
+- Power outage. If power outages are an issue at your location, consider getting a UPS to avoid ungraceful shutdown of services.
+- The service file is not stopped properly. To overcome this, make sure that the process is stopped properly, e.g., during client updates.
+- Out-of-memory (OOM) error. This can happen when the system memory usage has reached its maximum and causes the execution engine to be killed. When this occurs, the log file will show `Main process exited, code=killed, status=9/KILL`. You can also run `sudo journalctl -a --since "18 hours ago" | grep -i "killed process"` to confirm that the execution client has been killed due to OOM. If you are using Geth as the execution client, a short-term solution is to reduce the resources used, for example: (1) reduce the cache by adding the flag `--cache 2048`; (2) connect to fewer peers using the flag `--maxpeers 10`. If the OOM occurs rather frequently, a long-term solution is to increase the memory capacity of the computer.
+
+### How do I check or update my withdrawal credentials?
+
+Withdrawals will be available after the Capella/Shanghai upgrades on 12<sup>th</sup> April 2023. To check if you are eligible for withdrawals, go to the [Staking launchpad](https://launchpad.ethereum.org/en/withdrawals), enter your validator index and click `verify on mainnet`:
+- `withdrawals enabled` means you will automatically receive withdrawals to the withdrawal address that you set.
+- `withdrawals not enabled` means you will need to update your withdrawal credentials from the `0x00` type to the `0x01` type. The common way to do this is using the `Staking deposit CLI` or `ethdo`, with the instructions available [here](https://launchpad.ethereum.org/en/withdrawals#update-your-keys).
+
+In the case of `withdrawals not enabled`, you can update your withdrawal credentials **anytime**, and there is no deadline for that. The catch is that as long as you do not update your withdrawal credentials, your rewards in the beacon chain will continue to be locked in the beacon chain. Only after you update the withdrawal credentials will the rewards be withdrawn to the withdrawal address.
+
+### I am missing attestations. Why?
+
+The first thing is to ensure both consensus and execution clients are synced with the network. If they are synced, there may still be some issues with the node setup itself that are causing the missed attestations. Check the setup to ensure that:
+- the clock is synced
+- the computer has sufficient resources and is not overloaded
+- the internet is working well
+- you have sufficient peers
+
+You can see more information on the [EthStaker KB](https://ethstaker.gitbook.io/ethstaker-knowledge-base/help/missed-attestations). Once the above points are good, missed attestations should be a rare occurrence.
+
+### Sometimes I miss the attestation head vote, resulting in a penalty. Is this normal?
+
+In general it is unavoidable to have some penalties occasionally. This is particularly the case when you are assigned to attest on the first slot of an epoch: if the proposer of that slot releases the block late, you will get penalised for missing the target and head votes. Your attestation performance does not only depend on your own setup, but also on everyone else's performance.
+
+### My beacon node is stuck at downloading historical blocks using checkpoint sync. What can I do?
+
+Check the number of peers you are connected to. If you have low peers (less than 50), try to do port forwarding on port 9000 TCP/UDP to increase the peer count.
@ -23,21 +23,24 @@ For details, see [Portability](#portability).
 ## Usage

 Each binary is contained in a `.tar.gz` archive. For this example, let's assume the user needs
-a portable `x86_64` binary.
+an `x86_64` binary.

 ### Steps

 1. Go to the [Releases](https://github.com/sigp/lighthouse/releases) page and
    select the latest release.
-1. Download the `lighthouse-${VERSION}-x86_64-unknown-linux-gnu-portable.tar.gz` binary.
-1. Extract the archive:
-   1. `cd Downloads`
-   1. `tar -xvf lighthouse-${VERSION}-x86_64-unknown-linux-gnu.tar.gz`
+1. Download the `lighthouse-${VERSION}-x86_64-unknown-linux-gnu.tar.gz` binary. For example, to obtain the binary file for v4.0.1 (the latest version at the time of writing), a user can run the following commands in a Linux terminal:
+   ```bash
+   cd ~
+   curl -LO https://github.com/sigp/lighthouse/releases/download/v4.0.1/lighthouse-v4.0.1-x86_64-unknown-linux-gnu.tar.gz
+   tar -xvf lighthouse-v4.0.1-x86_64-unknown-linux-gnu.tar.gz
+   ```
 1. Test the binary with `./lighthouse --version` (it should print the version).
-1. (Optional) Move the `lighthouse` binary to a location in your `PATH`, so the `lighthouse` command can be called from anywhere.
-   - E.g., `cp lighthouse /usr/bin`
+1. (Optional) Move the `lighthouse` binary to a location in your `PATH`, so the `lighthouse` command can be called from anywhere. For example, to copy `lighthouse` from the current directory to `/usr/bin`, run `sudo cp lighthouse /usr/bin`.

-> Windows users will need to execute the commands in Step 3 from PowerShell.
+> Windows users will need to execute the commands in Step 2 from PowerShell.

 ## Portability

@ -64,4 +67,4 @@ WARN CPU seems incompatible with optimized Lighthouse build, advice: If you get

 On some VPS providers, the virtualization can make it appear as if CPU features are not available,
 even when they are. In this case you might see the warning above, but so long as the client
-continues to function it's nothing to worry about.
+continues to function, it's nothing to worry about.
@ -5,8 +5,20 @@ the instructions below, and then proceed to [Building Lighthouse](#build-lightho
 ## Dependencies

-First, **install Rust** using [rustup](https://rustup.rs/). The rustup installer provides an easy way
-to update the Rust compiler, and works on all platforms.
+First, **install Rust** using [rustup](https://rustup.rs/):
+
+```bash
+curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
+```
+
+The rustup installer provides an easy way to update the Rust compiler, and works on all platforms.
+
+> Tips:
+>
+> - During installation, when prompted, enter `1` for the default installation.
+> - After Rust installation completes, try running `cargo version`. If it cannot
+>   be found, run `source $HOME/.cargo/env`. After that, running `cargo version` should return the version, for example `cargo 1.68.2`.
+> - It's generally advisable to append `source $HOME/.cargo/env` to `~/.bashrc`.

 With Rust installed, follow the instructions below to install dependencies relevant to your
 operating system.
@ -19,10 +31,17 @@ Install the following packages:
 sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler
 ```

+> Tips:
+>
+> - If there are difficulties, try updating the package manager with `sudo apt
+>   update`.
+
 > Note: Lighthouse requires CMake v3.12 or newer, which isn't available in the package repositories
 > of Ubuntu 18.04 or earlier. On these distributions CMake can still be installed via PPA:
 > [https://apt.kitware.com/](https://apt.kitware.com)

+After this, you are ready to [build Lighthouse](#build-lighthouse).
+
 #### macOS

 1. Install the [Homebrew][] package manager.
@ -39,10 +58,19 @@ brew install protobuf

 [Homebrew]: https://brew.sh/

+After this, you are ready to [build Lighthouse](#build-lighthouse).
+
 #### Windows

-1. Install [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git).
+1. Install [Git](https://git-scm.com/download/win).
 1. Install the [Chocolatey](https://chocolatey.org/install) package manager for Windows.
+   > Tips:
+   > - Use PowerShell to install. In Windows, search for PowerShell and run as administrator.
+   > - You must ensure `Get-ExecutionPolicy` is not Restricted. To test this, run `Get-ExecutionPolicy` in PowerShell. If it returns `restricted`, then run `Set-ExecutionPolicy AllSigned`, and then run
+   ```powershell
+   Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
+   ```
+   > - To verify that Chocolatey is ready, run `choco` and it should return the version.
 1. Install Make, CMake, LLVM and protoc using Chocolatey:

 ```
@ -67,6 +95,8 @@ should follow the instructions for Ubuntu listed in the [Dependencies (Ubuntu)](

 [WSL]: https://docs.microsoft.com/en-us/windows/wsl/about

+After this, you are ready to [build Lighthouse](#build-lighthouse).
+
 ## Build Lighthouse

 Once you have Rust and the build dependencies you're ready to build Lighthouse:
@ -136,7 +166,7 @@ Commonly used features include:
 * `spec-minimal`: support for the minimal preset (useful for testing).

 Default features (e.g. `slasher-mdbx`) may be opted out of using the `--no-default-features`
-argument for `cargo`, which can plumbed in via the `CARGO_INSTALL_EXTRA_FLAGS` environment variable.
+argument for `cargo`, which can be plumbed in via the `CARGO_INSTALL_EXTRA_FLAGS` environment variable.
 E.g.

 ```
@ -171,12 +201,11 @@ PROFILE=maxperf make
 Lighthouse will be installed to `CARGO_HOME` or `$HOME/.cargo`. This directory
 needs to be on your `PATH` before you can run `$ lighthouse`.

-See ["Configuring the `PATH` environment variable"
-(rust-lang.org)](https://www.rust-lang.org/tools/install) for more information.
+See ["Configuring the `PATH` environment variable"](https://www.rust-lang.org/tools/install) for more information.

 ### Compilation error

-Make sure you are running the latest version of Rust. If you have installed Rust using rustup, simply type `rustup update`.
+Make sure you are running the latest version of Rust. If you have installed Rust using rustup, simply run `rustup update`.

 If you can't install the latest version of Rust you can instead compile using the Minimum Supported
 Rust Version (MSRV) which is listed under the `rust-version` key in Lighthouse's
@ -185,7 +214,7 @@ Rust Version (MSRV) which is listed under the `rust-version` key in Lighthouse's
 If compilation fails with `(signal: 9, SIGKILL: kill)`, this could mean your machine ran out of
 memory during compilation. If you are on a resource-constrained device you can
 look into [cross compilation](./cross-compiling.md), or use a [pre-built
-binary](./installation-binaries.md).
+binary](https://github.com/sigp/lighthouse/releases).

 If compilation fails with `error: linking with cc failed: exit code: 1`, try running `cargo clean`.
@ -8,24 +8,27 @@ There are three core methods to obtain the Lighthouse application:
 - [Docker images](./docker.md).
 - [Building from source](./installation-source.md).

-Community-maintained additional installation methods:
-
-- [Homebrew package](./homebrew.md).
-- Arch Linux AUR packages: [source](https://aur.archlinux.org/packages/lighthouse-ethereum),
-  [binary](https://aur.archlinux.org/packages/lighthouse-ethereum-bin).
-
 Additionally, there are two extra guides for specific uses:

 - [Raspberry Pi 4 guide](./pi.md).
 - [Cross-compiling guide for developers](./cross-compiling.md).

-## Minimum System Requirements
-
-* Dual-core CPU, 2015 or newer
-* 8 GB RAM
-* 128 GB solid state storage
-* 10 Mb/s download, 5 Mb/s upload broadband connection
-
-For more information see [System Requirements](./system-requirements.md).
-
-[WSL]: https://docs.microsoft.com/en-us/windows/wsl/about
+There are also community-maintained installation methods:
+
+- [Homebrew package](./homebrew.md).
+- Arch Linux AUR packages: [source](https://aur.archlinux.org/packages/lighthouse-ethereum),
+  [binary](https://aur.archlinux.org/packages/lighthouse-ethereum-bin).
+
+## Recommended System Requirements
+
+Before [The Merge](https://ethereum.org/en/roadmap/merge/), Lighthouse was able to run on its own with low to mid-range consumer hardware, but would perform best when provided with ample system resources.
+
+After [The Merge](https://ethereum.org/en/roadmap/merge/) on 15<sup>th</sup> September 2022, it is necessary to run Lighthouse together with an execution client ([Nethermind](https://nethermind.io/), [Besu](https://www.hyperledger.org/use/besu), [Erigon](https://github.com/ledgerwatch/erigon), [Geth](https://geth.ethereum.org/)). The following system requirements are therefore for running a Lighthouse beacon node combined with an execution client, and a validator client with a modest number of validator keys (less than 100):
+
+* CPU: Quad-core AMD Ryzen, Intel Broadwell, ARMv8 or newer
+* Memory: 16 GB RAM or more
+* Storage: 2 TB solid state storage
+* Network: 100 Mb/s download, 20 Mb/s upload broadband connection
@ -14,6 +14,15 @@ There are three flags which control the re-orging behaviour:
 * `--proposer-reorg-threshold N`: attempt to orphan blocks with less than N% of the committee vote. If this parameter isn't set then N defaults to 20% when the feature is enabled.
 * `--proposer-reorg-epochs-since-finalization N`: only attempt to re-org late blocks when the number of epochs since finalization is less than or equal to N. The default is 2 epochs,
   meaning re-orgs will only be attempted when the chain is finalizing optimally.
+* `--proposer-reorg-cutoff T`: only attempt to re-org late blocks when the proposal is being made
+  before T milliseconds into the slot. Delays between the validator client and the beacon node can
+  cause some blocks to be requested later than the start of the slot, which makes them more likely
+  to fail. The default cutoff is 1000ms on mainnet, which gives blocks 3000ms to be signed and
+  propagated before the attestation deadline at 4000ms.
+* `--proposer-reorg-disallowed-offsets N1,N2,N3...`: prohibit Lighthouse from attempting to re-org at
+  specific offsets in each epoch. A disallowed offset `N` prevents reorging blocks from being
+  proposed at any `slot` such that `slot % SLOTS_PER_EPOCH == N`. The value of this flag is a
+  comma-separated list of integer offsets; see the sketch below for a worked example.

 All flags should be applied to `lighthouse bn`. The default configuration is recommended as it
 balances the chance of the re-org succeeding against the chance of failure due to attestations
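As a worked example of the offset rule (a sketch assuming mainnet's 32 slots per epoch; `reorg_blocked` is a hypothetical helper for illustration, not Lighthouse code):

```rust
const SLOTS_PER_EPOCH: u64 = 32; // mainnet value

/// A slot is protected from re-org proposals if its in-epoch offset is disallowed.
fn reorg_blocked(slot: u64, disallowed_offsets: &[u64]) -> bool {
    disallowed_offsets.contains(&(slot % SLOTS_PER_EPOCH))
}

fn main() {
    // With the default (offset 0), the first slot of every epoch is protected.
    assert!(reorg_blocked(64, &[0]));
    // `--proposer-reorg-disallowed-offsets 0,1` also protects the second slot.
    assert!(reorg_blocked(65, &[0, 1]));
    assert!(!reorg_blocked(66, &[0, 1]));
}
```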
@ -12,18 +12,18 @@ desktop) may be convenient.*

 ### 1. Install Ubuntu

-Follow the [Ubuntu Raspberry Pi installation instructions](https://ubuntu.com/download/raspberry-pi).
-
-**A 64-bit version is required** and latest version is recommended (Ubuntu
-20.04 LTS was the latest at the time of writing).
+Follow the [Ubuntu Raspberry Pi installation instructions](https://ubuntu.com/download/raspberry-pi). **A 64-bit version is required.**

 A graphical environment is not required in order to use Lighthouse. Only the
 terminal and an Internet connection are necessary.

 ### 2. Install Packages

-Install the [Ubuntu Dependencies](installation-source.md#ubuntu).
-(I.e., run the `sudo apt install ...` command at that link).
+Install the Ubuntu dependencies:
+
+```bash
+sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler
+```

 > Tips:
 >
@ -32,15 +32,18 @@ Install the [Ubuntu Dependencies](installation-source.md#ubuntu).

 ### 3. Install Rust

-Install Rust as per [rustup](https://rustup.rs/). (I.e., run the `curl ... `
-command).
+Install Rust as per [rustup](https://rustup.rs/):
+
+```bash
+curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
+```

 > Tips:
 >
-> - When prompted, enter `1` for the default installation.
+> - During installation, when prompted, enter `1` for the default installation.
-> - Try running `cargo version` after Rust installation completes. If it cannot
+> - After Rust installation completes, try running `cargo version`. If it cannot
-> be found, run `source $HOME/.cargo/env`.
+> be found, run `source $HOME/.cargo/env`. After that, running `cargo version` should return the version, for example `cargo 1.68.2`.
-> - It's generally advised to append `source $HOME/.cargo/env` to `~/.bashrc`.
+> - It's generally advisable to append `source $HOME/.cargo/env` to `~/.bashrc`.

 ### 4. Install Lighthouse

@ -1,23 +0,0 @@
-# System Requirements
-
-Lighthouse is able to run on most low to mid-range consumer hardware, but will perform best when
-provided with ample system resources. The following system requirements are for running a beacon
-node and a validator client with a modest number of validator keys (less than 100).
-
-## Minimum
-
-* Dual-core CPU, 2015 or newer
-* 8 GB RAM
-* 128 GB solid state storage
-* 10 Mb/s download, 5 Mb/s upload broadband connection
-
-During smooth network conditions, Lighthouse's database will fit within 15 GB, but in case of a long
-period of non-finality, it is **strongly recommended** that at least 128 GB is available.
-
-## Recommended
-
-* Quad-core AMD Ryzen, Intel Broadwell, ARMv8 or newer
-* 16 GB RAM
-* 256 GB solid state storage
-* 100 Mb/s download, 20 Mb/s upload broadband connection
@ -1,6 +1,6 @@
 [package]
 name = "boot_node"
-version = "4.0.1"
+version = "4.1.0"
 authors = ["Sigma Prime <contact@sigmaprime.io>"]
 edition = "2021"

@ -1308,23 +1308,6 @@ impl BeaconNodeHttpClient {
         self.get_opt(path).await
     }

-    /// `GET v1/debug/beacon/states/{state_id}` (LEGACY)
-    pub async fn get_debug_beacon_states_v1<T: EthSpec>(
-        &self,
-        state_id: StateId,
-    ) -> Result<Option<ExecutionOptimisticForkVersionedResponse<BeaconState<T>>>, Error> {
-        let mut path = self.eth_path(V1)?;
-
-        path.path_segments_mut()
-            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
-            .push("debug")
-            .push("beacon")
-            .push("states")
-            .push(&state_id.to_string());
-
-        self.get_opt(path).await
-    }
-
     /// `GET debug/beacon/states/{state_id}`
     /// `-H "accept: application/octet-stream"`
     pub async fn get_debug_beacon_states_ssz<T: EthSpec>(
@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!(
     // NOTE: using --match instead of --exclude for compatibility with old Git
     "--match=thiswillnevermatchlol"
     ],
-    prefix = "Lighthouse/v4.0.1-",
-    fallback = "Lighthouse/v4.0.1"
+    prefix = "Lighthouse/v4.1.0-",
+    fallback = "Lighthouse/v4.1.0"
 );

 /// Returns `VERSION`, but with platform information appended to the end.
@ -1,7 +1,7 @@
 use crate::{ForkChoiceStore, InvalidationOperation};
 use proto_array::{
-    Block as ProtoBlock, ExecutionStatus, ProposerHeadError, ProposerHeadInfo,
-    ProtoArrayForkChoice, ReOrgThreshold,
+    Block as ProtoBlock, DisallowedReOrgOffsets, ExecutionStatus, ProposerHeadError,
+    ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold,
 };
 use slog::{crit, debug, warn, Logger};
 use ssz_derive::{Decode, Encode};
@ -533,6 +533,7 @@ where
         current_slot: Slot,
         canonical_head: Hash256,
         re_org_threshold: ReOrgThreshold,
+        disallowed_offsets: &DisallowedReOrgOffsets,
         max_epochs_since_finalization: Epoch,
     ) -> Result<ProposerHeadInfo, ProposerHeadError<Error<proto_array::Error>>> {
         // Ensure that fork choice has already been updated for the current slot. This prevents
@ -564,6 +565,7 @@ where
             canonical_head,
             self.fc_store.justified_balances(),
             re_org_threshold,
+            disallowed_offsets,
             max_epochs_since_finalization,
         )
         .map_err(ProposerHeadError::convert_inner_error)
@ -573,6 +575,7 @@ where
         &self,
         canonical_head: Hash256,
         re_org_threshold: ReOrgThreshold,
+        disallowed_offsets: &DisallowedReOrgOffsets,
         max_epochs_since_finalization: Epoch,
     ) -> Result<ProposerHeadInfo, ProposerHeadError<Error<proto_array::Error>>> {
         let current_slot = self.fc_store.get_current_slot();
@ -582,6 +585,7 @@ where
             canonical_head,
             self.fc_store.justified_balances(),
             re_org_threshold,
+            disallowed_offsets,
             max_epochs_since_finalization,
         )
         .map_err(ProposerHeadError::convert_inner_error)
@ -50,6 +50,7 @@ pub enum Error {
         block_root: Hash256,
         parent_root: Hash256,
     },
+    InvalidEpochOffset(u64),
     Arith(ArithError),
 }

@ -8,8 +8,8 @@ mod ssz_container;
 pub use crate::justified_balances::JustifiedBalances;
 pub use crate::proto_array::{calculate_committee_fraction, InvalidationOperation};
 pub use crate::proto_array_fork_choice::{
-    Block, DoNotReOrg, ExecutionStatus, ProposerHeadError, ProposerHeadInfo, ProtoArrayForkChoice,
-    ReOrgThreshold,
+    Block, DisallowedReOrgOffsets, DoNotReOrg, ExecutionStatus, ProposerHeadError,
+    ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold,
 };
 pub use error::Error;

@ -250,6 +250,9 @@ pub enum DoNotReOrg {
     ParentDistance,
     HeadDistance,
     ShufflingUnstable,
+    DisallowedOffset {
+        offset: u64,
+    },
     JustificationAndFinalizationNotCompetitive,
     ChainNotFinalizing {
         epochs_since_finalization: u64,
@ -271,6 +274,9 @@ impl std::fmt::Display for DoNotReOrg {
             Self::ParentDistance => write!(f, "parent too far from head"),
             Self::HeadDistance => write!(f, "head too far from current slot"),
             Self::ShufflingUnstable => write!(f, "shuffling unstable at epoch boundary"),
+            Self::DisallowedOffset { offset } => {
+                write!(f, "re-orgs disabled at offset {offset}")
+            }
             Self::JustificationAndFinalizationNotCompetitive => {
                 write!(f, "justification or finalization not competitive")
             }
@ -304,6 +310,31 @@ impl std::fmt::Display for DoNotReOrg {
 #[serde(transparent)]
 pub struct ReOrgThreshold(pub u64);

+/// New-type for disallowed re-org slots.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct DisallowedReOrgOffsets {
+    // Vecs are faster than hashmaps for small numbers of items.
+    offsets: Vec<u64>,
+}
+
+impl Default for DisallowedReOrgOffsets {
+    fn default() -> Self {
+        DisallowedReOrgOffsets { offsets: vec![0] }
+    }
+}
+
+impl DisallowedReOrgOffsets {
+    pub fn new<E: EthSpec>(offsets: Vec<u64>) -> Result<Self, Error> {
+        for &offset in &offsets {
+            if offset >= E::slots_per_epoch() {
+                return Err(Error::InvalidEpochOffset(offset));
+            }
+        }
+        Ok(Self { offsets })
+    }
+}
+
 #[derive(PartialEq)]
 pub struct ProtoArrayForkChoice {
     pub(crate) proto_array: ProtoArray,
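A short usage sketch for the new-type above (assuming the `proto_array` and `types` crate paths used elsewhere in this codebase, and mainnet's 32 slots per epoch; the exact call site is illustrative):

```rust
use proto_array::DisallowedReOrgOffsets;
use types::MainnetEthSpec;

fn main() {
    // In-epoch offsets are accepted by the validating constructor...
    let _offsets = DisallowedReOrgOffsets::new::<MainnetEthSpec>(vec![0, 1]).unwrap();
    // ...while out-of-range offsets hit the new `InvalidEpochOffset` error.
    assert!(DisallowedReOrgOffsets::new::<MainnetEthSpec>(vec![32]).is_err());
}
```

Validating at construction means the fork-choice hot path can simply call `contains` on a small `Vec` without re-checking bounds.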
@ -460,6 +491,7 @@ impl ProtoArrayForkChoice {
         canonical_head: Hash256,
         justified_balances: &JustifiedBalances,
         re_org_threshold: ReOrgThreshold,
+        disallowed_offsets: &DisallowedReOrgOffsets,
         max_epochs_since_finalization: Epoch,
     ) -> Result<ProposerHeadInfo, ProposerHeadError<Error>> {
         let info = self.get_proposer_head_info::<E>(
@ -467,6 +499,7 @@ impl ProtoArrayForkChoice {
             canonical_head,
             justified_balances,
             re_org_threshold,
+            disallowed_offsets,
             max_epochs_since_finalization,
         )?;

@ -501,6 +534,7 @@ impl ProtoArrayForkChoice {
         canonical_head: Hash256,
         justified_balances: &JustifiedBalances,
         re_org_threshold: ReOrgThreshold,
+        disallowed_offsets: &DisallowedReOrgOffsets,
         max_epochs_since_finalization: Epoch,
     ) -> Result<ProposerHeadInfo, ProposerHeadError<Error>> {
         let mut nodes = self
@ -545,6 +579,12 @@ impl ProtoArrayForkChoice {
             return Err(DoNotReOrg::ShufflingUnstable.into());
         }

+        // Check allowed slot offsets.
+        let offset = (re_org_block_slot % E::slots_per_epoch()).as_u64();
+        if disallowed_offsets.offsets.contains(&offset) {
+            return Err(DoNotReOrg::DisallowedOffset { offset }.into());
+        }
+
         // Check FFG.
         let ffg_competitive = parent_node.unrealized_justified_checkpoint
             == head_node.unrealized_justified_checkpoint
@ -41,4 +41,4 @@ pub use per_epoch_processing::{
     errors::EpochProcessingError, process_epoch as per_epoch_processing,
 };
 pub use per_slot_processing::{per_slot_processing, Error as SlotProcessingError};
-pub use verify_operation::{SigVerifiedOp, VerifyOperation};
+pub use verify_operation::{SigVerifiedOp, VerifyOperation, VerifyOperationAt};
@ -283,7 +283,8 @@ pub fn process_exits<T: EthSpec>(
     // Verify and apply each exit in series. We iterate in series because higher-index exits may
     // become invalid due to the application of lower-index ones.
     for (i, exit) in voluntary_exits.iter().enumerate() {
-        verify_exit(state, exit, verify_signatures, spec).map_err(|e| e.into_with_index(i))?;
+        verify_exit(state, None, exit, verify_signatures, spec)
+            .map_err(|e| e.into_with_index(i))?;

         initiate_validator_exit(state, exit.message.validator_index as usize, spec)?;
     }
@ -978,8 +978,14 @@ async fn fork_spanning_exit() {
     let head = harness.chain.canonical_head.cached_head();
     let head_state = &head.snapshot.beacon_state;
     assert!(head_state.current_epoch() < spec.altair_fork_epoch.unwrap());
-    verify_exit(head_state, &signed_exit, VerifySignatures::True, &spec)
-        .expect("phase0 exit verifies against phase0 state");
+    verify_exit(
+        head_state,
+        None,
+        &signed_exit,
+        VerifySignatures::True,
+        &spec,
+    )
+    .expect("phase0 exit verifies against phase0 state");

     /*
      * Ensure the exit verifies after Altair.
@ -992,8 +998,14 @@ async fn fork_spanning_exit() {
     let head_state = &head.snapshot.beacon_state;
     assert!(head_state.current_epoch() >= spec.altair_fork_epoch.unwrap());
     assert!(head_state.current_epoch() < spec.bellatrix_fork_epoch.unwrap());
-    verify_exit(head_state, &signed_exit, VerifySignatures::True, &spec)
-        .expect("phase0 exit verifies against altair state");
+    verify_exit(
+        head_state,
+        None,
+        &signed_exit,
+        VerifySignatures::True,
+        &spec,
+    )
+    .expect("phase0 exit verifies against altair state");

     /*
      * Ensure the exit no longer verifies after Bellatrix.
@ -1009,6 +1021,12 @@ async fn fork_spanning_exit() {
     let head = harness.chain.canonical_head.cached_head();
     let head_state = &head.snapshot.beacon_state;
     assert!(head_state.current_epoch() >= spec.bellatrix_fork_epoch.unwrap());
-    verify_exit(head_state, &signed_exit, VerifySignatures::True, &spec)
-        .expect_err("phase0 exit does not verify against bellatrix state");
+    verify_exit(
+        head_state,
+        None,
+        &signed_exit,
+        VerifySignatures::True,
+        &spec,
+    )
+    .expect_err("phase0 exit does not verify against bellatrix state");
 }
@@ -20,10 +20,12 @@ fn error(reason: ExitInvalid) -> BlockOperationError<ExitInvalid> {
 /// Spec v0.12.1
 pub fn verify_exit<T: EthSpec>(
     state: &BeaconState<T>,
+    current_epoch: Option<Epoch>,
     signed_exit: &SignedVoluntaryExit,
     verify_signatures: VerifySignatures,
     spec: &ChainSpec,
 ) -> Result<()> {
+    let current_epoch = current_epoch.unwrap_or(state.current_epoch());
     let exit = &signed_exit.message;

     let validator = state
@@ -33,7 +35,7 @@ pub fn verify_exit<T: EthSpec>(

     // Verify the validator is active.
     verify!(
-        validator.is_active_at(state.current_epoch()),
+        validator.is_active_at(current_epoch),
         ExitInvalid::NotActive(exit.validator_index)
     );

@@ -45,9 +47,9 @@ pub fn verify_exit<T: EthSpec>(

     // Exits must specify an epoch when they become valid; they are not valid before then.
     verify!(
-        state.current_epoch() >= exit.epoch,
+        current_epoch >= exit.epoch,
         ExitInvalid::FutureEpoch {
-            state: state.current_epoch(),
+            state: current_epoch,
             exit: exit.epoch
         }
     );
@@ -57,9 +59,9 @@ pub fn verify_exit<T: EthSpec>(
         .activation_epoch
         .safe_add(spec.shard_committee_period)?;
     verify!(
-        state.current_epoch() >= earliest_exit_epoch,
+        current_epoch >= earliest_exit_epoch,
         ExitInvalid::TooYoungToExit {
-            current_epoch: state.current_epoch(),
+            current_epoch,
             earliest_exit_epoch,
         }
     );
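Threaded through all four hunks is one change: `verify_exit` now takes an explicit `current_epoch: Option<Epoch>` and falls back to the state's own epoch when it is `None`, so existing callers keep the old behaviour while new callers can pin verification to a chosen epoch. A minimal sketch of the defaulting pattern, with simplified stand-in types rather than the real ones:

    type Epoch = u64;

    struct BeaconState {
        current_epoch: Epoch,
    }

    // Simplified stand-in for verify_exit's epoch handling:
    // `None` means "use the state's own epoch".
    fn effective_epoch(state: &BeaconState, current_epoch: Option<Epoch>) -> Epoch {
        current_epoch.unwrap_or(state.current_epoch)
    }

    fn main() {
        let state = BeaconState { current_epoch: 100 };
        assert_eq!(effective_epoch(&state, None), 100); // old behaviour preserved
        assert_eq!(effective_epoch(&state, Some(105)), 105); // verify as of a later epoch
    }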
@@ -134,7 +134,7 @@ impl<E: EthSpec> VerifyOperation<E> for SignedVoluntaryExit {
         state: &BeaconState<E>,
         spec: &ChainSpec,
     ) -> Result<SigVerifiedOp<Self, E>, Self::Error> {
-        verify_exit(state, &self, VerifySignatures::True, spec)?;
+        verify_exit(state, None, &self, VerifySignatures::True, spec)?;
         Ok(SigVerifiedOp::new(self, state))
     }

@@ -205,3 +205,35 @@ impl<E: EthSpec> VerifyOperation<E> for SignedBlsToExecutionChange {
         smallvec![]
     }
 }
+
+/// Trait for operations that can be verified and transformed into a
+/// `SigVerifiedOp`.
+///
+/// The `At` suffix indicates that we can specify a particular epoch at which to
+/// verify the operation.
+pub trait VerifyOperationAt<E: EthSpec>: VerifyOperation<E> + Sized {
+    fn validate_at(
+        self,
+        state: &BeaconState<E>,
+        validate_at_epoch: Epoch,
+        spec: &ChainSpec,
+    ) -> Result<SigVerifiedOp<Self, E>, Self::Error>;
+}
+
+impl<E: EthSpec> VerifyOperationAt<E> for SignedVoluntaryExit {
+    fn validate_at(
+        self,
+        state: &BeaconState<E>,
+        validate_at_epoch: Epoch,
+        spec: &ChainSpec,
+    ) -> Result<SigVerifiedOp<Self, E>, Self::Error> {
+        verify_exit(
+            state,
+            Some(validate_at_epoch),
+            &self,
+            VerifySignatures::True,
+            spec,
+        )?;
+        Ok(SigVerifiedOp::new(self, state))
+    }
+}
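A plausible call site for the new trait, sketched under assumptions: `validate_at` lets a caller pin verification to a chosen epoch, for instance one epoch ahead of the state, so that a pooled exit is only kept if it will still be valid then. The helper below is hypothetical, not code from this commit:

    use state_processing::{SigVerifiedOp, VerifyOperationAt};
    use types::{BeaconState, ChainSpec, EthSpec, SignedVoluntaryExit};

    // Hypothetical helper: keep an exit only if it still verifies one epoch
    // ahead of the given state (the exact epoch choice is illustrative).
    fn validate_for_inclusion<E: EthSpec>(
        exit: SignedVoluntaryExit,
        state: &BeaconState<E>,
        spec: &ChainSpec,
    ) -> Option<SigVerifiedOp<SignedVoluntaryExit, E>> {
        exit.validate_at(state, state.current_epoch() + 1, spec).ok()
    }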
@@ -403,7 +403,7 @@ impl ValidatorsListTreeHashCache {
                 validators.len(),
             ),
             list_arena,
-            values: ParallelValidatorTreeHash::new::<E>(validators),
+            values: ParallelValidatorTreeHash::new(validators),
         }
     }

@@ -468,7 +468,7 @@ impl ParallelValidatorTreeHash {
     ///
     /// Allocates the necessary memory to store all of the cached Merkle trees but does not perform
     /// any hashing.
-    fn new<E: EthSpec>(validators: &[Validator]) -> Self {
+    fn new(validators: &[Validator]) -> Self {
         let num_arenas = std::cmp::max(
             1,
             (validators.len() + VALIDATORS_PER_ARENA - 1) / VALIDATORS_PER_ARENA,
@@ -45,43 +45,6 @@ where
     }
 }

-#[derive(Debug, PartialEq, Clone, Serialize)]
-pub struct ExecutionOptimisticForkVersionedResponse<T> {
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub version: Option<ForkName>,
-    pub execution_optimistic: Option<bool>,
-    pub data: T,
-}
-
-impl<'de, F> serde::Deserialize<'de> for ExecutionOptimisticForkVersionedResponse<F>
-where
-    F: ForkVersionDeserialize,
-{
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        #[derive(Deserialize)]
-        struct Helper {
-            version: Option<ForkName>,
-            execution_optimistic: Option<bool>,
-            data: serde_json::Value,
-        }
-
-        let helper = Helper::deserialize(deserializer)?;
-        let data = match helper.version {
-            Some(fork_name) => F::deserialize_by_fork::<'de, D>(helper.data, fork_name)?,
-            None => serde_json::from_value(helper.data).map_err(serde::de::Error::custom)?,
-        };
-
-        Ok(ExecutionOptimisticForkVersionedResponse {
-            version: helper.version,
-            execution_optimistic: helper.execution_optimistic,
-            data,
-        })
-    }
-}
-
 pub trait ForkVersionDeserialize: Sized + DeserializeOwned {
     fn deserialize_by_fork<'de, D: Deserializer<'de>>(
         value: Value,
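The struct removed above implemented the common two-stage fork-versioned deserialization pattern: decode into a helper whose payload is a raw `serde_json::Value`, then pick the concrete type from the `version` tag. A self-contained sketch of the same pattern with toy types (only `serde`/`serde_json` required; the field names mirror the removed struct, the payload types are invented for illustration):

    use serde::Deserialize;
    use serde_json::Value;

    #[derive(Debug, Deserialize, Clone, Copy)]
    #[serde(rename_all = "lowercase")]
    enum ForkName {
        Base,
        Altair,
    }

    #[derive(Debug, Deserialize)]
    struct Helper {
        version: Option<ForkName>,
        data: Value,
    }

    #[derive(Debug, Deserialize)]
    struct BasePayload {
        slot: u64,
    }

    #[derive(Debug, Deserialize)]
    struct AltairPayload {
        slot: u64,
        sync_committee_bits: String,
    }

    #[derive(Debug)]
    enum Payload {
        Base(BasePayload),
        Altair(AltairPayload),
    }

    fn deserialize_by_fork(json: &str) -> Result<Payload, serde_json::Error> {
        let helper: Helper = serde_json::from_str(json)?;
        // Dispatch on the version tag; a missing tag falls back to the base type.
        Ok(match helper.version {
            Some(ForkName::Altair) => Payload::Altair(serde_json::from_value(helper.data)?),
            _ => Payload::Base(serde_json::from_value(helper.data)?),
        })
    }

    fn main() {
        let json = r#"{"version":"altair","data":{"slot":7,"sync_committee_bits":"0xff"}}"#;
        println!("{:?}", deserialize_by_fork(json).unwrap());
    }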
@@ -148,9 +148,7 @@ pub use crate::fork::Fork;
 pub use crate::fork_context::ForkContext;
 pub use crate::fork_data::ForkData;
 pub use crate::fork_name::{ForkName, InconsistentFork};
-pub use crate::fork_versioned_response::{
-    ExecutionOptimisticForkVersionedResponse, ForkVersionDeserialize, ForkVersionedResponse,
-};
+pub use crate::fork_versioned_response::{ForkVersionDeserialize, ForkVersionedResponse};
 pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN};
 pub use crate::historical_batch::HistoricalBatch;
 pub use crate::indexed_attestation::IndexedAttestation;
@@ -1,7 +1,7 @@
 [package]
 name = "lcli"
 description = "Lighthouse CLI (modeled after zcli)"
-version = "4.0.1"
+version = "4.1.0"
 authors = ["Paul Hauner <paul@paulhauner.com>"]
 edition = "2021"

@@ -1,7 +1,7 @@
 # `lcli` requires the full project to be in scope, so this should be built either:
 # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerfile .`
 # - from the current directory with the command: `docker build -f ./Dockerfile ../`
-FROM rust:1.66.0-bullseye AS builder
+FROM rust:1.68.2-bullseye AS builder
 RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake clang libclang-dev protobuf-compiler
 COPY . lighthouse
 ARG PORTABLE
@@ -2,9 +2,8 @@ use clap::ArgMatches;
 use clap_utils::{parse_required, parse_ssz_required};
 use deposit_contract::{decode_eth1_tx_data, DEPOSIT_DATA_LEN};
 use tree_hash::TreeHash;
-use types::EthSpec;

-pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> {
+pub fn run(matches: &ArgMatches) -> Result<(), String> {
     let rlp_bytes = parse_ssz_required::<Vec<u8>>(matches, "deposit-data")?;
     let amount = parse_required(matches, "deposit-amount")?;

@@ -874,7 +874,7 @@ fn run<T: EthSpec>(
         }
         ("new-testnet", Some(matches)) => new_testnet::run::<T>(testnet_dir, matches)
            .map_err(|e| format!("Failed to run new_testnet command: {}", e)),
-        ("check-deposit-data", Some(matches)) => check_deposit_data::run::<T>(matches)
+        ("check-deposit-data", Some(matches)) => check_deposit_data::run(matches)
            .map_err(|e| format!("Failed to run check-deposit-data command: {}", e)),
         ("generate-bootnode-enr", Some(matches)) => generate_bootnode_enr::run::<T>(matches)
            .map_err(|e| format!("Failed to run generate-bootnode-enr command: {}", e)),
@@ -1,10 +1,10 @@
 [package]
 name = "lighthouse"
-version = "4.0.1"
+version = "4.1.0"
 authors = ["Sigma Prime <contact@sigmaprime.io>"]
 edition = "2021"
 autotests = false
-rust-version = "1.66"
+rust-version = "1.68.2"

 [features]
 default = ["slasher-mdbx"]
@@ -2,6 +2,7 @@ use beacon_node::ClientConfig as Config;

 use crate::exec::{CommandLineTestExec, CompletedTest};
 use beacon_node::beacon_chain::chain_config::{
+    DisallowedReOrgOffsets, DEFAULT_RE_ORG_CUTOFF_DENOMINATOR,
     DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_THRESHOLD,
 };
 use eth1::Eth1Endpoint;
@@ -715,6 +716,40 @@ fn builder_fallback_flags() {
     );
 }

+#[test]
+fn builder_user_agent() {
+    run_payload_builder_flag_test_with_config(
+        "builder",
+        "http://meow.cats",
+        None,
+        None,
+        |config| {
+            assert_eq!(
+                config.execution_layer.as_ref().unwrap().builder_user_agent,
+                None
+            );
+        },
+    );
+    run_payload_builder_flag_test_with_config(
+        "builder",
+        "http://meow.cats",
+        Some("builder-user-agent"),
+        Some("anon"),
+        |config| {
+            assert_eq!(
+                config
+                    .execution_layer
+                    .as_ref()
+                    .unwrap()
+                    .builder_user_agent
+                    .as_ref()
+                    .unwrap(),
+                "anon"
+            );
+        },
+    );
+}
+
 fn run_jwt_optional_flags_test(jwt_flag: &str, jwt_id_flag: &str, jwt_version_flag: &str) {
     use sensitive_url::SensitiveUrl;

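The two cases above pin down the behaviour: with only `--builder <url>` the user agent stays `None`, and supplying the `builder-user-agent` flag stores the given string. A trivial sketch of that flag-to-config mapping, with a hypothetical parser standing in for the real CLI plumbing:

    // Hypothetical stand-in for the CLI layer: map an optional flag value into
    // the `Option<String>` config field checked by the test above.
    fn builder_user_agent(flag_value: Option<&str>) -> Option<String> {
        flag_value.map(str::to_owned)
    }

    fn main() {
        assert_eq!(builder_user_agent(None), None); // default: no custom user agent
        assert_eq!(builder_user_agent(Some("anon")), Some("anon".to_owned()));
    }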
@@ -1045,6 +1080,13 @@ fn disable_discovery_flag() {
         .with_config(|config| assert!(config.network.disable_discovery));
 }
 #[test]
+fn disable_peer_scoring_flag() {
+    CommandLineTest::new()
+        .flag("disable-peer-scoring", None)
+        .run_with_zero_port()
+        .with_config(|config| assert!(config.network.disable_peer_scoring));
+}
+#[test]
 fn disable_upnp_flag() {
     CommandLineTest::new()
         .flag("disable-upnp", None)
@@ -1920,6 +1962,10 @@ fn enable_proposer_re_orgs_default() {
             config.chain.re_org_max_epochs_since_finalization,
             DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION,
         );
+        assert_eq!(
+            config.chain.re_org_cutoff(12),
+            Duration::from_secs(12) / DEFAULT_RE_ORG_CUTOFF_DENOMINATOR
+        );
     });
 }

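The new default assertion is simple `Duration` arithmetic: the re-org cutoff is the slot duration divided by a fixed denominator. A standalone sketch, with an illustrative denominator of 12 (not necessarily the real value of `DEFAULT_RE_ORG_CUTOFF_DENOMINATOR`):

    use std::time::Duration;

    const RE_ORG_CUTOFF_DENOMINATOR: u32 = 12; // illustrative value for this sketch

    /// Cutoff into the slot after which a re-org attempt is abandoned.
    fn re_org_cutoff(slot_duration_secs: u64) -> Duration {
        Duration::from_secs(slot_duration_secs) / RE_ORG_CUTOFF_DENOMINATOR
    }

    fn main() {
        // With 12s slots and a denominator of 12, the cutoff is 1s into the slot.
        assert_eq!(re_org_cutoff(12), Duration::from_secs(1));
    }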
@@ -1952,6 +1998,49 @@ fn proposer_re_org_max_epochs_since_finalization() {
     });
 }

+#[test]
+fn proposer_re_org_cutoff() {
+    CommandLineTest::new()
+        .flag("proposer-reorg-cutoff", Some("500"))
+        .run_with_zero_port()
+        .with_config(|config| {
+            assert_eq!(config.chain.re_org_cutoff(12), Duration::from_millis(500))
+        });
+}
+
+#[test]
+fn proposer_re_org_disallowed_offsets_default() {
+    CommandLineTest::new()
+        .run_with_zero_port()
+        .with_config(|config| {
+            assert_eq!(
+                config.chain.re_org_disallowed_offsets,
+                DisallowedReOrgOffsets::new::<MainnetEthSpec>(vec![0]).unwrap()
+            )
+        });
+}
+
+#[test]
+fn proposer_re_org_disallowed_offsets_override() {
+    CommandLineTest::new()
+        .flag("--proposer-reorg-disallowed-offsets", Some("1,2,3"))
+        .run_with_zero_port()
+        .with_config(|config| {
+            assert_eq!(
+                config.chain.re_org_disallowed_offsets,
+                DisallowedReOrgOffsets::new::<MainnetEthSpec>(vec![1, 2, 3]).unwrap()
+            )
+        });
+}
+
+#[test]
+#[should_panic]
+fn proposer_re_org_disallowed_offsets_invalid() {
+    CommandLineTest::new()
+        .flag("--proposer-reorg-disallowed-offsets", Some("32,33,34"))
+        .run_with_zero_port();
+}
+
 #[test]
 fn monitoring_endpoint() {
     CommandLineTest::new()
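The `#[should_panic]` case follows from the domain of an offset: it is a position within an epoch, so with mainnet's 32 slots per epoch the valid values are 0..32 and `32,33,34` must be rejected when the flag is parsed. A standalone sketch of that validation (a hypothetical mirror of `DisallowedReOrgOffsets::new`, not the real constructor):

    const SLOTS_PER_EPOCH: u64 = 32; // mainnet

    /// Hypothetical mirror of the offsets constructor: reject anything outside the epoch.
    fn new_disallowed_offsets(offsets: Vec<u64>) -> Result<Vec<u64>, String> {
        for &offset in &offsets {
            if offset >= SLOTS_PER_EPOCH {
                return Err(format!("invalid disallowed re-org offset: {offset}"));
            }
        }
        Ok(offsets)
    }

    fn main() {
        assert!(new_disallowed_offsets(vec![1, 2, 3]).is_ok());
        assert!(new_disallowed_offsets(vec![32, 33, 34]).is_err()); // mirrors the should_panic test
    }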
@@ -1,5 +1,5 @@
-FROM rust:1.66.1-bullseye AS builder
-RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev clang protobuf-compiler
+FROM rust:1.68.2-bullseye AS builder
+RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake clang libclang-dev protobuf-compiler
 COPY . lighthouse

 # Build lighthouse directly with a cargo build command, bypassing the Makefile.
@@ -626,9 +626,10 @@ async fn check_payload_reconstruction<E: GenericExecutionEngine>(
     ee: &ExecutionPair<E, MainnetEthSpec>,
     payload: &ExecutionPayload<MainnetEthSpec>,
 ) {
+    // check via legacy eth_getBlockByHash
     let reconstructed = ee
         .execution_layer
-        .get_payload_by_block_hash(payload.block_hash(), payload.fork_name())
+        .get_payload_by_hash_legacy(payload.block_hash(), payload.fork_name())
         .await
         .unwrap()
         .unwrap();