Merge pull request #4719 from jimmygchen/deneb-merge-from-unstable-20230911

Deneb merge from unstable 20230911
realbigsean 2023-09-12 17:41:42 -04:00 committed by GitHub
commit 8e7b57a794
73 changed files with 2363 additions and 1000 deletions


@ -80,14 +80,6 @@ jobs:
- name: Get latest version of stable Rust
if: env.SELF_HOSTED_RUNNERS == false
run: rustup update stable
- name: Use Node.js
uses: actions/setup-node@v2
with:
node-version: '14'
- name: Install windows build tools
run: |
choco install python visualstudio2019-workload-vctools -y
npm config set msvs_version 2019
- name: Install Foundry (anvil)
uses: foundry-rs/foundry-toolchain@v1
with:

Cargo.lock (generated): 1241 changed lines; diff suppressed because it is too large.


@ -92,7 +92,8 @@ resolver = "2"
[patch]
[patch.crates-io]
warp = { git = "https://github.com/macladson/warp", rev="7e75acc368229a46a236a8c991bf251fe7fe50ef" }
# TODO: remove when 0.3.6 gets released.
warp = { git = "https://github.com/seanmonstar/warp.git", rev="149913fe" }
[profile.maxperf]
inherits = "release"


@ -214,9 +214,8 @@ arbitrary-fuzz:
# Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database)
audit:
# cargo install --force cargo-audit
cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2022-0093 \
--ignore RUSTSEC-2023-0052 --ignore RUSTSEC-2023-0053
cargo install --force cargo-audit
cargo audit --ignore RUSTSEC-2023-0052
# Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purposes.
vendor:


@ -1,7 +1,10 @@
[package]
name = "account_manager"
version = "0.3.5"
authors = ["Paul Hauner <paul@paulhauner.com>", "Luke Anderson <luke@sigmaprime.io>"]
authors = [
"Paul Hauner <paul@paulhauner.com>",
"Luke Anderson <luke@sigmaprime.io>",
]
edition = "2021"
[dependencies]
@ -19,13 +22,14 @@ tokio = { version = "1.14.0", features = ["full"] }
eth2_keystore = { path = "../crypto/eth2_keystore" }
account_utils = { path = "../common/account_utils" }
slashing_protection = { path = "../validator_client/slashing_protection" }
eth2 = {path = "../common/eth2"}
safe_arith = {path = "../consensus/safe_arith"}
eth2 = { path = "../common/eth2" }
safe_arith = { path = "../consensus/safe_arith" }
slot_clock = { path = "../common/slot_clock" }
filesystem = { path = "../common/filesystem" }
sensitive_url = { path = "../common/sensitive_url" }
serde = { version = "1.0.116", features = ["derive"] }
serde_json = "1.0.58"
slog = { version = "2.5.2" }
[dev-dependencies]
tempfile = "3.1.0"


@ -10,6 +10,7 @@ use eth2_keystore::Keystore;
use eth2_network_config::Eth2NetworkConfig;
use safe_arith::SafeArith;
use sensitive_url::SensitiveUrl;
use slog::Logger;
use slot_clock::{SlotClock, SystemTimeSlotClock};
use std::path::{Path, PathBuf};
use std::time::Duration;
@ -78,6 +79,12 @@ pub fn cli_run<E: EthSpec>(matches: &ArgMatches, env: Environment<E>) -> Result<
let password_file_path: Option<PathBuf> =
clap_utils::parse_optional(matches, PASSWORD_FILE_FLAG)?;
let genesis_state_url: Option<String> =
clap_utils::parse_optional(matches, "genesis-state-url")?;
let genesis_state_url_timeout =
clap_utils::parse_required(matches, "genesis-state-url-timeout")
.map(Duration::from_secs)?;
let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG);
let no_wait = matches.is_present(NO_WAIT);
let no_confirmation = matches.is_present(NO_CONFIRMATION);
@ -104,6 +111,9 @@ pub fn cli_run<E: EthSpec>(matches: &ArgMatches, env: Environment<E>) -> Result<
&eth2_network_config,
no_wait,
no_confirmation,
genesis_state_url,
genesis_state_url_timeout,
env.core_context().log(),
))?;
Ok(())
@ -120,13 +130,14 @@ async fn publish_voluntary_exit<E: EthSpec>(
eth2_network_config: &Eth2NetworkConfig,
no_wait: bool,
no_confirmation: bool,
genesis_state_url: Option<String>,
genesis_state_url_timeout: Duration,
log: &Logger,
) -> Result<(), String> {
let genesis_data = get_geneisis_data(client).await?;
let testnet_genesis_root = eth2_network_config
.beacon_state::<E>()
.as_ref()
.expect("network should have valid genesis state")
.genesis_validators_root();
.genesis_validators_root::<E>(genesis_state_url.as_deref(), genesis_state_url_timeout, log)?
.ok_or("Genesis state is unknown")?;
// Verify that the beacon node and validator being exited are on the same network.
if genesis_data.genesis_validators_root != testnet_genesis_root {
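The new `genesis_validators_root::<E>(url, timeout, log)` call resolves the genesis root from a locally bundled state or, failing that, a remote download. A conceptual stand-in with simplified types (these names are illustrative, not the real `eth2_network_config` API):

```rust
use std::time::Duration;

// Stand-in network config: a bundled genesis root may or may not ship with
// the binary (simplified; the real type holds a full SSZ BeaconState).
struct NetworkConfig {
    bundled_root: Option<[u8; 32]>,
}

// Stand-in for an HTTP download of the genesis state within a timeout.
fn fetch_root_from_url(_url: &str, _timeout: Duration) -> Result<[u8; 32], String> {
    Ok([0u8; 32])
}

impl NetworkConfig {
    // Prefer the bundled state; otherwise try the optional URL. `Ok(None)`
    // models the "Genesis state is unknown" case handled above.
    fn genesis_validators_root(
        &self,
        url: Option<&str>,
        timeout: Duration,
    ) -> Result<Option<[u8; 32]>, String> {
        if let Some(root) = self.bundled_root {
            return Ok(Some(root));
        }
        url.map(|u| fetch_root_from_url(u, timeout)).transpose()
    }
}

fn main() {
    let config = NetworkConfig { bundled_root: None };
    let root = config
        .genesis_validators_root(
            Some("https://example.com/genesis.ssz"),
            Duration::from_secs(180),
        )
        .unwrap();
    assert!(root.is_some());
}
```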


@ -7,7 +7,8 @@ use slashing_protection::{
use std::fs::File;
use std::path::PathBuf;
use std::str::FromStr;
use types::{BeaconState, Epoch, EthSpec, PublicKeyBytes, Slot};
use std::time::Duration;
use types::{Epoch, EthSpec, PublicKeyBytes, Slot};
pub const CMD: &str = "slashing-protection";
pub const IMPORT_CMD: &str = "import";
@ -82,19 +83,24 @@ pub fn cli_run<T: EthSpec>(
) -> Result<(), String> {
let slashing_protection_db_path = validator_base_dir.join(SLASHING_PROTECTION_FILENAME);
let genesis_state_url: Option<String> =
clap_utils::parse_optional(matches, "genesis-state-url")?;
let genesis_state_url_timeout =
clap_utils::parse_required(matches, "genesis-state-url-timeout")
.map(Duration::from_secs)?;
let context = env.core_context();
let eth2_network_config = env
.eth2_network_config
.ok_or("Unable to get testnet configuration from the environment")?;
let genesis_validators_root = eth2_network_config
.beacon_state::<T>()
.map(|state: BeaconState<T>| state.genesis_validators_root())
.map_err(|e| {
format!(
"Unable to get genesis state, has genesis occurred? Detail: {:?}",
e
)
})?;
.genesis_validators_root::<T>(
genesis_state_url.as_deref(),
genesis_state_url_timeout,
context.log(),
)?
.ok_or_else(|| "Unable to get genesis state, has genesis occurred?".to_string())?;
match matches.subcommand() {
(IMPORT_CMD, Some(matches)) => {


@ -1,7 +1,10 @@
[package]
name = "beacon_node"
version = "4.3.0"
authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com"]
version = "4.4.1"
authors = [
"Paul Hauner <paul@paulhauner.com>",
"Age Manning <Age@AgeManning.com",
]
edition = "2021"
[lib]
@ -12,7 +15,9 @@ path = "src/lib.rs"
node_test_rig = { path = "../testing/node_test_rig" }
[features]
write_ssz_files = ["beacon_chain/write_ssz_files"] # Writes debugging .ssz files to /tmp during block processing.
write_ssz_files = [
"beacon_chain/write_ssz_files",
] # Writes debugging .ssz files to /tmp during block processing.
[dependencies]
eth2_config = { path = "../common/eth2_config" }
@ -21,9 +26,12 @@ types = { path = "../consensus/types" }
store = { path = "./store" }
client = { path = "client" }
clap = "2.33.3"
slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] }
slog = { version = "2.5.2", features = [
"max_level_trace",
"release_max_level_trace",
] }
dirs = "3.0.1"
directory = {path = "../common/directory"}
directory = { path = "../common/directory" }
futures = "0.3.7"
environment = { path = "../lighthouse/environment" }
task_executor = { path = "../common/task_executor" }
@ -42,4 +50,4 @@ monitoring_api = { path = "../common/monitoring_api" }
sensitive_url = { path = "../common/sensitive_url" }
http_api = { path = "http_api" }
unused_port = { path = "../common/unused_port" }
strum = "0.24.1"
strum = "0.24.1"


@ -485,6 +485,21 @@ where
let (_, updated_builder) = self.set_genesis_state(genesis_state)?;
self = updated_builder;
// Fill in the linear block roots between the checkpoint block's slot and the aligned
// state's slot. All slots less than the block's slot will be handled by block backfill,
// while states greater or equal to the checkpoint state will be handled by `migrate_db`.
let block_root_batch = store
.store_frozen_block_root_at_skip_slots(
weak_subj_block.slot(),
weak_subj_state.slot(),
weak_subj_block_root,
)
.map_err(|e| format!("Error writing frozen block roots: {e:?}"))?;
store
.cold_db
.do_atomically(block_root_batch)
.map_err(|e| format!("Error writing frozen block roots: {e:?}"))?;
// Write the state and block non-atomically, it doesn't matter if they're forgotten
// about on a crash restart.
store
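The fill described in the comment above can be modelled in miniature: every skip slot between the checkpoint block and the aligned state receives the same block root. A simplified sketch, with a plain map standing in for the freezer's chunked block-roots column:

```rust
use std::collections::BTreeMap;

// Write the checkpoint block's root at every slot from the block's slot
// (inclusive) to the state's slot (exclusive): no blocks exist in that gap,
// so each skip slot points back at the checkpoint block.
fn fill_skip_slot_roots(
    roots: &mut BTreeMap<u64, [u8; 32]>,
    block_slot: u64,
    state_slot: u64,
    block_root: [u8; 32],
) {
    for slot in block_slot..state_slot {
        roots.insert(slot, block_root);
    }
}

fn main() {
    let mut roots = BTreeMap::new();
    // Checkpoint block at slot 96, state aligned to slot 128: 32 skip slots.
    fill_skip_slot_roots(&mut roots, 96, 128, [0xab; 32]);
    assert_eq!(roots.len(), 32);
}
```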


@ -433,7 +433,7 @@ async fn forwards_iter_block_and_state_roots_until() {
// The last restore point slot is the point at which the hybrid forwards iterator behaviour
// changes.
let last_restore_point_slot = store.get_latest_restore_point_slot();
let last_restore_point_slot = store.get_latest_restore_point_slot().unwrap();
assert!(last_restore_point_slot > 0);
let chain = &harness.chain;


@ -157,6 +157,7 @@ where
let runtime_context =
runtime_context.ok_or("beacon_chain_start_method requires a runtime context")?;
let context = runtime_context.service_context("beacon".into());
let log = context.log();
let spec = chain_spec.ok_or("beacon_chain_start_method requires a chain spec")?;
let event_handler = if self.http_api_config.enabled {
Some(ServerSentEventHandler::new(
@ -167,7 +168,7 @@ where
None
};
let execution_layer = if let Some(config) = config.execution_layer {
let execution_layer = if let Some(config) = config.execution_layer.clone() {
let context = runtime_context.service_context("exec".into());
let execution_layer = ExecutionLayer::from_config(
config,
@ -208,12 +209,6 @@ where
builder
};
let builder = if let Some(trusted_setup) = config.trusted_setup {
builder.trusted_setup(trusted_setup)
} else {
builder
};
let chain_exists = builder.store_contains_beacon_chain().unwrap_or(false);
// If the client is expected to resume but there's no beacon chain in the database,
@ -258,23 +253,19 @@ where
)?;
builder.genesis_state(genesis_state).map(|v| (v, None))?
}
ClientGenesis::SszBytes {
genesis_state_bytes,
} => {
ClientGenesis::GenesisState => {
info!(
context.log(),
"Starting from known genesis state";
);
let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec)
.map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?;
let genesis_state = genesis_state(&runtime_context, &config, log)?;
builder.genesis_state(genesis_state).map(|v| (v, None))?
}
ClientGenesis::WeakSubjSszBytes {
anchor_state_bytes,
anchor_block_bytes,
genesis_state_bytes,
} => {
info!(context.log(), "Starting checkpoint sync");
if config.chain.genesis_backfill {
@ -288,17 +279,13 @@ where
.map_err(|e| format!("Unable to parse weak subj state SSZ: {:?}", e))?;
let anchor_block = SignedBeaconBlock::from_ssz_bytes(&anchor_block_bytes, &spec)
.map_err(|e| format!("Unable to parse weak subj block SSZ: {:?}", e))?;
let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec)
.map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?;
let genesis_state = genesis_state(&runtime_context, &config, log)?;
builder
.weak_subjectivity_state(anchor_state, anchor_block, genesis_state)
.map(|v| (v, None))?
}
ClientGenesis::CheckpointSyncUrl {
genesis_state_bytes,
url,
} => {
ClientGenesis::CheckpointSyncUrl { url } => {
info!(
context.log(),
"Starting checkpoint sync";
@ -393,8 +380,7 @@ where
debug!(context.log(), "Downloaded finalized block");
let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec)
.map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?;
let genesis_state = genesis_state(&runtime_context, &config, log)?;
info!(
context.log(),
@ -525,6 +511,12 @@ where
ClientGenesis::FromStore => builder.resume_from_db().map(|v| (v, None))?,
};
let beacon_chain_builder = if let Some(trusted_setup) = config.trusted_setup {
beacon_chain_builder.trusted_setup(trusted_setup)
} else {
beacon_chain_builder
};
if config.sync_eth1_chain {
self.eth1_service = eth1_service_option;
}
@ -1105,3 +1097,22 @@ where
Ok(self)
}
}
/// Obtain the genesis state from the `eth2_network_config` in `context`.
fn genesis_state<T: EthSpec>(
context: &RuntimeContext<T>,
config: &ClientConfig,
log: &Logger,
) -> Result<BeaconState<T>, String> {
let eth2_network_config = context
.eth2_network_config
.as_ref()
.ok_or("An eth2_network_config is required to obtain the genesis state")?;
eth2_network_config
.genesis_state::<T>(
config.genesis_state_url.as_deref(),
config.genesis_state_url_timeout,
log,
)?
.ok_or_else(|| "Genesis state is unknown".to_string())
}


@ -8,6 +8,7 @@ use sensitive_url::SensitiveUrl;
use serde_derive::{Deserialize, Serialize};
use std::fs;
use std::path::PathBuf;
use std::time::Duration;
use types::{Graffiti, PublicKeyBytes};
/// Default directory name for the freezer database under the top-level data dir.
const DEFAULT_FREEZER_DB_DIR: &str = "freezer_db";
@ -26,18 +27,13 @@ pub enum ClientGenesis {
/// contract.
#[default]
DepositContract,
/// Loads the genesis state from SSZ-encoded `BeaconState` bytes.
///
/// We include the bytes instead of the `BeaconState<E>` because the `EthSpec` type
/// parameter would be very annoying.
SszBytes { genesis_state_bytes: Vec<u8> },
/// Loads the genesis state from the genesis state in the `Eth2NetworkConfig`.
GenesisState,
WeakSubjSszBytes {
genesis_state_bytes: Vec<u8>,
anchor_state_bytes: Vec<u8>,
anchor_block_bytes: Vec<u8>,
},
CheckpointSyncUrl {
genesis_state_bytes: Vec<u8>,
url: SensitiveUrl,
},
}
@ -85,6 +81,8 @@ pub struct Config {
pub slasher: Option<slasher::Config>,
pub logger_config: LoggerConfig,
pub beacon_processor: BeaconProcessorConfig,
pub genesis_state_url: Option<String>,
pub genesis_state_url_timeout: Duration,
}
impl Default for Config {
@ -114,6 +112,9 @@ impl Default for Config {
validator_monitor_individual_tracking_threshold: DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD,
logger_config: LoggerConfig::default(),
beacon_processor: <_>::default(),
genesis_state_url: <_>::default(),
// This default value should always be overwritten by the CLI default value.
genesis_state_url_timeout: Duration::from_secs(60),
}
}
}
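A minimal stand-in for the two fields added to `Config` above, illustrating the stated contract that the hard-coded 60-second timeout is only a placeholder to be overwritten by the CLI layer (simplified struct, not the full client config):

```rust
use std::time::Duration;

#[derive(Debug)]
struct GenesisConfig {
    genesis_state_url: Option<String>,
    genesis_state_url_timeout: Duration,
}

impl Default for GenesisConfig {
    fn default() -> Self {
        Self {
            genesis_state_url: None,
            // Placeholder only: the CLI layer is expected to overwrite this.
            genesis_state_url_timeout: Duration::from_secs(60),
        }
    }
}

fn main() {
    let mut config = GenesisConfig::default();
    config.genesis_state_url = Some("https://example.com/genesis.ssz".into());
    config.genesis_state_url_timeout = Duration::from_secs(300);
    println!("{config:?}");
}
```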


@ -52,4 +52,4 @@ keccak-hash = "0.10.0"
hash256-std-hasher = "0.15.2"
triehash = "0.8.4"
hash-db = "0.15.2"
pretty_reqwest_error = { path = "../../common/pretty_reqwest_error" }
pretty_reqwest_error = { path = "../../common/pretty_reqwest_error" }


@ -39,7 +39,8 @@ use bytes::Bytes;
use directory::DEFAULT_ROOT_DIR;
use eth2::types::{
self as api_types, BroadcastValidation, EndpointVersion, ForkChoice, ForkChoiceNode,
SignedBlockContents, SkipRandaoVerification, ValidatorId, ValidatorStatus,
SignedBlindedBlockContents, SignedBlockContents, SkipRandaoVerification, ValidatorId,
ValidatorStatus,
};
use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage};
use lighthouse_version::version_with_platform;
@ -139,6 +140,8 @@ pub struct Config {
pub data_dir: PathBuf,
pub sse_capacity_multiplier: usize,
pub enable_beacon_processor: bool,
#[serde(with = "eth2::types::serde_status_code")]
pub duplicate_block_status_code: StatusCode,
}
impl Default for Config {
@ -154,6 +157,7 @@ impl Default for Config {
data_dir: PathBuf::from(DEFAULT_ROOT_DIR),
sse_capacity_multiplier: 1,
enable_beacon_processor: true,
duplicate_block_status_code: StatusCode::ACCEPTED,
}
}
}
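The `#[serde(with = "eth2::types::serde_status_code")]` attribute routes (de)serialization through a helper module. A minimal sketch of how such an adapter can round-trip a `StatusCode` as its `u16` value; the module body here is an assumption about its shape, not the actual eth2 implementation:

```rust
use http::StatusCode;
use serde::{Deserialize, Serialize};

mod serde_status_code {
    use http::StatusCode;
    use serde::{Deserialize, Deserializer, Serialize, Serializer};

    // Serialize the status code as its numeric value.
    pub fn serialize<S: Serializer>(status: &StatusCode, ser: S) -> Result<S::Ok, S::Error> {
        status.as_u16().serialize(ser)
    }

    // Parse the numeric value back, rejecting out-of-range codes.
    pub fn deserialize<'de, D: Deserializer<'de>>(de: D) -> Result<StatusCode, D::Error> {
        let code = u16::deserialize(de)?;
        StatusCode::from_u16(code).map_err(serde::de::Error::custom)
    }
}

#[derive(Serialize, Deserialize)]
struct Config {
    #[serde(with = "serde_status_code")]
    duplicate_block_status_code: StatusCode,
}

fn main() {
    let json = r#"{"duplicate_block_status_code":202}"#;
    let config: Config = serde_json::from_str(json).unwrap();
    assert_eq!(config.duplicate_block_status_code, StatusCode::ACCEPTED);
}
```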
@ -510,6 +514,8 @@ pub fn serve<T: BeaconChainTypes>(
let task_spawner_filter =
warp::any().map(move || TaskSpawner::new(beacon_processor_send.clone()));
let duplicate_block_status_code = ctx.config.duplicate_block_status_code;
/*
*
* Start of HTTP method definitions.
@ -1284,11 +1290,11 @@ pub fn serve<T: BeaconChainTypes>(
.and(network_tx_filter.clone())
.and(log_filter.clone())
.then(
|block_contents: SignedBlockContents<T::EthSpec>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
move |block_contents: SignedBlockContents<T::EthSpec>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
publish_blocks::publish_block(
None,
@ -1297,9 +1303,9 @@ pub fn serve<T: BeaconChainTypes>(
&network_tx,
log,
BroadcastValidation::default(),
duplicate_block_status_code,
)
.await
.map(|()| warp::reply().into_response())
})
},
);
@ -1314,11 +1320,11 @@ pub fn serve<T: BeaconChainTypes>(
.and(network_tx_filter.clone())
.and(log_filter.clone())
.then(
|block_bytes: Bytes,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
move |block_bytes: Bytes,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
let block_contents = SignedBlockContents::<T::EthSpec>::from_ssz_bytes(
&block_bytes,
@ -1334,9 +1340,9 @@ pub fn serve<T: BeaconChainTypes>(
&network_tx,
log,
BroadcastValidation::default(),
duplicate_block_status_code,
)
.await
.map(|()| warp::reply().into_response())
})
},
);
@ -1352,12 +1358,12 @@ pub fn serve<T: BeaconChainTypes>(
.and(network_tx_filter.clone())
.and(log_filter.clone())
.then(
|validation_level: api_types::BroadcastValidationQuery,
block_contents: SignedBlockContents<T::EthSpec>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
move |validation_level: api_types::BroadcastValidationQuery,
block_contents: SignedBlockContents<T::EthSpec>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
publish_blocks::publish_block(
None,
@ -1366,9 +1372,9 @@ pub fn serve<T: BeaconChainTypes>(
&network_tx,
log,
validation_level.broadcast_validation,
duplicate_block_status_code,
)
.await
.map(|()| warp::reply().into_response())
})
},
);
@ -1384,12 +1390,12 @@ pub fn serve<T: BeaconChainTypes>(
.and(network_tx_filter.clone())
.and(log_filter.clone())
.then(
|validation_level: api_types::BroadcastValidationQuery,
block_bytes: Bytes,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
move |validation_level: api_types::BroadcastValidationQuery,
block_bytes: Bytes,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
let block_contents = SignedBlockContents::<T::EthSpec>::from_ssz_bytes(
&block_bytes,
@ -1405,9 +1411,9 @@ pub fn serve<T: BeaconChainTypes>(
&network_tx,
log,
validation_level.broadcast_validation,
duplicate_block_status_code,
)
.await
.map(|()| warp::reply().into_response())
})
},
);
@ -1427,11 +1433,11 @@ pub fn serve<T: BeaconChainTypes>(
.and(network_tx_filter.clone())
.and(log_filter.clone())
.then(
|block_contents: SignedBlockContents<T::EthSpec, BlindedPayload<_>>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
move |block_contents: SignedBlindedBlockContents<T::EthSpec>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
publish_blocks::publish_blinded_block(
block_contents,
@ -1439,9 +1445,9 @@ pub fn serve<T: BeaconChainTypes>(
&network_tx,
log,
BroadcastValidation::default(),
duplicate_block_status_code,
)
.await
.map(|()| warp::reply().into_response())
})
},
);
@ -1457,11 +1463,11 @@ pub fn serve<T: BeaconChainTypes>(
.and(network_tx_filter.clone())
.and(log_filter.clone())
.then(
|block_bytes: Bytes,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
move |block_bytes: Bytes,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
let block =
SignedBlockContents::<T::EthSpec, BlindedPayload<_>>::from_ssz_bytes(
@ -1477,9 +1483,9 @@ pub fn serve<T: BeaconChainTypes>(
&network_tx,
log,
BroadcastValidation::default(),
duplicate_block_status_code,
)
.await
.map(|()| warp::reply().into_response())
})
},
);
@ -1495,32 +1501,22 @@ pub fn serve<T: BeaconChainTypes>(
.and(network_tx_filter.clone())
.and(log_filter.clone())
.then(
|validation_level: api_types::BroadcastValidationQuery,
block_contents: SignedBlockContents<T::EthSpec, BlindedPayload<_>>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
task_spawner.spawn_async(Priority::P0, async move {
match publish_blocks::publish_blinded_block(
move |validation_level: api_types::BroadcastValidationQuery,
block_contents: SignedBlindedBlockContents<T::EthSpec>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
publish_blocks::publish_blinded_block(
block_contents,
chain,
&network_tx,
log,
validation_level.broadcast_validation,
duplicate_block_status_code,
)
.await
{
Ok(()) => warp::reply().into_response(),
Err(e) => match warp_utils::reject::handle_rejection(e).await {
Ok(reply) => reply.into_response(),
Err(_) => warp::reply::with_status(
StatusCode::INTERNAL_SERVER_ERROR,
eth2::StatusCode::INTERNAL_SERVER_ERROR,
)
.into_response(),
},
}
})
},
);
@ -1531,48 +1527,36 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::query::<api_types::BroadcastValidationQuery>())
.and(warp::path::end())
.and(warp::body::bytes())
.and(task_spawner_filter.clone())
.and(chain_filter.clone())
.and(network_tx_filter.clone())
.and(log_filter.clone())
.then(
|validation_level: api_types::BroadcastValidationQuery,
block_bytes: Bytes,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| async move {
let block =
match SignedBlockContents::<T::EthSpec, BlindedPayload<_>>::from_ssz_bytes(
&block_bytes,
&chain.spec,
) {
Ok(data) => data,
Err(_) => {
return warp::reply::with_status(
StatusCode::BAD_REQUEST,
eth2::StatusCode::BAD_REQUEST,
)
.into_response();
}
};
match publish_blocks::publish_blinded_block(
block,
chain,
&network_tx,
log,
validation_level.broadcast_validation,
)
.await
{
Ok(()) => warp::reply().into_response(),
Err(e) => match warp_utils::reject::handle_rejection(e).await {
Ok(reply) => reply.into_response(),
Err(_) => warp::reply::with_status(
StatusCode::INTERNAL_SERVER_ERROR,
eth2::StatusCode::INTERNAL_SERVER_ERROR,
move |validation_level: api_types::BroadcastValidationQuery,
block_bytes: Bytes,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
let block =
SignedBlockContents::<T::EthSpec, BlindedPayload<_>>::from_ssz_bytes(
&block_bytes,
&chain.spec,
)
.into_response(),
},
}
.map_err(|e| {
warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}"))
})?;
publish_blocks::publish_blinded_block(
block,
chain,
&network_tx,
log,
validation_level.broadcast_validation,
duplicate_block_status_code,
)
.await
})
},
);
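The handlers above become `move` closures so that each route captures its own copy of `duplicate_block_status_code`. A self-contained sketch of the pattern on a hypothetical route; since `StatusCode` is `Copy`, the capture is cheap:

```rust
use warp::Filter;

#[tokio::main]
async fn main() {
    // `move` hands the handler its own copy of the configured status code,
    // mirroring the `move |...| { ... }` handlers in the diff above.
    let duplicate_block_status_code = warp::http::StatusCode::ACCEPTED;
    let route = warp::post().and(warp::path("block")).then(move || async move {
        warp::reply::with_status("duplicate block", duplicate_block_status_code)
    });

    let resp = warp::test::request()
        .method("POST")
        .path("/block")
        .reply(&route)
        .await;
    assert_eq!(resp.status(), 202);
}
```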


@ -1,12 +1,12 @@
use crate::metrics;
use beacon_chain::block_verification_types::AsBlock;
use beacon_chain::block_verification_types::{AsBlock, BlockContentsError};
use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now};
use beacon_chain::{
AvailabilityProcessingStatus, BeaconChain, BeaconChainError, BeaconChainTypes, BlockError,
IntoGossipVerifiedBlockContents, NotifyExecutionLayer,
};
use eth2::types::BroadcastValidation;
use eth2::types::{BroadcastValidation, ErrorMessage};
use eth2::types::{FullPayloadContents, SignedBlockContents};
use execution_layer::ProvenancedPayload;
use lighthouse_network::PubsubMessage;
@ -22,7 +22,8 @@ use types::{
AbstractExecPayload, BeaconBlockRef, BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash,
ForkName, FullPayload, FullPayloadMerge, Hash256, SignedBeaconBlock, SignedBlobSidecarList,
};
use warp::Rejection;
use warp::http::StatusCode;
use warp::{reply::Response, Rejection, Reply};
pub enum ProvenancedBlock<T: BeaconChainTypes, B: IntoGossipVerifiedBlockContents<T>> {
/// The payload was built using a local EE.
@ -50,7 +51,8 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlockConten
network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger,
validation_level: BroadcastValidation,
) -> Result<(), Rejection> {
duplicate_status_code: StatusCode,
) -> Result<Response, Rejection> {
let seen_timestamp = timestamp_now();
let (block_contents, is_locally_built_block) = match provenanced_block {
@ -114,12 +116,31 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlockConten
let blobs_opt = block_contents.inner_blobs();
/* if we can form a `GossipVerifiedBlock`, we've passed our basic gossip checks */
let (gossip_verified_block, gossip_verified_blobs) = block_contents
.into_gossip_verified_block(&chain)
.map_err(|e| {
warn!(log, "Not publishing block, not gossip verified"; "slot" => slot, "error" => ?e);
warp_utils::reject::custom_bad_request(e.to_string())
})?;
let (gossip_verified_block, gossip_verified_blobs) =
match block_contents.into_gossip_verified_block(&chain) {
Ok(b) => b,
Err(BlockContentsError::BlockError(BlockError::BlockIsAlreadyKnown)) => {
// Allow the status code for duplicate blocks to be overridden based on config.
return Ok(warp::reply::with_status(
warp::reply::json(&ErrorMessage {
code: duplicate_status_code.as_u16(),
message: "duplicate block".to_string(),
stacktraces: vec![],
}),
duplicate_status_code,
)
.into_response());
}
Err(e) => {
warn!(
log,
"Not publishing block - not gossip verified";
"slot" => slot,
"error" => ?e
);
return Err(warp_utils::reject::custom_bad_request(e.to_string()));
}
};
// Clone here, so we can take advantage of the `Arc`. The block in `BlockContents` is not
// `Arc`'d, but blobs are.
@ -222,8 +243,7 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlockConten
if is_locally_built_block {
late_block_logging(&chain, seen_timestamp, block.message(), root, "local", &log)
}
Ok(())
Ok(warp::reply().into_response())
}
Ok(AvailabilityProcessingStatus::MissingComponents(_, block_root)) => {
let msg = format!("Missing parts of block with root {:?}", block_root);
@ -246,10 +266,6 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlockConten
Err(BlockError::Slashable) => Err(warp_utils::reject::custom_bad_request(
"proposal for this slot and proposer has already been seen".to_string(),
)),
Err(BlockError::BlockIsAlreadyKnown) => {
info!(log, "Block from HTTP API already known"; "block" => ?block_root);
Ok(())
}
Err(e) => {
if let BroadcastValidation::Gossip = validation_level {
Err(warp_utils::reject::broadcast_without_import(format!("{e}")))
@ -276,7 +292,8 @@ pub async fn publish_blinded_block<T: BeaconChainTypes>(
network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger,
validation_level: BroadcastValidation,
) -> Result<(), Rejection> {
duplicate_status_code: StatusCode,
) -> Result<Response, Rejection> {
let block_root = block_contents.signed_block().canonical_root();
let full_block: ProvenancedBlock<T, SignedBlockContents<T::EthSpec>> =
reconstruct_block(chain.clone(), block_root, block_contents, log.clone()).await?;
@ -287,6 +304,7 @@ pub async fn publish_blinded_block<T: BeaconChainTypes>(
network_tx,
log,
validation_level,
duplicate_status_code,
)
.await
}
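For reference, the duplicate-block response above serializes an `ErrorMessage`-style JSON body. A stand-alone sketch of that shape, reusing the field names visible in the diff (the real type is `eth2::types::ErrorMessage`):

```rust
use serde::Serialize;

// Stand-in mirroring the field names used above.
#[derive(Serialize)]
struct ErrorMessage {
    code: u16,
    message: String,
    stacktraces: Vec<String>,
}

fn main() {
    let body = ErrorMessage {
        code: 202,
        message: "duplicate block".to_string(),
        stacktraces: vec![],
    };
    println!("{}", serde_json::to_string_pretty(&body).unwrap());
}
```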


@ -89,9 +89,7 @@ impl StateId {
} else {
// This block is either old and finalized, or recent and unfinalized, so
// it's safe to fallback to the optimistic status of the finalized block.
chain
.canonical_head
.fork_choice_read_lock()
fork_choice
.is_optimistic_or_invalid_block(&hot_summary.latest_block_root)
.map_err(BeaconChainError::ForkChoiceError)
.map_err(warp_utils::reject::beacon_chain_error)?


@ -159,46 +159,6 @@ impl<E: EthSpec> TaskSpawner<E> {
.and_then(|x| x)
}
}
/// Executes an async task which always returns a `Response`.
pub async fn spawn_async(
self,
priority: Priority,
func: impl Future<Output = Response> + Send + Sync + 'static,
) -> Response {
if let Some(beacon_processor_send) = &self.beacon_processor_send {
// Create a wrapper future that will execute `func` and send the
// result to a channel held by this thread.
let (tx, rx) = oneshot::channel();
let process_fn = async move {
// Await the future, collect the return value.
let func_result = func.await;
// Send the result down the channel. Ignore any failures; the
// send can only fail if the receiver is dropped.
let _ = tx.send(func_result);
};
// Send the function to the beacon processor for execution at some arbitrary time.
let result = send_to_beacon_processor(
beacon_processor_send,
priority,
BlockingOrAsync::Async(Box::pin(process_fn)),
rx,
)
.await;
convert_rejection(result).await
} else {
// There is no beacon processor so spawn a task directly on the
// tokio executor.
tokio::task::spawn(func).await.unwrap_or_else(|e| {
warp::reply::with_status(
warp::reply::json(&format!("Tokio did not execute task: {e:?}")),
eth2::StatusCode::INTERNAL_SERVER_ERROR,
)
.into_response()
})
}
}
}
/// Send a task to the beacon processor and await execution.


@ -21,7 +21,7 @@ use network::{NetworkReceivers, NetworkSenders};
use sensitive_url::SensitiveUrl;
use slog::Logger;
use std::future::Future;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
use store::MemoryStore;
@ -217,15 +217,9 @@ pub async fn create_api_server_on_port<T: BeaconChainTypes>(
let ctx = Arc::new(Context {
config: Config {
enabled: true,
listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)),
listen_port: port,
allow_origin: None,
tls_config: None,
allow_sync_stalled: false,
data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR),
spec_fork_name: None,
sse_capacity_multiplier: 1,
enable_beacon_processor: true,
..Config::default()
},
chain: Some(chain),
network_senders: Some(network_senders),
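The test config above collapses onto `..Config::default()`, Rust's struct-update syntax: only the fields under test are spelled out and `Default` supplies the rest. A tiny illustration with a hypothetical config type:

```rust
#[derive(Debug)]
struct Config {
    enabled: bool,
    port: u16,
    allow_origin: Option<String>,
}

impl Default for Config {
    fn default() -> Self {
        Self { enabled: false, port: 5052, allow_origin: None }
    }
}

fn main() {
    // Spell out only the fields under test; `Default` supplies the rest.
    let config = Config { enabled: true, port: 0, ..Config::default() };
    println!("{config:?}");
}
```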


@ -214,19 +214,19 @@ pub async fn gossip_full_pass_ssz() {
let slot_b = slot_a + 1;
let state_a = tester.harness.get_current_state();
let ((block, _), _): ((SignedBeaconBlock<E>, _), _) =
tester.harness.make_block(state_a, slot_b).await;
let (block_contents_tuple, _) = tester.harness.make_block(state_a, slot_b).await;
let block_contents = block_contents_tuple.into();
let response: Result<(), eth2::Error> = tester
.client
.post_beacon_blocks_v2_ssz(&block, validation_level)
.post_beacon_blocks_v2_ssz(&block_contents, validation_level)
.await;
assert!(response.is_ok());
assert!(tester
.harness
.chain
.block_is_known_to_fork_choice(&block.canonical_root()));
.block_is_known_to_fork_choice(&block_contents.signed_block().canonical_root()));
}
/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`.
@ -378,13 +378,14 @@ pub async fn consensus_partial_pass_only_consensus() {
/* submit `block_b` which should induce equivocation */
let channel = tokio::sync::mpsc::unbounded_channel();
let publication_result: Result<(), Rejection> = publish_block(
let publication_result = publish_block(
None,
ProvenancedBlock::local(gossip_block_contents_b.unwrap()),
tester.harness.chain.clone(),
&channel.0,
test_logger,
validation_level.unwrap(),
StatusCode::ACCEPTED,
)
.await;
@ -669,13 +670,14 @@ pub async fn equivocation_consensus_late_equivocation() {
let channel = tokio::sync::mpsc::unbounded_channel();
let publication_result: Result<(), Rejection> = publish_block(
let publication_result = publish_block(
None,
ProvenancedBlock::local(gossip_block_contents_b.unwrap()),
tester.harness.chain,
&channel.0,
test_logger,
validation_level.unwrap(),
StatusCode::ACCEPTED,
)
.await;
@ -910,19 +912,19 @@ pub async fn blinded_gossip_full_pass_ssz() {
let slot_b = slot_a + 1;
let state_a = tester.harness.get_current_state();
let ((block, _), _): ((SignedBlindedBeaconBlock<E>, _), _) =
tester.harness.make_blinded_block(state_a, slot_b).await;
let (block_contents_tuple, _) = tester.harness.make_blinded_block(state_a, slot_b).await;
let block_contents = block_contents_tuple.into();
let response: Result<(), eth2::Error> = tester
.client
.post_beacon_blinded_blocks_v2_ssz(&block, validation_level)
.post_beacon_blinded_blocks_v2_ssz(&block_contents, validation_level)
.await;
assert!(response.is_ok());
assert!(tester
.harness
.chain
.block_is_known_to_fork_choice(&block.canonical_root()));
.block_is_known_to_fork_choice(&block_contents.signed_block().canonical_root()));
}
/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`.
@ -1335,12 +1337,13 @@ pub async fn blinded_equivocation_consensus_late_equivocation() {
let channel = tokio::sync::mpsc::unbounded_channel();
let publication_result: Result<(), Rejection> = publish_blinded_block(
let publication_result = publish_blinded_block(
SignedBlockContents::new(block_b, blobs_b),
tester.harness.chain,
&channel.0,
test_logger,
validation_level.unwrap(),
StatusCode::ACCEPTED,
)
.await;


@ -8,7 +8,7 @@ use eth2::{
mixin::{RequestAccept, ResponseForkName, ResponseOptional},
reqwest::RequestBuilder,
types::{BlockId as CoreBlockId, ForkChoiceNode, StateId as CoreStateId, *},
BeaconNodeHttpClient, Error, Timeouts,
BeaconNodeHttpClient, Error, StatusCode, Timeouts,
};
use execution_layer::test_utils::TestingBuilder;
use execution_layer::test_utils::DEFAULT_BUILDER_THRESHOLD_WEI;
@ -1330,6 +1330,71 @@ impl ApiTester {
self
}
pub async fn test_post_beacon_blocks_duplicate(self) -> Self {
let block_contents = self
.harness
.make_block(
self.harness.get_current_state(),
self.harness.get_current_slot(),
)
.await
.0
.into();
assert!(self
.client
.post_beacon_blocks(&block_contents)
.await
.is_ok());
let blinded_block_contents = block_contents.clone_as_blinded();
// Test all the POST methods in sequence, they should all behave the same.
let responses = vec![
self.client
.post_beacon_blocks(&block_contents)
.await
.unwrap_err(),
self.client
.post_beacon_blocks_v2(&block_contents, None)
.await
.unwrap_err(),
self.client
.post_beacon_blocks_ssz(&block_contents)
.await
.unwrap_err(),
self.client
.post_beacon_blocks_v2_ssz(&block_contents, None)
.await
.unwrap_err(),
self.client
.post_beacon_blinded_blocks(&blinded_block_contents)
.await
.unwrap_err(),
self.client
.post_beacon_blinded_blocks_v2(&blinded_block_contents, None)
.await
.unwrap_err(),
self.client
.post_beacon_blinded_blocks_ssz(&blinded_block_contents)
.await
.unwrap_err(),
self.client
.post_beacon_blinded_blocks_v2_ssz(&blinded_block_contents, None)
.await
.unwrap_err(),
];
for (i, response) in responses.into_iter().enumerate() {
assert_eq!(
response.status().unwrap(),
StatusCode::ACCEPTED,
"response {i}"
);
}
self
}
pub async fn test_beacon_blocks(self) -> Self {
for block_id in self.interesting_block_ids() {
let expected = block_id
@ -2591,13 +2656,10 @@ impl ApiTester {
.get_validator_blinded_blocks::<E, Payload>(slot, &randao_reveal, None)
.await
.unwrap()
.data
.deconstruct()
.0;
.data;
let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec);
let signed_block_contents =
SignedBlockContents::<E, Payload>::Block(signed_block.clone());
block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec);
self.client
.post_beacon_blinded_blocks(&signed_block_contents)
@ -2605,6 +2667,7 @@ impl ApiTester {
.unwrap();
// This converts the generic `Payload` to a concrete type for comparison.
let signed_block = signed_block_contents.deconstruct().0;
let head_block = SignedBeaconBlock::from(signed_block.clone());
assert_eq!(head_block, signed_block);
@ -2650,23 +2713,23 @@ impl ApiTester {
sk.sign(message).into()
};
let block = self
let block_contents = self
.client
.get_validator_blinded_blocks::<E, Payload>(slot, &randao_reveal, None)
.await
.unwrap()
.data
.deconstruct()
.0;
.data;
let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec);
let signed_block_contents =
block_contents.sign(&sk, &fork, genesis_validators_root, &self.chain.spec);
self.client
.post_beacon_blinded_blocks_ssz(&signed_block)
.post_beacon_blinded_blocks_ssz(&signed_block_contents)
.await
.unwrap();
// This converts the generic `Payload` to a concrete type for comparison.
let signed_block = signed_block_contents.deconstruct().0;
let head_block = SignedBeaconBlock::from(signed_block.clone());
assert_eq!(head_block, signed_block);
@ -4725,6 +4788,14 @@ async fn post_beacon_blocks_invalid() {
.await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn post_beacon_blocks_duplicate() {
ApiTester::new()
.await
.test_post_beacon_blocks_duplicate()
.await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn beacon_pools_post_attestations_valid() {
ApiTester::new()


@ -302,9 +302,16 @@ impl<T: BeaconChainTypes> AttestationService<T> {
/// Gets the long lived subnets the node should be subscribed to during the current epoch and
/// the remaining duration for which they remain valid.
fn recompute_long_lived_subnets_inner(&mut self) -> Result<Duration, ()> {
let current_epoch = self.beacon_chain.epoch().map_err(
|e| error!(self.log, "Failed to get the current epoch from clock"; "err" => ?e),
)?;
let current_epoch = self.beacon_chain.epoch().map_err(|e| {
if !self
.beacon_chain
.slot_clock
.is_prior_to_genesis()
.unwrap_or(false)
{
error!(self.log, "Failed to get the current epoch from clock"; "err" => ?e)
}
})?;
let (subnets, next_subscription_epoch) = SubnetId::compute_subnets_for_epoch::<T::EthSpec>(
self.node_id.raw().into(),
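The guard above suppresses the error log while the slot clock reports that genesis has not yet occurred, since a clock error is expected in that window. A minimal model of the decision (names hypothetical):

```rust
fn should_log_clock_error(is_prior_to_genesis: Option<bool>) -> bool {
    // `None` means the clock couldn't determine genesis; err on logging.
    !is_prior_to_genesis.unwrap_or(false)
}

fn main() {
    assert!(should_log_clock_error(Some(false))); // after genesis: log
    assert!(!should_log_clock_error(Some(true))); // before genesis: quiet
    assert!(should_log_clock_error(None)); // unknown: log
}
```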


@ -398,6 +398,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
.help("Multiplier to apply to the length of HTTP server-sent-event (SSE) channels. \
Increasing this value can prevent messages from being dropped.")
)
.arg(
Arg::with_name("http-duplicate-block-status")
.long("http-duplicate-block-status")
.takes_value(true)
.default_value("202")
.value_name("STATUS_CODE")
.help("Status code to send when a block that is already known is POSTed to the \
HTTP API.")
)
.arg(
Arg::with_name("http-enable-beacon-processor")
.long("http-enable-beacon-processor")
@ -1187,7 +1196,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
.arg(
Arg::with_name("gui")
.long("gui")
.hidden(true)
.help("Enable the graphical user interface and all its requirements. \
This enables --http and --validator-monitor-auto and enables SSE logging.")
.takes_value(false)
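A hedged sketch of how a flag like `--http-duplicate-block-status` can be parsed into an `http::StatusCode`, in the clap 2 style used above; the validation shown is an assumption, not lighthouse's `clap_utils`:

```rust
use clap::{App, Arg};
use http::StatusCode;
use std::str::FromStr;

fn main() {
    let matches = App::new("bn")
        .arg(
            Arg::with_name("http-duplicate-block-status")
                .long("http-duplicate-block-status")
                .takes_value(true)
                .default_value("202"),
        )
        .get_matches();

    // `StatusCode` implements `FromStr`, so "202" parses to ACCEPTED.
    let status = matches
        .value_of("http-duplicate-block-status")
        .map(StatusCode::from_str)
        .expect("flag has a default value")
        .expect("flag must be a valid HTTP status code");
    assert_eq!(status, StatusCode::ACCEPTED);
}
```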


@ -156,6 +156,9 @@ pub fn get_config<E: EthSpec>(
client_config.http_api.enable_beacon_processor =
parse_required(cli_args, "http-enable-beacon-processor")?;
client_config.http_api.duplicate_block_status_code =
parse_required(cli_args, "http-duplicate-block-status")?;
if let Some(cache_size) = clap_utils::parse_optional(cli_args, "shuffling-cache-size")? {
client_config.chain.shuffling_cache_size = cache_size;
}
@ -506,9 +509,30 @@ pub fn get_config<E: EthSpec>(
client_config.chain.checkpoint_sync_url_timeout =
clap_utils::parse_required::<u64>(cli_args, "checkpoint-sync-url-timeout")?;
client_config.genesis = if let Some(genesis_state_bytes) =
eth2_network_config.genesis_state_bytes.clone()
{
client_config.genesis_state_url_timeout =
clap_utils::parse_required(cli_args, "genesis-state-url-timeout")
.map(Duration::from_secs)?;
let genesis_state_url_opt =
clap_utils::parse_optional::<String>(cli_args, "genesis-state-url")?;
let checkpoint_sync_url_opt =
clap_utils::parse_optional::<String>(cli_args, "checkpoint-sync-url")?;
// If the `--genesis-state-url` is defined, use that to download the
// genesis state bytes. If it's not defined, try `--checkpoint-sync-url`.
client_config.genesis_state_url = if let Some(genesis_state_url) = genesis_state_url_opt {
Some(genesis_state_url)
} else if let Some(checkpoint_sync_url) = checkpoint_sync_url_opt {
// If the checkpoint sync URL is going to be used to download the
// genesis state, adopt the timeout from the checkpoint sync URL too.
client_config.genesis_state_url_timeout =
Duration::from_secs(client_config.chain.checkpoint_sync_url_timeout);
Some(checkpoint_sync_url)
} else {
None
};
client_config.genesis = if eth2_network_config.genesis_state_is_known() {
// Set up weak subjectivity sync, or start from the hardcoded genesis state.
if let (Some(initial_state_path), Some(initial_block_path)) = (
cli_args.value_of("checkpoint-state"),
@ -530,7 +554,6 @@ pub fn get_config<E: EthSpec>(
let anchor_block_bytes = read(initial_block_path)?;
ClientGenesis::WeakSubjSszBytes {
genesis_state_bytes,
anchor_state_bytes,
anchor_block_bytes,
}
@ -538,17 +561,9 @@ pub fn get_config<E: EthSpec>(
let url = SensitiveUrl::parse(remote_bn_url)
.map_err(|e| format!("Invalid checkpoint sync URL: {:?}", e))?;
ClientGenesis::CheckpointSyncUrl {
genesis_state_bytes,
url,
}
ClientGenesis::CheckpointSyncUrl { url }
} else {
// Note: re-serializing the genesis state is not so efficient, however it avoids adding
// trait bounds to the `ClientGenesis` enum. This would have significant flow-on
// effects.
ClientGenesis::SszBytes {
genesis_state_bytes,
}
ClientGenesis::GenesisState
}
} else {
if cli_args.is_present("checkpoint-state") || cli_args.is_present("checkpoint-sync-url") {


@ -30,16 +30,16 @@ where
/// Create a new iterator which can yield elements from `start_vindex` up to the last
/// index stored by the restore point at `last_restore_point_slot`.
///
/// The `last_restore_point` slot should be the slot of a recent restore point as obtained from
/// `HotColdDB::get_latest_restore_point_slot`. We pass it as a parameter so that the caller can
/// The `freezer_upper_limit` slot should be the slot of a recent restore point as obtained from
/// `Root::freezer_upper_limit`. We pass it as a parameter so that the caller can
/// maintain a stable view of the database (see `HybridForwardsBlockRootsIterator`).
pub fn new(
store: &'a HotColdDB<E, Hot, Cold>,
start_vindex: usize,
last_restore_point_slot: Slot,
freezer_upper_limit: Slot,
spec: &ChainSpec,
) -> Self {
let (_, end_vindex) = F::start_and_end_vindex(last_restore_point_slot, spec);
let (_, end_vindex) = F::start_and_end_vindex(freezer_upper_limit, spec);
// Set the next chunk to the one containing `start_vindex`.
let next_cindex = start_vindex / F::chunk_size();


@ -19,6 +19,14 @@ pub trait Root<E: EthSpec>: Field<E, Value = Hash256> {
end_state: BeaconState<E>,
end_root: Hash256,
) -> Result<SimpleForwardsIterator>;
/// The first slot for which this field is *no longer* stored in the freezer database.
///
/// If `None`, then this field is not stored in the freezer database at all due to pruning
/// configuration.
fn freezer_upper_limit<Hot: ItemStore<E>, Cold: ItemStore<E>>(
store: &HotColdDB<E, Hot, Cold>,
) -> Option<Slot>;
}
impl<E: EthSpec> Root<E> for BlockRoots {
@ -39,6 +47,13 @@ impl<E: EthSpec> Root<E> for BlockRoots {
)?;
Ok(SimpleForwardsIterator { values })
}
fn freezer_upper_limit<Hot: ItemStore<E>, Cold: ItemStore<E>>(
store: &HotColdDB<E, Hot, Cold>,
) -> Option<Slot> {
// Block roots are stored for all slots up to the split slot (exclusive).
Some(store.get_split_slot())
}
}
impl<E: EthSpec> Root<E> for StateRoots {
@ -59,6 +74,15 @@ impl<E: EthSpec> Root<E> for StateRoots {
)?;
Ok(SimpleForwardsIterator { values })
}
fn freezer_upper_limit<Hot: ItemStore<E>, Cold: ItemStore<E>>(
store: &HotColdDB<E, Hot, Cold>,
) -> Option<Slot> {
// State roots are stored for all slots up to the latest restore point (exclusive).
// There may not be a latest restore point if state pruning is enabled, in which
// case this function will return `None`.
store.get_latest_restore_point_slot()
}
}
/// Forwards root iterator that makes use of a flat field table in the freezer DB.
@ -118,6 +142,7 @@ impl Iterator for SimpleForwardsIterator {
pub enum HybridForwardsIterator<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, Cold: ItemStore<E>> {
PreFinalization {
iter: Box<FrozenForwardsIterator<'a, E, F, Hot, Cold>>,
end_slot: Option<Slot>,
/// Data required by the `PostFinalization` iterator when we get to it.
continuation_data: Option<Box<(BeaconState<E>, Hash256)>>,
},
@ -129,6 +154,7 @@ pub enum HybridForwardsIterator<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, C
PostFinalization {
iter: SimpleForwardsIterator,
},
Finished,
}
impl<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, Cold: ItemStore<E>>
@ -138,8 +164,8 @@ impl<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, Cold: ItemStore<E>>
///
/// The `get_state` closure should return a beacon state and final block/state root to backtrack
/// from in the case where the iterated range does not lie entirely within the frozen portion of
/// the database. If an `end_slot` is provided and it is before the database's latest restore
/// point slot then the `get_state` closure will not be called at all.
/// the database. If an `end_slot` is provided and it is before the database's freezer upper
/// limit for the field then the `get_state` closure will not be called at all.
///
/// It is OK for `get_state` to hold a lock while this function is evaluated, as the returned
/// iterator is as lazy as possible and won't do any work apart from calling `get_state`.
@ -155,13 +181,15 @@ impl<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, Cold: ItemStore<E>>
) -> Result<Self> {
use HybridForwardsIterator::*;
let latest_restore_point_slot = store.get_latest_restore_point_slot();
// First slot at which this field is *not* available in the freezer. i.e. all slots less
// than this slot have their data available in the freezer.
let freezer_upper_limit = F::freezer_upper_limit(store).unwrap_or(Slot::new(0));
let result = if start_slot < latest_restore_point_slot {
let result = if start_slot < freezer_upper_limit {
let iter = Box::new(FrozenForwardsIterator::new(
store,
start_slot,
latest_restore_point_slot,
freezer_upper_limit,
spec,
));
@ -169,13 +197,14 @@ impl<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, Cold: ItemStore<E>>
// `end_slot`. If it tries to continue further a `NoContinuationData` error will be
// returned.
let continuation_data =
if end_slot.map_or(false, |end_slot| end_slot < latest_restore_point_slot) {
if end_slot.map_or(false, |end_slot| end_slot < freezer_upper_limit) {
None
} else {
Some(Box::new(get_state()?))
};
PreFinalization {
iter,
end_slot,
continuation_data,
}
} else {
@ -195,6 +224,7 @@ impl<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, Cold: ItemStore<E>>
match self {
PreFinalization {
iter,
end_slot,
continuation_data,
} => {
match iter.next() {
@ -203,10 +233,17 @@ impl<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, Cold: ItemStore<E>>
// to a post-finalization iterator beginning from the last slot
// of the pre iterator.
None => {
// If the iterator has an end slot (inclusive) which has already been
// covered by the (exclusive) frozen forwards iterator, then we're done!
let iter_end_slot = Slot::from(iter.inner.end_vindex);
if end_slot.map_or(false, |end_slot| iter_end_slot == end_slot + 1) {
*self = Finished;
return Ok(None);
}
let continuation_data = continuation_data.take();
let store = iter.inner.store;
let start_slot = Slot::from(iter.inner.end_vindex);
let start_slot = iter_end_slot;
*self = PostFinalizationLazy {
continuation_data,
store,
@ -230,6 +267,7 @@ impl<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, Cold: ItemStore<E>>
self.do_next()
}
PostFinalization { iter } => iter.next().transpose(),
Finished => Ok(None),
}
}
}
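The `Finished` transition added above hinges on an off-by-one: the frozen iterator's end is exclusive while the requested `end_slot` is inclusive. A worked check of the condition, mirroring the `map_or` in the diff:

```rust
fn frozen_covers_range(iter_end_slot: u64, end_slot: Option<u64>) -> bool {
    end_slot.map_or(false, |end_slot| iter_end_slot == end_slot + 1)
}

fn main() {
    // Frozen data covers slots 0..100 (exclusive); a range ending at slot 99
    // (inclusive) is fully served by the frozen iterator.
    assert!(frozen_covers_range(100, Some(99)));
    // A range ending at slot 100 still needs the post-finalization iterator.
    assert!(!frozen_covers_range(100, Some(100)));
    // With no end slot, iteration always continues into the hot database.
    assert!(!frozen_covers_range(100, None));
}
```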


@ -18,7 +18,7 @@ use crate::metadata::{
};
use crate::metrics;
use crate::{
get_key_for_col, DBColumn, DatabaseBlock, Error, ItemStore, KeyValueStoreOp,
get_key_for_col, ChunkWriter, DBColumn, DatabaseBlock, Error, ItemStore, KeyValueStoreOp,
PartialBeaconState, StoreItem, StoreOp,
};
use itertools::process_results;
@ -1195,6 +1195,9 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
ops.push(op);
// 2. Store updated vector entries.
// Block roots need to be written here as well as by the `ChunkWriter` in `migrate_db`
// because states may require older block roots, and the writer only stores block roots
// between the previous split point and the new split point.
let db = &self.cold_db;
store_updated_vector(BlockRoots, db, state, &self.spec, ops)?;
store_updated_vector(StateRoots, db, state, &self.spec, ops)?;
@ -1497,10 +1500,21 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
};
}
/// Fetch the slot of the most recently stored restore point.
pub fn get_latest_restore_point_slot(&self) -> Slot {
(self.get_split_slot() - 1) / self.config.slots_per_restore_point
* self.config.slots_per_restore_point
/// Fetch the slot of the most recently stored restore point (if any).
pub fn get_latest_restore_point_slot(&self) -> Option<Slot> {
let split_slot = self.get_split_slot();
let anchor = self.get_anchor_info();
// There are no restore points stored if the state upper limit lies in the hot database.
// It hasn't been reached yet, and may never be.
if anchor.map_or(false, |a| a.state_upper_limit >= split_slot) {
None
} else {
Some(
(split_slot - 1) / self.config.slots_per_restore_point
* self.config.slots_per_restore_point,
)
}
}
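The `Some(...)` branch rounds the split slot down to the restore-point-aligned slot strictly below it. A worked example of the arithmetic, assuming a hypothetical 2048 slots per restore point:

```rust
fn latest_restore_point_slot(split_slot: u64, slots_per_restore_point: u64) -> u64 {
    (split_slot - 1) / slots_per_restore_point * slots_per_restore_point
}

fn main() {
    // The boundary itself is excluded: a split exactly on a restore point
    // rounds down to the previous one.
    assert_eq!(latest_restore_point_slot(4096, 2048), 2048);
    assert_eq!(latest_restore_point_slot(4097, 2048), 4096);
    assert_eq!(latest_restore_point_slot(6000, 2048), 4096);
}
```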
/// Load the database schema version from disk.
@ -1907,6 +1921,25 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
)
}
/// Update the linear array of frozen block roots with the block root for several skipped slots.
///
/// Write the block root at all slots from `start_slot` (inclusive) to `end_slot` (exclusive).
pub fn store_frozen_block_root_at_skip_slots(
&self,
start_slot: Slot,
end_slot: Slot,
block_root: Hash256,
) -> Result<Vec<KeyValueStoreOp>, Error> {
let mut ops = vec![];
let mut block_root_writer =
ChunkWriter::<BlockRoots, _, _>::new(&self.cold_db, start_slot.as_usize())?;
for slot in start_slot.as_usize()..end_slot.as_usize() {
block_root_writer.set(slot, block_root, &mut ops)?;
}
block_root_writer.write(&mut ops)?;
Ok(ops)
}
/// Try to prune all execution payloads, returning early if there is no need to prune.
pub fn try_prune_execution_payloads(&self, force: bool) -> Result<(), Error> {
let split = self.get_split_info();
@ -2203,7 +2236,14 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
return Err(HotColdDBError::FreezeSlotUnaligned(finalized_state.slot()).into());
}
let mut hot_db_ops: Vec<StoreOp<E>> = Vec::new();
let mut hot_db_ops = vec![];
let mut cold_db_ops = vec![];
// Chunk writer for the linear block roots in the freezer DB.
// Start at the new upper limit because we iterate backwards.
let new_frozen_block_root_upper_limit = finalized_state.slot().as_usize().saturating_sub(1);
let mut block_root_writer =
ChunkWriter::<BlockRoots, _, _>::new(&store.cold_db, new_frozen_block_root_upper_limit)?;
// 1. Copy all of the states between the new finalized state and the split slot, from the hot DB
// to the cold DB. Delete the execution payloads of these now-finalized blocks.
@ -2228,6 +2268,9 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
// Delete the old summary, and the full state if we lie on an epoch boundary.
hot_db_ops.push(StoreOp::DeleteState(state_root, Some(slot)));
// Store the block root for this slot in the linear array of frozen block roots.
block_root_writer.set(slot.as_usize(), block_root, &mut cold_db_ops)?;
// Do not try to store states if a restore point is yet to be stored, or will never be
// stored (see `STATE_UPPER_LIMIT_NO_RETAIN`). Make an exception for the genesis state
// which always needs to be copied from the hot DB to the freezer and should not be deleted.
@ -2237,29 +2280,34 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
.map_or(false, |anchor| slot < anchor.state_upper_limit)
{
debug!(store.log, "Pruning finalized state"; "slot" => slot);
continue;
}
let mut cold_db_ops: Vec<KeyValueStoreOp> = Vec::new();
if slot % store.config.slots_per_restore_point == 0 {
let state: BeaconState<E> = get_full_state(&store.hot_db, &state_root, &store.spec)?
.ok_or(HotColdDBError::MissingStateToFreeze(state_root))?;
store.store_cold_state(&state_root, &state, &mut cold_db_ops)?;
}
// Store a pointer from this state root to its slot, so we can later reconstruct states
// from their state root alone.
let cold_state_summary = ColdStateSummary { slot };
let op = cold_state_summary.as_kv_store_op(state_root);
cold_db_ops.push(op);
// There are data dependencies between calls to `store_cold_state()` that prevent us from
// doing one big call to `store.cold_db.do_atomically()` at end of the loop.
store.cold_db.do_atomically(cold_db_ops)?;
if slot % store.config.slots_per_restore_point == 0 {
let state: BeaconState<E> = get_full_state(&store.hot_db, &state_root, &store.spec)?
.ok_or(HotColdDBError::MissingStateToFreeze(state_root))?;
store.store_cold_state(&state_root, &state, &mut cold_db_ops)?;
// Commit the batch of cold DB ops whenever a full state is written. Each state stored
// may read the linear fields of previous states stored.
store
.cold_db
.do_atomically(std::mem::take(&mut cold_db_ops))?;
}
}
// Finish writing the block roots and commit the remaining cold DB ops.
block_root_writer.write(&mut cold_db_ops)?;
store.cold_db.do_atomically(cold_db_ops)?;
// Warning: Critical section. We have to take care not to put either of the two databases in an
// inconsistent state if the OS process dies at any point during the freezing
// procedure.


@ -121,13 +121,13 @@ nodes that do not run directly on a public network.
To listen over only IPv6 use the same parameters as done when listening over
IPv4 only:
- `--listen-addresses :: --port 9909` will listen over IPv6 using port `9909` for
- `--listen-address :: --port 9909` will listen over IPv6 using port `9909` for
TCP and UDP.
- `--listen-addresses :: --port 9909 --discovery-port 9999` will listen over
- `--listen-address :: --port 9909 --discovery-port 9999` will listen over
IPv6 using port `9909` for TCP and port `9999` for UDP.
To listen over both IPv4 and IPv6:
- Set two listening addresses using the `--listen-addresses` flag twice ensuring
- Set two listening addresses using the `--listen-address` flag twice ensuring
the two addresses are one IPv4, and the other IPv6. When doing so, the
`--port` and `--discovery-port` flags will apply exclusively to IPv4. Note
that this behaviour differs from the IPv6-only case described above.
@ -139,16 +139,16 @@ To listen over both IPv4 and IPv6:
##### Configuration Examples
- `--listen-addresses :: --listen-addresses 0.0.0.0 --port 9909` will listen
- `--listen-address :: --listen-address 0.0.0.0 --port 9909` will listen
over IPv4 using port `9909` for TCP and UDP. It will also listen over IPv6 but
using the default value for `--port6` for UDP and TCP (`9090`).
- `--listen-addresses :: --listen-addresses --port 9909 --discovery-port6 9999`
- `--listen-address :: --listen-address --port 9909 --discovery-port6 9999`
will have the same configuration as before except for the IPv6 UDP socket,
which will use port `9999`.
#### Configuring Lighthouse to advertise IPv6 reachable addresses
Lighthouse supports IPv6 to connect to other nodes both over IPv6 exclusively,
and dual stack using one socket for IPv6 and another socket for IPv6. In both
and dual stack using one socket for IPv4 and another socket for IPv6. In both
scenarios, the previous sections still apply. In summary:
> Beacon nodes must advertise their publicly reachable socket address


@ -40,6 +40,7 @@
- [How do I check the version of Lighthouse that is running?](#misc-version)
- [Does Lighthouse have a pruning function like the execution client to save disk space?](#misc-prune)
- [Can I use an HDD for the freezer database and only have the hot db on SSD?](#misc-freezer)
- [Can Lighthouse log with local timestamps instead of UTC?](#misc-timestamp)
## Beacon Node
@ -436,16 +437,14 @@ Monitoring](./validator-monitoring.md) for more information. Lighthouse has also
### <a name="net-bn-vc"></a> My beacon node and validator client are on different servers. How can I point the validator client to the beacon node?
The settings are as follows:
1. On the beacon node:
Specify `lighthouse bn --http-address local_IP` so that the beacon node is listening on the local network rather than on the `localhost`.
1. On the validator client:
The setting on the beacon node is the same for both cases below. On the beacon node, specify `lighthouse bn --http-address local_IP` so that the beacon node is listening on the local network rather than `localhost`. You can find the `local_IP` by running the command `hostname -I | awk '{print $1}'` on the server running the beacon node.
1. If the beacon node and validator clients are on different servers *in the same network*, the setting in the validator client is as follows:
Use the flag `--beacon-nodes` to point to the beacon node. For example, `lighthouse vc --beacon-nodes http://local_IP:5052` where `local_IP` is the local IP address of the beacon node and `5052` is the default `http-port` of the beacon node.
If you have a firewall set up, e.g., `ufw`, you will need to allow port 5052 (assuming that the default port is used) with `sudo ufw allow 5052`. Note: this will allow all IP addresses to access the HTTP API of the beacon node. If you are on an untrusted network (e.g., a university or public WiFi) or the host is exposed to the internet, apply IP-address filtering as described later in this section.
You can test that the setup is working by running the following command on the validator client host:
```bash
@ -453,8 +452,25 @@ The settings are as follows:
```
You can refer to [Redundancy](./redundancy.md) for more information.
The `--beacon-nodes` flag can also be used for redundancy of beacon nodes. For example, let's say you have a beacon node and a validator client running on the same host, and a second beacon node on another server as a backup. In this case, you can use `lighthouse vc --beacon-nodes http://localhost:5052,http://local_IP:5052` on the validator client.
2. If the beacon node and validator clients are on different servers *and different networks*, it is necessary to perform port forwarding of the SSH port (e.g., the default port 22) on the router, and also to allow the SSH port through the firewall. The connection can then be established via port forwarding on the router.
In the validator client, use the flag `--beacon-nodes` to point to the beacon node. However, since the beacon node and the validator client are on different networks, the IP address to use is the public IP address of the beacon node, i.e., `lighthouse vc --beacon-nodes http://public_IP:5052`. You can get the public IP address of the beacon node by running the command `dig +short myip.opendns.com @resolver1.opendns.com` on the server running the beacon node.
Additionally, port forwarding of port 5052 on the router connected to the beacon node is required for the validator client to connect to the beacon node. To do port forwarding, refer to [how to open ports](./advanced_networking.md#how-to-open-ports).
If you have a firewall set up, e.g., `ufw`, you will need to allow connections to port 5052 (assuming that the default port is used). Since the beacon node HTTP/HTTPS API is public-facing (i.e., the 5052 port is now exposed to the internet due to port forwarding), we strongly recommend applying IP-address filtering to the BN/VC connection to protect the beacon node from malicious actors. This can be done using the command:
```bash
sudo ufw allow from vc_IP_address proto tcp to any port 5052
```
where `vc_IP_address` is the public IP address of the validator client. The command will only allow connections to the beacon node from the validator client IP address to prevent malicious attacks on the beacon node over the internet.
The `--beacon-nodes` flag can also be used for redundancy of beacon nodes. For example, let's say you have a beacon node and a validator client running on the same host, and a second beacon node on another server as a backup. In this case, you can use `lighthouse vc --beacon-nodes http://localhost:5052,http://IP-address:5052` on the validator client.
### <a name="net-ip"></a> Should I do anything to the beacon node or validator client settings if I have a relocation of the node / change of IP address?
No. Lighthouse will auto-detect the change and update your Ethereum Node Record (ENR). You just need to make sure you are not manually setting the ENR with `--enr-address` (a flag that is not needed for common use cases).
@ -513,11 +529,9 @@ There is no pruning of Lighthouse database for now. However, since v4.2.0, a fea
Yes, you can do so by using the flag `--freezer-dir /path/to/freezer_db` in the beacon node.
### <a name="misc-timestamp"></a> Can Lighthouse log in local timestamp instead of UTC?
Lighthouse logs in UTC because of a dependency on an upstream library that is [yet to be resolved](https://github.com/sigp/lighthouse/issues/3130). Alternatively, using the flag `--disable-log-timestamp` in combination with systemd will suppress the UTC timestamps and print the logs with local timestamps.

View File

@ -21,7 +21,7 @@ engine to a merge-ready version.
## When?
All networks (**Mainnet**, **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln**, **Gnosis**) have successfully undergone the Bellatrix fork and transitioned to a post-merge Network. Your node must have a merge-ready configuration to continue operating. Table below lists the date at which Bellatrix and The Merge occurred:
All networks (**Mainnet**, **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln**, **Chiado**, **Gnosis**) have successfully undergone the Bellatrix fork and transitioned to a post-merge network. Your node must have a merge-ready configuration to continue operating. The table below lists the date at which Bellatrix and The Merge occurred:
<div align="center">
@ -31,6 +31,7 @@ All networks (**Mainnet**, **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln
| Sepolia | 20<sup>th</sup> June 2022 | 6<sup>th</sup> July 2022 | |
| Goerli | 4<sup>th</sup> August 2022 | 10<sup>th</sup> August 2022 | Previously named `Prater`|
| Mainnet | 6<sup>th</sup> September 2022 | 15<sup>th</sup> September 2022 |
| Chiado | 10<sup>th</sup> October 2022 | 4<sup>th</sup> November 2022 |
| Gnosis | 30<sup>th</sup> November 2022 | 8<sup>th</sup> December 2022 | |
</div>

View File

@ -58,7 +58,8 @@ Notable flags:
- `lighthouse --network mainnet`: Mainnet.
- `lighthouse --network goerli`: Goerli (testnet).
- `lighthouse --network sepolia`: Sepolia (testnet).
- `lighthouse --network gnosis`: Gnosis chain
- `lighthouse --network chiado`: Chiado (testnet).
- `lighthouse --network gnosis`: Gnosis chain.
> Note: Using the correct `--network` flag is very important; using the wrong flag can
result in penalties, slashings or lost deposits. As a rule of thumb, *always*

View File

@ -9,23 +9,20 @@ following configuration screen.
## Connecting to the Clients
This allows you to enter the address and ports of the associated Lighthouse
Both the Beacon node and the Validator client need to have their HTTP APIs enabled. These ports should be accessible from the computer running Siren. This allows you to enter the address and ports of the associated Lighthouse
Beacon node and Lighthouse Validator client.
> The Beacon Node must be run with the `--gui` flag set.
To enable the HTTP API for the beacon node, use the `--gui` CLI flag. This ensures that the HTTP API can be accessed by other software on the same machine.
If you run Siren in the browser (by entering `localhost` in the browser), you will need to allow CORS in the HTTP API. This can be done by adding the flag `--http-allow-origin "*"` for both beacon node and validator client. If you would like to access Siren beyond the local computer, we recommend using an SSH tunnel. This requires a tunnel for 3 ports: `80` (assuming the port is unchanged as per the [installation guide](./ui-installation.md#docker-recommended)), `5052` (for the beacon node) and `5062` (for the validator client). You can use the command below to perform SSH tunneling:
```bash
ssh -N -L 80:127.0.0.1:80 -L 5052:127.0.0.1:5052 -L 5062:127.0.0.1:5062 username@local_ip
```
> The Beacon Node must be run with the `--gui` flag set.
where `username` is the username of the server and `local_ip` is the local IP address of the server. Note that the `-N` option stops the SSH session from executing remote commands, so it cannot be confused with an ordinary shell session. The connection will appear to be "hung" upon a successful connection, but that is normal. Once you have successfully connected to the server via SSH tunneling, you should be able to access Siren by entering `localhost` in a web browser.
If you require accessibility from another machine within the network, configure the `--http-address` to match the local LAN IP of the system running the Beacon Node and Validator Client.
You can also access Siren using the app downloaded in the [Siren release page](https://github.com/sigp/siren/releases). To access Siren beyond the local computer, you can use SSH tunneling for ports `5052` and `5062` using the command:
> To access from another machine on the same network (192.168.0.200) set the Beacon Node and Validator Client `--http-address` as `192.168.0.200`.
```bash
ssh -N -L 5052:127.0.0.1:5052 -L 5062:127.0.0.1:5062 username@local_ip
```
Similarly, the validator client requires the `--http` flag to be enabled, and optionally the `--http-address` flag to be configured. If the `--http-address` flag is set on the Validator Client, then the `--unencrypted-http-transport` flag is required as well. These settings will ensure compatibility with Siren's connectivity requirements.
If you run Siren in the browser (by entering `localhost` in the browser), you will need to allow CORS in the HTTP API. This can be done by adding the flag `--http-allow-origin "*"` for both beacon node and validator client.
A green tick will appear once Siren is able to connect to both clients. You
can specify different ports for each client by clicking on the advanced tab.
@ -37,7 +34,7 @@ The API Token is a secret key that allows you to connect to the validator
client. The validator client's HTTP API is guarded by this key because it
contains sensitive validator information and the ability to modify
validators. Please see [`Validator Authorization`](./api-vc-auth-header.md)
for further details.
for further details.
Siren requires this token in order to connect to the Validator client.
The token is located in the default data directory of the validator
@ -49,7 +46,7 @@ entered.
## Name
This is your name, it can be modified and is solely used for aesthetics.
This is your name, it can be modified and is solely used for aesthetics.
## Device

View File

@ -1,7 +1,7 @@
# Frequently Asked Questions
## 1. Are there any requirements to run Siren?
Yes, Siren requires Lighthouse v3.5.1 or higher to function properly. These releases can be found on the [releases](https://github.com/sigp/lighthouse/releases) page of the Lighthouse repository.
Yes, the most current Siren version requires Lighthouse v4.3.0 or higher to function properly. These releases can be found on the [releases](https://github.com/sigp/lighthouse/releases) page of the Lighthouse repository.
## 2. Where can I find my API token?
The required API token may be found in the default data directory of the validator client. For more information, please refer to the Lighthouse UI configuration [`API token section`](./api-vc-auth-header.md).
@ -9,13 +9,41 @@ The required Api token may be found in the default data directory of the validat
## 3. How do I fix the Node Network Errors?
If you receive a red notification with a BEACON or VALIDATOR NODE NETWORK ERROR, you can refer to the Lighthouse UI configuration and the [`connecting to clients section`](./ui-configuration.md#connecting-to-the-clients).
## 4. How do I change my Beacon or Validator address after logging in?
Once you have successfully arrived to the main dashboard, use the sidebar to access the settings view. In the top right hand corner there is a `Configuration` action button that will redirect you back to the configuration screen where you can make appropriate changes.
## 4. How do I connect Siren to Lighthouse from a different computer on the same network?
The most effective approach to allow a computer on the local network to access Lighthouse's HTTP API ports is to configure `--http-address` to match the local LAN IP of the system running the beacon node and validator client. For instance, if that node operates at `192.168.0.200`, this IP can be specified using the `--http-address` parameter as `--http-address 192.168.0.200`.
Then, by designating the host as `192.168.0.200`, you can connect Siren to this specific beacon node and validator client pair from any computer situated within the same network.
## 5. Why doesn't my validator balance graph show any data?
If your graph is not showing data, it usually means your validator node is still caching data. The application must wait at least 3 epochs before it can render any graphical visualizations. This could take up to 20min.
## 5. How can I use Siren to monitor my validators remotely when I am not at home?
## 4. Does Siren support reverse proxy or DNS named addresses?
There are two primary methods to access your Beacon Node and Validator Client remotely: setting up a VPN, or using SSH tunneling.
Most contemporary home routers provide options for VPN access. A VPN permits a remote computer to establish a connection with computers inside a home network. With a VPN configured, connecting to the VPN lets you treat your computer as if it were part of your local home network. The connection process then follows the setup steps for connecting via another machine on the same network on the Siren configuration page and the [`connecting to clients section`](./ui-configuration.md#connecting-to-the-clients).
In the absence of a VPN, an alternative approach is an SSH tunnel. This requires remote SSH access to the computer hosting the Beacon Node and Validator Client pair (which necessitates a port forward in your router). In this case it is not necessary to set a `--http-address` flag on the Beacon Node and Validator Client; you can instead configure an SSH tunnel to the local ports on the node and establish a connection through the tunnel. For instructions on setting up an SSH tunnel, refer to [`Connecting Siren via SSH tunnel`](./ui-faqs.md#6-how-do-i-connect-siren-to-lighthouse-via-a-ssh-tunnel).
## 6. How do I connect Siren to Lighthouse via a ssh tunnel?
If you would like to access Siren beyond the local network (i.e., across the internet), we recommend using an SSH tunnel. This requires a tunnel for 3 ports: `80` (assuming the port is unchanged as per the [installation guide](./ui-installation.md#docker-recommended)), `5052` (for the beacon node) and `5062` (for the validator client). You can use the command below to perform SSH tunneling:
```bash
ssh -N -L 80:127.0.0.1:80 -L 5052:127.0.0.1:5052 -L 5062:127.0.0.1:5062 username@local_ip
```
Where `username` is the username of the server and `local_ip` is the local IP address of the server. Note that the `-N` option stops the SSH session from executing remote commands, so it cannot be confused with an ordinary shell session. The connection will appear to be "hung" upon a successful connection, but that is normal. Once you have successfully connected to the server via SSH tunneling, you should be able to access Siren by entering `localhost` in a web browser.
You can also access Siren using the app downloaded in the [Siren release page](https://github.com/sigp/siren/releases). To access Siren beyond the local computer, you can use SSH tunneling for ports `5052` and `5062` using the command:
```bash
ssh -N -L 5052:127.0.0.1:5052 -L 5062:127.0.0.1:5062 username@local_ip
```
## 7. Does Siren support reverse proxy or DNS named addresses?
Yes, if you need to access your beacon or validator from an address such as `https://merp-server:9909/eth2-vc`, you should follow these steps for configuration:
1. Toggle `https` as your protocol
2. Add your address as `merp-server/eth2-vc`
@ -24,3 +52,10 @@ Yes, if you need to access your beacon or validator from an address such as `htt
If you have configured it correctly you should see a green checkmark indicating Siren is now connected to your Validator Client and Beacon Node.
If you have separate address setups for your Validator Client and Beacon Node, you should access the `Advance Settings` on the configuration and repeat the steps above for each address.
## 8. How do I change my Beacon or Validator address after logging in?
Once you have successfully arrived at the main dashboard, use the sidebar to access the settings view. In the top right hand corner there is a `Configuration` action button that will redirect you back to the configuration screen where you can make appropriate changes.
## 9. Why doesn't my validator balance graph show any data?
If your graph is not showing data, it usually means your validator node is still caching data. The application must wait at least 3 epochs before it can render any graphical visualizations. This could take up to 20 minutes.

View File

@ -124,7 +124,6 @@ The command will create two files:
The VC which will receive the validators needs to have the following flags at a minimum:
- `--http`
- `--http-port 5062`
- `--enable-doppelganger-protection`
Therefore, the VC command might look like:
@ -133,7 +132,6 @@ Therefore, the VC command might look like:
lighthouse \
vc \
--http \
--http-port 5062 \
--enable-doppelganger-protection
```

View File

@ -69,7 +69,6 @@ In reality, many host configurations are possible. For example:
The source VC needs to have the following flags at a minimum:
- `--http`
- `--http-port 5062`
- `--http-allow-keystore-export`
Therefore, the source VC command might look like:
@ -78,7 +77,6 @@ Therefore, the source VC command might look like:
lighthouse \
vc \
--http \
--http-port 5062 \
--http-allow-keystore-export
```
@ -87,7 +85,6 @@ lighthouse \
The destination VC needs to have the following flags at a minimum:
- `--http`
- `--http-port 5062`
- `--enable-doppelganger-protection`
Therefore, the destination VC command might look like:
@ -96,7 +93,6 @@ Therefore, the destination VC command might look like:
lighthouse \
vc \
--http \
--http-port 5062 \
--enable-doppelganger-protection
```
@ -167,6 +163,8 @@ At the same time, `lighthouse vc` will log:
INFO Importing keystores via standard HTTP API, count: 1
INFO Enabled validator voting_pubkey: 0xab6e29f1b98fedfca878edce2b471f1b5ee58ee4c3bd216201f98254ef6f6eac40a53d74c8b7da54f51d3e85cacae92f, signing_method: local_keystore
INFO Modified key_cache saved successfully
```
Once the operation completes successfully, there is nothing else to be done. The
validators have been removed from the `src-host` and enabled at the `dest-host`.
If the `--enable-doppelganger-protection` flag was used it may take 2-3 epochs
@ -183,6 +181,7 @@ lighthouse \
--dest-vc-token ~/.lighthouse/mainnet/validators/api-token.txt \
--validators 0x9096aab771e44da149bd7c9926d6f7bb96ef465c0eeb4918be5178cd23a1deb4aec232c61d85ff329b54ed4a3bdfff3a,0x90fc4f72d898a8f01ab71242e36f4545aaf87e3887be81632bb8ba4b2ae8fb70753a62f866344d7905e9a07f5a9cdda1
```
Any errors encountered during the operation should include information on how to
proceed. Assistance is also available on our
[Discord](https://discord.gg/cyAszAh).

View File

@ -1,6 +1,6 @@
[package]
name = "boot_node"
version = "4.3.0"
version = "4.4.1"
authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2021"

View File

@ -10,6 +10,7 @@ use lighthouse_network::{
use serde_derive::{Deserialize, Serialize};
use ssz::Encode;
use std::net::{SocketAddrV4, SocketAddrV6};
use std::time::Duration;
use std::{marker::PhantomData, path::PathBuf};
use types::EthSpec;
@ -90,8 +91,19 @@ impl<T: EthSpec> BootNodeConfig<T> {
let enr_fork = {
let spec = eth2_network_config.chain_spec::<T>()?;
if eth2_network_config.beacon_state_is_known() {
let genesis_state = eth2_network_config.beacon_state::<T>()?;
let genesis_state_url: Option<String> =
clap_utils::parse_optional(matches, "genesis-state-url")?;
let genesis_state_url_timeout =
clap_utils::parse_required(matches, "genesis-state-url-timeout")
.map(Duration::from_secs)?;
if eth2_network_config.genesis_state_is_known() {
let genesis_state = eth2_network_config
.genesis_state::<T>(genesis_state_url.as_deref(), genesis_state_url_timeout, &logger)?
.ok_or_else(|| {
"The genesis state for this network is not known, this is an unsupported mode"
.to_string()
})?;
slog::info!(logger, "Genesis state found"; "root" => genesis_state.canonical_root().to_string());
let enr_fork = spec.enr_fork_id::<T>(

View File

@ -37,14 +37,8 @@ tokio = { version = "1.14.0", features = ["full"] }
[target.'cfg(target_os = "linux")'.dependencies]
psutil = { version = "3.2.2", optional = true }
procinfo = { version = "0.4.2", optional = true }
procfs = { version = "0.15.1", optional = true }
[features]
default = ["lighthouse"]
lighthouse = [
"proto_array",
"psutil",
"procinfo",
"store",
"slashing_protection",
]
lighthouse = ["proto_array", "psutil", "procfs", "store", "slashing_protection"]

View File

@ -707,7 +707,7 @@ impl BeaconNodeHttpClient {
/// Returns `Ok(None)` on a 404 error.
pub async fn post_beacon_blocks_ssz<T: EthSpec, Payload: AbstractExecPayload<T>>(
&self,
block: &SignedBlockContents<T, Payload>,
block_contents: &SignedBlockContents<T, Payload>,
) -> Result<(), Error> {
let mut path = self.eth_path(V1)?;
@ -716,8 +716,12 @@ impl BeaconNodeHttpClient {
.push("beacon")
.push("blocks");
self.post_generic_with_ssz_body(path, block.as_ssz_bytes(), Some(self.timeouts.proposal))
.await?;
self.post_generic_with_ssz_body(
path,
block_contents.as_ssz_bytes(),
Some(self.timeouts.proposal),
)
.await?;
Ok(())
}
@ -747,7 +751,7 @@ impl BeaconNodeHttpClient {
/// Returns `Ok(None)` on a 404 error.
pub async fn post_beacon_blinded_blocks_ssz<T: EthSpec, Payload: AbstractExecPayload<T>>(
&self,
block: &SignedBeaconBlock<T, Payload>,
block: &SignedBlockContents<T, Payload>,
) -> Result<(), Error> {
let mut path = self.eth_path(V1)?;
@ -818,14 +822,14 @@ impl BeaconNodeHttpClient {
/// `POST v2/beacon/blocks`
pub async fn post_beacon_blocks_v2_ssz<T: EthSpec, Payload: AbstractExecPayload<T>>(
&self,
block: &SignedBeaconBlock<T, Payload>,
block_contents: &SignedBlockContents<T, Payload>,
validation_level: Option<BroadcastValidation>,
) -> Result<(), Error> {
self.post_generic_with_consensus_version_and_ssz_body(
self.post_beacon_blocks_v2_path(validation_level)?,
block.as_ssz_bytes(),
block_contents.as_ssz_bytes(),
Some(self.timeouts.proposal),
block.message().body().fork_name(),
block_contents.signed_block().message().body().fork_name(),
)
.await?;
@ -852,14 +856,14 @@ impl BeaconNodeHttpClient {
/// `POST v2/beacon/blinded_blocks`
pub async fn post_beacon_blinded_blocks_v2_ssz<T: EthSpec>(
&self,
block: &SignedBlindedBeaconBlock<T>,
block_contents: &SignedBlindedBlockContents<T>,
validation_level: Option<BroadcastValidation>,
) -> Result<(), Error> {
self.post_generic_with_consensus_version_and_ssz_body(
self.post_beacon_blinded_blocks_v2_path(validation_level)?,
block.as_ssz_bytes(),
block_contents.as_ssz_bytes(),
Some(self.timeouts.proposal),
block.message().body().fork_name(),
block_contents.signed_block().message().body().fork_name(),
)
.await?;

View File

@ -95,8 +95,8 @@ pub struct ValidatorInclusionData {
#[cfg(target_os = "linux")]
use {
procinfo::pid, psutil::cpu::os::linux::CpuTimesExt,
psutil::memory::os::linux::VirtualMemoryExt, psutil::process::Process,
psutil::cpu::os::linux::CpuTimesExt, psutil::memory::os::linux::VirtualMemoryExt,
psutil::process::Process,
};
/// Reports on the health of the Lighthouse instance.
@ -238,7 +238,7 @@ pub struct ProcessHealth {
/// The pid of this process.
pub pid: u32,
/// The number of threads used by this pid.
pub pid_num_threads: i32,
pub pid_num_threads: i64,
/// The total resident memory used by this pid.
pub pid_mem_resident_set_size: u64,
/// The total virtual memory used by this pid.
@ -262,7 +262,12 @@ impl ProcessHealth {
.memory_info()
.map_err(|e| format!("Unable to get process memory info: {:?}", e))?;
let stat = pid::stat_self().map_err(|e| format!("Unable to get stat: {:?}", e))?;
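// Read process stats via `procfs` (this replaces the `procinfo`-based call above).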
let me = procfs::process::Process::myself()
.map_err(|e| format!("Unable to get process: {:?}", e))?;
let stat = me
.stat()
.map_err(|e| format!("Unable to get stat: {:?}", e))?;
let process_times = process
.cpu_times()
.map_err(|e| format!("Unable to get process cpu times : {:?}", e))?;

View File

@ -1333,6 +1333,26 @@ pub struct BroadcastValidationQuery {
pub broadcast_validation: BroadcastValidation,
}
pub mod serde_status_code {
use crate::StatusCode;
use serde::{de::Error, Deserialize, Serialize};
pub fn serialize<S>(status_code: &StatusCode, ser: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
status_code.as_u16().serialize(ser)
}
pub fn deserialize<'de, D>(de: D) -> Result<StatusCode, D::Error>
where
D: serde::de::Deserializer<'de>,
{
let status_code = u16::deserialize(de)?;
StatusCode::try_from(status_code).map_err(D::Error::custom)
}
}
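A module like this is designed to plug into serde's `with` attribute. As a minimal sketch of a call site (hypothetical, not part of this change; it assumes `StatusCode` and the module are in scope):

```rust
use serde::{Deserialize, Serialize};

// Hypothetical type: the status code round-trips as its `u16` value,
// e.g. `{"status":400,"message":"bad block"}`.
#[derive(Serialize, Deserialize)]
struct IndexedError {
    #[serde(with = "serde_status_code")]
    status: StatusCode,
    message: String,
}
```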
#[cfg(test)]
mod tests {
use super::*;
@ -1411,6 +1431,26 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> BlockContents<T, Payload> {
BlockContents::Block(block) => (block, None),
}
}
/// Signs `self`, producing a `SignedBlockContents`.
pub fn sign(
self,
secret_key: &SecretKey,
fork: &Fork,
genesis_validators_root: Hash256,
spec: &ChainSpec,
) -> SignedBlockContents<T, Payload> {
let (block, maybe_blobs) = self.deconstruct();
let signed_block = block.sign(secret_key, fork, genesis_validators_root, spec);
let signed_blobs = maybe_blobs.map(|blobs| {
blobs
.into_iter()
.map(|blob| blob.sign(secret_key, fork, genesis_validators_root, spec))
.collect::<Vec<_>>()
.into()
});
SignedBlockContents::new(signed_block, signed_blobs)
}
}
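As a usage sketch (the surrounding names are assumptions, not from this diff), a caller holding unsigned `BlockContents` can now sign the block and every blob sidecar against the same fork and genesis validators root in one call:

```rust
// Hypothetical call site: `block_contents`, `secret_key`, `fork`,
// `genesis_validators_root` and `spec` are assumed to be in scope.
let signed_contents = block_contents.sign(&secret_key, &fork, genesis_validators_root, &spec);
```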
impl<T: EthSpec, Payload: AbstractExecPayload<T>> ForkVersionDeserialize
@ -1465,6 +1505,8 @@ pub type SignedBlockContentsTuple<T, Payload> = (
Option<SignedSidecarList<T, <Payload as AbstractExecPayload<T>>::Sidecar>>,
);
pub type SignedBlindedBlockContents<E> = SignedBlockContents<E, BlindedPayload<E>>;
/// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBeaconBlockAndBlobSidecars`].
#[derive(Clone, Debug, Encode, Serialize, Deserialize)]
#[serde(untagged)]
@ -1593,6 +1635,19 @@ impl<T: EthSpec> SignedBlockContents<T, BlindedPayload<T>> {
}
}
impl<T: EthSpec> SignedBlockContents<T> {
pub fn clone_as_blinded(&self) -> SignedBlindedBlockContents<T> {
let blinded_blobs = self.blobs_cloned().map(|blob_sidecars| {
blob_sidecars
.into_iter()
.map(|blob| blob.into())
.collect::<Vec<_>>()
.into()
});
SignedBlockContents::new(self.signed_block().clone_as_blinded(), blinded_blobs)
}
}
impl<T: EthSpec, Payload: AbstractExecPayload<T>> TryFrom<SignedBeaconBlock<T, Payload>>
for SignedBlockContents<T, Payload>
{

View File

@ -23,6 +23,23 @@ pub const PREDEFINED_NETWORKS_DIR: &str = predefined_networks_dir!();
pub const GENESIS_FILE_NAME: &str = "genesis.ssz";
pub const GENESIS_ZIP_FILE_NAME: &str = "genesis.ssz.zip";
const HOLESKY_GENESIS_STATE_SOURCE: GenesisStateSource = GenesisStateSource::Url {
urls: &[
// This is an AWS S3 bucket hosted by Sigma Prime. See Paul Hauner for
// more details.
"https://sigp-public-genesis-states.s3.ap-southeast-2.amazonaws.com/holesky/",
],
checksum: "0x76631cd0b9ddc5b2c766b496e23f16759ce1181446a4efb40e5540cd15b78a07",
genesis_validators_root: "0x9143aa7c615a7f7115e2b6aac319c03529df8242ae705fba9df39b79c59fa8b1",
};
const CHIADO_GENESIS_STATE_SOURCE: GenesisStateSource = GenesisStateSource::Url {
// No default genesis state download URLs are provided.
urls: &[],
checksum: "0xd4a039454c7429f1dfaa7e11e397ef3d0f50d2d5e4c0e4dc04919d153aa13af1",
genesis_validators_root: "0x9d642dac73058fbf39c0ae41ab1e34e4d889043cb199851ded7095bc99eb4c1e",
};
/// The core configuration of a Lighthouse beacon node.
#[derive(Debug, Clone)]
pub struct Eth2Config {
@ -62,6 +79,32 @@ impl Eth2Config {
}
}
/// Describes how a genesis state may be obtained.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum GenesisStateSource {
/// The genesis state for this network is not yet known.
Unknown,
/// The genesis state for this network is included in the binary via
/// `include_bytes!` or by loading from a testnet dir.
IncludedBytes,
/// The genesis state for this network should be downloaded from a URL.
Url {
/// URLs to try to download the file from, in order.
urls: &'static [&'static str],
/// The SHA256 of the genesis state bytes. This is *not* a hash tree
/// root to simplify the types (i.e., to avoid getting EthSpec
/// involved).
///
/// The format should be 0x-prefixed ASCII bytes.
checksum: &'static str,
/// The `genesis_validators_root` of the genesis state. Used to avoid
/// downloading the state for simple signing operations.
///
/// The format should be 0x-prefixed ASCII bytes.
genesis_validators_root: &'static str,
},
}
/// A directory that can be built by downloading files via HTTP.
///
/// Used by the `eth2_network_config` crate to initialize the network directories during build and
@ -70,7 +113,7 @@ impl Eth2Config {
pub struct Eth2NetArchiveAndDirectory<'a> {
pub name: &'a str,
pub config_dir: &'a str,
pub genesis_is_known: bool,
pub genesis_state_source: GenesisStateSource,
}
impl<'a> Eth2NetArchiveAndDirectory<'a> {
@ -89,15 +132,11 @@ impl<'a> Eth2NetArchiveAndDirectory<'a> {
}
}
/// Indicates that the `genesis.ssz.zip` file is present on the filesystem. This means that the
/// deposit ceremony has concluded and the final genesis `BeaconState` is known.
const GENESIS_STATE_IS_KNOWN: bool = true;
#[derive(Copy, Clone, Debug, PartialEq)]
#[derive(Clone, Debug, PartialEq)]
pub struct HardcodedNet {
pub name: &'static str,
pub config_dir: &'static str,
pub genesis_is_known: bool,
pub genesis_state_source: GenesisStateSource,
pub config: &'static [u8],
pub deploy_block: &'static [u8],
pub boot_enr: &'static [u8],
@ -109,7 +148,7 @@ pub struct HardcodedNet {
/// It also defines a `include_<title>_file!` macro which provides a wrapper around
/// `std::include_bytes`, allowing the inclusion of bytes from the specific testnet directory.
macro_rules! define_archive {
($name_ident: ident, $config_dir: tt, $genesis_is_known: ident) => {
($name_ident: ident, $config_dir: tt, $genesis_state_source: path) => {
paste! {
#[macro_use]
pub mod $name_ident {
@ -118,7 +157,7 @@ macro_rules! define_archive {
pub const ETH2_NET_DIR: Eth2NetArchiveAndDirectory = Eth2NetArchiveAndDirectory {
name: stringify!($name_ident),
config_dir: $config_dir,
genesis_is_known: $genesis_is_known,
genesis_state_source: $genesis_state_source,
};
/// A wrapper around `std::include_bytes` which includes a file from a specific network
@ -151,7 +190,7 @@ macro_rules! define_net {
$this_crate::HardcodedNet {
name: ETH2_NET_DIR.name,
config_dir: ETH2_NET_DIR.config_dir,
genesis_is_known: ETH2_NET_DIR.genesis_is_known,
genesis_state_source: ETH2_NET_DIR.genesis_state_source,
config: $this_crate::$include_file!($this_crate, "../", "config.yaml"),
deploy_block: $this_crate::$include_file!($this_crate, "../", "deploy_block.txt"),
boot_enr: $this_crate::$include_file!($this_crate, "../", "boot_enr.yaml"),
@ -199,9 +238,9 @@ macro_rules! define_nets {
/// `build.rs` which will unzip the genesis states. Then, that `eth2_network_configs` crate can
/// perform the final step of using `std::include_bytes` to bake the files (bytes) into the binary.
macro_rules! define_hardcoded_nets {
($(($name_ident: ident, $config_dir: tt, $genesis_is_known: ident)),+) => {
($(($name_ident: ident, $config_dir: tt, $genesis_state_source: path)),+) => {
$(
define_archive!($name_ident, $config_dir, $genesis_is_known);
define_archive!($name_ident, $config_dir, $genesis_state_source);
)+
pub const ETH2_NET_DIRS: &[Eth2NetArchiveAndDirectory<'static>] = &[$($name_ident::ETH2_NET_DIR,)+];
@ -242,9 +281,8 @@ define_hardcoded_nets!(
// The name of the directory in the `eth2_network_config/built_in_network_configs`
// directory where the configuration files are located for this network.
"mainnet",
// Set to `true` if the genesis state can be found in the `built_in_network_configs`
// directory.
GENESIS_STATE_IS_KNOWN
// Describes how the genesis state can be obtained.
GenesisStateSource::IncludedBytes
),
(
// Network name (must be unique among all networks).
@ -252,9 +290,8 @@ define_hardcoded_nets!(
// The name of the directory in the `eth2_network_config/built_in_network_configs`
// directory where the configuration files are located for this network.
"prater",
// Set to `true` if the genesis state can be found in the `built_in_network_configs`
// directory.
GENESIS_STATE_IS_KNOWN
// Describes how the genesis state can be obtained.
GenesisStateSource::IncludedBytes
),
(
// Network name (must be unique among all networks).
@ -264,9 +301,8 @@ define_hardcoded_nets!(
//
// The Goerli network is effectively an alias to Prater.
"prater",
// Set to `true` if the genesis state can be found in the `built_in_network_configs`
// directory.
GENESIS_STATE_IS_KNOWN
// Describes how the genesis state can be obtained.
GenesisStateSource::IncludedBytes
),
(
// Network name (must be unique among all networks).
@ -274,9 +310,18 @@ define_hardcoded_nets!(
// The name of the directory in the `eth2_network_config/built_in_network_configs`
// directory where the configuration files are located for this network.
"gnosis",
// Describes how the genesis state can be obtained.
GenesisStateSource::IncludedBytes
),
(
// Network name (must be unique among all networks).
chiado,
// The name of the directory in the `eth2_network_config/built_in_network_configs`
// directory where the configuration files are located for this network.
"chiado",
// Set to `true` if the genesis state can be found in the `built_in_network_configs`
// directory.
GENESIS_STATE_IS_KNOWN
CHIADO_GENESIS_STATE_SOURCE
),
(
// Network name (must be unique among all networks).
@ -284,8 +329,16 @@ define_hardcoded_nets!(
// The name of the directory in the `eth2_network_config/built_in_network_configs`
// directory where the configuration files are located for this network.
"sepolia",
// Set to `true` if the genesis state can be found in the `built_in_network_configs`
// directory.
GENESIS_STATE_IS_KNOWN
// Describes how the genesis state can be obtained.
GenesisStateSource::IncludedBytes
),
(
// Network name (must be unique among all networks).
holesky,
// The name of the directory in the `eth2_network_config/built_in_network_configs`
// directory where the configuration files are located for this network.
"holesky",
// Describes how the genesis state can be obtained.
HOLESKY_GENESIS_STATE_SOURCE
)
);

View File

@ -7,8 +7,8 @@ edition = "2021"
build = "build.rs"
[build-dependencies]
zip = "0.5.8"
eth2_config = { path = "../eth2_config"}
zip = "0.6"
eth2_config = { path = "../eth2_config" }
[dev-dependencies]
tempfile = "3.1.0"
@ -16,8 +16,15 @@ tempfile = "3.1.0"
[dependencies]
serde_yaml = "0.8.13"
serde_json = "1.0.58"
types = { path = "../../consensus/types"}
types = { path = "../../consensus/types" }
kzg = { path = "../../crypto/kzg" }
ethereum_ssz = "0.5.0"
eth2_config = { path = "../eth2_config"}
discv5 = "0.3.1"
eth2_config = { path = "../eth2_config" }
discv5 = "0.3.1"
reqwest = { version = "0.11.0", features = ["blocking"] }
pretty_reqwest_error = { path = "../pretty_reqwest_error" }
sha2 = "0.10"
url = "2.2.2"
sensitive_url = { path = "../sensitive_url" }
slog = "2.5.2"
logging = { path = "../logging" }

View File

@ -1,5 +1,7 @@
//! Extracts zipped genesis states on first run.
use eth2_config::{Eth2NetArchiveAndDirectory, ETH2_NET_DIRS, GENESIS_FILE_NAME};
use eth2_config::{
Eth2NetArchiveAndDirectory, GenesisStateSource, ETH2_NET_DIRS, GENESIS_FILE_NAME,
};
use std::fs::File;
use std::io;
use zip::ZipArchive;
@ -26,7 +28,7 @@ fn uncompress_state(network: &Eth2NetArchiveAndDirectory<'static>) -> Result<(),
return Ok(());
}
if network.genesis_is_known {
if network.genesis_state_source == GenesisStateSource::IncludedBytes {
// Extract genesis state from genesis.ssz.zip
let archive_path = network.genesis_state_archive();
let archive_file = File::open(&archive_path)
@ -46,7 +48,8 @@ fn uncompress_state(network: &Eth2NetArchiveAndDirectory<'static>) -> Result<(),
io::copy(&mut file, &mut outfile)
.map_err(|e| format!("Error writing file {:?}: {}", genesis_ssz_path, e))?;
} else {
// Create empty genesis.ssz if genesis is unknown
// Create empty genesis.ssz if genesis is unknown or to be downloaded via URL.
// This is a bit of a hack to make `include_bytes!` easier to deal with.
File::create(genesis_ssz_path)
.map_err(|e| format!("Failed to create {}: {}", GENESIS_FILE_NAME, e))?;
}

View File

@ -0,0 +1,8 @@
# chiado-teku-0
- "enr:-Ly4QLYLNqrjvSxD3lpAPBUNlxa6cIbe79JqLZLFcZZjWoCjZcw-85agLUErHiygG2weRSCLnd5V460qTbLbwJQsfZkoh2F0dG5ldHOI__________-EZXRoMpAxNnBDAgAAb___________gmlkgnY0gmlwhKq7mu-Jc2VjcDI1NmsxoQP900YAYa9kdvzlSKGjVo-F3XVzATjOYp3BsjLjSophO4hzeW5jbmV0cw-DdGNwgiMog3VkcIIjKA"
# chiado-teku-1
- "enr:-Ly4QCGeYvTCNOGKi0mKRUd45rLj96b4pH98qG7B9TCUGXGpHZALtaL2-XfjASQyhbCqENccI4PGXVqYTIehNT9KJMQgh2F0dG5ldHOI__________-EZXRoMpAxNnBDAgAAb___________gmlkgnY0gmlwhIuQrVSJc2VjcDI1NmsxoQP9iDchx2PGl3JyJ29B9fhLCvVMN6n23pPAIIeFV-sHOIhzeW5jbmV0cw-DdGNwgiMog3VkcIIjKA"
#GnosisDAO Bootnode: 3.71.132.231
- "enr:-Ly4QAtr21x5Ps7HYhdZkIBRBgcBkvlIfEel1YNjtFWf4cV3au2LgBGICz9PtEs9-p2HUl_eME8m1WImxTxSB3AkCMwBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpAxNnBDAgAAb___________gmlkgnY0gmlwhANHhOeJc2VjcDI1NmsxoQNLp1QPV8-pyMCohOtj6xGtSBM_GtVTqzlbvNsCF4ezkYhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA"
#GnosisDAO Bootnode: 3.69.35.13
- "enr:-Ly4QLgn8Bx6faigkKUGZQvd1HDToV2FAxZIiENK-lczruzQb90qJK-4E65ADly0s4__dQOW7IkLMW7ZAyJy2vtiLy8Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpAxNnBDAgAAb___________gmlkgnY0gmlwhANFIw2Jc2VjcDI1NmsxoQMa-fWEy9UJHfOl_lix3wdY5qust78sHAqZnWwEiyqKgYhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA"

View File

@ -0,0 +1,154 @@
# Extends the mainnet preset
PRESET_BASE: gnosis
# needs to exist because of Prysm. Otherwise it conflicts with mainnet genesis
CONFIG_NAME: chiado
# Genesis
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 6000
# 10 October 2022 10:00:00 GMT+0000
MIN_GENESIS_TIME: 1665396000
GENESIS_DELAY: 300
# Projected time: 2022-11-04T15:00:00.000Z, block: 680928
TERMINAL_TOTAL_DIFFICULTY: 231707791542740786049188744689299064356246512
# Deposit contract
# ---------------------------------------------------------------
# NOTE: Don't use a value too high, or Teku rejects it (4294906129 NOK)
DEPOSIT_CHAIN_ID: 10200
DEPOSIT_NETWORK_ID: 10200
DEPOSIT_CONTRACT_ADDRESS: 0xb97036A26259B7147018913bD58a774cf91acf25
# Misc
# ---------------------------------------------------------------
# 2**6 (= 64)
MAX_COMMITTEES_PER_SLOT: 64
# 2**7 (= 128)
TARGET_COMMITTEE_SIZE: 128
# 2**11 (= 2,048)
MAX_VALIDATORS_PER_COMMITTEE: 2048
# 2**2 (= 4)
MIN_PER_EPOCH_CHURN_LIMIT: 4
# 2**12 (= 4096)
CHURN_LIMIT_QUOTIENT: 4096
# See issue 563
SHUFFLE_ROUND_COUNT: 90
# 4
HYSTERESIS_QUOTIENT: 4
# 1 (minus 0.25)
HYSTERESIS_DOWNWARD_MULTIPLIER: 1
# 5 (plus 1.25)
HYSTERESIS_UPWARD_MULTIPLIER: 5
# Validator
# ---------------------------------------------------------------
# 2**10 (= 1024) ~1.4 hour
ETH1_FOLLOW_DISTANCE: 1024
# 2**4 (= 16)
TARGET_AGGREGATORS_PER_COMMITTEE: 16
# 2**0 (= 1)
RANDOM_SUBNETS_PER_VALIDATOR: 1
# 2**8 (= 256)
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 256
# 6 (estimate from xDai mainnet)
SECONDS_PER_ETH1_BLOCK: 6
# Gwei values
# ---------------------------------------------------------------
# 2**0 * 10**9 (= 1,000,000,000) Gwei
MIN_DEPOSIT_AMOUNT: 1000000000
# 2**5 * 10**9 (= 32,000,000,000) Gwei
MAX_EFFECTIVE_BALANCE: 32000000000
# 2**4 * 10**9 (= 16,000,000,000) Gwei
EJECTION_BALANCE: 16000000000
# 2**0 * 10**9 (= 1,000,000,000) Gwei
EFFECTIVE_BALANCE_INCREMENT: 1000000000
# Initial values
# ---------------------------------------------------------------
# GBC area code
GENESIS_FORK_VERSION: 0x0000006f
BLS_WITHDRAWAL_PREFIX: 0x00
# Time parameters
# ---------------------------------------------------------------
# 5 seconds
SECONDS_PER_SLOT: 5
# 2**0 (= 1) slots 12 seconds
MIN_ATTESTATION_INCLUSION_DELAY: 1
# 2**4 (= 16) slots 1.87 minutes
SLOTS_PER_EPOCH: 16
# 2**0 (= 1) epochs 1.87 minutes
MIN_SEED_LOOKAHEAD: 1
# 2**2 (= 4) epochs 7.47 minutes
MAX_SEED_LOOKAHEAD: 4
# 2**6 (= 64) epochs ~2 hours
EPOCHS_PER_ETH1_VOTING_PERIOD: 64
# 2**13 (= 8,192) slots ~15.9 hours
SLOTS_PER_HISTORICAL_ROOT: 8192
# 2**8 (= 256) epochs ~8 hours
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
# 2**8 (= 256) epochs ~8 hours
SHARD_COMMITTEE_PERIOD: 256
# 2**2 (= 4) epochs 7.47 minutes
MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4
# State vector lengths
# ---------------------------------------------------------------
# 2**16 (= 65,536) epochs ~85 days
EPOCHS_PER_HISTORICAL_VECTOR: 65536
# 2**13 (= 8,192) epochs ~10.6 days
EPOCHS_PER_SLASHINGS_VECTOR: 8192
# 2**24 (= 16,777,216) historical roots, ~15,243 years
HISTORICAL_ROOTS_LIMIT: 16777216
# 2**40 (= 1,099,511,627,776) validator spots
VALIDATOR_REGISTRY_LIMIT: 1099511627776
# Reward and penalty quotients
# ---------------------------------------------------------------
# 25
BASE_REWARD_FACTOR: 25
# 2**9 (= 512)
WHISTLEBLOWER_REWARD_QUOTIENT: 512
# 2**3 (= 8)
PROPOSER_REWARD_QUOTIENT: 8
# 2**26 (= 67,108,864)
INACTIVITY_PENALTY_QUOTIENT: 67108864
# 2**7 (= 128) (lower safety margin at Phase 0 genesis)
MIN_SLASHING_PENALTY_QUOTIENT: 128
# 1 (lower safety margin at Phase 0 genesis)
PROPORTIONAL_SLASHING_MULTIPLIER: 1
# Max operations per block
# ---------------------------------------------------------------
# 2**4 (= 16)
MAX_PROPOSER_SLASHINGS: 16
# 2**1 (= 2)
MAX_ATTESTER_SLASHINGS: 2
# 2**7 (= 128)
MAX_ATTESTATIONS: 128
# 2**4 (= 16)
MAX_DEPOSITS: 16
# 2**4 (= 16)
MAX_VOLUNTARY_EXITS: 16
# Signature domains
# ---------------------------------------------------------------
DOMAIN_BEACON_PROPOSER: 0x00000000
DOMAIN_BEACON_ATTESTER: 0x01000000
DOMAIN_RANDAO: 0x02000000
DOMAIN_DEPOSIT: 0x03000000
DOMAIN_VOLUNTARY_EXIT: 0x04000000
DOMAIN_SELECTION_PROOF: 0x05000000
DOMAIN_AGGREGATE_AND_PROOF: 0x06000000
DOMAIN_SYNC_COMMITTEE: 0x07000000
DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF: 0x08000000
DOMAIN_CONTRIBUTION_AND_PROOF: 0x09000000
# Altair
ALTAIR_FORK_VERSION: 0x0100006f
ALTAIR_FORK_EPOCH: 90 # Mon Oct 10 2022 12:00:00 GMT+0000
# Bellatrix
BELLATRIX_FORK_VERSION: 0x0200006f
BELLATRIX_FORK_EPOCH: 180 # Mon Oct 10 2022 14:00:00 GMT+0000
# Capella
CAPELLA_FORK_VERSION: 0x0300006f
CAPELLA_FORK_EPOCH: 244224 # Wed May 24 2023 13:12:00 GMT+0000
INACTIVITY_SCORE_BIAS: 4
# 2**4 (= 16)
INACTIVITY_SCORE_RECOVERY_RATE: 16

View File

@ -0,0 +1,8 @@
# EF
- enr:-Iq4QJk4WqRkjsX5c2CXtOra6HnxN-BMXnWhmhEQO9Bn9iABTJGdjUOurM7Btj1ouKaFkvTRoju5vz2GPmVON2dffQKGAX53x8JigmlkgnY0gmlwhLKAlv6Jc2VjcDI1NmsxoQK6S-Cii_KmfFdUJL2TANL3ksaKUnNXvTCv1tLwXs0QgIN1ZHCCIyk
- enr:-KG4QF6d6vMSboSujAXTI4vYqArccm0eIlXfcxf2Lx_VE1q6IkQo_2D5LAO3ZSBVUs0w5rrVDmABJZuMzISe_pZundADhGV0aDKQqX6DZjABcAAAAQAAAAAAAIJpZIJ2NIJpcISygIjpiXNlY3AyNTZrMaEDF3aSa7QSCvdqLpANNd8GML4PLEZVg45fKQwMWhDZjd2DdGNwgiMog3VkcIIjKA
- enr:-Ly4QJLXSSAj3ggPBIcodvBU6IyfpU_yW7E9J-5syoJorBuvcYj_Fokcjr303bQoTdWXADf8po0ssh75Mr5wVGzZZsMBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCpfoNmMAFwAAABAAAAAAAAgmlkgnY0gmlwhJK-DYCJc2VjcDI1NmsxoQJrIlXIQDvQ6t9yDySqJYDXgZgLXzTvq8W7OI51jfmxJohzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA
# Teku
- enr:-LK4QMlzEff6d-M0A1pSFG5lJ2c56i_I-ZftdojZbW3ehkGNM4pkQuHQqzVvF1BG9aDjIakjnmO23mCBFFZ2w5zOsugEh2F0dG5ldHOIAAAAAAYAAACEZXRoMpCpfoNmMAFwAAABAAAAAAAAgmlkgnY0gmlwhKyuI_mJc2VjcDI1NmsxoQIH1kQRCZW-4AIVyAeXj5o49m_IqNFKRHp6tSpfXMUrSYN0Y3CCIyiDdWRwgiMo
# Sigma Prime
- enr:-Le4QI88slOwzz66Ksq8Vnz324DPb1BzSiY-WYPvnoJIl-lceW9bmSJnwDzgNbCjp5wsBigg76x4tValvGgQPxxSjrMBhGV0aDKQqX6DZjABcAAAAQAAAAAAAIJpZIJ2NIJpcIQ5gR6Wg2lwNpAgAUHQBwEQAAAAAAAAADR-iXNlY3AyNTZrMaEDPMSNdcL92uNIyCsS177Z6KTXlbZakQqxv3aQcWawNXeDdWRwgiMohHVkcDaCI4I

View File

@ -0,0 +1,117 @@
# Extends the mainnet preset
PRESET_BASE: 'mainnet'
CONFIG_NAME: holesky
# Genesis
# ---------------------------------------------------------------
# `2**14` (= 16,384)
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384
# Sep-15-2023 13:55:00 +UTC
MIN_GENESIS_TIME: 1694786100
GENESIS_FORK_VERSION: 0x00017000
# Genesis delay 5 mins
GENESIS_DELAY: 300
# Forking
# ---------------------------------------------------------------
# Some forks are disabled for now:
# - These may be re-assigned to another fork-version later
# - Temporarily set to max uint64 value: 2**64 - 1
# Altair
ALTAIR_FORK_VERSION: 0x10017000
ALTAIR_FORK_EPOCH: 0
# Merge
BELLATRIX_FORK_VERSION: 0x20017000
BELLATRIX_FORK_EPOCH: 0
TERMINAL_TOTAL_DIFFICULTY: 0
TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000
TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615
# Capella
CAPELLA_FORK_VERSION: 0x30017000
CAPELLA_FORK_EPOCH: 256
# DENEB
DENEB_FORK_VERSION: 0x40017000
DENEB_FORK_EPOCH: 18446744073709551615
# Time parameters
# ---------------------------------------------------------------
# 12 seconds
SECONDS_PER_SLOT: 12
# 14 (estimate from Eth1 mainnet)
SECONDS_PER_ETH1_BLOCK: 14
# 2**8 (= 256) epochs ~27 hours
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
# 2**8 (= 256) epochs ~27 hours
SHARD_COMMITTEE_PERIOD: 256
# 2**11 (= 2,048) Eth1 blocks ~8 hours
ETH1_FOLLOW_DISTANCE: 2048
# Validator cycle
# ---------------------------------------------------------------
# 2**2 (= 4)
INACTIVITY_SCORE_BIAS: 4
# 2**4 (= 16)
INACTIVITY_SCORE_RECOVERY_RATE: 16
# 28,000,000,000 Gwei to ensure quicker ejection
EJECTION_BALANCE: 28000000000
# 2**2 (= 4)
MIN_PER_EPOCH_CHURN_LIMIT: 4
# 2**16 (= 65,536)
CHURN_LIMIT_QUOTIENT: 65536
# Fork choice
# ---------------------------------------------------------------
# 40%
PROPOSER_SCORE_BOOST: 40
# Deposit contract
# ---------------------------------------------------------------
DEPOSIT_CHAIN_ID: 17000
DEPOSIT_NETWORK_ID: 17000
DEPOSIT_CONTRACT_ADDRESS: 0x4242424242424242424242424242424242424242
# Networking
# ---------------------------------------------------------------
# `10 * 2**20` (= 10485760, 10 MiB)
GOSSIP_MAX_SIZE: 10485760
# `2**10` (= 1024)
MAX_REQUEST_BLOCKS: 1024
# `2**8` (= 256)
EPOCHS_PER_SUBNET_SUBSCRIPTION: 256
# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months)
MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024
# `10 * 2**20` (=10485760, 10 MiB)
MAX_CHUNK_SIZE: 10485760
# 5s
TTFB_TIMEOUT: 5
# 10s
RESP_TIMEOUT: 10
ATTESTATION_PROPAGATION_SLOT_RANGE: 32
# 500ms
MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500
MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000
MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000
# 2 subnets per node
SUBNETS_PER_NODE: 2
# 2**6 (= 64)
ATTESTATION_SUBNET_COUNT: 64
ATTESTATION_SUBNET_EXTRA_BITS: 0
# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS
ATTESTATION_SUBNET_PREFIX_BITS: 6
# Deneb
# `2**7` (=128)
MAX_REQUEST_BLOCKS_DENEB: 128
# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK
MAX_REQUEST_BLOB_SIDECARS: 768
# `2**12` (= 4096 epochs, ~18 days)
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096
# `6`
BLOB_SIDECAR_SUBNET_COUNT: 6
# `uint64(6)`
MAX_BLOBS_PER_BLOCK: 6

View File

@ -14,11 +14,20 @@
use discv5::enr::{CombinedKey, Enr};
use eth2_config::{instantiate_hardcoded_nets, HardcodedNet};
use kzg::{KzgPreset, KzgPresetId, TrustedSetup};
use pretty_reqwest_error::PrettyReqwestError;
use reqwest::blocking::Client;
use sensitive_url::SensitiveUrl;
use sha2::{Digest, Sha256};
use slog::{info, warn, Logger};
use std::fs::{create_dir_all, File};
use std::io::{Read, Write};
use std::path::PathBuf;
use std::str::FromStr;
use types::{BeaconState, ChainSpec, Config, Epoch, EthSpec, EthSpecId};
use std::time::Duration;
use types::{BeaconState, ChainSpec, Config, Epoch, EthSpec, EthSpecId, Hash256};
use url::Url;
pub use eth2_config::GenesisStateSource;
pub const DEPLOY_BLOCK_FILE: &str = "deploy_block.txt";
pub const BOOT_ENR_FILE: &str = "boot_enr.yaml";
@ -70,6 +79,35 @@ fn get_trusted_setup_from_config(config: &Config) -> Result<Option<TrustedSetup>
.transpose()
}
/// A simple slice-or-vec enum to avoid cloning the beacon state bytes in the
/// binary whilst also supporting loading them from a file at runtime.
#[derive(Clone, PartialEq, Debug)]
pub enum GenesisStateBytes {
Slice(&'static [u8]),
Vec(Vec<u8>),
}
impl AsRef<[u8]> for GenesisStateBytes {
fn as_ref(&self) -> &[u8] {
match self {
GenesisStateBytes::Slice(slice) => slice,
GenesisStateBytes::Vec(vec) => vec.as_ref(),
}
}
}
impl From<&'static [u8]> for GenesisStateBytes {
fn from(slice: &'static [u8]) -> Self {
GenesisStateBytes::Slice(slice)
}
}
impl From<Vec<u8>> for GenesisStateBytes {
fn from(vec: Vec<u8>) -> Self {
GenesisStateBytes::Vec(vec)
}
}
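As an illustrative sketch (not from this diff) of why the enum exists: baked-in `&'static [u8]` bytes and runtime-loaded `Vec<u8>` bytes share the same `AsRef<[u8]>` view, so the static bytes are never copied into a `Vec`:

```rust
// Treat either variant uniformly as a byte slice.
fn state_len(bytes: &GenesisStateBytes) -> usize {
    bytes.as_ref().len()
}

fn demo() {
    let baked_in: GenesisStateBytes = (&b"ssz"[..]).into(); // From<&'static [u8]>
    let downloaded: GenesisStateBytes = vec![0u8; 4].into(); // From<Vec<u8>>
    assert_eq!(state_len(&baked_in), 3);
    assert_eq!(state_len(&downloaded), 4);
}
```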
/// Specifies an Eth2 network.
///
/// See the crate-level documentation for more details.
@ -79,7 +117,8 @@ pub struct Eth2NetworkConfig {
/// value to be the block number where the first deposit occurs.
pub deposit_contract_deploy_block: u64,
pub boot_enr: Option<Vec<Enr<CombinedKey>>>,
pub genesis_state_bytes: Option<Vec<u8>>,
pub genesis_state_source: GenesisStateSource,
pub genesis_state_bytes: Option<GenesisStateBytes>,
pub config: Config,
pub kzg_trusted_setup: Option<TrustedSetup>,
}
@ -107,8 +146,10 @@ impl Eth2NetworkConfig {
serde_yaml::from_reader(net.boot_enr)
.map_err(|e| format!("Unable to parse boot enr: {:?}", e))?,
),
genesis_state_bytes: Some(net.genesis_state_bytes.to_vec())
.filter(|bytes| !bytes.is_empty()),
genesis_state_source: net.genesis_state_source,
genesis_state_bytes: Some(net.genesis_state_bytes)
.filter(|bytes| !bytes.is_empty())
.map(Into::into),
config,
kzg_trusted_setup,
})
@ -123,8 +164,37 @@ impl Eth2NetworkConfig {
}
/// Returns `true` if this configuration contains a `BeaconState`.
pub fn beacon_state_is_known(&self) -> bool {
self.genesis_state_bytes.is_some()
pub fn genesis_state_is_known(&self) -> bool {
self.genesis_state_source != GenesisStateSource::Unknown
}
/// The `genesis_validators_root` of the genesis state. May download the
/// genesis state if the value is not already available.
pub fn genesis_validators_root<E: EthSpec>(
&self,
genesis_state_url: Option<&str>,
timeout: Duration,
log: &Logger,
) -> Result<Option<Hash256>, String> {
if let GenesisStateSource::Url {
genesis_validators_root,
..
} = self.genesis_state_source
{
Hash256::from_str(genesis_validators_root)
.map(Option::Some)
.map_err(|e| {
format!(
"Unable to parse genesis state genesis_validators_root: {:?}",
e
)
})
} else {
self.genesis_state::<E>(genesis_state_url, timeout, log)?
.map(|state| state.genesis_validators_root())
.map(Result::Ok)
.transpose()
}
}
/// Construct a consolidated `ChainSpec` from the YAML config.
@ -138,15 +208,65 @@ impl Eth2NetworkConfig {
}
/// Attempts to deserialize `self.beacon_state`, returning an error if it's missing or invalid.
pub fn beacon_state<E: EthSpec>(&self) -> Result<BeaconState<E>, String> {
///
/// If the genesis state is configured to be downloaded from a URL, then the
/// `genesis_state_url` will override the built-in list of download URLs.
pub fn genesis_state<E: EthSpec>(
&self,
genesis_state_url: Option<&str>,
timeout: Duration,
log: &Logger,
) -> Result<Option<BeaconState<E>>, String> {
let spec = self.chain_spec::<E>()?;
let genesis_state_bytes = self
.genesis_state_bytes
.as_ref()
.ok_or("Genesis state is unknown")?;
match &self.genesis_state_source {
GenesisStateSource::Unknown => Ok(None),
GenesisStateSource::IncludedBytes => {
let state = self
.genesis_state_bytes
.as_ref()
.map(|bytes| {
BeaconState::from_ssz_bytes(bytes.as_ref(), &spec).map_err(|e| {
format!("Built-in genesis state SSZ bytes are invalid: {:?}", e)
})
})
.ok_or("Genesis state bytes missing from Eth2NetworkConfig")??;
Ok(Some(state))
}
GenesisStateSource::Url {
urls: built_in_urls,
checksum,
genesis_validators_root,
} => {
let checksum = Hash256::from_str(checksum).map_err(|e| {
format!("Unable to parse genesis state bytes checksum: {:?}", e)
})?;
let bytes = if let Some(specified_url) = genesis_state_url {
download_genesis_state(&[specified_url], timeout, checksum, log)
} else {
download_genesis_state(built_in_urls, timeout, checksum, log)
}?;
let state = BeaconState::from_ssz_bytes(bytes.as_ref(), &spec).map_err(|e| {
format!("Downloaded genesis state SSZ bytes are invalid: {:?}", e)
})?;
BeaconState::from_ssz_bytes(genesis_state_bytes, &spec)
.map_err(|e| format!("Genesis state SSZ bytes are invalid: {:?}", e))
let genesis_validators_root =
Hash256::from_str(genesis_validators_root).map_err(|e| {
format!(
"Unable to parse genesis state genesis_validators_root: {:?}",
e
)
})?;
if state.genesis_validators_root() != genesis_validators_root {
return Err(format!(
"Downloaded genesis validators root {:?} does not match expected {:?}",
state.genesis_validators_root(),
genesis_validators_root
));
}
Ok(Some(state))
}
}
}
/// Write the files to the directory.
@ -204,7 +324,7 @@ impl Eth2NetworkConfig {
File::create(&file)
.map_err(|e| format!("Unable to create {:?}: {:?}", file, e))
.and_then(|mut file| {
file.write_all(genesis_state_bytes)
file.write_all(genesis_state_bytes.as_ref())
.map_err(|e| format!("Unable to write {:?}: {:?}", file, e))
})?;
}
@ -240,7 +360,7 @@ impl Eth2NetworkConfig {
// The genesis state is a special case because it uses SSZ, not YAML.
let genesis_file_path = base_dir.join(GENESIS_STATE_FILE);
let genesis_state_bytes = if genesis_file_path.exists() {
let (genesis_state_bytes, genesis_state_source) = if genesis_file_path.exists() {
let mut bytes = vec![];
File::open(&genesis_file_path)
.map_err(|e| format!("Unable to open {:?}: {:?}", genesis_file_path, e))
@ -249,9 +369,15 @@ impl Eth2NetworkConfig {
.map_err(|e| format!("Unable to read {:?}: {:?}", file, e))
})?;
Some(bytes).filter(|bytes| !bytes.is_empty())
let state = Some(bytes).filter(|bytes| !bytes.is_empty());
let genesis_state_source = if state.is_some() {
GenesisStateSource::IncludedBytes
} else {
GenesisStateSource::Unknown
};
(state, genesis_state_source)
} else {
None
(None, GenesisStateSource::Unknown)
};
let kzg_trusted_setup = get_trusted_setup_from_config(&config)?;
@ -259,13 +385,92 @@ impl Eth2NetworkConfig {
Ok(Self {
deposit_contract_deploy_block,
boot_enr,
genesis_state_bytes,
genesis_state_source,
genesis_state_bytes: genesis_state_bytes.map(Into::into),
config,
kzg_trusted_setup,
})
}
}
/// Try to download a genesis state from each of the `urls` in the order they
/// are defined. Return `Ok` if any url returns a response that matches the
/// given `checksum`.
fn download_genesis_state(
urls: &[&str],
timeout: Duration,
checksum: Hash256,
log: &Logger,
) -> Result<Vec<u8>, String> {
if urls.is_empty() {
return Err(
"The genesis state is not present in the binary and there are no known download URLs. \
Please use --checkpoint-sync-url or --genesis-state-url."
.to_string(),
);
}
let mut errors = vec![];
for url in urls {
// URLs are always expected to be the base URL of a server that supports
// the beacon-API.
let url = parse_state_download_url(url)?;
let redacted_url = SensitiveUrl::new(url.clone())
.map(|url| url.to_string())
.unwrap_or_else(|_| "<REDACTED>".to_string());
info!(
log,
"Downloading genesis state";
"server" => &redacted_url,
"timeout" => ?timeout,
"info" => "this may take some time on testnets with large validator counts"
);
let client = Client::new();
let response = client
.get(url)
.header("Accept", "application/octet-stream")
.timeout(timeout)
.send()
.and_then(|r| r.error_for_status().and_then(|r| r.bytes()));
match response {
Ok(bytes) => {
// Check the server response against our local checksum.
if Sha256::digest(bytes.as_ref())[..] == checksum[..] {
return Ok(bytes.into());
} else {
warn!(
log,
"Genesis state download failed";
"server" => &redacted_url,
"timeout" => ?timeout,
);
errors.push(format!(
"Response from {} did not match local checksum",
redacted_url
))
}
}
Err(e) => errors.push(PrettyReqwestError::from(e).to_string()),
}
}
Err(format!(
"Unable to download a genesis state from {} source(s): {}",
errors.len(),
errors.join(",")
))
}
/// Parses the `url` and joins the necessary state download path.
fn parse_state_download_url(url: &str) -> Result<Url, String> {
Url::parse(url)
.map_err(|e| format!("Invalid genesis state URL: {:?}", e))?
.join("eth/v2/debug/beacon/states/genesis")
.map_err(|e| format!("Failed to append genesis state path to URL: {:?}", e))
}
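To make the join behaviour concrete (an illustrative check, not part of this diff): because the built-in source URLs end in a trailing slash, `Url::join` appends the beacon-API path underneath them rather than replacing the final path segment:

```rust
fn demo() -> Result<(), String> {
    // Hypothetical base URL in the style of the Holesky source above.
    let url = parse_state_download_url("https://example.com/holesky/")?;
    assert_eq!(
        url.as_str(),
        "https://example.com/holesky/eth/v2/debug/beacon/states/genesis"
    );
    Ok(())
}
```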
#[cfg(test)]
mod tests {
use super::*;
@ -305,7 +510,9 @@ mod tests {
#[test]
fn mainnet_genesis_state() {
let config = Eth2NetworkConfig::from_hardcoded_net(&MAINNET).unwrap();
config.beacon_state::<E>().expect("beacon state can decode");
config
.genesis_state::<E>(None, Duration::from_secs(1), &logging::test_logger())
.expect("beacon state can decode");
}
#[test]
@ -319,10 +526,10 @@ mod tests {
fn hard_coded_nets_work() {
for net in HARDCODED_NETS {
let config = Eth2NetworkConfig::from_hardcoded_net(net)
.unwrap_or_else(|_| panic!("{:?}", net.name));
.unwrap_or_else(|e| panic!("{:?}: {:?}", net.name, e));
// Ensure we can parse the YAML config to a chain spec.
if net.name == types::GNOSIS {
if config.config.preset_base == types::GNOSIS {
config.chain_spec::<GnosisEthSpec>().unwrap();
} else {
config.chain_spec::<MainnetEthSpec>().unwrap();
@ -330,10 +537,25 @@ mod tests {
assert_eq!(
config.genesis_state_bytes.is_some(),
net.genesis_is_known,
net.genesis_state_source == GenesisStateSource::IncludedBytes,
"{:?}",
net.name
);
if let GenesisStateSource::Url {
urls,
checksum,
genesis_validators_root,
} = net.genesis_state_source
{
Hash256::from_str(checksum).expect("the checksum must be a valid 32-byte value");
Hash256::from_str(genesis_validators_root)
.expect("the GVR must be a valid 32-byte value");
for url in urls {
parse_state_download_url(url).expect("url must be valid");
}
}
assert_eq!(config.config.config_name, Some(net.config_dir.to_string()));
}
}
@ -369,10 +591,20 @@ mod tests {
let base_dir = temp_dir.path().join("my_testnet");
let deposit_contract_deploy_block = 42;
let genesis_state_source = if genesis_state.is_some() {
GenesisStateSource::IncludedBytes
} else {
GenesisStateSource::Unknown
};
let testnet = Eth2NetworkConfig {
deposit_contract_deploy_block,
boot_enr,
genesis_state_bytes: genesis_state.as_ref().map(Encode::as_ssz_bytes),
genesis_state_source,
genesis_state_bytes: genesis_state
.as_ref()
.map(Encode::as_ssz_bytes)
.map(Into::into),
config,
kzg_trusted_setup: None,
};

View File

@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!(
// NOTE: using --match instead of --exclude for compatibility with old Git
"--match=thiswillnevermatchlol"
],
prefix = "Lighthouse/v4.3.0-",
fallback = "Lighthouse/v4.3.0"
prefix = "Lighthouse/v4.4.1-",
fallback = "Lighthouse/v4.4.1"
);
/// Returns `VERSION`, but with platform information appended to the end.

View File

@ -17,6 +17,6 @@ sloggers = { version = "2.1.1", features = ["json"] }
slog-async = "2.7.0"
take_mut = "0.2.2"
parking_lot = "0.12.1"
serde = "1.0.153"
serde_json = "1.0.94"
chrono = "0.4.23"
chrono = { version = "0.4", default-features = false, features = ["clock", "std"] }

View File

@ -55,6 +55,12 @@ impl fmt::Debug for PrettyReqwestError {
}
}
impl fmt::Display for PrettyReqwestError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
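The new `Display` impl reuses the `Debug` formatting, which is what lets callers such as `download_genesis_state` above call `.to_string()` on the error. The pattern in miniature, with a throwaway type:

```rust
use std::fmt;

struct Pretty(&'static str);

impl fmt::Debug for Pretty {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Pretty({})", self.0)
    }
}

// Display defers to Debug, which also unlocks `.to_string()` via the
// blanket `ToString` impl.
impl fmt::Display for Pretty {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:?}", self)
    }
}

fn main() {
    assert_eq!(Pretty("oops").to_string(), "Pretty(oops)");
}
```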
impl From<reqwest::Error> for PrettyReqwestError {
fn from(inner: reqwest::Error) -> Self {
Self(inner)

View File

@ -10,10 +10,14 @@ pub fn set_builder_origins(
default_origin: (IpAddr, u16),
) -> Result<Builder, String> {
if let Some(allow_origin) = allow_origin {
let origins = allow_origin
.split(',')
.map(|s| verify_cors_origin_str(s).map(|_| s))
.collect::<Result<Vec<_>, _>>()?;
let mut origins = vec![];
for origin in allow_origin.split(',') {
verify_cors_origin_str(origin)?;
if origin == "*" {
return Ok(builder.allow_any_origin());
}
origins.push(origin)
}
Ok(builder.allow_origins(origins))
} else {
let origin = match default_origin.0 {

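The rewrite above makes a literal `*` short-circuit to `allow_any_origin()` instead of being passed through `allow_origins`, which presumably rejects a bare wildcard as an origin. The classification logic as a standalone sketch; `classify_origins` is a hypothetical helper and per-origin validation is elided:

```rust
/// Split a comma-separated origin list; a literal "*" anywhere means
/// "allow any origin" (returned as `None`).
fn classify_origins(allow_origin: &str) -> Option<Vec<&str>> {
    let mut origins = vec![];
    for origin in allow_origin.split(',') {
        if origin == "*" {
            return None; // the wildcard wins regardless of other entries
        }
        origins.push(origin);
    }
    Some(origins)
}

fn main() {
    assert_eq!(classify_origins("http://a,*"), None);
    assert_eq!(
        classify_origins("http://a,http://b"),
        Some(vec!["http://a", "http://b"])
    );
}
```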
View File

@ -87,7 +87,7 @@ pub fn scrape_process_health_metrics() {
// This will silently fail if we are unable to observe the health. This is desired behaviour
// since we don't support `Health` for all platforms.
if let Ok(health) = ProcessHealth::observe() {
set_gauge(&PROCESS_NUM_THREADS, health.pid_num_threads as i64);
set_gauge(&PROCESS_NUM_THREADS, health.pid_num_threads);
set_gauge(&PROCESS_RES_MEM, health.pid_mem_resident_set_size as i64);
set_gauge(&PROCESS_VIRT_MEM, health.pid_mem_virtual_memory_size as i64);
set_gauge(&PROCESS_SECONDS, health.pid_process_seconds_total as i64);

View File

@ -1,6 +1,5 @@
use crate::test_utils::TestRandom;
use crate::{Blob, ChainSpec, Domain, EthSpec, Fork, Hash256, SignedBlobSidecar, SignedRoot, Slot};
use bls::SecretKey;
use crate::{Blob, EthSpec, Hash256, SignedRoot, Slot};
use derivative::Derivative;
use kzg::{Kzg, KzgCommitment, KzgPreset, KzgProof};
use rand::Rng;
@ -10,7 +9,6 @@ use ssz_derive::{Decode, Encode};
use ssz_types::{FixedVector, VariableList};
use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
use std::sync::Arc;
use test_random_derive::TestRandom;
use tree_hash::TreeHash;
@ -171,31 +169,6 @@ impl<T: EthSpec> BlobSidecar<T> {
// Fixed part
Self::empty().as_ssz_bytes().len()
}
// This is mostly unused, except in testing.
pub fn sign(
self: Arc<Self>,
secret_key: &SecretKey,
fork: &Fork,
genesis_validators_root: Hash256,
spec: &ChainSpec,
) -> SignedBlobSidecar<T> {
let signing_epoch = self.slot.epoch(T::slots_per_epoch());
let domain = spec.get_domain(
signing_epoch,
Domain::BlobSidecar,
fork,
genesis_validators_root,
);
let message = self.signing_root(domain);
let signature = secret_key.sign(message);
SignedBlobSidecar {
message: self,
signature,
_phantom: PhantomData,
}
}
}
#[derive(

View File

@ -2,14 +2,17 @@ use crate::beacon_block_body::KzgCommitments;
use crate::test_utils::TestRandom;
use crate::{
AbstractExecPayload, BeaconBlock, BlindedBlobSidecar, BlindedBlobSidecarList, BlobRootsList,
BlobSidecar, BlobSidecarList, BlobsList, EthSpec, SidecarList, SignedRoot, Slot,
BlobSidecar, BlobSidecarList, BlobsList, ChainSpec, Domain, EthSpec, Fork, Hash256,
SidecarList, SignedRoot, SignedSidecar, Slot,
};
use bls::SecretKey;
use kzg::KzgProof;
use serde::de::DeserializeOwned;
use ssz::{Decode, Encode};
use ssz_types::VariableList;
use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
use std::sync::Arc;
use tree_hash::TreeHash;
@ -29,13 +32,40 @@ pub trait Sidecar<E: EthSpec>:
+ for<'a> arbitrary::Arbitrary<'a>
{
type BlobItems: BlobItems<E>;
fn slot(&self) -> Slot;
fn build_sidecar<Payload: AbstractExecPayload<E>>(
blob_items: Self::BlobItems,
block: &BeaconBlock<E, Payload>,
expected_kzg_commitments: &KzgCommitments<E>,
kzg_proofs: Vec<KzgProof>,
) -> Result<SidecarList<E, Self>, String>;
// This is mostly unused, except in testing.
fn sign(
self: Arc<Self>,
secret_key: &SecretKey,
fork: &Fork,
genesis_validators_root: Hash256,
spec: &ChainSpec,
) -> SignedSidecar<E, Self> {
let signing_epoch = self.slot().epoch(E::slots_per_epoch());
let domain = spec.get_domain(
signing_epoch,
Domain::BlobSidecar,
fork,
genesis_validators_root,
);
let message = self.signing_root(domain);
let signature = secret_key.sign(message);
SignedSidecar {
message: self,
signature,
_phantom: PhantomData,
}
}
}
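With `sign` now a default method on the trait, every sidecar variant shares the same test-only signing path. A hedged usage sketch: `BlobSidecar::empty()` appears elsewhere in this diff, while `Keypair::random()`, the `Fork` literal and the `types` re-export paths are assumptions made for illustration:

```rust
use std::sync::Arc;
use types::{BlobSidecar, ChainSpec, Epoch, Fork, Hash256, Keypair, MainnetEthSpec, Sidecar};

fn main() {
    let spec = ChainSpec::mainnet();
    let keypair = Keypair::random(); // assumed test-only key generation
    let fork = Fork {
        previous_version: [0; 4],
        current_version: [0; 4],
        epoch: Epoch::new(0),
    };
    // An empty sidecar stands in for a real one here.
    let sidecar = Arc::new(BlobSidecar::<MainnetEthSpec>::empty());
    let _signed = sidecar.sign(&keypair.sk, &fork, Hash256::zero(), &spec);
}
```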
pub trait BlobItems<T: EthSpec>: Sync + Send + Sized {

View File

@ -111,3 +111,4 @@ pub type SignedBlindedBlobSidecar<T> = SignedSidecar<T, BlindedBlobSidecar>;
pub type SignedSidecarList<T, Sidecar> =
VariableList<SignedSidecar<T, Sidecar>, <T as EthSpec>::MaxBlobsPerBlock>;
pub type SignedBlobSidecarList<T> = SignedSidecarList<T, BlobSidecar<T>>;
pub type SignedBlindedBlobSidecarList<T> = SignedSidecarList<T, BlindedBlobSidecar>;

View File

@ -1,7 +1,7 @@
[package]
name = "lcli"
description = "Lighthouse CLI (modeled after zcli)"
version = "4.3.0"
version = "4.4.1"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2021"

View File

@ -49,7 +49,7 @@ pub fn run<T: EthSpec>(
.wait_for_genesis_state::<T>(ETH1_GENESIS_UPDATE_INTERVAL, spec)
.await
.map(move |genesis_state| {
eth2_network_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes());
eth2_network_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes().into());
eth2_network_config.force_write_to_file(testnet_dir)
})
.map_err(|e| format!("Failed to find genesis: {}", e))?;

View File

@ -42,7 +42,7 @@ pub fn run<T: EthSpec>(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(),
&spec,
)?;
eth2_network_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes());
eth2_network_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes().into());
eth2_network_config.force_write_to_file(testnet_dir)?;
Ok(())

View File

@ -1,7 +1,7 @@
use account_utils::eth2_keystore::keypair_from_secret;
use clap::ArgMatches;
use clap_utils::{parse_optional, parse_required, parse_ssz_optional};
use eth2_network_config::{get_trusted_setup, Eth2NetworkConfig};
use eth2_network_config::{get_trusted_setup, Eth2NetworkConfig, GenesisStateSource};
use eth2_wallet::bip39::Seed;
use eth2_wallet::bip39::{Language, Mnemonic};
use eth2_wallet::{recover_validator_secret_from_mnemonic, KeyType};
@ -212,7 +212,8 @@ pub fn run<T: EthSpec>(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul
let testnet = Eth2NetworkConfig {
deposit_contract_deploy_block,
boot_enr: Some(vec![]),
genesis_state_bytes,
genesis_state_bytes: genesis_state_bytes.map(Into::into),
genesis_state_source: GenesisStateSource::IncludedBytes,
config: Config::from_chain_spec::<T>(&spec),
kzg_trusted_setup,
};

View File

@ -1,6 +1,6 @@
[package]
name = "lighthouse"
version = "4.3.0"
version = "4.4.1"
authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2021"
autotests = false

View File

@ -324,6 +324,30 @@ fn main() {
.takes_value(true)
.global(true)
)
.arg(
Arg::with_name("genesis-state-url")
.long("genesis-state-url")
.value_name("URL")
.help(
    "A URL of a beacon-API compatible server from which to download the genesis state. \
    Checkpoint sync server URLs can generally be used with this flag. \
    If not supplied, a default URL or the value of --checkpoint-sync-url may be used. \
    If the genesis state is already included in this binary then this value will be ignored.",
)
.takes_value(true)
.global(true),
)
.arg(
Arg::with_name("genesis-state-url-timeout")
.long("genesis-state-url-timeout")
.value_name("SECONDS")
.help(
"The timeout in seconds for the request to --genesis-state-url.",
)
.takes_value(true)
.default_value("180")
.global(true),
)
.subcommand(beacon_node::cli_app())
.subcommand(boot_node::cli_app())
.subcommand(validator_client::cli_app())

View File

@ -11,6 +11,7 @@ use lighthouse_network::PeerId;
use std::fs::File;
use std::io::{Read, Write};
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use std::path::Path;
use std::path::PathBuf;
use std::process::Command;
use std::str::FromStr;
@ -1492,15 +1493,20 @@ fn disable_inbound_rate_limiter_flag() {
#[test]
fn http_allow_origin_flag() {
CommandLineTest::new()
.flag("http-allow-origin", Some("127.0.0.99"))
.flag("http", None)
.flag("http-allow-origin", Some("http://127.0.0.99"))
.run_with_zero_port()
.with_config(|config| {
assert_eq!(config.http_api.allow_origin, Some("127.0.0.99".to_string()));
assert_eq!(
config.http_api.allow_origin,
Some("http://127.0.0.99".to_string())
);
});
}
#[test]
fn http_allow_origin_all_flag() {
CommandLineTest::new()
.flag("http", None)
.flag("http-allow-origin", Some("*"))
.run_with_zero_port()
.with_config(|config| assert_eq!(config.http_api.allow_origin, Some("*".to_string())));
@ -1508,6 +1514,7 @@ fn http_allow_origin_all_flag() {
#[test]
fn http_allow_sync_stalled_flag() {
CommandLineTest::new()
.flag("http", None)
.flag("http-allow-sync-stalled", None)
.run_with_zero_port()
.with_config(|config| assert_eq!(config.http_api.allow_sync_stalled, true));
@ -1515,32 +1522,29 @@ fn http_allow_sync_stalled_flag() {
#[test]
fn http_enable_beacon_processor() {
CommandLineTest::new()
.flag("http", None)
.run_with_zero_port()
.with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, true));
CommandLineTest::new()
.flag("http", None)
.flag("http-enable-beacon-processor", Some("true"))
.run_with_zero_port()
.with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, true));
CommandLineTest::new()
.flag("http", None)
.flag("http-enable-beacon-processor", Some("false"))
.run_with_zero_port()
.with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, false));
}
#[test]
fn http_tls_flags() {
let dir = TempDir::new().expect("Unable to create temporary directory");
CommandLineTest::new()
.flag("http", None)
.flag("http-enable-tls", None)
.flag(
"http-tls-cert",
dir.path().join("certificate.crt").as_os_str().to_str(),
)
.flag(
"http-tls-key",
dir.path().join("private.key").as_os_str().to_str(),
)
.flag("http-tls-cert", Some("tests/tls/cert.pem"))
.flag("http-tls-key", Some("tests/tls/key.rsa"))
.run_with_zero_port()
.with_config(|config| {
let tls_config = config
@ -1548,14 +1552,15 @@ fn http_tls_flags() {
.tls_config
.as_ref()
.expect("tls_config was empty.");
assert_eq!(tls_config.cert, dir.path().join("certificate.crt"));
assert_eq!(tls_config.key, dir.path().join("private.key"));
assert_eq!(tls_config.cert, Path::new("tests/tls/cert.pem"));
assert_eq!(tls_config.key, Path::new("tests/tls/key.rsa"));
});
}
#[test]
fn http_spec_fork_default() {
CommandLineTest::new()
.flag("http", None)
.run_with_zero_port()
.with_config(|config| assert_eq!(config.http_api.spec_fork_name, None));
}
@ -1563,6 +1568,7 @@ fn http_spec_fork_default() {
#[test]
fn http_spec_fork_override() {
CommandLineTest::new()
.flag("http", None)
.flag("http-spec-fork", Some("altair"))
.run_with_zero_port()
.with_config(|config| assert_eq!(config.http_api.spec_fork_name, Some(ForkName::Altair)));
@ -2452,3 +2458,47 @@ fn http_sse_capacity_multiplier_override() {
.run_with_zero_port()
.with_config(|config| assert_eq!(config.http_api.sse_capacity_multiplier, 10));
}
#[test]
fn http_duplicate_block_status_default() {
CommandLineTest::new()
.run_with_zero_port()
.with_config(|config| {
assert_eq!(config.http_api.duplicate_block_status_code.as_u16(), 202)
});
}
#[test]
fn http_duplicate_block_status_override() {
CommandLineTest::new()
.flag("http-duplicate-block-status", Some("301"))
.run_with_zero_port()
.with_config(|config| {
assert_eq!(config.http_api.duplicate_block_status_code.as_u16(), 301)
});
}
#[test]
fn genesis_state_url_default() {
CommandLineTest::new()
.run_with_zero_port()
.with_config(|config| {
assert_eq!(config.genesis_state_url, None);
assert_eq!(config.genesis_state_url_timeout, Duration::from_secs(180));
});
}
#[test]
fn genesis_state_url_value() {
CommandLineTest::new()
.flag("genesis-state-url", Some("http://genesis.com"))
.flag("genesis-state-url-timeout", Some("42"))
.run_with_zero_port()
.with_config(|config| {
assert_eq!(
config.genesis_state_url.as_deref(),
Some("http://genesis.com")
);
assert_eq!(config.genesis_state_url_timeout, Duration::from_secs(42));
});
}

View File

@ -0,0 +1,24 @@
-----BEGIN CERTIFICATE-----
MIIEADCCAmigAwIBAgICAcgwDQYJKoZIhvcNAQELBQAwLDEqMCgGA1UEAwwhcG9u
eXRvd24gUlNBIGxldmVsIDIgaW50ZXJtZWRpYXRlMB4XDTE2MDgxMzE2MDcwNFoX
DTIyMDIwMzE2MDcwNFowGTEXMBUGA1UEAwwOdGVzdHNlcnZlci5jb20wggEiMA0G
CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCpVhh1/FNP2qvWenbZSghari/UThwe
dynfnHG7gc3JmygkEdErWBO/CHzHgsx7biVE5b8sZYNEDKFojyoPHGWK2bQM/FTy
niJCgNCLdn6hUqqxLAml3cxGW77hAWu94THDGB1qFe+eFiAUnDmob8gNZtAzT6Ky
b/JGJdrEU0wj+Rd7wUb4kpLInNH/Jc+oz2ii2AjNbGOZXnRz7h7Kv3sO9vABByYe
LcCj3qnhejHMqVhbAT1MD6zQ2+YKBjE52MsQKU/xhUpu9KkUyLh0cxkh3zrFiKh4
Vuvtc+n7aeOv2jJmOl1dr0XLlSHBlmoKqH6dCTSbddQLmlK7dms8vE01AgMBAAGj
gb4wgbswDAYDVR0TAQH/BAIwADALBgNVHQ8EBAMCBsAwHQYDVR0OBBYEFMeUzGYV
bXwJNQVbY1+A8YXYZY8pMEIGA1UdIwQ7MDmAFJvEsUi7+D8vp8xcWvnEdVBGkpoW
oR6kHDAaMRgwFgYDVQQDDA9wb255dG93biBSU0EgQ0GCAXswOwYDVR0RBDQwMoIO
dGVzdHNlcnZlci5jb22CFXNlY29uZC50ZXN0c2VydmVyLmNvbYIJbG9jYWxob3N0
MA0GCSqGSIb3DQEBCwUAA4IBgQBsk5ivAaRAcNgjc7LEiWXFkMg703AqDDNx7kB1
RDgLalLvrjOfOp2jsDfST7N1tKLBSQ9bMw9X4Jve+j7XXRUthcwuoYTeeo+Cy0/T
1Q78ctoX74E2nB958zwmtRykGrgE/6JAJDwGcgpY9kBPycGxTlCN926uGxHsDwVs
98cL6ZXptMLTR6T2XP36dAJZuOICSqmCSbFR8knc/gjUO36rXTxhwci8iDbmEVaf
BHpgBXGU5+SQ+QM++v6bHGf4LNQC5NZ4e4xvGax8ioYu/BRsB/T3Lx+RlItz4zdU
XuxCNcm3nhQV2ZHquRdbSdoyIxV5kJXel4wCmOhWIq7A2OBKdu5fQzIAzzLi65EN
RPAKsKB4h7hGgvciZQ7dsMrlGw0DLdJ6UrFyiR5Io7dXYT/+JP91lP5xsl6Lhg9O
FgALt7GSYRm2cZdgi9pO9rRr83Br1VjQT1vHz6yoZMXSqc4A2zcN2a2ZVq//rHvc
FZygs8miAhWPzqnpmgTj1cPiU1M=
-----END CERTIFICATE-----

View File

@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAqVYYdfxTT9qr1np22UoIWq4v1E4cHncp35xxu4HNyZsoJBHR
K1gTvwh8x4LMe24lROW/LGWDRAyhaI8qDxxlitm0DPxU8p4iQoDQi3Z+oVKqsSwJ
pd3MRlu+4QFrveExwxgdahXvnhYgFJw5qG/IDWbQM0+ism/yRiXaxFNMI/kXe8FG
+JKSyJzR/yXPqM9ootgIzWxjmV50c+4eyr97DvbwAQcmHi3Ao96p4XoxzKlYWwE9
TA+s0NvmCgYxOdjLEClP8YVKbvSpFMi4dHMZId86xYioeFbr7XPp+2njr9oyZjpd
Xa9Fy5UhwZZqCqh+nQk0m3XUC5pSu3ZrPLxNNQIDAQABAoIBAFKtZJgGsK6md4vq
kyiYSufrcBLaaEQ/rkQtYCJKyC0NAlZKFLRy9oEpJbNLm4cQSkYPXn3Qunx5Jj2k
2MYz+SgIDy7f7KHgr52Ew020dzNQ52JFvBgt6NTZaqL1TKOS1fcJSSNIvouTBerK
NCSXHzfb4P+MfEVe/w1c4ilE+kH9SzdEo2jK/sRbzHIY8TX0JbmQ4SCLLayr22YG
usIxtIYcWt3MMP/G2luRnYzzBCje5MXdpAhlHLi4TB6x4h5PmBKYc57uOVNngKLd
YyrQKcszW4Nx5v0a4HG3A5EtUXNCco1+5asXOg2lYphQYVh2R+1wgu5WiDjDVu+6
EYgjFSkCgYEA0NBk6FDoxE/4L/4iJ4zIhu9BptN8Je/uS5c6wRejNC/VqQyw7SHb
hRFNrXPvq5Y+2bI/DxtdzZLKAMXOMjDjj0XEgfOIn2aveOo3uE7zf1i+njxwQhPu
uSYA9AlBZiKGr2PCYSDPnViHOspVJjxRuAgyWM1Qf+CTC0D95aj0oz8CgYEAz5n4
Cb3/WfUHxMJLljJ7PlVmlQpF5Hk3AOR9+vtqTtdxRjuxW6DH2uAHBDdC3OgppUN4
CFj55kzc2HUuiHtmPtx8mK6G+otT7Lww+nLSFL4PvZ6CYxqcio5MPnoYd+pCxrXY
JFo2W7e4FkBOxb5PF5So5plg+d0z/QiA7aFP1osCgYEAtgi1rwC5qkm8prn4tFm6
hkcVCIXc+IWNS0Bu693bXKdGr7RsmIynff1zpf4ntYGpEMaeymClCY0ppDrMYlzU
RBYiFNdlBvDRj6s/H+FTzHRk2DT/99rAhY9nzVY0OQFoQIXK8jlURGrkmI/CYy66
XqBmo5t4zcHM7kaeEBOWEKkCgYAYnO6VaRtPNQfYwhhoFFAcUc+5t+AVeHGW/4AY
M5qlAlIBu64JaQSI5KqwS0T4H+ZgG6Gti68FKPO+DhaYQ9kZdtam23pRVhd7J8y+
xMI3h1kiaBqZWVxZ6QkNFzizbui/2mtn0/JB6YQ/zxwHwcpqx0tHG8Qtm5ZAV7PB
eLCYhQKBgQDALJxU/6hMTdytEU5CLOBSMby45YD/RrfQrl2gl/vA0etPrto4RkVq
UrkDO/9W4mZORClN3knxEFSTlYi8YOboxdlynpFfhcs82wFChs+Ydp1eEsVHAqtu
T+uzn0sroycBiBfVB949LExnzGDFUkhG0i2c2InarQYLTsIyHCIDEA==
-----END RSA PRIVATE KEY-----

View File

@ -260,6 +260,7 @@ fn http_flag() {
fn http_address_flag() {
let addr = "127.0.0.99".parse::<IpAddr>().unwrap();
CommandLineTest::new()
.flag("http", None)
.flag("http-address", Some("127.0.0.99"))
.flag("unencrypted-http-transport", None)
.run()
@ -269,6 +270,7 @@ fn http_address_flag() {
fn http_address_ipv6_flag() {
let addr = "::1".parse::<IpAddr>().unwrap();
CommandLineTest::new()
.flag("http", None)
.flag("http-address", Some("::1"))
.flag("unencrypted-http-transport", None)
.run()
@ -279,6 +281,7 @@ fn http_address_ipv6_flag() {
fn missing_unencrypted_http_transport_flag() {
let addr = "127.0.0.99".parse::<IpAddr>().unwrap();
CommandLineTest::new()
.flag("http", None)
.flag("http-address", Some("127.0.0.99"))
.run()
.with_config(|config| assert_eq!(config.http_api.listen_addr, addr));
@ -286,6 +289,7 @@ fn missing_unencrypted_http_transport_flag() {
#[test]
fn http_port_flag() {
CommandLineTest::new()
.flag("http", None)
.flag("http-port", Some("9090"))
.run()
.with_config(|config| assert_eq!(config.http_api.listen_port, 9090));
@ -293,6 +297,7 @@ fn http_port_flag() {
#[test]
fn http_allow_origin_flag() {
CommandLineTest::new()
.flag("http", None)
.flag("http-allow-origin", Some("http://localhost:9009"))
.run()
.with_config(|config| {
@ -305,6 +310,7 @@ fn http_allow_origin_flag() {
#[test]
fn http_allow_origin_all_flag() {
CommandLineTest::new()
.flag("http", None)
.flag("http-allow-origin", Some("*"))
.run()
.with_config(|config| assert_eq!(config.http_api.allow_origin, Some("*".to_string())));
@ -312,12 +318,14 @@ fn http_allow_origin_all_flag() {
#[test]
fn http_allow_keystore_export_default() {
CommandLineTest::new()
.flag("http", None)
.run()
.with_config(|config| assert!(!config.http_api.allow_keystore_export));
}
#[test]
fn http_allow_keystore_export_present() {
CommandLineTest::new()
.flag("http", None)
.flag("http-allow-keystore-export", None)
.run()
.with_config(|config| assert!(config.http_api.allow_keystore_export));
@ -325,12 +333,14 @@ fn http_allow_keystore_export_present() {
#[test]
fn http_store_keystore_passwords_in_secrets_dir_default() {
CommandLineTest::new()
.flag("http", None)
.run()
.with_config(|config| assert!(!config.http_api.store_passwords_in_secrets_dir));
}
#[test]
fn http_store_keystore_passwords_in_secrets_dir_present() {
CommandLineTest::new()
.flag("http", None)
.flag("http-store-passwords-in-secrets-dir", None)
.run()
.with_config(|config| assert!(config.http_api.store_passwords_in_secrets_dir));
@ -348,6 +358,7 @@ fn metrics_flag() {
fn metrics_address_flag() {
let addr = "127.0.0.99".parse::<IpAddr>().unwrap();
CommandLineTest::new()
.flag("metrics", None)
.flag("metrics-address", Some("127.0.0.99"))
.run()
.with_config(|config| assert_eq!(config.http_metrics.listen_addr, addr));
@ -356,6 +367,7 @@ fn metrics_address_flag() {
fn metrics_address_ipv6_flag() {
let addr = "::1".parse::<IpAddr>().unwrap();
CommandLineTest::new()
.flag("metrics", None)
.flag("metrics-address", Some("::1"))
.run()
.with_config(|config| assert_eq!(config.http_metrics.listen_addr, addr));
@ -363,6 +375,7 @@ fn metrics_address_ipv6_flag() {
#[test]
fn metrics_port_flag() {
CommandLineTest::new()
.flag("metrics", None)
.flag("metrics-port", Some("9090"))
.run()
.with_config(|config| assert_eq!(config.http_metrics.listen_port, 9090));
@ -370,6 +383,7 @@ fn metrics_port_flag() {
#[test]
fn metrics_allow_origin_flag() {
CommandLineTest::new()
.flag("metrics", None)
.flag("metrics-allow-origin", Some("http://localhost:9009"))
.run()
.with_config(|config| {
@ -382,6 +396,7 @@ fn metrics_allow_origin_flag() {
#[test]
fn metrics_allow_origin_all_flag() {
CommandLineTest::new()
.flag("metrics", None)
.flag("metrics-allow-origin", Some("*"))
.run()
.with_config(|config| assert_eq!(config.http_metrics.allow_origin, Some("*".to_string())));

View File

@ -26,6 +26,6 @@ serde_derive = "1.0.116"
serde_yaml = "0.8.13"
eth2_network_config = { path = "../../common/eth2_network_config" }
serde_json = "1.0.58"
zip = "0.5.13"
zip = "0.6"
lazy_static = "1.4.0"
parking_lot = "0.12.0"

View File

@ -51,7 +51,7 @@ mod tests {
/// If we are unable to reach the Web3Signer HTTP API within this timeout then we will
/// assume it failed to start.
const UPCHECK_TIMEOUT: Duration = Duration::from_secs(20);
const UPCHECK_TIMEOUT: Duration = Duration::from_secs(30);
/// Set to `false` to send the Web3Signer logs to the console during tests. Logs are useful when
/// debugging.

View File

@ -12,7 +12,7 @@ use crate::{
use bls::SignatureBytes;
use environment::RuntimeContext;
use eth2::types::{BlockContents, SignedBlockContents};
use eth2::BeaconNodeHttpClient;
use eth2::{BeaconNodeHttpClient, StatusCode};
use slog::{crit, debug, error, info, trace, warn, Logger};
use slot_clock::SlotClock;
use std::fmt::Debug;
@ -573,7 +573,7 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
RequireSynced::No,
OfflineOnFailure::Yes,
|beacon_node| async {
Self::publish_signed_block_contents::<Payload>(
self.publish_signed_block_contents::<Payload>(
&signed_block_contents,
beacon_node,
)
@ -596,9 +596,12 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
}
async fn publish_signed_block_contents<Payload: AbstractExecPayload<E>>(
&self,
signed_block_contents: &SignedBlockContents<E, Payload>,
beacon_node: &BeaconNodeHttpClient,
) -> Result<(), BlockError> {
let log = self.context.log();
let slot = signed_block_contents.signed_block().slot();
match Payload::block_type() {
BlockType::Full => {
let _post_timer = metrics::start_timer_vec(
@ -608,12 +611,7 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
beacon_node
.post_beacon_blocks(signed_block_contents)
.await
.map_err(|e| {
BlockError::Irrecoverable(format!(
"Error from beacon node when publishing block: {:?}",
e
))
})?
.or_else(|e| handle_block_post_error(e, slot, log))?
}
BlockType::Blinded => {
let _post_timer = metrics::start_timer_vec(
@ -623,12 +621,7 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
beacon_node
.post_beacon_blinded_blocks(signed_block_contents)
.await
.map_err(|e| {
BlockError::Irrecoverable(format!(
"Error from beacon node when publishing block: {:?}",
e
))
})?
.or_else(|e| handle_block_post_error(e, slot, log))?
}
}
Ok::<_, BlockError>(())
@ -695,3 +688,29 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
Ok::<_, BlockError>(block_contents)
}
}
fn handle_block_post_error(err: eth2::Error, slot: Slot, log: &Logger) -> Result<(), BlockError> {
// Handle non-200 success codes.
if let Some(status) = err.status() {
if status == StatusCode::ACCEPTED {
info!(
log,
"Block is already known to BN or might be invalid";
"slot" => slot,
"status_code" => status.as_u16(),
);
return Ok(());
} else if status.is_success() {
debug!(
log,
"Block published with non-standard success code";
"slot" => slot,
"status_code" => status.as_u16(),
);
return Ok(());
}
}
Err(BlockError::Irrecoverable(format!(
"Error from beacon node when publishing block: {err:?}",
)))
}
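The helper above tolerates any 2xx from the beacon node, singling out 202 Accepted only for louder logging since it indicates a duplicate or possibly-invalid block. The acceptance policy in isolation, illustrated with the `http` crate's `StatusCode` (the type `eth2` re-exports via `reqwest`):

```rust
use http::StatusCode;

/// Status codes the publisher tolerates as success.
fn tolerated(status: StatusCode) -> bool {
    // 202 Accepted is itself a success code; the real helper only
    // special-cases it to log at a higher level.
    status.is_success()
}

fn main() {
    assert!(tolerated(StatusCode::ACCEPTED)); // duplicate / possibly invalid block
    assert!(tolerated(StatusCode::NO_CONTENT)); // non-standard success
    assert!(!tolerated(StatusCode::BAD_REQUEST)); // irrecoverable
}
```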

View File

@ -41,6 +41,8 @@ tokio-postgres = "0.7.5"
http_api = { path = "../beacon_node/http_api" }
beacon_chain = { path = "../beacon_node/beacon_chain" }
network = { path = "../beacon_node/network" }
testcontainers = "0.14.0"
# TODO: update to 0.15 when released: https://github.com/testcontainers/testcontainers-rs/issues/497
testcontainers = { git = "https://github.com/testcontainers/testcontainers-rs/", rev = "0f2c9851" }
unused_port = { path = "../common/unused_port" }
task_executor = { path = "../common/task_executor" }
logging = { path = "../common/logging" }

View File

@ -7,12 +7,21 @@ use beacon_chain::{
};
use eth2::{types::BlockId, BeaconNodeHttpClient, SensitiveUrl, Timeouts};
use http_api::test_utils::{create_api_server, ApiServer};
use log::error;
use logging::test_logger;
use network::NetworkReceivers;
use rand::distributions::Alphanumeric;
use rand::{thread_rng, Rng};
use std::collections::HashMap;
use std::env;
use std::net::SocketAddr;
use std::time::Duration;
use testcontainers::{clients::Cli, core::WaitFor, Image, RunnableImage};
use tokio::sync::oneshot;
use tokio::{runtime, task::JoinHandle};
use tokio_postgres::{config::Config as PostgresConfig, Client, NoTls};
use types::{Hash256, MainnetEthSpec, Slot};
use unused_port::unused_tcp4_port;
use url::Url;
use watch::{
client::WatchHttpClient,
@ -22,15 +31,40 @@ use watch::{
updater::{handler::*, run_updater, Config as UpdaterConfig, WatchSpec},
};
use log::error;
use std::env;
use std::net::SocketAddr;
use std::time::Duration;
use tokio::{runtime, task::JoinHandle};
use tokio_postgres::{config::Config as PostgresConfig, Client, NoTls};
use unused_port::unused_tcp4_port;
#[derive(Debug)]
pub struct Postgres(HashMap<String, String>);
use testcontainers::{clients::Cli, images::postgres::Postgres, RunnableImage};
impl Default for Postgres {
fn default() -> Self {
let mut env_vars = HashMap::new();
env_vars.insert("POSTGRES_DB".to_owned(), "postgres".to_owned());
env_vars.insert("POSTGRES_HOST_AUTH_METHOD".into(), "trust".into());
Self(env_vars)
}
}
impl Image for Postgres {
type Args = ();
fn name(&self) -> String {
"postgres".to_owned()
}
fn tag(&self) -> String {
"11-alpine".to_owned()
}
fn ready_conditions(&self) -> Vec<WaitFor> {
vec![WaitFor::message_on_stderr(
"database system is ready to accept connections",
)]
}
fn env_vars(&self) -> Box<dyn Iterator<Item = (&String, &String)> + '_> {
Box::new(self.0.iter())
}
}
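For context, a custom `Image` like the `Postgres` type above is run through the `Cli` client; a rough usage sketch assuming the 0.14-era testcontainers API and the same module as the definition above:

```rust
use testcontainers::{clients::Cli, RunnableImage};

fn main() {
    let docker = Cli::default();
    let container = docker.run(RunnableImage::from(Postgres::default()));
    // Postgres listens on 5432 inside the container; ask Docker for the
    // host-side mapping (`get_host_port_ipv4` is assumed from 0.14).
    let port = container.get_host_port_ipv4(5432);
    println!("postgres reachable on 127.0.0.1:{port}");
}
```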
type E = MainnetEthSpec;
@ -96,6 +130,7 @@ impl TesterBuilder {
reconstruct_historic_states: true,
..ChainConfig::default()
})
.logger(test_logger())
.deterministic_keypairs(VALIDATOR_COUNT)
.fresh_ephemeral_store()
.build();