Add merge support to simulator (#3292)

## Issue Addressed

N/A

## Proposed Changes

Makes the simulator merge-compatible. Adds a `--post-merge` flag to the eth1 simulator that sets a terminal total difficulty (TTD) and simulates the merge transition. Uses the `MockServer` from the execution layer test utils to stand in for a dummy execution node.
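For orientation, here is a minimal sketch of how the dummy execution node can be spun up on its own, using the `MockExecutionConfig` / `MockServer::new_with_config` API added in this diff. The choice of `MainnetEthSpec`, the standalone `main`, and the printed URL are illustrative assumptions, not code from the PR:

```rust
use execution_layer::auth::JwtKey;
use execution_layer::test_utils::{MockExecutionConfig, MockServer, DEFAULT_JWT_SECRET};
use types::MainnetEthSpec; // illustrative; the simulator itself uses MinimalEthSpec

#[tokio::main]
async fn main() {
    // Use a fixed JWT key (as the updated test harness does); the remaining fields fall
    // back to the defaults added in this PR (DEFAULT_TERMINAL_DIFFICULTY, terminal block
    // 64, zero terminal block hash, default server config).
    let config = MockExecutionConfig {
        jwt_key: JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(),
        ..Default::default()
    };

    // Start the mock engine-API server on the current tokio runtime.
    let server: MockServer<MainnetEthSpec> =
        MockServer::new_with_config(&tokio::runtime::Handle::current(), config);

    // The simulator points each beacon node's `execution_endpoints` at a URL like this.
    println!("mock execution node listening at {}", server.url());
}
```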

Adds the merge transition simulation to CI.
Pawan Dhananjay committed 2022-07-18 23:15:40 +00:00
commit f9b9658711, parent da7b7a0f60
17 changed files with 389 additions and 69 deletions

View File

@ -18,4 +18,5 @@ async-wrapper-methods = [
"warp_utils::task::blocking_json_task",
"validator_client::http_api::blocking_signed_json_task",
"execution_layer::test_utils::MockServer::new",
"execution_layer::test_utils::MockServer::new_with_config",
]

View File

@ -158,6 +158,18 @@ jobs:
run: sudo npm install -g ganache
- name: Run the beacon chain sim that starts from an eth1 contract
run: cargo run --release --bin simulator eth1-sim
merge-transition-ubuntu:
name: merge-transition-ubuntu
runs-on: ubuntu-latest
needs: cargo-fmt
steps:
- uses: actions/checkout@v1
- name: Get latest version of stable Rust
run: rustup update stable
- name: Install ganache
run: sudo npm install -g ganache
- name: Run the beacon chain sim and go through the merge transition
run: cargo run --release --bin simulator eth1-sim --post-merge
no-eth1-simulator-ubuntu:
name: no-eth1-simulator-ubuntu
runs-on: ubuntu-latest

Cargo.lock (generated)
View File

@ -4045,6 +4045,7 @@ dependencies = [
"beacon_node",
"environment",
"eth2",
"execution_layer",
"sensitive_url",
"tempfile",
"types",

View File

@ -11,7 +11,9 @@ use crate::{
StateSkipConfig,
};
use bls::get_withdrawal_credentials;
use execution_layer::test_utils::DEFAULT_JWT_SECRET;
use execution_layer::{
auth::JwtKey,
test_utils::{ExecutionBlockGenerator, MockExecutionLayer, DEFAULT_TERMINAL_BLOCK},
ExecutionLayer,
};
@ -361,6 +363,7 @@ where
DEFAULT_TERMINAL_BLOCK,
spec.terminal_block_hash,
spec.terminal_block_hash_activation_epoch,
Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()),
None,
);
self.execution_layer = Some(mock.el.clone());

View File

@ -25,7 +25,7 @@ impl From<jsonwebtoken::errors::Error> for Error {
}
/// Provides wrapper around `[u8; JWT_SECRET_LENGTH]` that implements `Zeroize`.
#[derive(Zeroize)]
#[derive(Zeroize, Clone)]
#[zeroize(drop)]
pub struct JwtKey([u8; JWT_SECRET_LENGTH as usize]);
@ -159,12 +159,12 @@ pub struct Claims {
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::JWT_SECRET;
use crate::test_utils::DEFAULT_JWT_SECRET;
#[test]
fn test_roundtrip() {
let auth = Auth::new(
JwtKey::from_slice(&JWT_SECRET).unwrap(),
JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(),
Some("42".into()),
Some("Lighthouse".into()),
);
@ -172,7 +172,7 @@ mod tests {
let token = auth.generate_token_with_claims(&claims).unwrap();
assert_eq!(
Auth::validate_token(&token, &JwtKey::from_slice(&JWT_SECRET).unwrap())
Auth::validate_token(&token, &JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap())
.unwrap()
.claims,
claims

View File

@ -708,7 +708,7 @@ impl HttpJsonRpc {
mod test {
use super::auth::JwtKey;
use super::*;
use crate::test_utils::{MockServer, JWT_SECRET};
use crate::test_utils::{MockServer, DEFAULT_JWT_SECRET};
use std::future::Future;
use std::str::FromStr;
use std::sync::Arc;
@ -728,8 +728,10 @@ mod test {
let echo_url = SensitiveUrl::parse(&format!("{}/echo", server.url())).unwrap();
// Create rpc clients that include JWT auth headers if `with_auth` is true.
let (rpc_client, echo_client) = if with_auth {
let rpc_auth = Auth::new(JwtKey::from_slice(&JWT_SECRET).unwrap(), None, None);
let echo_auth = Auth::new(JwtKey::from_slice(&JWT_SECRET).unwrap(), None, None);
let rpc_auth =
Auth::new(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), None, None);
let echo_auth =
Auth::new(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), None, None);
(
Arc::new(HttpJsonRpc::new_with_auth(rpc_url, rpc_auth).unwrap()),
Arc::new(HttpJsonRpc::new_with_auth(echo_url, echo_auth).unwrap()),

View File

@ -1,10 +1,13 @@
use crate::engine_api::{
json_structures::{
JsonForkchoiceUpdatedV1Response, JsonPayloadStatusV1, JsonPayloadStatusV1Status,
},
ExecutionBlock, PayloadAttributes, PayloadId, PayloadStatusV1, PayloadStatusV1Status,
};
use crate::engines::ForkChoiceState;
use crate::{
engine_api::{
json_structures::{
JsonForkchoiceUpdatedV1Response, JsonPayloadStatusV1, JsonPayloadStatusV1Status,
},
ExecutionBlock, PayloadAttributes, PayloadId, PayloadStatusV1, PayloadStatusV1Status,
},
ExecutionBlockWithTransactions,
};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use tree_hash::TreeHash;
@ -66,6 +69,28 @@ impl<T: EthSpec> Block<T> {
},
}
}
pub fn as_execution_block_with_tx(&self) -> Option<ExecutionBlockWithTransactions<T>> {
match self {
Block::PoS(payload) => Some(ExecutionBlockWithTransactions {
parent_hash: payload.parent_hash,
fee_recipient: payload.fee_recipient,
state_root: payload.state_root,
receipts_root: payload.receipts_root,
logs_bloom: payload.logs_bloom.clone(),
prev_randao: payload.prev_randao,
block_number: payload.block_number,
gas_limit: payload.gas_limit,
gas_used: payload.gas_used,
timestamp: payload.timestamp,
extra_data: payload.extra_data.clone(),
base_fee_per_gas: payload.base_fee_per_gas,
block_hash: payload.block_hash,
transactions: vec![],
}),
Block::PoW(_) => None,
}
}
}
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize, TreeHash)]
@ -153,6 +178,14 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
.map(|block| block.as_execution_block(self.terminal_total_difficulty))
}
pub fn execution_block_with_txs_by_hash(
&self,
hash: ExecutionBlockHash,
) -> Option<ExecutionBlockWithTransactions<T>> {
self.block_by_hash(hash)
.and_then(|block| block.as_execution_block_with_tx())
}
pub fn move_to_block_prior_to_terminal_block(&mut self) -> Result<(), String> {
let target_block = self
.terminal_block_number

View File

@ -48,13 +48,25 @@ pub async fn handle_rpc<T: EthSpec>(
s.parse()
.map_err(|e| format!("unable to parse hash: {:?}", e))
})?;
Ok(serde_json::to_value(
ctx.execution_block_generator
.read()
.execution_block_by_hash(hash),
)
.unwrap())
let full_tx = params
.get(1)
.and_then(JsonValue::as_bool)
.ok_or_else(|| "missing/invalid params[1] value".to_string())?;
if full_tx {
Ok(serde_json::to_value(
ctx.execution_block_generator
.read()
.execution_block_with_txs_by_hash(hash),
)
.unwrap())
} else {
Ok(serde_json::to_value(
ctx.execution_block_generator
.read()
.execution_block_by_hash(hash),
)
.unwrap())
}
}
ENGINE_NEW_PAYLOAD_V1 => {
let request: JsonExecutionPayloadV1<T> = get_param(params, 0)?;

View File

@ -1,5 +1,7 @@
use crate::{
test_utils::{MockServer, DEFAULT_TERMINAL_BLOCK, DEFAULT_TERMINAL_DIFFICULTY, JWT_SECRET},
test_utils::{
MockServer, DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, DEFAULT_TERMINAL_DIFFICULTY,
},
Config, *,
};
use sensitive_url::SensitiveUrl;
@ -22,6 +24,7 @@ impl<T: EthSpec> MockExecutionLayer<T> {
DEFAULT_TERMINAL_BLOCK,
ExecutionBlockHash::zero(),
Epoch::new(0),
Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()),
None,
)
}
@ -32,6 +35,7 @@ impl<T: EthSpec> MockExecutionLayer<T> {
terminal_block: u64,
terminal_block_hash: ExecutionBlockHash,
terminal_block_hash_activation_epoch: Epoch,
jwt_key: Option<JwtKey>,
builder_url: Option<SensitiveUrl>,
) -> Self {
let handle = executor.handle().unwrap();
@ -41,8 +45,10 @@ impl<T: EthSpec> MockExecutionLayer<T> {
spec.terminal_block_hash = terminal_block_hash;
spec.terminal_block_hash_activation_epoch = terminal_block_hash_activation_epoch;
let jwt_key = jwt_key.unwrap_or_else(JwtKey::random);
let server = MockServer::new(
&handle,
jwt_key,
terminal_total_difficulty,
terminal_block,
terminal_block_hash,
@ -52,7 +58,7 @@ impl<T: EthSpec> MockExecutionLayer<T> {
let file = NamedTempFile::new().unwrap();
let path = file.path().into();
std::fs::write(&path, hex::encode(JWT_SECRET)).unwrap();
std::fs::write(&path, hex::encode(DEFAULT_JWT_SECRET)).unwrap();
let config = Config {
execution_endpoints: vec![url],

View File

@ -26,12 +26,33 @@ pub use mock_execution_layer::MockExecutionLayer;
pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400;
pub const DEFAULT_TERMINAL_BLOCK: u64 = 64;
pub const JWT_SECRET: [u8; 32] = [42; 32];
pub const DEFAULT_JWT_SECRET: [u8; 32] = [42; 32];
mod execution_block_generator;
mod handle_rpc;
mod mock_execution_layer;
/// Configuration for the MockExecutionLayer.
pub struct MockExecutionConfig {
pub server_config: Config,
pub jwt_key: JwtKey,
pub terminal_difficulty: Uint256,
pub terminal_block: u64,
pub terminal_block_hash: ExecutionBlockHash,
}
impl Default for MockExecutionConfig {
fn default() -> Self {
Self {
jwt_key: JwtKey::random(),
terminal_difficulty: DEFAULT_TERMINAL_DIFFICULTY.into(),
terminal_block: DEFAULT_TERMINAL_BLOCK,
terminal_block_hash: ExecutionBlockHash::zero(),
server_config: Config::default(),
}
}
}
pub struct MockServer<T: EthSpec> {
_shutdown_tx: oneshot::Sender<()>,
listen_socket_addr: SocketAddr,
@ -43,25 +64,29 @@ impl<T: EthSpec> MockServer<T> {
pub fn unit_testing() -> Self {
Self::new(
&runtime::Handle::current(),
JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(),
DEFAULT_TERMINAL_DIFFICULTY.into(),
DEFAULT_TERMINAL_BLOCK,
ExecutionBlockHash::zero(),
)
}
pub fn new(
handle: &runtime::Handle,
terminal_difficulty: Uint256,
terminal_block: u64,
terminal_block_hash: ExecutionBlockHash,
) -> Self {
pub fn new_with_config(handle: &runtime::Handle, config: MockExecutionConfig) -> Self {
let MockExecutionConfig {
jwt_key,
terminal_difficulty,
terminal_block,
terminal_block_hash,
server_config,
} = config;
let last_echo_request = Arc::new(RwLock::new(None));
let preloaded_responses = Arc::new(Mutex::new(vec![]));
let execution_block_generator =
ExecutionBlockGenerator::new(terminal_difficulty, terminal_block, terminal_block_hash);
let ctx: Arc<Context<T>> = Arc::new(Context {
config: <_>::default(),
config: server_config,
jwt_key,
log: null_logger().unwrap(),
last_echo_request: last_echo_request.clone(),
execution_block_generator: RwLock::new(execution_block_generator),
@ -99,6 +124,25 @@ impl<T: EthSpec> MockServer<T> {
}
}
pub fn new(
handle: &runtime::Handle,
jwt_key: JwtKey,
terminal_difficulty: Uint256,
terminal_block: u64,
terminal_block_hash: ExecutionBlockHash,
) -> Self {
Self::new_with_config(
handle,
MockExecutionConfig {
server_config: Config::default(),
jwt_key,
terminal_difficulty,
terminal_block,
terminal_block_hash,
},
)
}
pub fn execution_block_generator(&self) -> RwLockWriteGuard<'_, ExecutionBlockGenerator<T>> {
self.ctx.execution_block_generator.write()
}
@ -351,6 +395,7 @@ impl warp::reject::Reject for AuthError {}
/// The server will gracefully handle the case where any fields are `None`.
pub struct Context<T: EthSpec> {
pub config: Config,
pub jwt_key: JwtKey,
pub log: Logger,
pub last_echo_request: Arc<RwLock<Option<Bytes>>>,
pub execution_block_generator: RwLock<ExecutionBlockGenerator<T>>,
@ -386,28 +431,30 @@ struct ErrorMessage {
/// Returns a `warp` header which filters out request that has a missing or incorrectly
/// signed JWT token.
fn auth_header_filter() -> warp::filters::BoxedFilter<()> {
fn auth_header_filter(jwt_key: JwtKey) -> warp::filters::BoxedFilter<()> {
warp::any()
.and(warp::filters::header::optional("Authorization"))
.and_then(move |authorization: Option<String>| async move {
match authorization {
None => Err(warp::reject::custom(AuthError(
"auth absent from request".to_string(),
))),
Some(auth) => {
if let Some(token) = auth.strip_prefix("Bearer ") {
let secret = JwtKey::from_slice(&JWT_SECRET).unwrap();
match Auth::validate_token(token, &secret) {
Ok(_) => Ok(()),
Err(e) => Err(warp::reject::custom(AuthError(format!(
"Auth failure: {:?}",
e
)))),
.and_then(move |authorization: Option<String>| {
let secret = jwt_key.clone();
async move {
match authorization {
None => Err(warp::reject::custom(AuthError(
"auth absent from request".to_string(),
))),
Some(auth) => {
if let Some(token) = auth.strip_prefix("Bearer ") {
match Auth::validate_token(token, &secret) {
Ok(_) => Ok(()),
Err(e) => Err(warp::reject::custom(AuthError(format!(
"Auth failure: {:?}",
e
)))),
}
} else {
Err(warp::reject::custom(AuthError(
"Bearer token not present in auth header".to_string(),
)))
}
} else {
Err(warp::reject::custom(AuthError(
"Bearer token not present in auth header".to_string(),
)))
}
}
}
@ -523,7 +570,7 @@ pub fn serve<T: EthSpec>(
});
let routes = warp::post()
.and(auth_header_filter())
.and(auth_header_filter(ctx.jwt_key.clone()))
.and(root.or(echo))
.recover(handle_rejection)
// Add a `Server` header.

View File

@ -7,6 +7,7 @@ status = [
"ef-tests-ubuntu",
"dockerfile-ubuntu",
"eth1-simulator-ubuntu",
"merge-transition-ubuntu",
"no-eth1-simulator-ubuntu",
"check-benchmarks",
"check-consensus",

View File

@ -13,3 +13,4 @@ eth2 = { path = "../../common/eth2" }
validator_client = { path = "../../validator_client" }
validator_dir = { path = "../../common/validator_dir", features = ["insecure_keys"] }
sensitive_url = { path = "../../common/sensitive_url" }
execution_layer = { path = "../../beacon_node/execution_layer" }

View File

@ -17,6 +17,9 @@ use validator_dir::insecure_keys::build_deterministic_validator_dirs;
pub use beacon_node::{ClientConfig, ClientGenesis, ProductionClient};
pub use environment;
pub use eth2;
pub use execution_layer::test_utils::{
Config as MockServerConfig, MockExecutionConfig, MockServer,
};
pub use validator_client::Config as ValidatorConfig;
/// The global timeout for HTTP requests to the beacon node.
@ -211,3 +214,29 @@ impl<E: EthSpec> LocalValidatorClient<E> {
})
}
}
/// Provides an execution engine api server that is running in the current process on a given tokio executor (it
/// is _local_ to this process).
///
/// Intended for use in testing and simulation. Not for production.
pub struct LocalExecutionNode<E: EthSpec> {
pub server: MockServer<E>,
pub datadir: TempDir,
}
impl<E: EthSpec> LocalExecutionNode<E> {
pub fn new(context: RuntimeContext<E>, config: MockExecutionConfig) -> Self {
let datadir = TempBuilder::new()
.prefix("lighthouse_node_test_rig_el")
.tempdir()
.expect("should create temp directory for client datadir");
let jwt_file_path = datadir.path().join("jwt.hex");
if let Err(e) = std::fs::write(&jwt_file_path, config.jwt_key.hex_string()) {
panic!("Failed to write jwt file {}", e);
}
Self {
server: MockServer::new_with_config(&context.executor.handle().unwrap(), config),
datadir,
}
}
}

View File

@ -1,7 +1,7 @@
use crate::local_network::LocalNetwork;
use node_test_rig::eth2::types::{BlockId, StateId};
use std::time::Duration;
use types::{Epoch, EthSpec, Slot, Unsigned};
use types::{Epoch, EthSpec, ExecutionBlockHash, Hash256, Slot, Unsigned};
/// Checks that all of the validators have on-boarded by the start of the second eth1 voting
/// period.
@ -149,19 +149,19 @@ pub async fn verify_fork_version<E: EthSpec>(
network: LocalNetwork<E>,
fork_epoch: Epoch,
slot_duration: Duration,
altair_fork_version: [u8; 4],
fork_version: [u8; 4],
) -> Result<(), String> {
epoch_delay(fork_epoch, slot_duration, E::slots_per_epoch()).await;
for remote_node in network.remote_nodes()? {
let fork_version = remote_node
let remote_fork_version = remote_node
.get_beacon_states_fork(StateId::Head)
.await
.map(|resp| resp.unwrap().data.current_version)
.map_err(|e| format!("Failed to get fork from beacon node: {:?}", e))?;
if fork_version != altair_fork_version {
if fork_version != remote_fork_version {
return Err(format!(
"Fork version after FORK_EPOCH is incorrect, got: {:?}, expected: {:?}",
fork_version, altair_fork_version,
remote_fork_version, fork_version,
));
}
}
@ -207,3 +207,39 @@ pub async fn verify_full_sync_aggregates_up_to<E: EthSpec>(
Ok(())
}
/// Verify that the first merged PoS block got finalized.
pub async fn verify_transition_block_finalized<E: EthSpec>(
network: LocalNetwork<E>,
transition_epoch: Epoch,
slot_duration: Duration,
should_verify: bool,
) -> Result<(), String> {
if !should_verify {
return Ok(());
}
epoch_delay(transition_epoch + 2, slot_duration, E::slots_per_epoch()).await;
let mut block_hashes = Vec::new();
for remote_node in network.remote_nodes()?.iter() {
let execution_block_hash: ExecutionBlockHash = remote_node
.get_beacon_blocks::<E>(BlockId::Finalized)
.await
.map(|body| body.unwrap().data)
.map_err(|e| format!("Get state root via http failed: {:?}", e))?
.message()
.execution_payload()
.map(|payload| payload.execution_payload.block_hash)
.map_err(|e| format!("Execution payload does not exist: {:?}", e))?;
block_hashes.push(execution_block_hash);
}
let first = block_hashes[0];
if first.into_root() != Hash256::zero() && block_hashes.iter().all(|&item| item == first) {
Ok(())
} else {
Err(format!(
"Terminal block not finalized on all nodes Finalized block hashes:{:?}",
block_hashes
))
}
}

View File

@ -36,6 +36,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
.takes_value(true)
.default_value("3")
.help("Speed up factor. Please use a divisor of 12."))
.arg(Arg::with_name("post-merge")
.short("m")
.long("post-merge")
.takes_value(false)
.help("Simulate the merge transition"))
.arg(Arg::with_name("continue_after_checks")
.short("c")
.long("continue_after_checks")

View File

@ -1,4 +1,4 @@
use crate::local_network::INVALID_ADDRESS;
use crate::local_network::{EXECUTION_PORT, INVALID_ADDRESS, TERMINAL_BLOCK, TERMINAL_DIFFICULTY};
use crate::{checks, LocalNetwork, E};
use clap::ArgMatches;
use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID};
@ -18,8 +18,12 @@ use std::time::Duration;
use tokio::time::sleep;
use types::{Epoch, EthSpec, MinimalEthSpec};
const FORK_EPOCH: u64 = 2;
const END_EPOCH: u64 = 16;
const ALTAIR_FORK_EPOCH: u64 = 1;
const BELLATRIX_FORK_EPOCH: u64 = 2;
const SUGGESTED_FEE_RECIPIENT: [u8; 20] =
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1];
pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
let node_count = value_t!(matches, "nodes", usize).expect("missing nodes default");
@ -28,10 +32,12 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
let speed_up_factor =
value_t!(matches, "speed_up_factor", u64).expect("missing speed_up_factor default");
let continue_after_checks = matches.is_present("continue_after_checks");
let post_merge_sim = matches.is_present("post-merge");
println!("Beacon Chain Simulator:");
println!(" nodes:{}", node_count);
println!(" validators_per_node:{}", validators_per_node);
println!(" post merge simulation:{}", post_merge_sim);
println!(" continue_after_checks:{}", continue_after_checks);
// Generate the directories and keystores required for the validator clients.
@ -72,6 +78,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
let total_validator_count = validators_per_node * node_count;
let altair_fork_version = spec.altair_fork_version;
let bellatrix_fork_version = spec.bellatrix_fork_version;
spec.seconds_per_slot /= speed_up_factor;
spec.seconds_per_slot = max(1, spec.seconds_per_slot);
@ -80,8 +87,14 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
spec.min_genesis_time = 0;
spec.min_genesis_active_validator_count = total_validator_count as u64;
spec.seconds_per_eth1_block = eth1_block_time.as_secs();
spec.altair_fork_epoch = Some(Epoch::new(FORK_EPOCH));
spec.altair_fork_epoch = Some(Epoch::new(ALTAIR_FORK_EPOCH));
// Set these parameters only if we are doing a merge simulation
if post_merge_sim {
spec.terminal_total_difficulty = TERMINAL_DIFFICULTY.into();
spec.bellatrix_fork_epoch = Some(Epoch::new(BELLATRIX_FORK_EPOCH));
}
let seconds_per_slot = spec.seconds_per_slot;
let slot_duration = Duration::from_secs(spec.seconds_per_slot);
let initial_validator_count = spec.min_genesis_active_validator_count as usize;
let deposit_amount = env.eth2_config.spec.max_effective_balance;
@ -137,6 +150,19 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
beacon_config.network.enr_address = Some(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
if post_merge_sim {
let el_config = execution_layer::Config {
execution_endpoints: vec![SensitiveUrl::parse(&format!(
"http://localhost:{}",
EXECUTION_PORT
))
.unwrap()],
..Default::default()
};
beacon_config.execution_layer = Some(el_config);
}
/*
* Create a new `LocalNetwork` with one beacon node.
*/
@ -168,9 +194,13 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
let network_1 = network.clone();
executor.spawn(
async move {
let mut validator_config = testing_validator_config();
if post_merge_sim {
validator_config.fee_recipient = Some(SUGGESTED_FEE_RECIPIENT.into());
}
println!("Adding validator client {}", i);
network_1
.add_validator_client(testing_validator_config(), i, files, i % 2 == 0)
.add_validator_client(validator_config, i, files, i % 2 == 0)
.await
.expect("should add validator");
},
@ -182,6 +212,21 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
println!("Duration to genesis: {}", duration_to_genesis.as_secs());
sleep(duration_to_genesis).await;
if post_merge_sim {
let executor = executor.clone();
let network_2 = network.clone();
executor.spawn(
async move {
println!("Mining pow blocks");
let mut interval = tokio::time::interval(Duration::from_secs(seconds_per_slot));
for i in 1..=TERMINAL_BLOCK + 1 {
interval.tick().await;
let _ = network_2.mine_pow_blocks(i);
}
},
"pow_mining",
);
}
/*
* Start the checks that ensure the network performs as expected.
*
@ -190,7 +235,16 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
* tests start at the right time. Whilst this is works well for now, it's subject to
* breakage by changes to the VC.
*/
let (finalization, block_prod, validator_count, onboarding, fork, sync_aggregate) = futures::join!(
let (
finalization,
block_prod,
validator_count,
onboarding,
fork,
sync_aggregate,
transition,
) = futures::join!(
// Check that the chain finalizes at the first given opportunity.
checks::verify_first_finalization(network.clone(), slot_duration),
// Check that a block is produced at every slot.
@ -212,21 +266,36 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
slot_duration,
total_validator_count,
),
// Check that all nodes have transitioned to the new fork.
// Check that all nodes have transitioned to the required fork.
checks::verify_fork_version(
network.clone(),
Epoch::new(FORK_EPOCH),
if post_merge_sim {
Epoch::new(BELLATRIX_FORK_EPOCH)
} else {
Epoch::new(ALTAIR_FORK_EPOCH)
},
slot_duration,
altair_fork_version
if post_merge_sim {
bellatrix_fork_version
} else {
altair_fork_version
}
),
// Check that all sync aggregates are full.
checks::verify_full_sync_aggregates_up_to(
network.clone(),
// Start checking for sync_aggregates at `FORK_EPOCH + 1` to account for
// inefficiencies in finding subnet peers at the `fork_slot`.
Epoch::new(FORK_EPOCH + 1).start_slot(MinimalEthSpec::slots_per_epoch()),
Epoch::new(ALTAIR_FORK_EPOCH + 1).start_slot(MinimalEthSpec::slots_per_epoch()),
Epoch::new(END_EPOCH).start_slot(MinimalEthSpec::slots_per_epoch()),
slot_duration,
),
// Check that the transition block is finalized.
checks::verify_transition_block_finalized(
network.clone(),
Epoch::new(TERMINAL_BLOCK / MinimalEthSpec::slots_per_epoch()),
slot_duration,
post_merge_sim
)
);
@ -236,6 +305,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
onboarding?;
fork?;
sync_aggregate?;
transition?;
// The `final_future` either completes immediately or never completes, depending on the value
// of `continue_after_checks`.

View File

@ -1,7 +1,8 @@
use node_test_rig::{
environment::RuntimeContext,
eth2::{types::StateId, BeaconNodeHttpClient},
ClientConfig, LocalBeaconNode, LocalValidatorClient, ValidatorConfig, ValidatorFiles,
ClientConfig, LocalBeaconNode, LocalExecutionNode, LocalValidatorClient, MockExecutionConfig,
MockServerConfig, ValidatorConfig, ValidatorFiles,
};
use parking_lot::RwLock;
use sensitive_url::SensitiveUrl;
@ -15,11 +16,17 @@ use types::{Epoch, EthSpec};
const BOOTNODE_PORT: u16 = 42424;
pub const INVALID_ADDRESS: &str = "http://127.0.0.1:42423";
pub const EXECUTION_PORT: u16 = 4000;
pub const TERMINAL_DIFFICULTY: u64 = 6400;
pub const TERMINAL_BLOCK: u64 = 64;
/// Helper struct to reduce `Arc` usage.
pub struct Inner<E: EthSpec> {
pub context: RuntimeContext<E>,
pub beacon_nodes: RwLock<Vec<LocalBeaconNode<E>>>,
pub validator_clients: RwLock<Vec<LocalValidatorClient<E>>>,
pub execution_nodes: RwLock<Vec<LocalExecutionNode<E>>>,
}
/// Represents a set of interconnected `LocalBeaconNode` and `LocalValidatorClient`.
@ -46,7 +53,7 @@ impl<E: EthSpec> Deref for LocalNetwork<E> {
}
impl<E: EthSpec> LocalNetwork<E> {
/// Creates a new network with a single `BeaconNode`.
/// Creates a new network with a single `BeaconNode` and a connected `ExecutionNode`.
pub async fn new(
context: RuntimeContext<E>,
mut beacon_config: ClientConfig,
@ -56,6 +63,30 @@ impl<E: EthSpec> LocalNetwork<E> {
beacon_config.network.enr_udp_port = Some(BOOTNODE_PORT);
beacon_config.network.enr_tcp_port = Some(BOOTNODE_PORT);
beacon_config.network.discv5_config.table_filter = |_| true;
let execution_node = if let Some(el_config) = &mut beacon_config.execution_layer {
let mock_execution_config = MockExecutionConfig {
server_config: MockServerConfig {
listen_port: EXECUTION_PORT,
..Default::default()
},
terminal_block: TERMINAL_BLOCK,
terminal_difficulty: TERMINAL_DIFFICULTY.into(),
..Default::default()
};
let execution_node = LocalExecutionNode::new(
context.service_context("boot_node_el".into()),
mock_execution_config,
);
el_config.default_datadir = execution_node.datadir.path().to_path_buf();
el_config.secret_files = vec![execution_node.datadir.path().join("jwt.hex")];
el_config.execution_endpoints =
vec![SensitiveUrl::parse(&execution_node.server.url()).unwrap()];
vec![execution_node]
} else {
vec![]
};
let beacon_node =
LocalBeaconNode::production(context.service_context("boot_node".into()), beacon_config)
.await?;
@ -63,6 +94,7 @@ impl<E: EthSpec> LocalNetwork<E> {
inner: Arc::new(Inner {
context,
beacon_nodes: RwLock::new(vec![beacon_node]),
execution_nodes: RwLock::new(execution_node),
validator_clients: RwLock::new(vec![]),
}),
})
@ -87,6 +119,7 @@ impl<E: EthSpec> LocalNetwork<E> {
/// Adds a beacon node to the network, connecting to the 0'th beacon node via ENR.
pub async fn add_beacon_node(&self, mut beacon_config: ClientConfig) -> Result<(), String> {
let self_1 = self.clone();
let count = self.beacon_node_count() as u16;
println!("Adding beacon node..");
{
let read_lock = self.beacon_nodes.read();
@ -99,20 +132,38 @@ impl<E: EthSpec> LocalNetwork<E> {
.enr()
.expect("bootnode must have a network"),
);
let count = self.beacon_node_count() as u16;
beacon_config.network.discovery_port = BOOTNODE_PORT + count;
beacon_config.network.libp2p_port = BOOTNODE_PORT + count;
beacon_config.network.enr_udp_port = Some(BOOTNODE_PORT + count);
beacon_config.network.enr_tcp_port = Some(BOOTNODE_PORT + count);
beacon_config.network.discv5_config.table_filter = |_| true;
}
if let Some(el_config) = &mut beacon_config.execution_layer {
let config = MockExecutionConfig {
server_config: MockServerConfig {
listen_port: EXECUTION_PORT + count,
..Default::default()
},
terminal_block: TERMINAL_BLOCK,
terminal_difficulty: TERMINAL_DIFFICULTY.into(),
..Default::default()
};
let execution_node = LocalExecutionNode::new(
self.context.service_context(format!("node_{}_el", count)),
config,
);
el_config.default_datadir = execution_node.datadir.path().to_path_buf();
el_config.secret_files = vec![execution_node.datadir.path().join("jwt.hex")];
el_config.execution_endpoints =
vec![SensitiveUrl::parse(&execution_node.server.url()).unwrap()];
self.execution_nodes.write().push(execution_node);
}
// We create the beacon node without holding the lock, so that the lock isn't held
// across the await. This is only correct if this function never runs in parallel
// with itself (which at the time of writing, it does not).
let index = self_1.beacon_nodes.read().len();
let beacon_node = LocalBeaconNode::production(
self.context.service_context(format!("node_{}", index)),
self.context.service_context(format!("node_{}", count)),
beacon_config,
)
.await?;
@ -184,6 +235,16 @@ impl<E: EthSpec> LocalNetwork<E> {
.map(|body| body.unwrap().data.finalized.epoch)
}
pub fn mine_pow_blocks(&self, block_number: u64) -> Result<(), String> {
let execution_nodes = self.execution_nodes.read();
for execution_node in execution_nodes.iter() {
let mut block_gen = execution_node.server.ctx.execution_block_generator.write();
block_gen.insert_pow_block(block_number)?;
println!("Mined pow block {}", block_number);
}
Ok(())
}
pub async fn duration_to_genesis(&self) -> Duration {
let nodes = self.remote_nodes().expect("Failed to get remote nodes");
let bootnode = nodes.first().expect("Should contain bootnode");