Merge pull request #27 from realbigsean/sean-interop-4844-testing

P2P fixes, local post merge testnet, query different EL endpoints at fork boundaries

Commit be8c6349dc

Cargo.lock (generated, 1 line changed)
@@ -3228,6 +3228,7 @@ dependencies = [
  "environment",
  "eth1_test_rig",
  "eth2",
+ "eth2_hashing",
  "eth2_network_config",
  "eth2_ssz",
  "eth2_wallet",
@@ -103,6 +103,7 @@ use store::{
 use task_executor::{ShutdownReason, TaskExecutor};
 use tree_hash::TreeHash;
 use types::beacon_state::CloneConfig;
+use types::consts::eip4844::MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS;
 use types::signed_block_and_blobs::BlockWrapper;
 use types::*;
@@ -5423,7 +5424,26 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     pub fn data_availability_boundary(&self) -> Option<Epoch> {
         self.spec
             .eip4844_fork_epoch
-            .map(|e| std::cmp::max(e, self.head().finalized_checkpoint().epoch))
+            .map(|fork_epoch| {
+                self.epoch().ok().map(|current_epoch| {
+                    std::cmp::max(
+                        fork_epoch,
+                        current_epoch.saturating_sub(*MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS),
+                    )
+                })
+            })
+            .flatten()
     }
+
+    /// Returns `true` if we are at or past the `Eip4844` fork. This will always return `false` if
+    /// the `Eip4844` fork is disabled.
+    pub fn is_data_availability_check_required(&self) -> Result<bool, Error> {
+        let current_epoch = self.epoch()?;
+        Ok(self
+            .spec
+            .eip4844_fork_epoch
+            .map(|fork_epoch| fork_epoch <= current_epoch)
+            .unwrap_or(false))
+    }
 }
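The new boundary takes the tighter of two constraints: blobs are only served from the EIP-4844 fork epoch onward, and only within the `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` pruning window (the chained `.map(...).map(...).flatten()` is equivalent to a single `and_then`). A standalone sketch of the same rule, with Lighthouse's types reduced to plain `u64` epochs and the spec and slot clock to bare fields:

// Standalone model of `data_availability_boundary` (simplified types, not
// Lighthouse's own).
const MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS: u64 = 4096;

struct Chain {
    eip4844_fork_epoch: Option<u64>, // None = fork disabled
    current_epoch: Option<u64>,      // None = slot clock unreadable
}

impl Chain {
    fn data_availability_boundary(&self) -> Option<u64> {
        self.eip4844_fork_epoch.and_then(|fork_epoch| {
            self.current_epoch.map(|current_epoch| {
                std::cmp::max(
                    fork_epoch,
                    current_epoch.saturating_sub(MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS),
                )
            })
        })
    }
}

fn main() {
    // Right after the fork, the fork epoch dominates.
    let chain = Chain { eip4844_fork_epoch: Some(100_000), current_epoch: Some(100_010) };
    assert_eq!(chain.data_availability_boundary(), Some(100_000));

    // Long after the fork, the pruning window dominates.
    let chain = Chain { eip4844_fork_epoch: Some(100_000), current_epoch: Some(200_000) };
    assert_eq!(chain.data_availability_boundary(), Some(200_000 - 4096));

    // Fork disabled: no boundary, no availability checks.
    let chain = Chain { eip4844_fork_epoch: None, current_epoch: Some(200_000) };
    assert_eq!(chain.data_availability_boundary(), None);
}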
@@ -1,10 +1,10 @@
 use slot_clock::SlotClock;

 use crate::beacon_chain::{BeaconChain, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY};
-use bls::PublicKey;
-use types::consts::eip4844::BLS_MODULUS;
 use crate::{kzg_utils, BeaconChainError};
+use bls::PublicKey;
 use state_processing::per_block_processing::eip4844::eip4844::verify_kzg_commitments_against_transactions;
+use types::consts::eip4844::BLS_MODULUS;
 use types::{BeaconStateError, BlobsSidecar, Hash256, KzgCommitment, Slot, Transactions};

 #[derive(Debug)]
@@ -19,15 +19,15 @@ pub enum BlobError {
         message_slot: Slot,
         latest_permissible_slot: Slot,
     },
-    /// The blob sidecar is from a slot that is prior to the earliest permissible slot (with
-    /// respect to the gossip clock disparity).
+    /// The blob sidecar has a different slot than the block.
     ///
     /// ## Peer scoring
     ///
     /// Assuming the local clock is correct, the peer has sent an invalid message.
-    PastSlot {
-        message_slot: Slot,
-        earliest_permissible_slot: Slot,
+    SlotMismatch {
+        blob_slot: Slot,
+        block_slot: Slot,
     },

     /// The blob sidecar contains an incorrectly formatted `BLSFieldElement` > `BLS_MODULUS`.
@@ -122,14 +122,10 @@ pub fn validate_blob_for_gossip<T: BeaconChainTypes>(
         });
     }

-    let earliest_permissible_slot = chain
-        .slot_clock
-        .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
-        .ok_or(BeaconChainError::UnableToReadSlot)?;
-    if blob_slot > earliest_permissible_slot {
-        return Err(BlobError::PastSlot {
-            message_slot: earliest_permissible_slot,
-            earliest_permissible_slot: blob_slot,
+    if blob_slot != block_slot {
+        return Err(BlobError::SlotMismatch {
+            blob_slot,
+            block_slot,
         });
     }
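The replaced check guarded against blobs from the distant past relative to the local clock; since a blob sidecar is now validated alongside the block it was gossiped with, the only slot property that matters is that the pair agree. A minimal model of the new check (illustrative names, not Lighthouse's full types):

// Model of the new gossip check: a blob sidecar must carry the same slot as
// the block it accompanies.
#[derive(Debug, PartialEq)]
enum BlobError {
    SlotMismatch { blob_slot: u64, block_slot: u64 },
}

fn validate_blob_slot(blob_slot: u64, block_slot: u64) -> Result<(), BlobError> {
    if blob_slot != block_slot {
        return Err(BlobError::SlotMismatch { blob_slot, block_slot });
    }
    Ok(())
}

fn main() {
    assert!(validate_blob_slot(7, 7).is_ok());
    assert_eq!(
        validate_blob_slot(7, 8),
        Err(BlobError::SlotMismatch { blob_slot: 7, block_slot: 8 })
    );
}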
@@ -31,10 +31,12 @@ pub const ETH_SYNCING_TIMEOUT: Duration = Duration::from_secs(1);

 pub const ENGINE_NEW_PAYLOAD_V1: &str = "engine_newPayloadV1";
 pub const ENGINE_NEW_PAYLOAD_V2: &str = "engine_newPayloadV2";
+pub const ENGINE_NEW_PAYLOAD_V3: &str = "engine_newPayloadV3";
 pub const ENGINE_NEW_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(8);

 pub const ENGINE_GET_PAYLOAD_V1: &str = "engine_getPayloadV1";
 pub const ENGINE_GET_PAYLOAD_V2: &str = "engine_getPayloadV2";
+pub const ENGINE_GET_PAYLOAD_V3: &str = "engine_getPayloadV3";
 pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2);

 pub const ENGINE_GET_BLOBS_BUNDLE_V1: &str = "engine_getBlobsBundleV1";
@@ -708,6 +710,23 @@ impl HttpJsonRpc {
         Ok(response.into())
     }

+    pub async fn new_payload_v3<T: EthSpec>(
+        &self,
+        execution_payload: ExecutionPayload<T>,
+    ) -> Result<PayloadStatusV1, Error> {
+        let params = json!([JsonExecutionPayloadV2::try_from(execution_payload)?]);
+
+        let response: JsonPayloadStatusV1 = self
+            .rpc_request(
+                ENGINE_NEW_PAYLOAD_V3,
+                params,
+                ENGINE_NEW_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier,
+            )
+            .await?;
+
+        Ok(response.into())
+    }
+
     pub async fn get_payload_v1<T: EthSpec>(
         &self,
         fork_name: ForkName,
@@ -744,6 +763,24 @@ impl HttpJsonRpc {
         JsonExecutionPayload::V2(payload_v2).try_into_execution_payload(fork_name)
     }

+    pub async fn get_payload_v3<T: EthSpec>(
+        &self,
+        fork_name: ForkName,
+        payload_id: PayloadId,
+    ) -> Result<ExecutionPayload<T>, Error> {
+        let params = json!([JsonPayloadIdRequest::from(payload_id)]);
+
+        let payload_v2: JsonExecutionPayloadV2<T> = self
+            .rpc_request(
+                ENGINE_GET_PAYLOAD_V3,
+                params,
+                ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier,
+            )
+            .await?;
+
+        JsonExecutionPayload::V2(payload_v2).try_into_execution_payload(fork_name)
+    }
+
     pub async fn get_blobs_bundle_v1<T: EthSpec>(
         &self,
         payload_id: PayloadId,
@@ -855,13 +892,10 @@ impl HttpJsonRpc {
         &self,
         execution_payload: ExecutionPayload<T>,
     ) -> Result<PayloadStatusV1, Error> {
-        let supported_apis = self.get_cached_supported_apis().await?;
-        if supported_apis.new_payload_v2 {
-            self.new_payload_v2(execution_payload).await
-        } else if supported_apis.new_payload_v1 {
-            self.new_payload_v1(execution_payload).await
-        } else {
-            Err(Error::RequiredMethodUnsupported("engine_newPayload"))
+        match execution_payload {
+            ExecutionPayload::Eip4844(_) => self.new_payload_v3(execution_payload).await,
+            ExecutionPayload::Capella(_) => self.new_payload_v2(execution_payload).await,
+            ExecutionPayload::Merge(_) => self.new_payload_v1(execution_payload).await,
         }
     }
@@ -872,13 +906,11 @@
         fork_name: ForkName,
         payload_id: PayloadId,
     ) -> Result<ExecutionPayload<T>, Error> {
-        let supported_apis = self.get_cached_supported_apis().await?;
-        if supported_apis.get_payload_v2 {
-            self.get_payload_v2(fork_name, payload_id).await
-        } else if supported_apis.new_payload_v1 {
-            self.get_payload_v1(fork_name, payload_id).await
-        } else {
-            Err(Error::RequiredMethodUnsupported("engine_getPayload"))
+        match fork_name {
+            ForkName::Eip4844 => self.get_payload_v3(fork_name, payload_id).await,
+            ForkName::Capella => self.get_payload_v2(fork_name, payload_id).await,
+            ForkName::Merge => self.get_payload_v1(fork_name, payload_id).await,
+            _ => Err(Error::RequiredMethodUnsupported("engine_getPayload")),
         }
     }
@@ -886,23 +918,25 @@
     // forkchoice_updated that the execution engine supports
     pub async fn forkchoice_updated(
         &self,
+        fork_name: ForkName,
         forkchoice_state: ForkchoiceState,
         payload_attributes: Option<PayloadAttributes>,
     ) -> Result<ForkchoiceUpdatedResponse, Error> {
-        let supported_apis = self.get_cached_supported_apis().await?;
-        if supported_apis.forkchoice_updated_v2 {
-            self.forkchoice_updated_v2(forkchoice_state, payload_attributes)
-                .await
-        } else if supported_apis.forkchoice_updated_v1 {
-            self.forkchoice_updated_v1(
-                forkchoice_state,
-                payload_attributes
-                    .map(|pa| pa.downgrade_to_v1())
-                    .transpose()?,
-            )
-            .await
-        } else {
-            Err(Error::RequiredMethodUnsupported("engine_forkchoiceUpdated"))
+        match fork_name {
+            ForkName::Capella | ForkName::Eip4844 => {
+                self.forkchoice_updated_v2(forkchoice_state, payload_attributes)
+                    .await
+            }
+            ForkName::Merge => {
+                self.forkchoice_updated_v1(
+                    forkchoice_state,
+                    payload_attributes
+                        .map(|pa| pa.downgrade_to_v1())
+                        .transpose()?,
+                )
+                .await
+            }
+            _ => Err(Error::RequiredMethodUnsupported("engine_forkchoiceUpdated")),
         }
     }
 }
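Taken together, these three hunks replace capability probing (`supported_apis`) with a rule where the engine API version is a pure function of the fork. A compact sketch of that mapping, using the `engine_newPayload*` constants added at the top of this file (the `forkchoiceUpdated` method names are assumed from the V1/V2 calls above):

// Sketch of the dispatch rule: the endpoint version follows the fork, not a
// capability probe of the execution engine.
#[derive(Clone, Copy, Debug)]
enum ForkName { Merge, Capella, Eip4844 }

fn new_payload_method(fork: ForkName) -> &'static str {
    match fork {
        ForkName::Merge => "engine_newPayloadV1",
        ForkName::Capella => "engine_newPayloadV2",
        ForkName::Eip4844 => "engine_newPayloadV3",
    }
}

fn forkchoice_updated_method(fork: ForkName) -> &'static str {
    match fork {
        ForkName::Merge => "engine_forkchoiceUpdatedV1",
        ForkName::Capella | ForkName::Eip4844 => "engine_forkchoiceUpdatedV2",
    }
}

fn main() {
    assert_eq!(new_payload_method(ForkName::Merge), "engine_newPayloadV1");
    assert_eq!(new_payload_method(ForkName::Eip4844), "engine_newPayloadV3");
    assert_eq!(forkchoice_updated_method(ForkName::Capella), "engine_forkchoiceUpdatedV2");
}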
@@ -11,7 +11,7 @@ use std::sync::Arc;
 use task_executor::TaskExecutor;
 use tokio::sync::{watch, Mutex, RwLock};
 use tokio_stream::wrappers::WatchStream;
-use types::ExecutionBlockHash;
+use types::{Address, ExecutionBlockHash, ForkName, Hash256};

 /// The number of payload IDs that will be stored for each `Engine`.
 ///

@@ -114,7 +114,7 @@ pub struct Engine {
     pub api: HttpJsonRpc,
     payload_id_cache: Mutex<LruCache<PayloadIdCacheKey, PayloadId>>,
     state: RwLock<State>,
-    latest_forkchoice_state: RwLock<Option<ForkchoiceState>>,
+    latest_forkchoice_state: RwLock<Option<(ForkName, ForkchoiceState)>>,
     executor: TaskExecutor,
     log: Logger,
 }
@@ -153,13 +153,15 @@ impl Engine {

     pub async fn notify_forkchoice_updated(
         &self,
+        fork_name: ForkName,
         forkchoice_state: ForkchoiceState,
         payload_attributes: Option<PayloadAttributes>,
         log: &Logger,
     ) -> Result<ForkchoiceUpdatedResponse, EngineApiError> {
+        info!(log, "Notifying FCU"; "fork_name" => ?fork_name);
         let response = self
             .api
-            .forkchoice_updated(forkchoice_state, payload_attributes.clone())
+            .forkchoice_updated(fork_name, forkchoice_state, payload_attributes.clone())
             .await?;

         if let Some(payload_id) = response.payload_id {
@@ -179,18 +181,18 @@
         Ok(response)
     }

-    async fn get_latest_forkchoice_state(&self) -> Option<ForkchoiceState> {
+    async fn get_latest_forkchoice_state(&self) -> Option<(ForkName, ForkchoiceState)> {
         *self.latest_forkchoice_state.read().await
     }

-    pub async fn set_latest_forkchoice_state(&self, state: ForkchoiceState) {
-        *self.latest_forkchoice_state.write().await = Some(state);
+    pub async fn set_latest_forkchoice_state(&self, fork_name: ForkName, state: ForkchoiceState) {
+        *self.latest_forkchoice_state.write().await = Some((fork_name, state));
     }

     async fn send_latest_forkchoice_state(&self) {
         let latest_forkchoice_state = self.get_latest_forkchoice_state().await;

-        if let Some(forkchoice_state) = latest_forkchoice_state {
+        if let Some((fork_name, forkchoice_state)) = latest_forkchoice_state {
             if forkchoice_state.head_block_hash == ExecutionBlockHash::zero() {
                 debug!(
                     self.log,

@@ -204,11 +206,16 @@ impl Engine {
                 self.log,
                 "Issuing forkchoiceUpdated";
                 "forkchoice_state" => ?forkchoice_state,
+                "fork_name" => ?fork_name,
             );

             // For simplicity, payload attributes are never included in this call. It may be
             // reasonable to include them in the future.
-            if let Err(e) = self.api.forkchoice_updated(forkchoice_state, None).await {
+            if let Err(e) = self
+                .api
+                .forkchoice_updated(fork_name, forkchoice_state, None)
+                .await
+            {
                 debug!(
                     self.log,
                     "Failed to issue latest head to engine";
@@ -214,6 +214,7 @@ struct Inner<E: EthSpec> {
     executor: TaskExecutor,
     payload_cache: PayloadCache<E>,
     builder_profit_threshold: Uint256,
+    spec: ChainSpec,
     log: Logger,
 }

@@ -237,6 +238,8 @@ pub struct Config {
     /// The minimum value of an external payload for it to be considered in a proposal.
     pub builder_profit_threshold: u128,
     pub execution_timeout_multiplier: Option<u32>,
+    #[serde(skip)]
+    pub spec: ChainSpec,
 }

 /// Provides access to one execution engine and provides a neat interface for consumption by the

@@ -259,6 +262,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
             default_datadir,
             builder_profit_threshold,
             execution_timeout_multiplier,
+            spec,
         } = config;

         if urls.len() > 1 {

@@ -330,6 +334,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
             executor,
             payload_cache: PayloadCache::default(),
             builder_profit_threshold: Uint256::from(builder_profit_threshold),
+            spec,
             log,
         };
@@ -1005,6 +1010,7 @@ impl<T: EthSpec> ExecutionLayer<T> {

         let response = engine
             .notify_forkchoice_updated(
+                current_fork,
                 fork_choice_state,
                 Some(payload_attributes.clone()),
                 self.log(),
@@ -1263,8 +1269,13 @@ impl<T: EthSpec> ExecutionLayer<T> {
             finalized_block_hash,
         };

+        let fork_name = self
+            .inner
+            .spec
+            .fork_name_at_epoch(next_slot.epoch(T::slots_per_epoch()));
+
         self.engine()
-            .set_latest_forkchoice_state(forkchoice_state)
+            .set_latest_forkchoice_state(fork_name, forkchoice_state)
             .await;

         let payload_attributes_ref = &payload_attributes;

@@ -1273,6 +1284,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
             .request(|engine| async move {
                 engine
                     .notify_forkchoice_updated(
+                        fork_name,
                         forkchoice_state,
                         payload_attributes_ref.clone(),
                         self.log(),
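Passing `fork_name_at_epoch(next_slot.epoch(...))` rather than the head's fork is what makes the PR title's "query different EL endpoints at fork boundaries" work: the fork of the slot being built chooses the endpoint version, so the very first post-fork forkchoiceUpdated already goes out on the new method. A toy model (epochs mirror the local testnet's vars.env, capella at 1 and eip4844 at 2; 32 slots per epoch as in the mainnet preset):

// Model of `fork_name_at_epoch(next_slot.epoch(...))`: pick the fork of the
// slot being built, not of the current head.
const SLOTS_PER_EPOCH: u64 = 32;

#[derive(Clone, Copy, Debug, PartialEq)]
enum ForkName { Merge, Capella, Eip4844 }

struct Spec { capella_epoch: u64, eip4844_epoch: u64 }

impl Spec {
    fn fork_name_at_epoch(&self, epoch: u64) -> ForkName {
        if epoch >= self.eip4844_epoch {
            ForkName::Eip4844
        } else if epoch >= self.capella_epoch {
            ForkName::Capella
        } else {
            ForkName::Merge
        }
    }
}

fn main() {
    let spec = Spec { capella_epoch: 1, eip4844_epoch: 2 };
    // Head at the last slot of epoch 0: the *next* slot (32) is already in
    // epoch 1, so forkchoiceUpdated must be issued as V2 even though the head
    // itself is still pre-Capella.
    let next_slot = 32u64;
    assert_eq!(spec.fork_name_at_epoch(next_slot / SLOTS_PER_EPOCH), ForkName::Capella);
    assert_eq!(spec.fork_name_at_epoch(0), ForkName::Merge);
    assert_eq!(spec.fork_name_at_epoch(2), ForkName::Eip4844);
}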
@@ -252,6 +252,13 @@ pub struct BlobsByRootRequest {
     pub block_roots: VariableList<Hash256, MaxRequestBlocks>,
 }

+impl From<BlocksByRootRequest> for BlobsByRootRequest {
+    fn from(r: BlocksByRootRequest) -> Self {
+        let BlocksByRootRequest { block_roots } = r;
+        Self { block_roots }
+    }
+}
+
 /* RPC Handling and Grouping */
 // Collection of enums and structs used by the Codecs to encode/decode RPC messages
@@ -840,7 +840,17 @@ impl<T: BeaconChainTypes> Worker<T> {
                 );
                 return None;
             }
-            Err(blob_errors) => unimplemented!("handle")
+            Err(e @ BlockError::BlobValidation(_)) => {
+                warn!(self.log, "Could not verify blob for gossip. Rejecting the block and blob";
+                    "error" => %e);
+                self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
+                self.gossip_penalize_peer(
+                    peer_id,
+                    PeerAction::LowToleranceError,
+                    "gossip_blob_low",
+                );
+                return None;
+            }
         };

         metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_VERIFIED_TOTAL);
@@ -10,11 +10,11 @@ use beacon_chain::builder::Witness;
 use beacon_chain::eth1_chain::CachingEth1Backend;
 use lighthouse_network::{NetworkGlobals, Request};
 use slog::{Drain, Level};
-use slot_clock::SystemTimeSlotClock;
+use slot_clock::{SlotClock, SystemTimeSlotClock};
 use store::MemoryStore;
 use tokio::sync::mpsc;
 use types::test_utils::{SeedableRng, TestRandom, XorShiftRng};
-use types::MinimalEthSpec as E;
+use types::{EthSpec, MainnetEthSpec, MinimalEthSpec as E, Slot};

 type T = Witness<SystemTimeSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>;

@@ -55,6 +55,7 @@ impl TestRig {
                 network_tx,
                 globals,
                 beacon_processor_tx,
+                chain,
                 log.new(slog::o!("component" => "network_context")),
             )
         };
@@ -231,6 +231,7 @@ pub fn spawn<T: BeaconChainTypes>(
             network_send,
             network_globals.clone(),
             beacon_processor_send,
+            beacon_chain.clone(),
             log.clone(),
         ),
         range_sync: RangeSync::new(beacon_chain.clone(), log.clone()),
@@ -6,18 +6,21 @@ use super::range_sync::{BatchId, ChainId, ExpectedBatchTy};
 use crate::beacon_processor::WorkEvent;
 use crate::service::{NetworkMessage, RequestId};
 use crate::status::ToStatusMessage;
-use beacon_chain::{BeaconChainTypes, EngineState};
+use beacon_chain::{BeaconChain, BeaconChainTypes, EngineState};
 use fnv::FnvHashMap;
 use lighthouse_network::rpc::methods::BlobsByRangeRequest;
 use lighthouse_network::rpc::{BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason};
 use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource, Request};
 use slog::{debug, trace, warn};
+use slot_clock::SlotClock;
 use std::collections::hash_map::Entry;
 use std::collections::VecDeque;
 use std::sync::Arc;
 use tokio::sync::mpsc;
 use types::signed_block_and_blobs::BlockWrapper;
-use types::{BlobsSidecar, EthSpec, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar};
+use types::{
+    BlobsSidecar, ChainSpec, EthSpec, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar,
+};

 #[derive(Debug, Default)]
 struct BlockBlobRequestInfo<T: EthSpec> {
@@ -94,6 +97,8 @@ pub struct SyncNetworkContext<T: BeaconChainTypes> {
     /// Channel to send work to the beacon processor.
     beacon_processor_send: mpsc::Sender<WorkEvent<T>>,

+    chain: Arc<BeaconChain<T>>,
+
     /// Logger for the `SyncNetworkContext`.
     log: slog::Logger,
 }

@@ -103,6 +108,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
         network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
         network_globals: Arc<NetworkGlobals<T::EthSpec>>,
         beacon_processor_send: mpsc::Sender<WorkEvent<T>>,
+        chain: Arc<BeaconChain<T>>,
         log: slog::Logger,
     ) -> Self {
         SyncNetworkContext {

@@ -115,6 +121,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
             backfill_sidecar_pair_requests: Default::default(),
             execution_engine_state: EngineState::Online, // always assume `Online` at the start
             beacon_processor_send,
+            chain,
             log,
         }
     }
@@ -459,19 +466,29 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
         peer_id: PeerId,
         request: BlocksByRootRequest,
     ) -> Result<Id, &'static str> {
-        //FIXME(sean) add prune depth logic here?
-        // D: YES
-        // MOREINFO: here depending of the boundaries we decide what kind of request we send, if we
-        // request just a block or if we request a block, glob pair.
-
-        trace!(
-            self.log,
-            "Sending BlocksByRoot Request";
-            "method" => "BlocksByRoot",
-            "count" => request.block_roots.len(),
-            "peer" => %peer_id
-        );
-        let request = Request::BlocksByRoot(request);
+        let request = if self
+            .chain
+            .is_data_availability_check_required()
+            .map_err(|_| "Unable to read slot clock")?
+        {
+            trace!(
+                self.log,
+                "Sending BlobsByRoot Request";
+                "method" => "BlobsByRoot",
+                "count" => request.block_roots.len(),
+                "peer" => %peer_id
+            );
+            Request::BlobsByRoot(request.into())
+        } else {
+            trace!(
+                self.log,
+                "Sending BlocksByRoot Request";
+                "method" => "BlocksByRoot",
+                "count" => request.block_roots.len(),
+                "peer" => %peer_id
+            );
+            Request::BlocksByRoot(request)
+        };
         let id = self.next_id();
         let request_id = RequestId::Sync(SyncRequestId::SingleBlock { id });
         self.send_network_msg(NetworkMessage::SendRequest {
@@ -488,14 +505,29 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
         peer_id: PeerId,
         request: BlocksByRootRequest,
     ) -> Result<Id, &'static str> {
-        trace!(
-            self.log,
-            "Sending BlocksByRoot Request";
-            "method" => "BlocksByRoot",
-            "count" => request.block_roots.len(),
-            "peer" => %peer_id
-        );
-        let request = Request::BlocksByRoot(request);
+        let request = if self
+            .chain
+            .is_data_availability_check_required()
+            .map_err(|_| "Unable to read slot clock")?
+        {
+            trace!(
+                self.log,
+                "Sending BlobsByRoot Request";
+                "method" => "BlobsByRoot",
+                "count" => request.block_roots.len(),
+                "peer" => %peer_id
+            );
+            Request::BlobsByRoot(request.into())
+        } else {
+            trace!(
+                self.log,
+                "Sending BlocksByRoot Request";
+                "method" => "BlocksByRoot",
+                "count" => request.block_roots.len(),
+                "peer" => %peer_id
+            );
+            Request::BlocksByRoot(request)
+        };
         let id = self.next_id();
         let request_id = RequestId::Sync(SyncRequestId::ParentLookup { id });
         self.send_network_msg(NetworkMessage::SendRequest {
@@ -589,15 +621,21 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
             EPOCHS_PER_BATCH, 1,
             "If this is not one, everything will fail horribly"
         );
-        warn!(
-            self.log,
-            "Missing fork boundary and prunning boundary comparison to decide request type. EVERYTHING IS A BLOB, BOB."
-        );
-
-        // Here we need access to the beacon chain, check the fork boundary, the current epoch, the
-        // blob period to serve and check with that if the batch is a blob batch or not.
+
         // NOTE: This would carelessly assume batch sizes are always 1 epoch, to avoid needing to
        // align with the batch boundary.
-        ExpectedBatchTy::OnlyBlockBlobs
+        if let Some(data_availability_boundary) = self.chain.data_availability_boundary() {
+            if epoch >= data_availability_boundary {
+                ExpectedBatchTy::OnlyBlockBlobs
+            } else {
+                ExpectedBatchTy::OnlyBlock
+            }
+        } else {
+            ExpectedBatchTy::OnlyBlock
+        }
     }
 }
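The placeholder warn! ("EVERYTHING IS A BLOB, BOB.") is gone: the batch type now follows the data availability boundary computed on the beacon chain. A standalone model of the rule (`ExpectedBatchTy` mirrors the range-sync enum; epochs are plain u64s):

// Batches at or past the boundary are requested as block+blob pairs; older
// batches, or a disabled fork, are block-only.
#[derive(Debug, PartialEq)]
enum ExpectedBatchTy { OnlyBlock, OnlyBlockBlobs }

fn batch_type(epoch: u64, data_availability_boundary: Option<u64>) -> ExpectedBatchTy {
    match data_availability_boundary {
        Some(boundary) if epoch >= boundary => ExpectedBatchTy::OnlyBlockBlobs,
        _ => ExpectedBatchTy::OnlyBlock,
    }
}

fn main() {
    assert_eq!(batch_type(5_000, Some(4_000)), ExpectedBatchTy::OnlyBlockBlobs);
    assert_eq!(batch_type(3_999, Some(4_000)), ExpectedBatchTy::OnlyBlock);
    assert_eq!(batch_type(5_000, None), ExpectedBatchTy::OnlyBlock);
}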
@@ -388,11 +388,12 @@ mod tests {
    use slog::{o, Drain};
    use tokio::sync::mpsc;

-    use slot_clock::SystemTimeSlotClock;
+    use slot_clock::{SlotClock, SystemTimeSlotClock};
    use std::collections::HashSet;
    use std::sync::Arc;
    use std::time::Duration;
    use store::MemoryStore;
-    use types::{Hash256, MinimalEthSpec as E};
+    use types::{Hash256, MainnetEthSpec, MinimalEthSpec as E};

    #[derive(Debug)]
    struct FakeStorage {

@@ -606,6 +607,7 @@ mod tests {
            network_tx,
            globals.clone(),
            beacon_processor_tx,
+            chain,
            log.new(o!("component" => "network_context")),
        );
        let test_rig = TestRig {
@@ -342,6 +342,7 @@ pub fn get_config<E: EthSpec>(
    let execution_timeout_multiplier =
        clap_utils::parse_required(cli_args, "execution-timeout-multiplier")?;
    el_config.execution_timeout_multiplier = Some(execution_timeout_multiplier);
+    el_config.spec = spec.clone();

    // If `--execution-endpoint` is provided, we should ignore any `--eth1-endpoints` values and
    // use `--execution-endpoint` instead. Also, log a deprecation warning.
@@ -23,7 +23,7 @@ pub mod merge {
     pub const INTERVALS_PER_SLOT: u64 = 3;
 }
 pub mod eip4844 {
-    use crate::Uint256;
+    use crate::{Epoch, Uint256};

     use lazy_static::lazy_static;

@@ -32,6 +32,7 @@ pub mod eip4844 {
             "52435875175126190479447740508185965837690552500527637822603658699938581184513"
         )
         .expect("should initialize BLS_MODULUS");
+        pub static ref MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS: Epoch = Epoch::from(4096_u64);
     }
     pub const BLOB_TX_TYPE: u8 = 5;
     pub const VERSIONED_HASH_VERSION_KZG: u8 = 1;
@@ -10,7 +10,7 @@ use std::fmt;
 #[derive(Default, Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Hash, Derivative)]
 #[derivative(Debug = "transparent")]
 #[serde(transparent)]
-pub struct ExecutionBlockHash(Hash256);
+pub struct ExecutionBlockHash(pub Hash256);

 impl ExecutionBlockHash {
     pub fn zero() -> Self {
@@ -23,6 +23,7 @@ types = { path = "../consensus/types" }
 state_processing = { path = "../consensus/state_processing" }
 int_to_bytes = { path = "../consensus/int_to_bytes" }
 eth2_ssz = "0.4.1"
+eth2_hashing = "0.3.0"
 environment = { path = "../lighthouse/environment" }
 eth2_network_config = { path = "../common/eth2_network_config" }
 genesis = { path = "../beacon_node/genesis" }
@@ -559,14 +559,41 @@ fn main() {
                ),
        )
        .arg(
-            Arg::with_name("merge-fork-epoch")
-                .long("merge-fork-epoch")
+            Arg::with_name("bellatrix-fork-epoch")
+                .long("bellatrix-fork-epoch")
                .value_name("EPOCH")
                .takes_value(true)
                .help(
                    "The epoch at which to enable the Merge hard fork",
                ),
        )
+        .arg(
+            Arg::with_name("capella-fork-epoch")
+                .long("capella-fork-epoch")
+                .value_name("EPOCH")
+                .takes_value(true)
+                .help(
+                    "The epoch at which to enable the Capella hard fork",
+                ),
+        )
+        .arg(
+            Arg::with_name("eip4844-fork-epoch")
+                .long("eip4844-fork-epoch")
+                .value_name("EPOCH")
+                .takes_value(true)
+                .help(
+                    "The epoch at which to enable the eip4844 hard fork",
+                ),
+        )
+        .arg(
+            Arg::with_name("ttd")
+                .long("ttd")
+                .value_name("TTD")
+                .takes_value(true)
+                .help(
+                    "The terminal total difficulty",
+                ),
+        )
        .arg(
            Arg::with_name("eth1-block-hash")
                .long("eth1-block-hash")
@@ -1,17 +1,25 @@
 use clap::ArgMatches;
 use clap_utils::{parse_optional, parse_required, parse_ssz_optional};
+use eth2_hashing::hash;
 use eth2_network_config::Eth2NetworkConfig;
 use genesis::interop_genesis_state;
 use ssz::Decode;
+use ssz::Encode;
+use state_processing::process_activations;
+use state_processing::upgrade::{
+    upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_eip4844,
+};
 use std::fs::File;
 use std::io::Read;
 use std::path::PathBuf;
+use std::str::FromStr;
 use std::time::{SystemTime, UNIX_EPOCH};
 use types::{
-    test_utils::generate_deterministic_keypairs, Address, Config, EthSpec, ExecutionPayloadHeader,
-    ExecutionPayloadHeaderMerge,
+    test_utils::generate_deterministic_keypairs, Address, BeaconState, ChainSpec, Config, Eth1Data,
+    EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderMerge, Hash256, Keypair, PublicKey,
+    Validator,
 };
+use types::{BeaconStateMerge, ExecutionBlockHash};

 pub fn run<T: EthSpec>(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> {
     let deposit_contract_address: Address = parse_required(matches, "deposit-contract-address")?;
@@ -63,10 +71,22 @@ pub fn run<T: EthSpec>(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> {
         spec.altair_fork_epoch = Some(fork_epoch);
     }

-    if let Some(fork_epoch) = parse_optional(matches, "merge-fork-epoch")? {
+    if let Some(fork_epoch) = parse_optional(matches, "bellatrix-fork-epoch")? {
         spec.bellatrix_fork_epoch = Some(fork_epoch);
     }

+    if let Some(fork_epoch) = parse_optional(matches, "capella-fork-epoch")? {
+        spec.capella_fork_epoch = Some(fork_epoch);
+    }
+
+    if let Some(fork_epoch) = parse_optional(matches, "eip4844-fork-epoch")? {
+        spec.eip4844_fork_epoch = Some(fork_epoch);
+    }
+
+    if let Some(ttd) = parse_optional(matches, "ttd")? {
+        spec.terminal_total_difficulty = ttd;
+    }
+
     let genesis_state_bytes = if matches.is_present("interop-genesis-state") {
         let execution_payload_header: Option<ExecutionPayloadHeader<T>> =
             parse_optional(matches, "execution-payload-header")?
@@ -108,7 +128,7 @@ pub fn run<T: EthSpec>(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> {

     let keypairs = generate_deterministic_keypairs(validator_count);

-    let genesis_state = interop_genesis_state::<T>(
+    let genesis_state = initialize_state_with_validators::<T>(
         &keypairs,
         genesis_time,
         eth1_block_hash.into_root(),
@@ -130,3 +150,103 @@ pub fn run<T: EthSpec>(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> {

     testnet.write_to_file(testnet_dir_path, overwrite_files)
 }
+
+fn initialize_state_with_validators<T: EthSpec>(
+    keypairs: &[Keypair],
+    genesis_time: u64,
+    eth1_block_hash: Hash256,
+    execution_payload_header: Option<ExecutionPayloadHeader<T>>,
+    spec: &ChainSpec,
+) -> Result<BeaconState<T>, String> {
+    let default_header = ExecutionPayloadHeaderMerge {
+        gas_limit: 10,
+        base_fee_per_gas: 10.into(),
+        timestamp: genesis_time,
+        block_hash: ExecutionBlockHash(eth1_block_hash),
+        prev_randao: Hash256::random(),
+        parent_hash: ExecutionBlockHash::zero(),
+        transactions_root: Hash256::random(),
+        ..ExecutionPayloadHeaderMerge::default()
+    };
+    let execution_payload_header =
+        execution_payload_header.or(Some(ExecutionPayloadHeader::Merge(default_header)));
+    // Empty eth1 data
+    let eth1_data = Eth1Data {
+        block_hash: eth1_block_hash,
+        deposit_count: 0,
+        deposit_root: Hash256::from_str(
+            "0xd70a234731285c6804c2a4f56711ddb8c82c99740f207854891028af34e27e5e",
+        )
+        .unwrap(), // empty deposit tree root
+    };
+    let mut state = BeaconState::new(genesis_time, eth1_data, spec);
+
+    // Seed RANDAO with Eth1 entropy
+    state.fill_randao_mixes_with(eth1_block_hash);
+
+    for keypair in keypairs.into_iter() {
+        let withdrawal_credentials = |pubkey: &PublicKey| {
+            let mut credentials = hash(&pubkey.as_ssz_bytes());
+            credentials[0] = spec.bls_withdrawal_prefix_byte;
+            Hash256::from_slice(&credentials)
+        };
+        let amount = spec.max_effective_balance;
+        // Create a new validator.
+        let validator = Validator {
+            pubkey: keypair.pk.clone().into(),
+            withdrawal_credentials: withdrawal_credentials(&keypair.pk),
+            activation_eligibility_epoch: spec.far_future_epoch,
+            activation_epoch: spec.far_future_epoch,
+            exit_epoch: spec.far_future_epoch,
+            withdrawable_epoch: spec.far_future_epoch,
+            effective_balance: std::cmp::min(
+                amount - amount % (spec.effective_balance_increment),
+                spec.max_effective_balance,
+            ),
+            slashed: false,
+        };
+        state.validators_mut().push(validator).unwrap();
+        state.balances_mut().push(amount).unwrap();
+    }
+
+    process_activations(&mut state, spec).unwrap();
+
+    if spec
+        .altair_fork_epoch
+        .map_or(false, |fork_epoch| fork_epoch == T::genesis_epoch())
+    {
+        upgrade_to_altair(&mut state, spec).unwrap();
+
+        state.fork_mut().previous_version = spec.altair_fork_version;
+    }
+
+    // Similarly, perform an upgrade to the merge if configured from genesis.
+    if spec
+        .bellatrix_fork_epoch
+        .map_or(false, |fork_epoch| fork_epoch == T::genesis_epoch())
+    {
+        upgrade_to_bellatrix(&mut state, spec).unwrap();
+
+        // Remove intermediate Altair fork from `state.fork`.
+        state.fork_mut().previous_version = spec.bellatrix_fork_version;
+
+        // Override latest execution payload header.
+        // See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/merge/beacon-chain.md#testing
+        if let Some(ExecutionPayloadHeader::Merge(ref header)) = execution_payload_header {
+            *state
+                .latest_execution_payload_header_merge_mut()
+                .map_err(|_| {
+                    "State must contain bellatrix execution payload header".to_string()
+                })? = header.clone();
+        }
+    }
+
+    // Now that we have our validators, initialize the caches (including the committees)
+    state.build_all_caches(spec).unwrap();
+
+    // Set genesis validators root for domain separation and chain versioning
+    *state.genesis_validators_root_mut() = state.update_validators_tree_hash_cache().unwrap();
+
+    Ok(state)
+}
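One detail of the helper worth calling out is the withdrawal-credentials rule: each validator's credentials are the hash of its SSZ-encoded pubkey with byte 0 overwritten by the withdrawal prefix (Lighthouse uses `eth2_hashing::hash`, i.e. SHA-256; the fixed digest below is a stand-in so the sketch stays self-contained):

// Model of the BLS withdrawal-credentials derivation used above.
fn withdrawal_credentials(pubkey_digest: [u8; 32], prefix: u8) -> [u8; 32] {
    // Replace byte 0 of H(pubkey_ssz) with the withdrawal prefix
    // (spec.bls_withdrawal_prefix_byte, 0x00 for BLS credentials).
    let mut credentials = pubkey_digest;
    credentials[0] = prefix;
    credentials
}

fn main() {
    let fake_digest = [0xAB; 32]; // stand-in for hash(&pubkey.as_ssz_bytes())
    let creds = withdrawal_credentials(fake_digest, 0x00);
    assert_eq!(creds[0], 0x00);
    assert!(creds[1..].iter().all(|b| *b == 0xAB));
}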
@@ -30,6 +30,8 @@ while getopts "d:sh" flag; do
            echo "  DATADIR             Value for --datadir parameter"
            echo "  NETWORK-PORT        Value for --enr-udp-port, --enr-tcp-port and --port"
            echo "  HTTP-PORT           Value for --http-port"
+            echo "  EXECUTION-ENDPOINT  Value for --execution-endpoint"
+            echo "  EXECUTION-JWT       Value for --execution-jwt"
            exit
            ;;
    esac

@@ -39,8 +41,12 @@ done
 data_dir=${@:$OPTIND+0:1}
 network_port=${@:$OPTIND+1:1}
 http_port=${@:$OPTIND+2:1}
+execution_endpoint=${@:$OPTIND+3:1}
+execution_jwt=${@:$OPTIND+4:1}

-exec lighthouse \
+lighthouse_binary=lighthouse
+
+exec $lighthouse_binary \
    --debug-level $DEBUG_LEVEL \
    bn \
    $SUBSCRIBE_ALL_SUBNETS \

@@ -54,4 +60,7 @@ exec lighthouse \
    --port $network_port \
    --http-port $http_port \
    --disable-packet-filter \
-    --target-peers $((BN_COUNT - 1))
+    --target-peers $((BN_COUNT - 1)) \
+    --execution-endpoint $execution_endpoint \
+    --trusted-setup-file ./trusted_setup.txt \
+    --execution-jwt $execution_jwt
scripts/local_testnet/el_bootnode.sh (new executable file, 4 lines)

@@ -0,0 +1,4 @@
+priv_key="02fd74636e96a8ffac8e7b01b0de8dea94d6bcf4989513b38cf59eb32163ff91"
+
+
+/home/sean/CLionProjects/eip4844-interop/geth/go-ethereum/build/bin/bootnode --nodekeyhex $priv_key
scripts/local_testnet/genesis.json (new file, 852 lines; diff suppressed because one or more lines are too long)
scripts/local_testnet/geth.sh (new executable file, 52 lines)

@@ -0,0 +1,52 @@
+set -Eeuo pipefail
+
+source ./vars.env
+
+# Get options
+while getopts "d:sh" flag; do
+  case "${flag}" in
+    d) DEBUG_LEVEL=${OPTARG};;
+    s) SUBSCRIBE_ALL_SUBNETS="--subscribe-all-subnets";;
+    h)
+        echo "Start a geth node"
+        echo
+        echo "usage: $0 <Options> <DATADIR> <NETWORK-PORT> <HTTP-PORT>"
+        echo
+        echo "Options:"
+        echo "   -h: this help"
+        echo
+        echo "Positional arguments:"
+        echo "  DATADIR       Value for --datadir parameter"
+        echo "  NETWORK-PORT  Value for --port"
+        echo "  HTTP-PORT     Value for --http.port"
+        echo "  AUTH-PORT     Value for --authrpc.port"
+        echo "  GENESIS_FILE  Value for geth init"
+        exit
+        ;;
+  esac
+done
+
+# Get positional arguments
+data_dir=${@:$OPTIND+0:1}
+network_port=${@:$OPTIND+1:1}
+http_port=${@:$OPTIND+2:1}
+auth_port=${@:$OPTIND+3:1}
+genesis_file=${@:$OPTIND+4:1}
+
+# Init
+$GETH_BINARY init \
+    --datadir $data_dir \
+    $genesis_file
+
+echo "Completed init"
+
+exec $GETH_BINARY \
+    --datadir $data_dir \
+    --ipcdisable \
+    --http \
+    --http.api="engine,eth,web3,net,debug" \
+    --networkid=$CHAIN_ID \
+    --syncmode=full \
+    --bootnodes $EL_BOOTNODE_ENODE \
+    --port $network_port \
+    --http.port $auth_port
@@ -12,7 +12,7 @@ if [ -f "$1" ]; then
        [[ -n "$pid" ]] || continue

        echo killing $pid
-        kill $pid
+        kill $pid || true
    done < $1
 fi
@@ -13,11 +13,6 @@ set -o nounset -o errexit -o pipefail

 source ./vars.env

-lcli \
-    deploy-deposit-contract \
-    --eth1-http http://localhost:8545 \
-    --confirmations 1 \
-    --validator-count $VALIDATOR_COUNT
-
 NOW=`date +%s`
 GENESIS_TIME=`expr $NOW + $GENESIS_DELAY`

@@ -32,13 +27,20 @@ lcli \
    --genesis-delay $GENESIS_DELAY \
    --genesis-fork-version $GENESIS_FORK_VERSION \
    --altair-fork-epoch $ALTAIR_FORK_EPOCH \
+    --bellatrix-fork-epoch $BELLATRIX_FORK_EPOCH \
+    --capella-fork-epoch $CAPELLA_FORK_EPOCH \
+    --eip4844-fork-epoch $EIP4844_FORK_EPOCH \
+    --ttd $TTD \
    --eth1-block-hash $ETH1_BLOCK_HASH \
    --eth1-id $CHAIN_ID \
    --eth1-follow-distance 1 \
    --seconds-per-slot $SECONDS_PER_SLOT \
    --seconds-per-eth1-block $SECONDS_PER_ETH1_BLOCK \
+    --validator-count $GENESIS_VALIDATOR_COUNT \
+    --interop-genesis-state \
    --force

-echo Specification generated at $TESTNET_DIR.
+echo Specification and genesis.ssz generated at $TESTNET_DIR.
 echo "Generating $VALIDATOR_COUNT validators concurrently... (this may take a while)"

 lcli \

@@ -48,13 +50,10 @@ lcli \
    --node-count $BN_COUNT

 echo Validators generated with keystore passwords at $DATADIR.
-echo "Building genesis state... (this might take a while)"

-lcli \
-    interop-genesis \
-    --spec $SPEC_PRESET \
-    --genesis-time $GENESIS_TIME \
-    --testnet-dir $TESTNET_DIR \
-    $GENESIS_VALIDATOR_COUNT
+GENESIS_TIME=$(lcli pretty-ssz state_merge ~/.lighthouse/local-testnet/testnet/genesis.ssz | jq | grep -Po 'genesis_time": "\K.*\d')
+CAPELLA_TIME=$((GENESIS_TIME + (CAPELLA_FORK_EPOCH * 32 * SECONDS_PER_SLOT)))
+EIP4844_TIME=$((GENESIS_TIME + (EIP4844_FORK_EPOCH * 32 * SECONDS_PER_SLOT)))

 echo Created genesis state in $TESTNET_DIR
+sed -i 's/"shanghaiTime".*$/"shanghaiTime": '"$CAPELLA_TIME"',/g' genesis.json
+sed -i 's/"shardingForkTime".*$/"shardingForkTime": '"$EIP4844_TIME"',/g' genesis.json
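The two sed lines convert the consensus-layer schedule into geth's timestamp-based one: fork_time = genesis_time + fork_epoch * slots_per_epoch * seconds_per_slot, with 32 slots per epoch hard-coded for the mainnet preset. A worked check with illustrative numbers (the genesis time is read from genesis.ssz at runtime; SECONDS_PER_SLOT is defined elsewhere in vars.env, 3 is assumed here):

// CL epoch -> EL timestamp conversion performed by the script.
fn fork_time(genesis_time: u64, fork_epoch: u64, seconds_per_slot: u64) -> u64 {
    genesis_time + fork_epoch * 32 * seconds_per_slot
}

fn main() {
    let genesis_time = 1_667_000_000; // hypothetical value from genesis.ssz
    // vars.env: CAPELLA_FORK_EPOCH=1, EIP4844_FORK_EPOCH=2; assume 3s slots.
    assert_eq!(fork_time(genesis_time, 1, 3), 1_667_000_096); // shanghaiTime
    assert_eq!(fork_time(genesis_time, 2, 3), 1_667_000_192); // shardingForkTime
}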
@@ -40,6 +40,8 @@ if (( $VC_COUNT > $BN_COUNT )); then
    exit
 fi

+genesis_file=${@:$OPTIND+0:1}
+
 # Init some constants
 PID_FILE=$TESTNET_DIR/PIDS.pid
 LOG_DIR=$TESTNET_DIR

@@ -55,6 +57,9 @@ mkdir -p $LOG_DIR
 for (( bn=1; bn<=$BN_COUNT; bn++ )); do
    touch $LOG_DIR/beacon_node_$bn.log
 done
+for (( el=1; el<=$BN_COUNT; el++ )); do
+    touch $LOG_DIR/geth_$el.log
+done
 for (( vc=1; vc<=$VC_COUNT; vc++ )); do
    touch $LOG_DIR/validator_node_$vc.log
 done

@@ -92,12 +97,6 @@ execute_command_add_PID() {
    echo "$!" >> $PID_FILE
 }

-# Start ganache, setup things up and start the bootnode.
-# The delays are necessary, hopefully there is a better way :(
-
-# Delay to let ganache to get started
-execute_command_add_PID ganache_test_node.log ./ganache_test_node.sh
-sleeping 10

 # Setup data
 echo "executing: ./setup.sh >> $LOG_DIR/setup.log"
@@ -105,16 +104,37 @@ echo "executing: ./setup.sh >> $LOG_DIR/setup.log"

 # Delay to let boot_enr.yaml to be created
 execute_command_add_PID bootnode.log ./bootnode.sh
-sleeping 1
+sleeping 3
+
+execute_command_add_PID el_bootnode.log ./el_bootnode.sh
+sleeping 3

 # Start beacon nodes
 BN_udp_tcp_base=9000
 BN_http_port_base=8000

+EL_base_network=7000
+EL_base_http=6000
+EL_base_auth_http=5000
+
 (( $VC_COUNT < $BN_COUNT )) && SAS=-s || SAS=

+for (( el=1; el<=$BN_COUNT; el++ )); do
+    execute_command_add_PID geth_$el.log ./geth.sh $DATADIR/geth_datadir$el $((EL_base_network + $el)) $((EL_base_http + $el)) $((EL_base_auth_http + $el + 10)) $genesis_file
+done
+
+sleeping 20
+
+# Reset the `genesis.json` config file fork times.
+sed -i 's/"shanghaiTime".*$/"shanghaiTime": 0,/g' genesis.json
+sed -i 's/"shardingForkTime".*$/"shardingForkTime": 0,/g' genesis.json
+
 for (( bn=1; bn<=$BN_COUNT; bn++ )); do
-    execute_command_add_PID beacon_node_$bn.log ./beacon_node.sh $SAS -d $DEBUG_LEVEL $DATADIR/node_$bn $((BN_udp_tcp_base + $bn)) $((BN_http_port_base + $bn))
+    execute_command_add_PID json_snoop_$bn.log json_rpc_snoop -p $((EL_base_auth_http + $bn)) -b 0.0.0.0 http://localhost:$((EL_base_auth_http + $bn + 10))
+    secret=$DATADIR/geth_datadir$bn/geth/jwtsecret
+    echo $secret
+    execute_command_add_PID beacon_node_$bn.log ./beacon_node.sh $SAS -d $DEBUG_LEVEL $DATADIR/node_$bn $((BN_udp_tcp_base + $bn)) $((BN_http_port_base + $bn)) http://localhost:$((EL_base_auth_http + $bn)) $secret
 done

 # Start requested number of validator clients
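The port arithmetic above is easy to lose track of: per node n, geth gets p2p port 7000+n and serves HTTP (including the engine API, given geth.sh's --http.api="engine,..." and the auth-offset port it is passed) on 5000+n+10, while json_rpc_snoop listens on 5000+n and proxies to it; the beacon node dials the snoop. A small sketch of the scheme (derived from the script's base constants; illustrative only):

// Port layout per node n, as wired by start_local_testnet.sh.
fn ports(n: u16) -> (u16, u16, u16) {
    let el_p2p = 7000 + n;            // geth --port
    let engine_http = 5000 + n + 10;  // geth --http.port, engine API target
    let snoop = 5000 + n;             // json_rpc_snoop listener; BN endpoint
    (el_p2p, engine_http, snoop)
}

fn main() {
    // Node 1: geth p2p on 7001, engine API on 5011, snoop (BN endpoint) on 5001.
    assert_eq!(ports(1), (7001, 5011, 5001));
}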
scripts/local_testnet/trusted_setup.txt (new file, 4163 lines; diff suppressed because it is too large)
@@ -30,4 +30,5 @@ exec lighthouse \
    --testnet-dir $TESTNET_DIR \
    --init-slashing-protection \
    --beacon-nodes ${@:$OPTIND+1:1} \
+    --suggested-fee-recipient 0x690B9A9E9aa1C9dB991C7721a92d351Db4FaC990 \
    $VC_ARGS
|
||||
GETH_BINARY=geth
|
||||
|
||||
# Base directories for the validator keys and secrets
|
||||
DATADIR=~/.lighthouse/local-testnet
|
||||
|
||||
# Directory for the eth2 config
|
||||
TESTNET_DIR=$DATADIR/testnet
|
||||
|
||||
# Mnemonic for the ganache test network
|
||||
ETH1_NETWORK_MNEMONIC="vast thought differ pull jewel broom cook wrist tribe word before omit"
|
||||
EL_BOOTNODE_ENODE="enode://51ea9bb34d31efc3491a842ed13b8cab70e753af108526b57916d716978b380ed713f4336a80cdb85ec2a115d5a8c0ae9f3247bed3c84d3cb025c6bab311062c@127.0.0.1:0?discport=30301"
|
||||
|
||||
# Hardcoded deposit contract based on ETH1_NETWORK_MNEMONIC
|
||||
DEPOSIT_CONTRACT_ADDRESS=8c594691c0e592ffa21f153a16ae41db5befcaaa
|
||||
# Hardcoded deposit contract
|
||||
DEPOSIT_CONTRACT_ADDRESS=4242424242424242424242424242424242424242
|
||||
|
||||
GENESIS_FORK_VERSION=0x42424242
|
||||
|
||||
# Block hash generated from genesis.json in directory
|
||||
ETH1_BLOCK_HASH=16ef16304456fdacdeb272bd70207021031db355ed6c5e44ebd34c1ab757e221
|
||||
|
||||
VALIDATOR_COUNT=80
|
||||
GENESIS_VALIDATOR_COUNT=80
|
||||
|
||||
@ -33,7 +37,12 @@ BOOTNODE_PORT=4242
|
||||
CHAIN_ID=4242
|
||||
|
||||
# Hard fork configuration
|
||||
ALTAIR_FORK_EPOCH=18446744073709551615
|
||||
ALTAIR_FORK_EPOCH=0
|
||||
BELLATRIX_FORK_EPOCH=0
|
||||
CAPELLA_FORK_EPOCH=1
|
||||
EIP4844_FORK_EPOCH=2
|
||||
|
||||
TTD=0
|
||||
|
||||
# Spec version (mainnet or minimal)
|
||||
SPEC_PRESET=mainnet
|
||||
|
@@ -23,7 +23,7 @@ use std::fmt::Debug;
 #[cfg(not(all(feature = "withdrawals", feature = "withdrawals-processing")))]
 use std::marker::PhantomData;
 use std::path::Path;
-#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))]
+#[cfg(feature = "withdrawals")]
 use types::SignedBlsToExecutionChange;
 use types::{
     Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlindedPayload, ChainSpec, Deposit,

@@ -45,10 +45,7 @@ struct ExecutionMetadata {
 /// Newtype for testing withdrawals.
 #[derive(Debug, Clone, Deserialize)]
 pub struct WithdrawalsPayload<T: EthSpec> {
-    #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))]
     payload: FullPayload<T>,
-    #[cfg(not(all(feature = "withdrawals", feature = "withdrawals-processing")))]
-    _phantom_data: PhantomData<T>,
 }

 #[derive(Debug, Clone)]

@@ -400,6 +397,7 @@ impl<E: EthSpec> Operation<E> for WithdrawalsPayload<E> {
     }
 }

+#[cfg(feature = "withdrawals")]
 impl<E: EthSpec> Operation<E> for SignedBlsToExecutionChange {
     fn handler_name() -> String {
         "bls_to_execution_change".into()