diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 0285d63e5..8334ad180 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -58,8 +58,8 @@ jobs: uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Install anvil - run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil + - name: Install Foundry (anvil) + uses: foundry-rs/foundry-toolchain@v1 - name: Run tests in release run: make test-release release-tests-windows: @@ -78,9 +78,8 @@ jobs: run: | choco install python protoc visualstudio2019-workload-vctools -y npm config set msvs_version 2019 - - name: Install anvil - # Extra feature to work around https://github.com/foundry-rs/foundry/issues/5115 - run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil --features ethers/ipc + - name: Install Foundry (anvil) + uses: foundry-rs/foundry-toolchain@v1 - name: Install make run: choco install -y make - uses: KyleMayes/install-llvm-action@v1 @@ -155,8 +154,8 @@ jobs: uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Install anvil - run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil + - name: Install Foundry (anvil) + uses: foundry-rs/foundry-toolchain@v1 - name: Run tests in debug run: make test-debug state-transition-vectors-ubuntu: @@ -211,8 +210,8 @@ jobs: uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Install anvil - run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil + - name: Install Foundry (anvil) + uses: foundry-rs/foundry-toolchain@v1 - name: Run the beacon chain sim that starts from an eth1 contract run: cargo run --release --bin simulator eth1-sim merge-transition-ubuntu: @@ -227,8 +226,8 @@ jobs: uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Install anvil - run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil + - name: Install Foundry (anvil) + uses: foundry-rs/foundry-toolchain@v1 - name: Run the beacon chain sim and go through the merge transition run: cargo run --release --bin simulator eth1-sim --post-merge no-eth1-simulator-ubuntu: @@ -257,8 +256,8 @@ jobs: uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Install anvil - run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil + - name: Install Foundry (anvil) + uses: foundry-rs/foundry-toolchain@v1 - name: Run the syncing simulator run: cargo run --release --bin simulator syncing-sim doppelganger-protection-test: diff --git a/Cargo.lock b/Cargo.lock index b4f71c1c4..234c49650 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -257,6 +257,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "anvil-rpc" +version = "0.1.0" +source = "git+https://github.com/foundry-rs/foundry?rev=b45456717ffae1af65acdc71099f8cb95e6683a0#b45456717ffae1af65acdc71099f8cb95e6683a0" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "anyhow" version = "1.0.71" @@ -506,9 +515,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.5.17" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"acee9fd5073ab6b045a275b3e709c163dd36c90685219cb21804a147b58dba43" +checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39" dependencies = [ "async-trait", "axum-core", @@ -524,22 +533,23 @@ dependencies = [ "mime", "percent-encoding", "pin-project-lite 0.2.9", + "rustversion", "serde", "serde_json", + "serde_path_to_error", "serde_urlencoded", "sync_wrapper", "tokio", "tower", - "tower-http", "tower-layer", "tower-service", ] [[package]] name = "axum-core" -version = "0.2.9" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37e5939e02c56fecd5c017c37df4238c0a839fa76b7f97acdd7efb804fd181cc" +checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" dependencies = [ "async-trait", "bytes", @@ -547,6 +557,7 @@ dependencies = [ "http", "http-body", "mime", + "rustversion", "tower-layer", "tower-service", ] @@ -605,7 +616,7 @@ checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "beacon-api-client" version = "0.1.0" -source = "git+https://github.com/ralexstokes/beacon-api-client#30679e9e25d61731cde54e14cd8a3688a39d8e5b" +source = "git+https://github.com/ralexstokes/beacon-api-client?rev=93d7e8c#93d7e8c38fe9782c4862909663e7b57c44f805a9" dependencies = [ "ethereum-consensus", "http", @@ -2605,7 +2616,7 @@ dependencies = [ [[package]] name = "ethereum-consensus" version = "0.1.1" -source = "git+https://github.com/ralexstokes//ethereum-consensus?rev=9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d#9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d" +source = "git+https://github.com/ralexstokes/ethereum-consensus?rev=e380108#e380108d15fcc40349927fdf3d11c71f9edb67c2" dependencies = [ "async-stream", "blst", @@ -2618,8 +2629,9 @@ dependencies = [ "rand 0.8.5", "serde", "serde_json", + "serde_yaml", "sha2 0.9.9", - "ssz-rs", + "ssz_rs", "thiserror", "tokio", "tokio-stream", @@ -2893,7 +2905,7 @@ dependencies = [ "serde_json", "slog", "slot_clock", - "ssz-rs", + "ssz_rs", "ssz_types", "state_processing", "strum", @@ -3618,12 +3630,6 @@ dependencies = [ "pin-project-lite 0.2.9", ] -[[package]] -name = "http-range-header" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" - [[package]] name = "http_api" version = "0.1.0" @@ -5104,9 +5110,9 @@ checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "matchit" -version = "0.5.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" +checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" [[package]] name = "md-5" @@ -5195,16 +5201,20 @@ dependencies = [ [[package]] name = "mev-rs" -version = "0.2.1" -source = "git+https://github.com/ralexstokes//mev-rs?rev=7813d4a4a564e0754e9aaab2d95520ba437c3889#7813d4a4a564e0754e9aaab2d95520ba437c3889" +version = "0.3.0" +source = "git+https://github.com/ralexstokes/mev-rs?rev=216657016d5c0889b505857c89ae42c7aa2764af#216657016d5c0889b505857c89ae42c7aa2764af" dependencies = [ + "anvil-rpc", "async-trait", "axum", "beacon-api-client", "ethereum-consensus", "hyper", + "parking_lot 0.12.1", + "reqwest", "serde", - "ssz-rs", + "serde_json", + "ssz_rs", "thiserror", "tokio", "tracing", @@ -5655,6 +5665,7 @@ dependencies = [ "execution_layer", "sensitive_url", "tempfile", + "tokio", "types", "validator_client", "validator_dir", @@ 
-7492,6 +7503,16 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b1b6471d7496b051e03f1958802a73f88b947866f5146f329e47e36554f4e55" +dependencies = [ + "itoa", + "serde", +] + [[package]] name = "serde_repr" version = "0.1.12" @@ -8001,23 +8022,24 @@ dependencies = [ ] [[package]] -name = "ssz-rs" -version = "0.8.0" -source = "git+https://github.com/ralexstokes//ssz-rs?rev=adf1a0b14cef90b9536f28ef89da1fab316465e1#adf1a0b14cef90b9536f28ef89da1fab316465e1" +name = "ssz_rs" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "057291e5631f280978fa9c8009390663ca4613359fc1318e36a8c24c392f6d1f" dependencies = [ "bitvec 1.0.1", "hex", "num-bigint", "serde", "sha2 0.9.9", - "ssz-rs-derive", - "thiserror", + "ssz_rs_derive", ] [[package]] -name = "ssz-rs-derive" -version = "0.8.0" -source = "git+https://github.com/ralexstokes//ssz-rs?rev=adf1a0b14cef90b9536f28ef89da1fab316465e1#adf1a0b14cef90b9536f28ef89da1fab316465e1" +name = "ssz_rs_derive" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f07d54c4d01a1713eb363b55ba51595da15f6f1211435b71466460da022aa140" dependencies = [ "proc-macro2", "quote", @@ -8812,25 +8834,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "tower-http" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" -dependencies = [ - "bitflags 1.3.2", - "bytes", - "futures-core", - "futures-util", - "http", - "http-body", - "http-range-header", - "pin-project-lite 0.2.9", - "tower", - "tower-layer", - "tower-service", -] - [[package]] name = "tower-layer" version = "0.3.2" diff --git a/Cargo.toml b/Cargo.toml index 09763529e..918df9cc0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -93,13 +93,6 @@ resolver = "2" warp = { git = "https://github.com/macladson/warp", rev="7e75acc368229a46a236a8c991bf251fe7fe50ef" } arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="f002b99989b561ddce62e4cf2887b0f8860ae991" } -[patch."https://github.com/ralexstokes/mev-rs"] -mev-rs = { git = "https://github.com/ralexstokes//mev-rs", rev = "7813d4a4a564e0754e9aaab2d95520ba437c3889" } -[patch."https://github.com/ralexstokes/ethereum-consensus"] -ethereum-consensus = { git = "https://github.com/ralexstokes//ethereum-consensus", rev = "9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d" } -[patch."https://github.com/ralexstokes/ssz-rs"] -ssz-rs = { git = "https://github.com/ralexstokes//ssz-rs", rev = "adf1a0b14cef90b9536f28ef89da1fab316465e1" } - [profile.maxperf] inherits = "release" lto = "fat" diff --git a/beacon_node/beacon_chain/src/attestation_rewards.rs b/beacon_node/beacon_chain/src/attestation_rewards.rs index a4a661197..460bf18bc 100644 --- a/beacon_node/beacon_chain/src/attestation_rewards.rs +++ b/beacon_node/beacon_chain/src/attestation_rewards.rs @@ -86,7 +86,7 @@ impl BeaconChain { let ideal_reward = reward_numerator .safe_div(active_increments)? .safe_div(WEIGHT_DENOMINATOR)?; - if !state.is_in_inactivity_leak(previous_epoch, spec) { + if !state.is_in_inactivity_leak(previous_epoch, spec)? 
{
                    ideal_rewards_hashmap
                        .insert((flag_index, effective_balance), (ideal_reward, penalty));
                } else {
diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs
index 84039e422..b28d0b81f 100644
--- a/beacon_node/beacon_chain/src/beacon_chain.rs
+++ b/beacon_node/beacon_chain/src/beacon_chain.rs
@@ -236,6 +236,17 @@ pub struct PrePayloadAttributes {
     pub parent_block_number: u64,
 }

+/// Information about a state/block at a specific slot.
+#[derive(Debug, Clone, Copy)]
+pub struct FinalizationAndCanonicity {
+    /// True if the slot of the state or block is finalized.
+    ///
+    /// This alone DOES NOT imply that the state/block is finalized, use `self.is_finalized()`.
+    pub slot_is_finalized: bool,
+    /// True if the state or block is canonical at its slot.
+    pub canonical: bool,
+}
+
 /// Define whether a forkchoiceUpdate needs to be checked for an override (`Yes`) or has already
 /// been checked (`AlreadyApplied`). It is safe to specify `Yes` even if re-orgs are disabled.
 #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
@@ -470,6 +481,12 @@ pub struct BeaconChain<T: BeaconChainTypes> {

 type BeaconBlockAndState<T, Payload> = (BeaconBlock<T, Payload>, BeaconState<T>);

+impl FinalizationAndCanonicity {
+    pub fn is_finalized(self) -> bool {
+        self.slot_is_finalized && self.canonical
+    }
+}
+
 impl<T: BeaconChainTypes> BeaconChain<T> {
     /// Checks if a block is finalized.
     /// The finalization check is done with the block slot. The block root is used to verify that
@@ -499,16 +516,30 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         state_root: &Hash256,
         state_slot: Slot,
     ) -> Result<bool, Error> {
+        self.state_finalization_and_canonicity(state_root, state_slot)
+            .map(FinalizationAndCanonicity::is_finalized)
+    }
+
+    /// Fetch the finalization and canonicity status of the state with `state_root`.
+    pub fn state_finalization_and_canonicity(
+        &self,
+        state_root: &Hash256,
+        state_slot: Slot,
+    ) -> Result<FinalizationAndCanonicity, Error> {
         let finalized_slot = self
             .canonical_head
             .cached_head()
             .finalized_checkpoint()
             .epoch
             .start_slot(T::EthSpec::slots_per_epoch());
-        let is_canonical = self
+        let slot_is_finalized = state_slot <= finalized_slot;
+        let canonical = self
             .state_root_at_slot(state_slot)?
             .map_or(false, |canonical_root| state_root == &canonical_root);
-        Ok(state_slot <= finalized_slot && is_canonical)
+        Ok(FinalizationAndCanonicity {
+            slot_is_finalized,
+            canonical,
+        })
     }

     /// Persists the head tracker and fork choice.
diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs
index cc7a957ec..efbc9905b 100644
--- a/beacon_node/beacon_chain/src/chain_config.rs
+++ b/beacon_node/beacon_chain/src/chain_config.rs
@@ -83,6 +83,8 @@ pub struct ChainConfig {
     pub enable_backfill_rate_limiting: bool,
     /// Whether to use `ProgressiveBalancesCache` in unrealized FFG progression calculation.
     pub progressive_balances_mode: ProgressiveBalancesMode,
+    /// Number of epochs between each migration of data from the hot database to the freezer.
+    pub epochs_per_migration: u64,
 }

 impl Default for ChainConfig {
@@ -114,6 +116,7 @@ impl Default for ChainConfig {
             always_prepare_payload: false,
             enable_backfill_rate_limiting: true,
             progressive_balances_mode: ProgressiveBalancesMode::Checked,
+            epochs_per_migration: crate::migrate::DEFAULT_EPOCHS_PER_MIGRATION,
         }
     }
 }
diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs
index f394cabe0..3f1ad7581 100644
--- a/beacon_node/beacon_chain/src/lib.rs
+++ b/beacon_node/beacon_chain/src/lib.rs
@@ -81,6 +81,7 @@ pub use execution_payload::NotifyExecutionLayer;
 pub use fork_choice::{ExecutionStatus, ForkchoiceUpdateParameters};
 pub use kzg::TrustedSetup;
 pub use metrics::scrape_for_metrics;
+pub use migrate::MigratorConfig;
 pub use parking_lot;
 pub use slot_clock;
 pub use state_processing::per_block_processing::errors::{
diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs
index 6f04da31f..4251a9bf5 100644
--- a/beacon_node/beacon_chain/src/migrate.rs
+++ b/beacon_node/beacon_chain/src/migrate.rs
@@ -25,10 +25,15 @@ const MIN_COMPACTION_PERIOD_SECONDS: u64 = 7200;
 /// Compact after a large finality gap, if we respect `MIN_COMPACTION_PERIOD_SECONDS`.
 const COMPACTION_FINALITY_DISTANCE: u64 = 1024;

+/// Default number of epochs to wait between finalization migrations.
+pub const DEFAULT_EPOCHS_PER_MIGRATION: u64 = 1;
+
 /// The background migrator runs a thread to perform pruning and migrate state from the hot
 /// to the cold database.
 pub struct BackgroundMigrator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
     db: Arc<HotColdDB<E, Hot, Cold>>,
+    /// Record of when the last migration ran, for enforcing `epochs_per_migration`.
+    prev_migration: Arc<Mutex<PrevMigration>>,
     #[allow(clippy::type_complexity)]
     tx_thread: Option<Mutex<(mpsc::Sender<Notification>, thread::JoinHandle<()>)>>,
     /// Genesis block root, for persisting the `PersistedBeaconChain`.
@@ -36,9 +41,22 @@ pub struct BackgroundMigrator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>
     log: Logger,
 }

-#[derive(Debug, Default, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq)]
 pub struct MigratorConfig {
     pub blocking: bool,
+    /// Run migrations at most once per `epochs_per_migration`.
+    ///
+    /// If set to 0 or 1, then run every finalization.
+    pub epochs_per_migration: u64,
+}
+
+impl Default for MigratorConfig {
+    fn default() -> Self {
+        Self {
+            blocking: false,
+            epochs_per_migration: DEFAULT_EPOCHS_PER_MIGRATION,
+        }
+    }
 }

 impl MigratorConfig {
@@ -46,6 +64,19 @@ impl MigratorConfig {
         self.blocking = true;
         self
     }
+
+    pub fn epochs_per_migration(mut self, epochs_per_migration: u64) -> Self {
+        self.epochs_per_migration = epochs_per_migration;
+        self
+    }
+}
+
+/// Record of when the last migration ran.
+pub struct PrevMigration {
+    /// The epoch at which the last finalization migration ran.
+    epoch: Epoch,
+    /// The number of epochs to wait between runs.
+    epochs_per_migration: u64,
 }

 /// Pruning can be successful, or in rare cases deferred to a later point.
@@ -93,6 +124,7 @@ pub struct FinalizationNotification {
     finalized_state_root: BeaconStateHash,
     finalized_checkpoint: Checkpoint,
     head_tracker: Arc<HeadTracker>,
+    prev_migration: Arc<Mutex<PrevMigration>>,
     genesis_block_root: Hash256,
 }

@@ -104,6 +136,11 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, H
         genesis_block_root: Hash256,
         log: Logger,
     ) -> Self {
+        // Estimate last migration run from DB split slot.
+        let prev_migration = Arc::new(Mutex::new(PrevMigration {
+            epoch: db.get_split_slot().epoch(E::slots_per_epoch()),
+            epochs_per_migration: config.epochs_per_migration,
+        }));
         let tx_thread = if config.blocking {
             None
         } else {
@@ -112,6 +149,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, H
         };
         Self {
             db,
+            prev_migration,
             tx_thread,
             genesis_block_root,
             log,
@@ -130,6 +168,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, H
             finalized_state_root,
             finalized_checkpoint,
             head_tracker,
+            prev_migration: self.prev_migration.clone(),
             genesis_block_root,
         };
@@ -183,6 +222,26 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, H
         notif: FinalizationNotification,
         log: &Logger,
     ) {
+        // Do not run too frequently.
+        let epoch = notif.finalized_checkpoint.epoch;
+        let mut prev_migration = notif.prev_migration.lock();
+        if epoch < prev_migration.epoch + prev_migration.epochs_per_migration {
+            debug!(
+                log,
+                "Database consolidation deferred";
+                "last_finalized_epoch" => prev_migration.epoch,
+                "new_finalized_epoch" => epoch,
+                "epochs_per_migration" => prev_migration.epochs_per_migration,
+            );
+            return;
+        }
+
+        // Update the previous migration epoch immediately to avoid holding the lock. If the
+        // migration doesn't succeed then the next migration will be retried at the next scheduled
+        // run.
+        prev_migration.epoch = epoch;
+        drop(prev_migration);
+
         debug!(log, "Database consolidation started");

         let finalized_state_root = notif.finalized_state_root;
diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs
index 15331313b..147a0c3ba 100644
--- a/beacon_node/beacon_chain/src/test_utils.rs
+++ b/beacon_node/beacon_chain/src/test_utils.rs
@@ -519,6 +519,7 @@ where
         let validator_keypairs = self
             .validator_keypairs
             .expect("cannot build without validator keypairs");
+        let chain_config = self.chain_config.unwrap_or_default();
         let trusted_setup: TrustedSetup =
             serde_json::from_reader(eth2_network_config::get_trusted_setup::())
                 .map_err(|e| format!("Unable to read trusted setup file: {}", e))
@@ -528,13 +529,17 @@
             .logger(log.clone())
             .custom_spec(spec)
             .store(self.store.expect("cannot build without store"))
-            .store_migrator_config(MigratorConfig::default().blocking())
+            .store_migrator_config(
+                MigratorConfig::default()
+                    .blocking()
+                    .epochs_per_migration(chain_config.epochs_per_migration),
+            )
             .task_executor(self.runtime.task_executor.clone())
             .execution_layer(self.execution_layer)
             .dummy_eth1_backend()
             .expect("should build dummy backend")
             .shutdown_sender(shutdown_tx)
-            .chain_config(self.chain_config.unwrap_or_default())
+            .chain_config(chain_config)
             .event_handler(Some(ServerSentEventHandler::new_with_capacity(
                 log.clone(),
                 5,
diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs
index 30692e022..cfd7f63ec 100644
--- a/beacon_node/client/src/builder.rs
+++ b/beacon_node/client/src/builder.rs
@@ -12,7 +12,7 @@ use beacon_chain::{
     slot_clock::{SlotClock, SystemTimeSlotClock},
     state_advance_timer::spawn_state_advance_timer,
     store::{HotColdDB, ItemStore, LevelDB, StoreConfig},
-    BeaconChain, BeaconChainTypes, Eth1ChainBackend, ServerSentEventHandler,
+    BeaconChain, BeaconChainTypes, Eth1ChainBackend, MigratorConfig, ServerSentEventHandler,
 };
 use beacon_processor::{
     work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessor, BeaconProcessorSend,
@@ -188,6 +188,9 @@ where
             .store(store)
             .task_executor(context.executor.clone())
             .custom_spec(spec.clone())
+            .store_migrator_config(
+                MigratorConfig::default().epochs_per_migration(chain_config.epochs_per_migration),
+            )
             .chain_config(chain_config)
             .graffiti(graffiti)
             .event_handler(event_handler)
diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml
index fd776b9ab..11a2c2012 100644
--- a/beacon_node/execution_layer/Cargo.toml
+++ b/beacon_node/execution_layer/Cargo.toml
@@ -42,9 +42,9 @@ lazy_static = "1.4.0"
ethers-core = "1.0.2"
builder_client = { path = "../builder_client" }
fork_choice = { path = "../../consensus/fork_choice" }
-mev-rs = { git = "https://github.com/ralexstokes/mev-rs" }
-ethereum-consensus = { git =
"https://github.com/ralexstokes/ethereum-consensus" } -ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs" } +mev-rs = { git = "https://github.com/ralexstokes/mev-rs", rev = "216657016d5c0889b505857c89ae42c7aa2764af" } +ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus", rev = "e380108" } +ssz_rs = "0.9.0" tokio-stream = { version = "0.1.9", features = [ "sync" ] } strum = "0.24.0" keccak-hash = "0.10.0" diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 10734d2e3..e2ef23f7d 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -237,6 +237,7 @@ impl> BlockProposalContents BlockProposalContents::PayloadAndBlobs { @@ -938,6 +939,7 @@ impl ExecutionLayer { BlockProposalContents::Payload { payload: relay.data.message.header, block_value: relay.data.message.value, + _phantom: PhantomData, }, )), Err(reason) if !reason.payload_invalid() => { @@ -992,6 +994,7 @@ impl ExecutionLayer { BlockProposalContents::Payload { payload: relay.data.message.header, block_value: relay.data.message.value, + _phantom: PhantomData, }, )), // If the payload is valid then use it. The local EE failed @@ -1000,6 +1003,7 @@ impl ExecutionLayer { BlockProposalContents::Payload { payload: relay.data.message.header, block_value: relay.data.message.value, + _phantom: PhantomData, }, )), Err(reason) => { @@ -1178,30 +1182,36 @@ impl ExecutionLayer { "parent_hash" => ?parent_hash, ); engine.api.get_payload::(current_fork, payload_id).await - }.await?; - - if payload_response.execution_payload_ref().fee_recipient() != payload_attributes.suggested_fee_recipient() { - error!( - self.log(), - "Inconsistent fee recipient"; - "msg" => "The fee recipient returned from the Execution Engine differs \ - from the suggested_fee_recipient set on the beacon node. This could \ - indicate that fees are being diverted to another address. Please \ - ensure that the value of suggested_fee_recipient is set correctly and \ - that the Execution Engine is trusted.", - "fee_recipient" => ?payload_response.execution_payload_ref().fee_recipient(), - "suggested_fee_recipient" => ?payload_attributes.suggested_fee_recipient(), - ); - } - if f(self, payload_response.execution_payload_ref()).is_some() { - warn!( - self.log(), - "Duplicate payload cached, this might indicate redundant proposal \ - attempts." - ); - } - - Ok(payload_response.into()) + }; + let payload_response = payload_fut.await; + let (execution_payload, block_value) = payload_response.map(|payload_response| { + if payload_response.execution_payload_ref().fee_recipient() != payload_attributes.suggested_fee_recipient() { + error!( + self.log(), + "Inconsistent fee recipient"; + "msg" => "The fee recipient returned from the Execution Engine differs \ + from the suggested_fee_recipient set on the beacon node. This could \ + indicate that fees are being diverted to another address. Please \ + ensure that the value of suggested_fee_recipient is set correctly and \ + that the Execution Engine is trusted.", + "fee_recipient" => ?payload_response.execution_payload_ref().fee_recipient(), + "suggested_fee_recipient" => ?payload_attributes.suggested_fee_recipient(), + ); + } + if f(self, payload_response.execution_payload_ref()).is_some() { + warn!( + self.log(), + "Duplicate payload cached, this might indicate redundant proposal \ + attempts." 
+ ); + } + payload_response.into() + })?; + Ok(BlockProposalContents::Payload { + payload: execution_payload.into(), + block_value, + _phantom: PhantomData, + }) }) .await .map_err(Box::new) @@ -2126,6 +2136,22 @@ async fn timed_future, T>(metric: &str, future: F) -> (T, (result, duration) } +fn noop( + _: &ExecutionLayer, + _: ExecutionPayloadRef, +) -> Option> { + None +} + +#[cfg(test)] +/// Returns the duration since the unix epoch. +fn timestamp_now() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_else(|_| Duration::from_secs(0)) + .as_secs() +} + #[derive(Debug)] pub enum BlobTxConversionError { /// The transaction type was not set. @@ -2356,12 +2382,3 @@ mod test { .await; } } - -#[cfg(test)] -/// Returns the duration since the unix epoch. -fn timestamp_now() -> u64 { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap_or_else(|_| Duration::from_secs(0)) - .as_secs() -} diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index fe5414028..278dacb82 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -11,11 +11,17 @@ use ethereum_consensus::{ }; use fork_choice::ForkchoiceUpdateParameters; use mev_rs::{ - bellatrix::{BuilderBid as BuilderBidBellatrix, SignedBuilderBid as SignedBuilderBidBellatrix}, - capella::{BuilderBid as BuilderBidCapella, SignedBuilderBid as SignedBuilderBidCapella}, - sign_builder_message, verify_signed_builder_message, BidRequest, BlindedBlockProviderError, - BlindedBlockProviderServer, BuilderBid, ExecutionPayload as ServerPayload, - SignedBlindedBeaconBlock, SignedBuilderBid, SignedValidatorRegistration, + blinded_block_provider::Server as BlindedBlockProviderServer, + signing::{sign_builder_message, verify_signed_builder_message}, + types::{ + bellatrix::{ + BuilderBid as BuilderBidBellatrix, SignedBuilderBid as SignedBuilderBidBellatrix, + }, + capella::{BuilderBid as BuilderBidCapella, SignedBuilderBid as SignedBuilderBidCapella}, + BidRequest, BuilderBid, ExecutionPayload as ServerPayload, SignedBlindedBeaconBlock, + SignedBuilderBid, SignedValidatorRegistration, + }, + Error as MevError, }; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; @@ -47,7 +53,7 @@ pub enum Operation { } impl Operation { - fn apply(self, bid: &mut B) -> Result<(), BlindedBlockProviderError> { + fn apply(self, bid: &mut B) -> Result<(), MevError> { match self { Operation::FeeRecipient(fee_recipient) => { *bid.fee_recipient_mut() = to_ssz_rs(&fee_recipient)? 
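(Aside: the `_phantom: PhantomData` fields threaded through the `get_payload` changes above are the standard Rust idiom for a variant that must mention a type parameter it doesn't otherwise store; the related cleanups elsewhere in this diff replace `PhantomData::default()` with the plain unit-struct form. A minimal sketch of the pattern, using hypothetical stand-in types rather than Lighthouse's real `BlockProposalContents`:)

```rust
use std::marker::PhantomData;

// Hypothetical stand-in for Lighthouse's EthSpec trait.
trait EthSpec {
    const SLOTS_PER_EPOCH: u64;
}

struct MainnetSpec;
impl EthSpec for MainnetSpec {
    const SLOTS_PER_EPOCH: u64 = 32;
}

// The enum is generic over `E`, but the `Payload` variant's fields don't
// mention `E` directly; a zero-sized PhantomData<E> binds the parameter anyway.
enum BlockProposalContents<E: EthSpec> {
    Payload {
        payload: Vec<u8>, // stand-in for a real ExecutionPayload type
        block_value: u128,
        _phantom: PhantomData<E>,
    },
}

impl<E: EthSpec> BlockProposalContents<E> {
    fn block_value(&self) -> u128 {
        match self {
            BlockProposalContents::Payload { block_value, .. } => *block_value,
        }
    }
}

fn main() {
    // `PhantomData` (the unit-struct form, not `PhantomData::default()`) is
    // all that's needed at construction time, as in the hunks above.
    let contents: BlockProposalContents<MainnetSpec> = BlockProposalContents::Payload {
        payload: vec![],
        block_value: 1,
        _phantom: PhantomData,
    };
    assert_eq!(contents.block_value(), 1);
}
```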
@@ -73,7 +79,7 @@ pub trait BidStuff { fn prev_randao_mut(&mut self) -> &mut Hash32; fn block_number_mut(&mut self) -> &mut u64; fn timestamp_mut(&mut self) -> &mut u64; - fn withdrawals_root_mut(&mut self) -> Result<&mut Root, BlindedBlockProviderError>; + fn withdrawals_root_mut(&mut self) -> Result<&mut Root, MevError>; fn sign_builder_message( &mut self, @@ -134,11 +140,9 @@ impl BidStuff for BuilderBid { } } - fn withdrawals_root_mut(&mut self) -> Result<&mut Root, BlindedBlockProviderError> { + fn withdrawals_root_mut(&mut self) -> Result<&mut Root, MevError> { match self { - Self::Bellatrix(_) => Err(BlindedBlockProviderError::Custom( - "withdrawals_root called on bellatrix bid".to_string(), - )), + Self::Bellatrix(_) => Err(MevError::InvalidFork), Self::Capella(bid) => Ok(&mut bid.header.withdrawals_root), } } @@ -274,7 +278,7 @@ impl MockBuilder { *self.invalidate_signatures.write() = false; } - fn apply_operations(&self, bid: &mut B) -> Result<(), BlindedBlockProviderError> { + fn apply_operations(&self, bid: &mut B) -> Result<(), MevError> { let mut guard = self.operations.write(); while let Some(op) = guard.pop() { op.apply(bid)?; @@ -288,7 +292,7 @@ impl mev_rs::BlindedBlockProvider for MockBuilder { async fn register_validators( &self, registrations: &mut [SignedValidatorRegistration], - ) -> Result<(), BlindedBlockProviderError> { + ) -> Result<(), MevError> { for registration in registrations { let pubkey = registration.message.public_key.clone(); let message = &mut registration.message; @@ -307,10 +311,7 @@ impl mev_rs::BlindedBlockProvider for MockBuilder { Ok(()) } - async fn fetch_best_bid( - &self, - bid_request: &BidRequest, - ) -> Result { + async fn fetch_best_bid(&self, bid_request: &BidRequest) -> Result { let slot = Slot::new(bid_request.slot); let fork = self.spec.fork_name_at_slot::(slot); let signed_cached_data = self @@ -336,7 +337,7 @@ impl mev_rs::BlindedBlockProvider for MockBuilder { .map_err(convert_err)? .block_hash(); if head_execution_hash != from_ssz_rs(&bid_request.parent_hash)? { - return Err(BlindedBlockProviderError::Custom(format!( + return Err(custom_err(format!( "head mismatch: {} {}", head_execution_hash, bid_request.parent_hash ))); @@ -396,7 +397,7 @@ impl mev_rs::BlindedBlockProvider for MockBuilder { .get_debug_beacon_states(StateId::Head) .await .map_err(convert_err)? - .ok_or_else(|| BlindedBlockProviderError::Custom("missing head state".to_string()))? + .ok_or_else(|| custom_err("missing head state".to_string()))? 
.data; let prev_randao = head_state .get_randao_mix(head_state.current_epoch()) @@ -409,10 +410,7 @@ impl mev_rs::BlindedBlockProvider for MockBuilder { PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, Some(vec![])) } ForkName::Base | ForkName::Altair => { - return Err(BlindedBlockProviderError::Custom(format!( - "Unsupported fork: {}", - fork - ))); + return Err(MevError::InvalidFork); } }; @@ -452,12 +450,7 @@ impl mev_rs::BlindedBlockProvider for MockBuilder { value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, public_key: self.builder_sk.public_key(), }), - ForkName::Base | ForkName::Altair | ForkName::Deneb => { - return Err(BlindedBlockProviderError::Custom(format!( - "Unsupported fork: {}", - fork - ))) - } + ForkName::Base | ForkName::Altair | ForkName::Deneb => return Err(MevError::InvalidFork), }; *message.gas_limit_mut() = cached_data.gas_limit; @@ -475,7 +468,7 @@ impl mev_rs::BlindedBlockProvider for MockBuilder { async fn open_bid( &self, signed_block: &mut SignedBlindedBeaconBlock, - ) -> Result { + ) -> Result { let node = match signed_block { SignedBlindedBeaconBlock::Bellatrix(block) => { block.message.body.execution_payload_header.hash_tree_root() @@ -496,9 +489,7 @@ impl mev_rs::BlindedBlockProvider for MockBuilder { } } -pub fn from_ssz_rs( - ssz_rs_data: &T, -) -> Result { +pub fn from_ssz_rs(ssz_rs_data: &T) -> Result { U::from_ssz_bytes( ssz_rs::serialize(ssz_rs_data) .map_err(convert_err)? @@ -507,12 +498,17 @@ pub fn from_ssz_rs( .map_err(convert_err) } -pub fn to_ssz_rs( - ssz_data: &T, -) -> Result { +pub fn to_ssz_rs(ssz_data: &T) -> Result { ssz_rs::deserialize::(&ssz_data.as_ssz_bytes()).map_err(convert_err) } -fn convert_err(e: E) -> BlindedBlockProviderError { - BlindedBlockProviderError::Custom(format!("{e:?}")) +fn convert_err(e: E) -> MevError { + custom_err(format!("{e:?}")) +} + +// This is a bit of a hack since the `Custom` variant was removed from `mev_rs::Error`. +fn custom_err(s: String) -> MevError { + MevError::Consensus(ethereum_consensus::state_transition::Error::Io( + std::io::Error::new(std::io::ErrorKind::Other, s), + )) } diff --git a/beacon_node/http_api/src/block_packing_efficiency.rs b/beacon_node/http_api/src/block_packing_efficiency.rs index 1b924f382..e099e130a 100644 --- a/beacon_node/http_api/src/block_packing_efficiency.rs +++ b/beacon_node/http_api/src/block_packing_efficiency.rs @@ -75,7 +75,7 @@ impl PackingEfficiencyHandler { available_attestations: HashSet::new(), included_attestations: HashMap::new(), committee_store: CommitteeStore::new(), - _phantom: PhantomData::default(), + _phantom: PhantomData, }; handler.compute_epoch(start_epoch, &starting_state, spec)?; diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index 9e4aadef1..5e8605377 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -70,15 +70,32 @@ impl StateId { .map_err(BeaconChainError::DBError) .map_err(warp_utils::reject::beacon_chain_error)? 
{ - let execution_optimistic = chain - .canonical_head - .fork_choice_read_lock() - .is_optimistic_or_invalid_block_no_fallback(&hot_summary.latest_block_root) - .map_err(BeaconChainError::ForkChoiceError) - .map_err(warp_utils::reject::beacon_chain_error)?; - let finalized = chain - .is_finalized_state(root, hot_summary.slot) + let finalization_status = chain + .state_finalization_and_canonicity(root, hot_summary.slot) .map_err(warp_utils::reject::beacon_chain_error)?; + let finalized = finalization_status.is_finalized(); + let fork_choice = chain.canonical_head.fork_choice_read_lock(); + let execution_optimistic = if finalization_status.slot_is_finalized + && !finalization_status.canonical + { + // This block is permanently orphaned and has likely been pruned from fork + // choice. If it isn't found in fork choice, mark it optimistic to be on the + // safe side. + fork_choice + .is_optimistic_or_invalid_block_no_fallback( + &hot_summary.latest_block_root, + ) + .unwrap_or(true) + } else { + // This block is either old and finalized, or recent and unfinalized, so + // it's safe to fallback to the optimistic status of the finalized block. + chain + .canonical_head + .fork_choice_read_lock() + .is_optimistic_or_invalid_block(&hot_summary.latest_block_root) + .map_err(BeaconChainError::ForkChoiceError) + .map_err(warp_utils::reject::beacon_chain_error)? + }; return Ok((*root, execution_optimistic, finalized)); } else if let Some(_cold_state_slot) = chain .store diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index d12274308..40f21f727 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -2,8 +2,9 @@ use beacon_chain::{ chain_config::{DisallowedReOrgOffsets, ReOrgThreshold}, test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, + ChainConfig, }; -use eth2::types::DepositContractData; +use eth2::types::{DepositContractData, StateId}; use execution_layer::{ForkchoiceState, PayloadAttributes}; use http_api::test_utils::InteractiveTester; use parking_lot::Mutex; @@ -17,7 +18,7 @@ use std::time::Duration; use tree_hash::TreeHash; use types::{ Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, FullPayload, - MainnetEthSpec, ProposerPreparationData, Slot, + MainnetEthSpec, MinimalEthSpec, ProposerPreparationData, Slot, }; type E = MainnetEthSpec; @@ -48,6 +49,76 @@ async fn deposit_contract_custom_network() { assert_eq!(result, expected); } +// Test that state lookups by root function correctly for states that are finalized but still +// present in the hot database, and have had their block pruned from fork choice. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn state_by_root_pruned_from_fork_choice() { + type E = MinimalEthSpec; + + let validator_count = 24; + let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + + let tester = InteractiveTester::::new_with_initializer_and_mutator( + Some(spec.clone()), + validator_count, + Some(Box::new(move |builder| { + builder + .deterministic_keypairs(validator_count) + .fresh_ephemeral_store() + .chain_config(ChainConfig { + epochs_per_migration: 1024, + ..ChainConfig::default() + }) + })), + None, + ) + .await; + + let client = &tester.client; + let harness = &tester.harness; + + // Create some chain depth and finalize beyond fork choice's pruning depth. 
+    let num_epochs = 8_u64;
+    let num_initial = num_epochs * E::slots_per_epoch();
+    harness.advance_slot();
+    harness
+        .extend_chain_with_sync(
+            num_initial as usize,
+            BlockStrategy::OnCanonicalHead,
+            AttestationStrategy::AllValidators,
+            SyncCommitteeStrategy::NoValidators,
+        )
+        .await;
+
+    // Should now be finalized.
+    let finalized_epoch = harness.finalized_checkpoint().epoch;
+    assert_eq!(finalized_epoch, num_epochs - 2);
+
+    // The split slot should still be at 0.
+    assert_eq!(harness.chain.store.get_split_slot(), 0);
+
+    // States that are between the split and the finalized slot should be able to be looked up by
+    // state root.
+    for slot in 0..finalized_epoch.start_slot(E::slots_per_epoch()).as_u64() {
+        let state_root = harness
+            .chain
+            .state_root_at_slot(Slot::new(slot))
+            .unwrap()
+            .unwrap();
+        let response = client
+            .get_debug_beacon_states::<E>(StateId::Root(state_root))
+            .await
+            .unwrap()
+            .unwrap();
+
+        assert!(response.finalized.unwrap());
+        assert!(!response.execution_optimistic.unwrap());
+
+        let mut state = response.data;
+        assert_eq!(state.update_tree_hash_cache().unwrap(), state_root);
+    }
+}
+
 /// Data structure for tracking fork choice updates received by the mock execution layer.
 #[derive(Debug, Default)]
 struct ForkChoiceUpdates {
diff --git a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs
index 3df7f7c16..5ce0c55ca 100644
--- a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs
+++ b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs
@@ -1,7 +1,10 @@
 //! ENR extension trait to support libp2p integration.
 use crate::{Enr, Multiaddr, PeerId};
 use discv5::enr::{CombinedKey, CombinedPublicKey};
-use libp2p::core::{identity::Keypair, identity::PublicKey, multiaddr::Protocol};
+use libp2p::{
+    core::{identity::Keypair, identity::PublicKey, multiaddr::Protocol},
+    identity::secp256k1,
+};
 use tiny_keccak::{Hasher, Keccak};

 /// Extend ENR for libp2p types.
@@ -36,6 +39,8 @@ pub trait CombinedKeyPublicExt {
 pub trait CombinedKeyExt {
     /// Converts a libp2p key into an ENR combined key.
     fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result<CombinedKey, &'static str>;
+    /// Converts a [`secp256k1::Keypair`] into an ENR [`CombinedKey`].
+    fn from_secp256k1(key: &secp256k1::Keypair) -> CombinedKey;
 }

 impl EnrExt for Enr {
@@ -220,12 +225,7 @@ impl CombinedKeyPublicExt for CombinedPublicKey {
 impl CombinedKeyExt for CombinedKey {
     fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result<CombinedKey, &'static str> {
         match key {
-            Keypair::Secp256k1(key) => {
-                let secret =
-                    discv5::enr::k256::ecdsa::SigningKey::from_slice(&key.secret().to_bytes())
-                        .expect("libp2p key must be valid");
-                Ok(CombinedKey::Secp256k1(secret))
-            }
+            Keypair::Secp256k1(key) => Ok(CombinedKey::from_secp256k1(key)),
             Keypair::Ed25519(key) => {
                 let ed_keypair = discv5::enr::ed25519_dalek::SigningKey::from_bytes(
                     &(key.encode()[..32])
@@ -237,6 +237,11 @@ impl CombinedKeyExt for CombinedKey {
             Keypair::Ecdsa(_) => Err("Ecdsa keypairs not supported"),
         }
     }
+    fn from_secp256k1(key: &secp256k1::Keypair) -> Self {
+        let secret = discv5::enr::k256::ecdsa::SigningKey::from_slice(&key.secret().to_bytes())
+            .expect("libp2p key must be valid");
+        CombinedKey::Secp256k1(secret)
+    }
 }

 // helper function to convert a peer_id to a node_id.
This is only possible for secp256k1/ed25519 libp2p diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 3ee74ebf0..d4d0baef6 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -1101,6 +1101,7 @@ mod tests { use super::*; use crate::rpc::methods::{MetaData, MetaDataV2}; use enr::EnrBuilder; + use libp2p::identity::secp256k1; use slog::{o, Drain}; use types::{BitVector, MinimalEthSpec, SubnetId}; @@ -1119,10 +1120,10 @@ mod tests { } async fn build_discovery() -> Discovery { - let keypair = libp2p::identity::Keypair::generate_secp256k1(); + let keypair = secp256k1::Keypair::generate(); let mut config = NetworkConfig::default(); config.set_listening_addr(crate::ListenAddress::unused_v4_ports()); - let enr_key: CombinedKey = CombinedKey::from_libp2p(&keypair).unwrap(); + let enr_key: CombinedKey = CombinedKey::from_secp256k1(&keypair); let enr: Enr = build_enr::(&enr_key, &config, &EnrForkId::default()).unwrap(); let log = build_log(slog::Level::Debug, false); let globals = NetworkGlobals::new( @@ -1138,6 +1139,7 @@ mod tests { false, &log, ); + let keypair = Keypair::Secp256k1(keypair); Discovery::new(&keypair, &config, Arc::new(globals), &log) .await .unwrap() @@ -1184,8 +1186,8 @@ mod tests { fn make_enr(subnet_ids: Vec) -> Enr { let mut builder = EnrBuilder::new("v4"); - let keypair = libp2p::identity::Keypair::generate_secp256k1(); - let enr_key: CombinedKey = CombinedKey::from_libp2p(&keypair).unwrap(); + let keypair = secp256k1::Keypair::generate(); + let enr_key: CombinedKey = CombinedKey::from_secp256k1(&keypair); // set the "attnets" field on our ENR let mut bitfield = BitVector::::new(); diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index 295616f36..97eaaa005 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -134,9 +134,8 @@ impl NetworkGlobals { log: &slog::Logger, ) -> NetworkGlobals { use crate::CombinedKeyExt; - let keypair = libp2p::identity::Keypair::generate_secp256k1(); - let enr_key: discv5::enr::CombinedKey = - discv5::enr::CombinedKey::from_libp2p(&keypair).unwrap(); + let keypair = libp2p::identity::secp256k1::Keypair::generate(); + let enr_key: discv5::enr::CombinedKey = discv5::enr::CombinedKey::from_secp256k1(&keypair); let enr = discv5::enr::EnrBuilder::new("v4").build(&enr_key).unwrap(); NetworkGlobals::new( enr, diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 7deeebe30..9b4b46307 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -540,6 +540,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { [default: 8192 (mainnet) or 64 (minimal)]") .takes_value(true) ) + .arg( + Arg::with_name("epochs-per-migration") + .long("epochs-per-migration") + .value_name("N") + .help("The number of epochs to wait between running the migration of data from the \ + hot DB to the cold DB. 
Less frequent runs can be useful for minimizing disk \ + writes") + .default_value("1") + .takes_value(true) + ) .arg( Arg::with_name("block-cache-size") .long("block-cache-size") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 8d2cb3822..5d097870c 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -421,6 +421,12 @@ pub fn get_config( client_config.store.prune_payloads = prune_payloads; } + if let Some(epochs_per_migration) = + clap_utils::parse_optional(cli_args, "epochs-per-migration")? + { + client_config.chain.epochs_per_migration = epochs_per_migration; + } + if let Some(prune_blobs) = clap_utils::parse_optional(cli_args, "prune-blobs")? { client_config.store.prune_blobs = prune_blobs; } diff --git a/consensus/state_processing/src/lib.rs b/consensus/state_processing/src/lib.rs index 7340206a3..a3ee72540 100644 --- a/consensus/state_processing/src/lib.rs +++ b/consensus/state_processing/src/lib.rs @@ -2,7 +2,7 @@ #![cfg_attr( not(test), deny( - clippy::integer_arithmetic, + clippy::arithmetic_side_effects, clippy::disallowed_methods, clippy::indexing_slicing, clippy::unwrap_used, diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index 709302eec..22309334f 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -1,4 +1,4 @@ -#![allow(clippy::integer_arithmetic)] +#![allow(clippy::arithmetic_side_effects)] use super::signature_sets::{Error as SignatureSetError, *}; use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError}; diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index 011d4d0cc..590514def 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -960,7 +960,7 @@ async fn fork_spanning_exit() { spec.bellatrix_fork_epoch = Some(Epoch::new(4)); spec.shard_committee_period = 0; - let harness = BeaconChainHarness::builder(MainnetEthSpec::default()) + let harness = BeaconChainHarness::builder(MainnetEthSpec) .spec(spec.clone()) .deterministic_keypairs(VALIDATOR_COUNT) .mock_execution_layer() diff --git a/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs b/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs index 967f642e8..a895567d1 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs @@ -34,7 +34,7 @@ pub fn process_inactivity_updates( .safe_add_assign(spec.inactivity_score_bias)?; } // Decrease the score of all validators for forgiveness when not during a leak - if !state.is_in_inactivity_leak(previous_epoch, spec) { + if !state.is_in_inactivity_leak(previous_epoch, spec)? 
{ let inactivity_score = state.get_inactivity_score_mut(index)?; inactivity_score .safe_sub_assign(min(spec.inactivity_score_recovery_rate, *inactivity_score))?; diff --git a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs index e2aa67a61..19d57130c 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs @@ -77,7 +77,7 @@ pub fn get_flag_index_deltas( let mut delta = Delta::default(); if unslashed_participating_indices.contains(index)? { - if !state.is_in_inactivity_leak(previous_epoch, spec) { + if !state.is_in_inactivity_leak(previous_epoch, spec)? { let reward_numerator = base_reward .safe_mul(weight)? .safe_mul(unslashed_participating_increments)?; diff --git a/consensus/state_processing/src/verify_operation.rs b/consensus/state_processing/src/verify_operation.rs index 864844080..b3924cd97 100644 --- a/consensus/state_processing/src/verify_operation.rs +++ b/consensus/state_processing/src/verify_operation.rs @@ -138,7 +138,7 @@ impl VerifyOperation for SignedVoluntaryExit { Ok(SigVerifiedOp::new(self, state)) } - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { smallvec![self.message.epoch] } @@ -156,7 +156,7 @@ impl VerifyOperation for AttesterSlashing { Ok(SigVerifiedOp::new(self, state)) } - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { smallvec![ self.attestation_1.data.target.epoch, @@ -177,7 +177,7 @@ impl VerifyOperation for ProposerSlashing { Ok(SigVerifiedOp::new(self, state)) } - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { // Only need a single epoch because the slots of the two headers must be equal. smallvec![self @@ -200,7 +200,7 @@ impl VerifyOperation for SignedBlsToExecutionChange { Ok(SigVerifiedOp::new(self, state)) } - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { smallvec![] } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 01d2f40e6..4b187a751 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -455,7 +455,7 @@ impl BeaconState { } /// Specialised deserialisation method that uses the `ChainSpec` as context. - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { // Slot is after genesis_time (u64) and genesis_validators_root (Hash256). let slot_start = ::ssz_fixed_len() + ::ssz_fixed_len(); @@ -1761,16 +1761,22 @@ impl BeaconState { previous_epoch: Epoch, val_index: usize, ) -> Result { - self.get_validator(val_index).map(|val| { - val.is_active_at(previous_epoch) - || (val.slashed && previous_epoch + Epoch::new(1) < val.withdrawable_epoch) - }) + let val = self.get_validator(val_index)?; + Ok(val.is_active_at(previous_epoch) + || (val.slashed && previous_epoch.safe_add(Epoch::new(1))? 
< val.withdrawable_epoch)) } /// Passing `previous_epoch` to this function rather than computing it internally provides /// a tangible speed improvement in state processing. - pub fn is_in_inactivity_leak(&self, previous_epoch: Epoch, spec: &ChainSpec) -> bool { - (previous_epoch - self.finalized_checkpoint().epoch) > spec.min_epochs_to_inactivity_penalty + pub fn is_in_inactivity_leak( + &self, + previous_epoch: Epoch, + spec: &ChainSpec, + ) -> Result { + Ok( + (previous_epoch.safe_sub(self.finalized_checkpoint().epoch)?) + > spec.min_epochs_to_inactivity_penalty, + ) } /// Get the `SyncCommittee` associated with the next slot. Useful because sync committees diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index 8afef1183..2db8fbe76 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -1,4 +1,4 @@ -#![allow(clippy::integer_arithmetic)] +#![allow(clippy::arithmetic_side_effects)] use super::BeaconState; use crate::*; diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs index d1d63e3c8..69cd6fbb8 100644 --- a/consensus/types/src/beacon_state/tree_hash_cache.rs +++ b/consensus/types/src/beacon_state/tree_hash_cache.rs @@ -1,4 +1,4 @@ -#![allow(clippy::integer_arithmetic)] +#![allow(clippy::arithmetic_side_effects)] #![allow(clippy::disallowed_methods)] #![allow(clippy::indexing_slicing)] diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 2d5b51e5f..f362fdfb5 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -468,7 +468,7 @@ impl ChainSpec { Hash256::from(domain) } - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] pub const fn attestation_subnet_prefix_bits(&self) -> u32 { let attestation_subnet_count_bits = self.attestation_subnet_count.ilog2(); self.attestation_subnet_extra_bits as u32 + attestation_subnet_count_bits diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 8aa01830b..157f6c7a8 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -115,7 +115,7 @@ impl ExecutionPayload { } } - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] /// Returns the maximum size of an execution payload. pub fn max_execution_payload_merge_size() -> usize { // Fixed part @@ -126,7 +126,7 @@ impl ExecutionPayload { + (T::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + T::max_bytes_per_transaction())) } - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] /// Returns the maximum size of an execution payload. 
pub fn max_execution_payload_capella_size() -> usize { // Fixed part diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index fe07a9c0f..58b7aaeea 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -3,7 +3,7 @@ #![cfg_attr( not(test), deny( - clippy::integer_arithmetic, + clippy::arithmetic_side_effects, clippy::disallowed_methods, clippy::indexing_slicing ) diff --git a/consensus/types/src/participation_list.rs b/consensus/types/src/participation_list.rs index 89a56cb87..be119fbef 100644 --- a/consensus/types/src/participation_list.rs +++ b/consensus/types/src/participation_list.rs @@ -1,4 +1,4 @@ -#![allow(clippy::integer_arithmetic)] +#![allow(clippy::arithmetic_side_effects)] use crate::{Hash256, ParticipationFlags, Unsigned, VariableList}; use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, Error, TreeHashCache}; diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index 6793fe557..eb25b57b0 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -72,7 +72,7 @@ impl SubnetId { .into()) } - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] /// Computes the set of subnets the node should be subscribed to during the current epoch, /// along with the first epoch in which these subscriptions are no longer valid. pub fn compute_subnets_for_epoch( diff --git a/consensus/types/src/test_utils/mod.rs b/consensus/types/src/test_utils/mod.rs index c0333bcfd..d172342ee 100644 --- a/consensus/types/src/test_utils/mod.rs +++ b/consensus/types/src/test_utils/mod.rs @@ -1,4 +1,4 @@ -#![allow(clippy::integer_arithmetic)] +#![allow(clippy::arithmetic_side_effects)] use std::fmt::Debug; diff --git a/consensus/types/src/test_utils/test_random.rs b/consensus/types/src/test_utils/test_random.rs index a9e79d14f..f31df2ce1 100644 --- a/consensus/types/src/test_utils/test_random.rs +++ b/consensus/types/src/test_utils/test_random.rs @@ -30,7 +30,7 @@ pub trait TestRandom { impl TestRandom for PhantomData { fn random_for_test(_rng: &mut impl RngCore) -> Self { - PhantomData::default() + PhantomData } } diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs index 8662a8047..0584cd654 100644 --- a/lcli/src/generate_bootnode_enr.rs +++ b/lcli/src/generate_bootnode_enr.rs @@ -1,6 +1,7 @@ use clap::ArgMatches; use lighthouse_network::{ - discovery::{build_enr, CombinedKey, CombinedKeyExt, Keypair, ENR_FILENAME}, + discovery::{build_enr, CombinedKey, CombinedKeyExt, ENR_FILENAME}, + libp2p::identity::secp256k1, NetworkConfig, NETWORK_KEY_FILENAME, }; use std::fs::File; @@ -29,8 +30,8 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { config.enr_udp4_port = Some(udp_port); config.enr_tcp6_port = Some(tcp_port); - let local_keypair = Keypair::generate_secp256k1(); - let enr_key = CombinedKey::from_libp2p(&local_keypair)?; + let secp256k1_keypair = secp256k1::Keypair::generate(); + let enr_key = CombinedKey::from_secp256k1(&secp256k1_keypair); let enr_fork_id = EnrForkId { fork_digest: ChainSpec::compute_fork_digest(genesis_fork_version, Hash256::zero()), next_fork_version: genesis_fork_version, @@ -47,13 +48,10 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { .write_all(enr.to_base64().as_bytes()) .map_err(|e| format!("Unable to write ENR to {}: {:?}", ENR_FILENAME, e))?; - let secret_bytes = match local_keypair { - Keypair::Secp256k1(key) => key.secret().to_bytes(), - _ => return Err("Key is not a secp256k1 key".into()), - }; - let 
mut key_file = File::create(output_dir.join(NETWORK_KEY_FILENAME)) .map_err(|e| format!("Unable to create {}: {:?}", NETWORK_KEY_FILENAME, e))?; + + let secret_bytes = secp256k1_keypair.secret().to_bytes(); key_file .write_all(&secret_bytes) .map_err(|e| format!("Unable to write key to {}: {:?}", NETWORK_KEY_FILENAME, e))?; diff --git a/lcli/src/main.rs b/lcli/src/main.rs index e18bebb4a..31231e4fb 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -40,7 +40,6 @@ fn main() { .long("spec") .value_name("STRING") .takes_value(true) - .required(true) .possible_values(&["minimal", "mainnet", "gnosis"]) .default_value("mainnet") .global(true), @@ -372,7 +371,6 @@ fn main() { .index(2) .value_name("BIP39_MNENMONIC") .takes_value(true) - .required(true) .default_value( "replace nephew blur decorate waste convince soup column \ orient excite play baby", @@ -393,7 +391,6 @@ fn main() { .help("The block hash used when generating an execution payload. This \ value is used for `execution_payload_header.block_hash` as well as \ `execution_payload_header.random`") - .required(true) .default_value( "0x0000000000000000000000000000000000000000000000000000000000000000", ), @@ -411,7 +408,6 @@ fn main() { .value_name("INTEGER") .takes_value(true) .help("The base fee per gas field in the execution payload generated.") - .required(true) .default_value("1000000000"), ) .arg( @@ -420,7 +416,6 @@ fn main() { .value_name("INTEGER") .takes_value(true) .help("The gas limit field in the execution payload generated.") - .required(true) .default_value("30000000"), ) .arg( diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 50b93d806..568125525 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1772,6 +1772,24 @@ fn no_reconstruct_historic_states_flag() { .run_with_zero_port() .with_config(|config| assert!(!config.chain.reconstruct_historic_states)); } +#[test] +fn epochs_per_migration_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.epochs_per_migration, + beacon_node::beacon_chain::migrate::DEFAULT_EPOCHS_PER_MIGRATION + ) + }); +} +#[test] +fn epochs_per_migration_override() { + CommandLineTest::new() + .flag("epochs-per-migration", Some("128")) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.chain.epochs_per_migration, 128)); +} // Tests for Slasher flags. 
 // Using `--slasher-max-db-size` to work around https://github.com/sigp/lighthouse/issues/2342
diff --git a/scripts/local_testnet/clean.sh b/scripts/local_testnet/clean.sh
index 6db8753d0..cd915e470 100755
--- a/scripts/local_testnet/clean.sh
+++ b/scripts/local_testnet/clean.sh
@@ -9,5 +9,5 @@ set -Eeuo pipefail
 source ./vars.env
 
 if [ -d $DATADIR ]; then
-    rm -r $DATADIR
+    rm -rf $DATADIR
 fi
diff --git a/slasher/tests/backend.rs b/slasher/tests/backend.rs
index 9e68107de..fd1a6ae14 100644
--- a/slasher/tests/backend.rs
+++ b/slasher/tests/backend.rs
@@ -1,4 +1,4 @@
-#![cfg(all(feature = "lmdb"))]
+#![cfg(feature = "lmdb")]
 
 use slasher::{config::MDBX_DATA_FILENAME, Config, DatabaseBackend, DatabaseBackendOverride};
 use std::fs::File;
diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile
index 27f6fd805..f568c87cf 100644
--- a/testing/ef_tests/Makefile
+++ b/testing/ef_tests/Makefile
@@ -13,7 +13,7 @@ BLS_TARBALL = $(patsubst %,%-$(BLS_TEST_TAG).tar.gz,$(BLS_TEST))
 BLS_OUTPUT_DIR := $(OUTPUT_DIR)/$(BLS_TEST_REPO_NAME)
 BLS_BASE_URL := https://github.com/ethereum/$(BLS_TEST_REPO_NAME)/releases/download/$(BLS_TEST_TAG)
 
-WGET := $(if $(LIGHTHOUSE_GITHUB_TOKEN),wget --header="Authorization: $(LIGHTHOUSE_GITHUB_TOKEN)",wget)
+CURL := $(if $(LIGHTHOUSE_GITHUB_TOKEN),curl -L --header "Authorization: $(LIGHTHOUSE_GITHUB_TOKEN)",curl -L)
 
 all:
 	make $(OUTPUT_DIR)
@@ -27,11 +27,11 @@ $(OUTPUT_DIR): $(TARBALLS)
 
 $(BLS_OUTPUT_DIR):
 	mkdir $(BLS_OUTPUT_DIR)
-	$(WGET) $(BLS_BASE_URL)/$(BLS_TEST).tar.gz -O $(BLS_TARBALL)
+	$(CURL) $(BLS_BASE_URL)/$(BLS_TEST).tar.gz -o $(BLS_TARBALL)
 	tar -xzf $(BLS_TARBALL) -C $(BLS_OUTPUT_DIR)
 
 %-$(TESTS_TAG).tar.gz:
-	$(WGET) $(BASE_URL)/$*.tar.gz -O $@
+	$(CURL) $(BASE_URL)/$*.tar.gz -o $@
 
 clean-test-files:
 	rm -rf $(OUTPUT_DIR) $(BLS_OUTPUT_DIR)
diff --git a/testing/node_test_rig/Cargo.toml b/testing/node_test_rig/Cargo.toml
index ea5d005c1..ac77349c5 100644
--- a/testing/node_test_rig/Cargo.toml
+++ b/testing/node_test_rig/Cargo.toml
@@ -14,3 +14,4 @@ validator_client = { path = "../../validator_client" }
 validator_dir = { path = "../../common/validator_dir", features = ["insecure_keys"] }
 sensitive_url = { path = "../../common/sensitive_url" }
 execution_layer = { path = "../../beacon_node/execution_layer" }
+tokio = { version = "1.14.0", features = ["time"] }
diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs
index 1b822a322..1e3f14a5a 100644
--- a/testing/node_test_rig/src/lib.rs
+++ b/testing/node_test_rig/src/lib.rs
@@ -10,6 +10,7 @@ use std::path::PathBuf;
 use std::time::Duration;
 use std::time::{SystemTime, UNIX_EPOCH};
 use tempfile::{Builder as TempBuilder, TempDir};
+use tokio::time::timeout;
 use types::EthSpec;
 use validator_client::ProductionValidatorClient;
 use validator_dir::insecure_keys::build_deterministic_validator_dirs;
@@ -24,6 +25,8 @@ pub use validator_client::Config as ValidatorConfig;
 
 /// The global timeout for HTTP requests to the beacon node.
 const HTTP_TIMEOUT: Duration = Duration::from_secs(4);
+/// The timeout for a beacon node to start up.
+const STARTUP_TIMEOUT: Duration = Duration::from_secs(60);
 
 /// Provides a beacon node that is running in the current process on a given tokio executor (it
 /// is _local_ to this process).
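Aside: the new `STARTUP_TIMEOUT` is consumed by the next hunk via `tokio::time::timeout`, which races a future against a duration and yields `Err(Elapsed)` when time runs out, so a hung beacon node startup becomes a plain `String` error instead of stalling the simulator. A minimal sketch of the pattern, with an illustrative `slow_startup` future standing in for `ProductionBeaconNode::new` (the names and durations here are not from this diff):

    use std::time::Duration;
    use tokio::time::timeout;

    // Stand-in for a startup future that might hang indefinitely.
    async fn slow_startup() -> Result<&'static str, String> {
        tokio::time::sleep(Duration::from_millis(50)).await;
        Ok("node ready")
    }

    #[tokio::main]
    async fn main() {
        const STARTUP_TIMEOUT: Duration = Duration::from_secs(60);
        let result = timeout(STARTUP_TIMEOUT, slow_startup())
            .await
            // Outer Err means the timeout elapsed; surface it as a readable error.
            .map_err(|_| format!("startup timed out after {:?}", STARTUP_TIMEOUT))
            // Flatten the inner Result produced by the startup future itself.
            .and_then(|inner| inner);
        assert_eq!(result.unwrap(), "node ready");
    }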
@@ -51,12 +54,16 @@ impl LocalBeaconNode {
         client_config.set_data_dir(datadir.path().into());
         client_config.network.network_dir = PathBuf::from(datadir.path()).join("network");
 
-        ProductionBeaconNode::new(context, client_config)
-            .await
-            .map(move |client| Self {
-                client: client.into_inner(),
-                datadir,
-            })
+        timeout(
+            STARTUP_TIMEOUT,
+            ProductionBeaconNode::new(context, client_config),
+        )
+        .await
+        .map_err(|_| format!("Beacon node startup timed out after {:?}", STARTUP_TIMEOUT))?
+        .map(move |client| Self {
+            client: client.into_inner(),
+            datadir,
+        })
     }
 }
 
diff --git a/testing/simulator/src/cli.rs b/testing/simulator/src/cli.rs
index 5dc2d5ec8..ff8020105 100644
--- a/testing/simulator/src/cli.rs
+++ b/testing/simulator/src/cli.rs
@@ -25,8 +25,8 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 .default_value("4")
                 .help("Number of beacon nodes"))
             .arg(Arg::with_name("proposer-nodes")
-                .short("n")
-                .long("nodes")
+                .short("p")
+                .long("proposer_nodes")
                 .takes_value(true)
                 .default_value("2")
                 .help("Number of proposer-only beacon nodes"))
@@ -64,8 +64,8 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 .default_value("4")
                 .help("Number of beacon nodes"))
             .arg(Arg::with_name("proposer-nodes")
-                .short("n")
-                .long("nodes")
+                .short("p")
+                .long("proposer_nodes")
                 .takes_value(true)
                 .default_value("2")
                 .help("Number of proposer-only beacon nodes"))
diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs
index 3e764d27d..57c944cf1 100644
--- a/testing/simulator/src/eth1_sim.rs
+++ b/testing/simulator/src/eth1_sim.rs
@@ -1,14 +1,16 @@
 use crate::local_network::{EXECUTION_PORT, TERMINAL_BLOCK, TERMINAL_DIFFICULTY};
-use crate::{checks, LocalNetwork, E};
+use crate::{checks, LocalNetwork};
 use clap::ArgMatches;
 use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID};
 use eth1_test_rig::AnvilEth1Instance;
+use crate::retry::with_retry;
 use execution_layer::http::deposit_methods::Eth1Id;
 use futures::prelude::*;
+use node_test_rig::environment::RuntimeContext;
 use node_test_rig::{
     environment::{EnvironmentBuilder, LoggerConfig},
-    testing_client_config, testing_validator_config, ClientGenesis, ValidatorFiles,
+    testing_client_config, testing_validator_config, ClientConfig, ClientGenesis, ValidatorFiles,
 };
 use rayon::prelude::*;
 use sensitive_url::SensitiveUrl;
@@ -107,71 +109,24 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
     let context = env.core_context();
 
     let main_future = async {
-        /*
-         * Deploy the deposit contract, spawn tasks to keep creating new blocks and deposit
-         * validators.
-         */
-        let anvil_eth1_instance = AnvilEth1Instance::new(DEFAULT_CHAIN_ID.into()).await?;
-        let deposit_contract = anvil_eth1_instance.deposit_contract;
-        let chain_id = anvil_eth1_instance.anvil.chain_id();
-        let anvil = anvil_eth1_instance.anvil;
-        let eth1_endpoint = SensitiveUrl::parse(anvil.endpoint().as_str())
-            .expect("Unable to parse anvil endpoint.");
-        let deposit_contract_address = deposit_contract.address();
-
-        // Start a timer that produces eth1 blocks on an interval.
-        tokio::spawn(async move {
-            let mut interval = tokio::time::interval(eth1_block_time);
-            loop {
-                interval.tick().await;
-                let _ = anvil.evm_mine().await;
-            }
-        });
-
-        // Submit deposits to the deposit contract.
-        tokio::spawn(async move {
-            for i in 0..total_validator_count {
-                println!("Submitting deposit for validator {}...", i);
-                let _ = deposit_contract
-                    .deposit_deterministic_async::<E>(i, deposit_amount)
-                    .await;
-            }
-        });
-
-        let mut beacon_config = testing_client_config();
-
-        beacon_config.genesis = ClientGenesis::DepositContract;
-        beacon_config.eth1.endpoint = Eth1Endpoint::NoAuth(eth1_endpoint);
-        beacon_config.eth1.deposit_contract_address = deposit_contract_address;
-        beacon_config.eth1.deposit_contract_deploy_block = 0;
-        beacon_config.eth1.lowest_cached_block_number = 0;
-        beacon_config.eth1.follow_distance = 1;
-        beacon_config.eth1.node_far_behind_seconds = 20;
-        beacon_config.dummy_eth1_backend = false;
-        beacon_config.sync_eth1_chain = true;
-        beacon_config.eth1.auto_update_interval_millis = eth1_block_time.as_millis() as u64;
-        beacon_config.eth1.chain_id = Eth1Id::from(chain_id);
-        beacon_config.network.target_peers = node_count + proposer_nodes - 1;
-
-        beacon_config.network.enr_address = (Some(Ipv4Addr::LOCALHOST), None);
-
-        if post_merge_sim {
-            let el_config = execution_layer::Config {
-                execution_endpoints: vec![SensitiveUrl::parse(&format!(
-                    "http://localhost:{}",
-                    EXECUTION_PORT
-                ))
-                .unwrap()],
-                ..Default::default()
-            };
-
-            beacon_config.execution_layer = Some(el_config);
-        }
-
         /*
          * Create a new `LocalNetwork` with one beacon node.
          */
-        let network = LocalNetwork::new(context.clone(), beacon_config.clone()).await?;
+        let max_retries = 3;
+        let (network, beacon_config) = with_retry(max_retries, || {
+            Box::pin(create_local_network(
+                LocalNetworkParams {
+                    eth1_block_time,
+                    total_validator_count,
+                    deposit_amount,
+                    node_count,
+                    proposer_nodes,
+                    post_merge_sim,
+                },
+                context.clone(),
+            ))
+        })
+        .await?;
 
         /*
          * One by one, add beacon nodes to the network.
@@ -341,3 +296,88 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
 
     Ok(())
 }
+
+struct LocalNetworkParams {
+    eth1_block_time: Duration,
+    total_validator_count: usize,
+    deposit_amount: u64,
+    node_count: usize,
+    proposer_nodes: usize,
+    post_merge_sim: bool,
+}
+
+async fn create_local_network<E: EthSpec>(
+    LocalNetworkParams {
+        eth1_block_time,
+        total_validator_count,
+        deposit_amount,
+        node_count,
+        proposer_nodes,
+        post_merge_sim,
+    }: LocalNetworkParams,
+    context: RuntimeContext<E>,
+) -> Result<(LocalNetwork<E>, ClientConfig), String> {
+    /*
+     * Deploy the deposit contract, spawn tasks to keep creating new blocks and deposit
+     * validators.
+     */
+    let anvil_eth1_instance = AnvilEth1Instance::new(DEFAULT_CHAIN_ID.into()).await?;
+    let deposit_contract = anvil_eth1_instance.deposit_contract;
+    let chain_id = anvil_eth1_instance.anvil.chain_id();
+    let anvil = anvil_eth1_instance.anvil;
+    let eth1_endpoint =
+        SensitiveUrl::parse(anvil.endpoint().as_str()).expect("Unable to parse anvil endpoint.");
+    let deposit_contract_address = deposit_contract.address();
+
+    // Start a timer that produces eth1 blocks on an interval.
+    tokio::spawn(async move {
+        let mut interval = tokio::time::interval(eth1_block_time);
+        loop {
+            interval.tick().await;
+            let _ = anvil.evm_mine().await;
+        }
+    });
+
+    // Submit deposits to the deposit contract.
+    tokio::spawn(async move {
+        for i in 0..total_validator_count {
+            println!("Submitting deposit for validator {}...", i);
+            let _ = deposit_contract
+                .deposit_deterministic_async::<E>(i, deposit_amount)
+                .await;
+        }
+    });
+
+    let mut beacon_config = testing_client_config();
+
+    beacon_config.genesis = ClientGenesis::DepositContract;
+    beacon_config.eth1.endpoint = Eth1Endpoint::NoAuth(eth1_endpoint);
+    beacon_config.eth1.deposit_contract_address = deposit_contract_address;
+    beacon_config.eth1.deposit_contract_deploy_block = 0;
+    beacon_config.eth1.lowest_cached_block_number = 0;
+    beacon_config.eth1.follow_distance = 1;
+    beacon_config.eth1.node_far_behind_seconds = 20;
+    beacon_config.dummy_eth1_backend = false;
+    beacon_config.sync_eth1_chain = true;
+    beacon_config.eth1.auto_update_interval_millis = eth1_block_time.as_millis() as u64;
+    beacon_config.eth1.chain_id = Eth1Id::from(chain_id);
+    beacon_config.network.target_peers = node_count + proposer_nodes - 1;
+
+    beacon_config.network.enr_address = (Some(Ipv4Addr::LOCALHOST), None);
+
+    if post_merge_sim {
+        let el_config = execution_layer::Config {
+            execution_endpoints: vec![SensitiveUrl::parse(&format!(
+                "http://localhost:{}",
+                EXECUTION_PORT
+            ))
+            .unwrap()],
+            ..Default::default()
+        };
+
+        beacon_config.execution_layer = Some(el_config);
+    }
+
+    let network = LocalNetwork::new(context, beacon_config.clone()).await?;
+    Ok((network, beacon_config))
+}
diff --git a/testing/simulator/src/main.rs b/testing/simulator/src/main.rs
index a19777c5a..e8af9c180 100644
--- a/testing/simulator/src/main.rs
+++ b/testing/simulator/src/main.rs
@@ -21,6 +21,7 @@ mod cli;
 mod eth1_sim;
 mod local_network;
 mod no_eth1_sim;
+mod retry;
 mod sync_sim;
 
 use cli::cli_app;
diff --git a/testing/simulator/src/retry.rs b/testing/simulator/src/retry.rs
new file mode 100644
index 000000000..a4eb52cea
--- /dev/null
+++ b/testing/simulator/src/retry.rs
@@ -0,0 +1,63 @@
+use std::fmt::Debug;
+use std::future::Future;
+use std::pin::Pin;
+
+/// Executes the function with a specified number of retries if the function returns an error.
+/// Once it exceeds `max_retries` and still fails, the error is returned.
+pub async fn with_retry<T, E, F>(max_retries: usize, mut func: F) -> Result<T, E>
+where
+    F: FnMut() -> Pin<Box<dyn Future<Output = Result<T, E>>>>,
+    E: Debug,
+{
+    let mut retry_count = 0;
+    loop {
+        let result = Box::pin(func()).await;
+        if result.is_ok() || retry_count >= max_retries {
+            break result;
+        }
+        retry_count += 1;
+
+        if let Err(e) = result {
+            eprintln!(
+                "Operation failed with error {:?}, retrying {} of {}",
+                e, retry_count, max_retries
+            );
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use std::collections::VecDeque;
+
+    async fn my_async_func(is_ok: bool) -> Result<(), ()> {
+        if is_ok {
+            Ok(())
+        } else {
+            Err(())
+        }
+    }
+
+    #[tokio::test]
+    async fn test_with_retry_ok() {
+        let res = with_retry(3, || Box::pin(my_async_func(true))).await;
+        assert!(res.is_ok());
+    }
+
+    #[tokio::test]
+    async fn test_with_retry_2nd_ok() {
+        let mut mock_results = VecDeque::from([false, true]);
+        let res = with_retry(3, || {
+            Box::pin(my_async_func(mock_results.pop_front().unwrap()))
+        })
+        .await;
+        assert!(res.is_ok());
+    }
+
+    #[tokio::test]
+    async fn test_with_retry_fail() {
+        let res = with_retry(3, || Box::pin(my_async_func(false))).await;
+        assert!(res.is_err());
+    }
+}
diff --git a/watch/Cargo.toml b/watch/Cargo.toml
index d1793a9d0..1a003167d 100644
--- a/watch/Cargo.toml
+++ b/watch/Cargo.toml
@@ -21,7 +21,7 @@ types = { path = "../consensus/types" }
 eth2 = { path = "../common/eth2" }
 beacon_node = { path = "../beacon_node"}
 tokio = { version = "1.14.0", features = ["time"] }
-axum = "0.5.15"
+axum = "0.6.18"
 hyper = "0.14.20"
 serde = "1.0.116"
 serde_json = "1.0.58"
diff --git a/watch/src/server/mod.rs b/watch/src/server/mod.rs
index 09d5ec6aa..d8ae0eb6c 100644
--- a/watch/src/server/mod.rs
+++ b/watch/src/server/mod.rs
@@ -5,7 +5,6 @@ use crate::config::Config as FullConfig;
 use crate::database::{self, PgPool};
 use crate::suboptimal_attestations::{attestation_routes, blockprint_attestation_routes};
 use axum::{
-    handler::Handler,
     http::{StatusCode, Uri},
     routing::get,
     Extension, Json, Router,
@@ -104,7 +103,7 @@ pub fn start_server(
     }
 
     let app = routes
-        .fallback(route_not_found.into_service())
+        .fallback(route_not_found)
         .layer(Extension(pool))
         .layer(Extension(slots_per_epoch));
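Aside: the two `watch` hunks above track axum's 0.5 → 0.6 breaking change. In 0.6, `Router::fallback` accepts an async handler directly, so the 0.5-era `route_not_found.into_service()` adapter and the `handler::Handler` import are dropped. A minimal sketch of the 0.6 style (the health route here is illustrative, not taken from this diff):

    use axum::{
        http::{StatusCode, Uri},
        routing::get,
        Router,
    };

    // In axum 0.6 a fallback is just an async handler; no `into_service` adapter needed.
    async fn route_not_found(uri: Uri) -> (StatusCode, String) {
        (StatusCode::NOT_FOUND, format!("no route for {uri}"))
    }

    fn app() -> Router {
        Router::new()
            .route("/v1/health", get(|| async { "ok" }))
            // The axum 0.5 spelling was `.fallback(route_not_found.into_service())`.
            .fallback(route_not_found)
    }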