From 661779f08e7a2f43fe394af9f27078cdb87a4a1b Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Thu, 24 Aug 2023 05:54:36 +0000 Subject: [PATCH 1/3] Implement expected withdrawals endpoint (#4390) ## Issue Addressed [#4029](https://github.com/sigp/lighthouse/issues/4029) ## Proposed Changes implement expected_withdrawals HTTP API per the spec https://github.com/ethereum/beacon-APIs/pull/304 ## Additional Info --- beacon_node/http_api/src/builder_states.rs | 72 +++++++++++++++ beacon_node/http_api/src/lib.rs | 57 ++++++++++++ beacon_node/http_api/tests/tests.rs | 101 +++++++++++++++++++++ common/eth2/src/lib.rs | 17 ++++ common/eth2/src/types.rs | 5 + 5 files changed, 252 insertions(+) create mode 100644 beacon_node/http_api/src/builder_states.rs diff --git a/beacon_node/http_api/src/builder_states.rs b/beacon_node/http_api/src/builder_states.rs new file mode 100644 index 000000000..90203f2d6 --- /dev/null +++ b/beacon_node/http_api/src/builder_states.rs @@ -0,0 +1,72 @@ +use crate::StateId; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use safe_arith::SafeArith; +use state_processing::per_block_processing::get_expected_withdrawals; +use state_processing::state_advance::partial_state_advance; +use std::sync::Arc; +use types::{BeaconState, EthSpec, ForkName, Slot, Withdrawals}; + +const MAX_EPOCH_LOOKAHEAD: u64 = 2; + +/// Get the withdrawals computed from the specified state, that will be included in the block +/// that gets built on the specified state. +pub fn get_next_withdrawals( + chain: &Arc>, + mut state: BeaconState, + state_id: StateId, + proposal_slot: Slot, +) -> Result, warp::Rejection> { + get_next_withdrawals_sanity_checks(chain, &state, proposal_slot)?; + + // advance the state to the epoch of the proposal slot. + let proposal_epoch = proposal_slot.epoch(T::EthSpec::slots_per_epoch()); + let (state_root, _, _) = state_id.root(chain)?; + if proposal_epoch != state.current_epoch() { + if let Err(e) = + partial_state_advance(&mut state, Some(state_root), proposal_slot, &chain.spec) + { + return Err(warp_utils::reject::custom_server_error(format!( + "failed to advance to the epoch of the proposal slot: {:?}", + e + ))); + } + } + + match get_expected_withdrawals(&state, &chain.spec) { + Ok(withdrawals) => Ok(withdrawals), + Err(e) => Err(warp_utils::reject::custom_server_error(format!( + "failed to get expected withdrawal: {:?}", + e + ))), + } +} + +fn get_next_withdrawals_sanity_checks( + chain: &BeaconChain, + state: &BeaconState, + proposal_slot: Slot, +) -> Result<(), warp::Rejection> { + if proposal_slot <= state.slot() { + return Err(warp_utils::reject::custom_bad_request( + "proposal slot must be greater than the pre-state slot".to_string(), + )); + } + + let fork = chain.spec.fork_name_at_slot::(proposal_slot); + if let ForkName::Base | ForkName::Altair | ForkName::Merge = fork { + return Err(warp_utils::reject::custom_bad_request( + "the specified state is a pre-capella state.".to_string(), + )); + } + + let look_ahead_limit = MAX_EPOCH_LOOKAHEAD + .safe_mul(T::EthSpec::slots_per_epoch()) + .map_err(warp_utils::reject::arith_error)?; + if proposal_slot >= state.slot() + look_ahead_limit { + return Err(warp_utils::reject::custom_bad_request(format!( + "proposal slot is greater than or equal to the look ahead limit: {look_ahead_limit}" + ))); + } + + Ok(()) +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 8e316834d..3aa10139b 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -10,6 +10,7 
@@ mod attester_duties; mod block_id; mod block_packing_efficiency; mod block_rewards; +mod builder_states; mod database; mod metrics; mod proposer_duties; @@ -32,6 +33,7 @@ use beacon_chain::{ }; use beacon_processor::BeaconProcessorSend; pub use block_id::BlockId; +use builder_states::get_next_withdrawals; use bytes::Bytes; use directory::DEFAULT_ROOT_DIR; use eth2::types::{ @@ -2291,6 +2293,60 @@ pub fn serve( }, ); + /* + * builder/states + */ + + let builder_states_path = eth_v1 + .and(warp::path("builder")) + .and(warp::path("states")) + .and(chain_filter.clone()); + + // GET builder/states/{state_id}/expected_withdrawals + let get_expected_withdrawals = builder_states_path + .clone() + .and(task_spawner_filter.clone()) + .and(warp::path::param::()) + .and(warp::path("expected_withdrawals")) + .and(warp::query::()) + .and(warp::path::end()) + .and(warp::header::optional::("accept")) + .then( + |chain: Arc>, + task_spawner: TaskSpawner, + state_id: StateId, + query: api_types::ExpectedWithdrawalsQuery, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + let (state, execution_optimistic, finalized) = state_id.state(&chain)?; + let proposal_slot = query.proposal_slot.unwrap_or(state.slot() + 1); + let withdrawals = + get_next_withdrawals::(&chain, state, state_id, proposal_slot)?; + + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .header("Content-Type", "application/octet-stream") + .body(withdrawals.as_ssz_bytes().into()) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => Ok(warp::reply::json( + &api_types::ExecutionOptimisticFinalizedResponse { + data: withdrawals, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }, + ) + .into_response()), + } + }) + }, + ); + /* * beacon/rewards */ @@ -4503,6 +4559,7 @@ pub fn serve( .uor(get_lighthouse_block_packing_efficiency) .uor(get_lighthouse_merge_readiness) .uor(get_events) + .uor(get_expected_withdrawals) .uor(lighthouse_log_events.boxed()) .recover(warp_utils::reject::handle_rejection), ) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 3c72441c0..46cc55591 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -28,6 +28,7 @@ use sensitive_url::SensitiveUrl; use slot_clock::SlotClock; use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::per_slot_processing; +use state_processing::state_advance::partial_state_advance; use std::convert::TryInto; use std::sync::Arc; use tokio::time::Duration; @@ -4341,6 +4342,72 @@ impl ApiTester { self } + pub async fn test_get_expected_withdrawals_invalid_state(self) -> Self { + let state_id = CoreStateId::Root(Hash256::zero()); + + let result = self.client.get_expected_withdrawals(&state_id).await; + + match result { + Err(e) => { + assert_eq!(e.status().unwrap(), 404); + } + _ => panic!("query did not fail correctly"), + } + + self + } + + pub async fn test_get_expected_withdrawals_capella(self) -> Self { + let slot = self.chain.slot().unwrap(); + let state_id = CoreStateId::Slot(slot); + + // calculate the expected withdrawals + let (mut state, _, _) = StateId(state_id).state(&self.chain).unwrap(); + let proposal_slot = state.slot() + 1; + let proposal_epoch = proposal_slot.epoch(E::slots_per_epoch()); + let (state_root, _, _) = StateId(state_id).root(&self.chain).unwrap(); + if proposal_epoch 
!= state.current_epoch() { + let _ = partial_state_advance( + &mut state, + Some(state_root), + proposal_slot, + &self.chain.spec, + ); + } + let expected_withdrawals = get_expected_withdrawals(&state, &self.chain.spec).unwrap(); + + // fetch expected withdrawals from the client + let result = self.client.get_expected_withdrawals(&state_id).await; + match result { + Ok(withdrawal_response) => { + assert_eq!(withdrawal_response.execution_optimistic, Some(false)); + assert_eq!(withdrawal_response.finalized, Some(false)); + assert_eq!(withdrawal_response.data, expected_withdrawals.to_vec()); + } + Err(e) => { + println!("{:?}", e); + panic!("query failed incorrectly"); + } + } + + self + } + + pub async fn test_get_expected_withdrawals_pre_capella(self) -> Self { + let state_id = CoreStateId::Head; + + let result = self.client.get_expected_withdrawals(&state_id).await; + + match result { + Err(e) => { + assert_eq!(e.status().unwrap(), 400); + } + _ => panic!("query did not fail correctly"), + } + + self + } + pub async fn test_get_events_altair(self) -> Self { let topics = vec![EventTopic::ContributionAndProof]; let mut events_future = self @@ -5123,3 +5190,37 @@ async fn optimistic_responses() { .test_check_optimistic_responses() .await; } + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn expected_withdrawals_invalid_pre_capella() { + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) + .await + .test_get_expected_withdrawals_pre_capella() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn expected_withdrawals_invalid_state() { + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) + .await + .test_get_expected_withdrawals_invalid_state() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn expected_withdrawals_valid_capella() { + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) + .await + .test_get_expected_withdrawals_capella() + .await; +} diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 146a832e3..74c2f3802 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1261,6 +1261,23 @@ impl BeaconNodeHttpClient { Ok(()) } + // GET builder/states/{state_id}/expected_withdrawals + pub async fn get_expected_withdrawals( + &self, + state_id: &StateId, + ) -> Result>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("builder") + .push("states") + .push(&state_id.to_string()) + .push("expected_withdrawals"); + + self.get(path).await + } + /// `POST validator/contribution_and_proofs` pub async fn post_validator_contribution_and_proofs( &self, diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index f451d3b8f..28fd09c09 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -581,6 +581,11 @@ pub struct SyncingData { pub sync_distance: Slot, } +#[derive(Serialize, Deserialize)] +pub struct ExpectedWithdrawalsQuery { + pub proposal_slot: Option, +} + #[derive(Clone, PartialEq, Debug, Deserialize)] #[serde(try_from = "String", bound = "T: FromStr")] pub struct QueryVec { From ea43b6a53c0e24a4a1111bcbb1b27fd4b33b4322 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Thu, 24 Aug 2023 05:54:37 +0000 Subject: [PATCH 2/3] Revive mplex (#4619) ## Issue Addressed N/A ## Proposed Changes In #4431 , we seem to have removed support for mplex as it is being deprecated in libp2p. See https://github.com/libp2p/specs/issues/553 . Related rust-libp2p PR https://github.com/libp2p/rust-libp2p/pull/3920 However, since this isn't part of the official [consensus specs](https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#multiplexing), we still need to support mplex. > Clients MUST support [mplex](https://github.com/libp2p/specs/tree/master/mplex) and MAY support [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). This PR adds back mplex support as before. --- Cargo.lock | 20 +++++++++++++++++++ beacon_node/lighthouse_network/Cargo.toml | 1 + .../lighthouse_network/src/service/utils.rs | 10 +++++++++- 3 files changed, 30 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 9a8cf4833..b1779045e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4117,6 +4117,25 @@ dependencies = [ "prometheus-client", ] +[[package]] +name = "libp2p-mplex" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93959ed08b6caf9810e067655e25f1362098797fef7c44d3103e63dcb6f0fabe" +dependencies = [ + "asynchronous-codec", + "bytes", + "futures", + "libp2p-core", + "libp2p-identity", + "log", + "nohash-hasher", + "parking_lot 0.12.1", + "rand 0.8.5", + "smallvec 1.11.0", + "unsigned-varint 0.7.1", +] + [[package]] name = "libp2p-noise" version = "0.43.0" @@ -4382,6 +4401,7 @@ dependencies = [ "hex", "lazy_static", "libp2p", + "libp2p-mplex", "lighthouse_metrics", "lighthouse_version", "lru 0.7.8", diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index f71845fed..925d278ad 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -44,6 +44,7 @@ prometheus-client = "0.21.0" unused_port = { path = "../../common/unused_port" } delay_map = "0.3.0" void = "1" +libp2p-mplex = "0.40.0" [dependencies.libp2p] version = "0.52" diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 21fd09b6b..b8acc4ed6 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -50,13 +50,21 @@ pub fn build_transport( transport.or_transport(libp2p::websocket::WsConfig::new(trans_clone)) }; + // mplex config + let mut mplex_config = libp2p_mplex::MplexConfig::new(); + mplex_config.set_max_buffer_size(256); + mplex_config.set_max_buffer_behaviour(libp2p_mplex::MaxBufferBehaviour::Block); + // yamux config let mut 
yamux_config = yamux::Config::default(); yamux_config.set_window_update_mode(yamux::WindowUpdateMode::on_read()); let (transport, bandwidth) = transport .upgrade(core::upgrade::Version::V1) .authenticate(generate_noise_config(&local_private_key)) - .multiplex(yamux_config) + .multiplex(core::upgrade::SelectUpgrade::new( + yamux_config, + mplex_config, + )) .timeout(Duration::from_secs(10)) .boxed() .with_bandwidth_logging(); From fa6003bb5ccf1949297dea66f79849dbf073a918 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 24 Aug 2023 05:54:38 +0000 Subject: [PATCH 3/3] Use lockfile with cross and fix audit fail (#4656) ## Issue Addressed Temporary ignore for #4651. We are unaffected, and upstream will be patched in a few days. ## Proposed Changes - Ignore cargo audit failures (ublocks CI) - Use `--locked` when building with `cross`. We use `--locked` for regular builds, and I think excluding it from `cross` was just an oversight. I think for consistent builds it makes sense to use `--locked` while building. This is particularly relevant for release binaries, which otherwise will just use a random selection of dependencies that exist on build day (near impossible to recreate if we had to). --- Makefile | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index cb447e26a..bc84cdede 100644 --- a/Makefile +++ b/Makefile @@ -71,13 +71,13 @@ install-lcli: # optimized CPU functions that may not be available on some systems. This # results in a more portable binary with ~20% slower BLS verification. build-x86_64: - cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "modern,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" + cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "modern,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked build-x86_64-portable: - cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" + cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked build-aarch64: - cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" + cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked build-aarch64-portable: - cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" + cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked # Create a `.tar.gz` containing a binary for a specific target. define tarball_release_binary @@ -206,8 +206,9 @@ arbitrary-fuzz: # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database) audit: - cargo install --force cargo-audit - cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2022-0093 + # cargo install --force cargo-audit + cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2022-0093 \ + --ignore RUSTSEC-2023-0052 --ignore RUSTSEC-2023-0053 # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose. vendor:
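
A minimal usage sketch for the expected-withdrawals endpoint added in the first commit (`GET /eth/v1/builder/states/{state_id}/expected_withdrawals`), using the `BeaconNodeHttpClient::get_expected_withdrawals` method introduced in `common/eth2/src/lib.rs`. This is illustrative and not part of the patch: the `tokio`/`sensitive_url` dependencies, the local node URL, and the `Timeouts::set_all` construction are assumptions based on how the `eth2` crate is used elsewhere, and the element type behind `response.data` is assumed since the patch text elides the generic parameters.

```rust
// Hypothetical standalone sketch; not part of the patch series.
use eth2::types::StateId;
use eth2::{BeaconNodeHttpClient, Timeouts};
use sensitive_url::SensitiveUrl;
use std::time::Duration;

#[tokio::main]
async fn main() -> Result<(), eth2::Error> {
    // Assumed local beacon node HTTP API address.
    let url = SensitiveUrl::parse("http://localhost:5052").expect("valid URL");
    let client = BeaconNodeHttpClient::new(url, Timeouts::set_all(Duration::from_secs(12)));

    // GET /eth/v1/builder/states/head/expected_withdrawals
    // With no `proposal_slot` query, the server computes withdrawals for
    // the slot after the requested pre-state (pre-state slot + 1).
    let response = client.get_expected_withdrawals(&StateId::Head).await?;

    println!(
        "execution_optimistic: {:?}, finalized: {:?}, withdrawal count: {}",
        response.execution_optimistic,
        response.finalized,
        response.data.len(),
    );
    Ok(())
}
```

Note that the server route also accepts an optional `proposal_slot` query parameter and an SSZ `Accept` header, neither of which is exposed by this client method, and that per the tests it returns 400 for pre-Capella states and 404 for unknown state roots.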