Merge pull request #4658 from realbigsean/merge-unstable-deneb-aug-24
Merge unstable deneb aug 24
commit ce824e00a3

Makefile (13 lines changed)
@@ -71,13 +71,13 @@ install-lcli:
 # optimized CPU functions that may not be available on some systems. This
 # results in a more portable binary with ~20% slower BLS verification.
 build-x86_64:
-	cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "modern,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)"
+	cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "modern,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked
 build-x86_64-portable:
-	cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)"
+	cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked
 build-aarch64:
-	cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)"
+	cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked
 build-aarch64-portable:
-	cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)"
+	cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked

 # Create a `.tar.gz` containing a binary for a specific target.
 define tarball_release_binary
@@ -214,8 +214,9 @@ arbitrary-fuzz:

 # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database)
 audit:
-	cargo install --force cargo-audit
-	cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2022-0093
+	# cargo install --force cargo-audit
+	cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2022-0093 \
+		--ignore RUSTSEC-2023-0052 --ignore RUSTSEC-2023-0053

 # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose.
 vendor:
@@ -241,7 +241,8 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
     let Some(parent_block) = chain
         .canonical_head
         .fork_choice_read_lock()
-        .get_block(&block_parent_root) else {
+        .get_block(&block_parent_root)
+    else {
         return Err(GossipBlobError::BlobParentUnknown(
             signed_blob_sidecar.message,
         ));
@@ -349,11 +349,8 @@ pub fn consistency_checks<T: EthSpec>(
     block: &SignedBeaconBlock<T>,
     blobs: &[Arc<BlobSidecar<T>>],
 ) -> Result<(), AvailabilityCheckError> {
-    let Ok(block_kzg_commitments) = block
-        .message()
-        .body()
-        .blob_kzg_commitments() else {
-        return Ok(())
+    let Ok(block_kzg_commitments) = block.message().body().blob_kzg_commitments() else {
+        return Ok(());
     };

     if blobs.len() != block_kzg_commitments.len() {
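Many hunks in this merge (above and below) are mechanical reformattings of Rust's `let`-`else` statements, presumably from the rustfmt shipped with Rust 1.72, which began formatting that syntax: `else` moves to its own line after a multi-line scrutinee, and the diverging expression gains its trailing semicolon. A minimal sketch of the construct, using a hypothetical `parse_pair` helper that is not part of this diff:

    // A minimal sketch of the `let`-`else` pattern these hunks reformat.
    // `parse_pair` is a hypothetical helper, not part of the diff.
    fn parse_pair(s: &str) -> Option<(u64, u64)> {
        let mut parts = s.split(',');
        // The `else` arm must diverge (return, break, panic, ...), which is
        // why the reformatted bodies end in `return ...;` with a semicolon.
        let (Some(a), Some(b)) = (parts.next(), parts.next()) else {
            return None;
        };
        Some((a.trim().parse().ok()?, b.trim().parse().ok()?))
    }

    fn main() {
        assert_eq!(parse_pair("1, 2"), Some((1, 2)));
        assert_eq!(parse_pair("1"), None);
    }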
@@ -586,8 +586,9 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
         let Some(verified_blobs) = Vec::from(pending_components.verified_blobs)
             .into_iter()
             .take(num_blobs_expected)
-            .collect::<Option<Vec<_>>>() else {
-            return Ok(Availability::MissingComponents(import_data.block_root))
+            .collect::<Option<Vec<_>>>()
+        else {
+            return Ok(Availability::MissingComponents(import_data.block_root));
         };

         let available_block = make_available(block, verified_blobs)?;
@@ -841,7 +841,7 @@ mod tests {
             let mut store = $type::default();
             let max_cap = store.max_capacity();

-            let to_skip = vec![1_u64, 3, 4, 5];
+            let to_skip = [1_u64, 3, 4, 5];
             let periods = (0..max_cap * 3)
                 .into_iter()
                 .filter(|i| !to_skip.contains(i))
@@ -1012,7 +1012,7 @@ mod tests {
             let mut store = $type::default();
             let max_cap = store.max_capacity();

-            let to_skip = vec![1_u64, 3, 4, 5];
+            let to_skip = [1_u64, 3, 4, 5];
             let periods = (0..max_cap * 3)
                 .into_iter()
                 .filter(|i| !to_skip.contains(i))
@@ -1121,7 +1121,7 @@ mod tests {
             let mut store = $type::default();
             let max_cap = store.max_capacity();

-            let to_skip = vec![1_u64, 3, 4, 5];
+            let to_skip = [1_u64, 3, 4, 5];
             let periods = (0..max_cap * 3)
                 .into_iter()
                 .filter(|i| !to_skip.contains(i))
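The `vec![...]` to `[...]` changes in these test hunks drop a heap allocation: `contains` is available on plain arrays via slice methods, so a fixed list of periods to skip needs no `Vec` (this is what clippy's `useless_vec` lint suggests). A small sketch of the equivalence:

    // Sketch of the `vec![...]` -> array change: `contains` works on plain
    // arrays, so no heap allocation is needed for a fixed skip-list.
    fn main() {
        let to_skip = [1_u64, 3, 4, 5]; // previously: vec![1_u64, 3, 4, 5]
        let kept: Vec<u64> = (0..10).filter(|i| !to_skip.contains(i)).collect();
        assert_eq!(kept, vec![0, 2, 6, 7, 8, 9]);
    }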
@@ -136,7 +136,9 @@ async fn produces_attestations() {
             let rpc_block =
                 RpcBlock::<MainnetEthSpec>::new(Arc::new(block.clone()), Some(blobs.clone()))
                     .unwrap();
-            let beacon_chain::data_availability_checker::MaybeAvailableBlock::Available(available_block) = chain
+            let beacon_chain::data_availability_checker::MaybeAvailableBlock::Available(
+                available_block,
+            ) = chain
                 .data_availability_checker
                 .check_rpc_block_availability(rpc_block)
                 .unwrap()
@@ -212,7 +214,9 @@ async fn early_attester_cache_old_request() {

     let rpc_block =
         RpcBlock::<MainnetEthSpec>::new(head.beacon_block.clone(), Some(head_blobs)).unwrap();
-    let beacon_chain::data_availability_checker::MaybeAvailableBlock::Available(available_block) = harness.chain
+    let beacon_chain::data_availability_checker::MaybeAvailableBlock::Available(available_block) =
+        harness
+            .chain
         .data_availability_checker
         .check_rpc_block_availability(rpc_block)
         .unwrap()
@@ -39,7 +39,7 @@ pub fn genesis_deposits(

     Ok(deposit_data
         .into_iter()
-        .zip(proofs.into_iter())
+        .zip(proofs)
         .map(|(data, proof)| (data, proof.into()))
         .map(|(data, proof)| Deposit { proof, data })
         .collect())
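`Iterator::zip` accepts any `IntoIterator`, so the explicit `.into_iter()` on `proofs` was redundant; clippy reports this pattern as `useless_conversion`. A sketch:

    // Sketch: `zip` takes any `IntoIterator`, so passing the collection
    // directly is equivalent to calling `.into_iter()` on it first.
    fn main() {
        let data = vec!["a", "b"];
        let proofs = vec![1, 2];
        let pairs: Vec<(&str, i32)> = data.into_iter().zip(proofs).collect();
        assert_eq!(pairs, vec![("a", 1), ("b", 2)]);
    }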
beacon_node/http_api/src/builder_states.rs (new file, 72 lines)
@@ -0,0 +1,72 @@
+use crate::StateId;
+use beacon_chain::{BeaconChain, BeaconChainTypes};
+use safe_arith::SafeArith;
+use state_processing::per_block_processing::get_expected_withdrawals;
+use state_processing::state_advance::partial_state_advance;
+use std::sync::Arc;
+use types::{BeaconState, EthSpec, ForkName, Slot, Withdrawals};
+
+const MAX_EPOCH_LOOKAHEAD: u64 = 2;
+
+/// Get the withdrawals computed from the specified state, that will be included in the block
+/// that gets built on the specified state.
+pub fn get_next_withdrawals<T: BeaconChainTypes>(
+    chain: &Arc<BeaconChain<T>>,
+    mut state: BeaconState<T::EthSpec>,
+    state_id: StateId,
+    proposal_slot: Slot,
+) -> Result<Withdrawals<T::EthSpec>, warp::Rejection> {
+    get_next_withdrawals_sanity_checks(chain, &state, proposal_slot)?;
+
+    // advance the state to the epoch of the proposal slot.
+    let proposal_epoch = proposal_slot.epoch(T::EthSpec::slots_per_epoch());
+    let (state_root, _, _) = state_id.root(chain)?;
+    if proposal_epoch != state.current_epoch() {
+        if let Err(e) =
+            partial_state_advance(&mut state, Some(state_root), proposal_slot, &chain.spec)
+        {
+            return Err(warp_utils::reject::custom_server_error(format!(
+                "failed to advance to the epoch of the proposal slot: {:?}",
+                e
+            )));
+        }
+    }
+
+    match get_expected_withdrawals(&state, &chain.spec) {
+        Ok(withdrawals) => Ok(withdrawals),
+        Err(e) => Err(warp_utils::reject::custom_server_error(format!(
+            "failed to get expected withdrawal: {:?}",
+            e
+        ))),
+    }
+}
+
+fn get_next_withdrawals_sanity_checks<T: BeaconChainTypes>(
+    chain: &BeaconChain<T>,
+    state: &BeaconState<T::EthSpec>,
+    proposal_slot: Slot,
+) -> Result<(), warp::Rejection> {
+    if proposal_slot <= state.slot() {
+        return Err(warp_utils::reject::custom_bad_request(
+            "proposal slot must be greater than the pre-state slot".to_string(),
+        ));
+    }
+
+    let fork = chain.spec.fork_name_at_slot::<T::EthSpec>(proposal_slot);
+    if let ForkName::Base | ForkName::Altair | ForkName::Merge = fork {
+        return Err(warp_utils::reject::custom_bad_request(
+            "the specified state is a pre-capella state.".to_string(),
+        ));
+    }
+
+    let look_ahead_limit = MAX_EPOCH_LOOKAHEAD
+        .safe_mul(T::EthSpec::slots_per_epoch())
+        .map_err(warp_utils::reject::arith_error)?;
+    if proposal_slot >= state.slot() + look_ahead_limit {
+        return Err(warp_utils::reject::custom_bad_request(format!(
+            "proposal slot is greater than or equal to the look ahead limit: {look_ahead_limit}"
+        )));
+    }
+
+    Ok(())
+}
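The new module bounds the proposal slot to a narrow window past the pre-state: strictly greater than the state's slot, but less than two epochs ahead. A sketch of that window with plain integers standing in for `Slot` (mainnet's 32 slots per epoch is an assumption here):

    // Sketch of the proposal-slot window enforced by the sanity checks,
    // with plain u64s standing in for `Slot`.
    fn proposal_slot_is_valid(pre_state_slot: u64, proposal_slot: u64) -> bool {
        const MAX_EPOCH_LOOKAHEAD: u64 = 2;
        const SLOTS_PER_EPOCH: u64 = 32; // mainnet value, assumed
        let look_ahead_limit = MAX_EPOCH_LOOKAHEAD * SLOTS_PER_EPOCH;
        proposal_slot > pre_state_slot && proposal_slot < pre_state_slot + look_ahead_limit
    }

    fn main() {
        assert!(proposal_slot_is_valid(100, 101)); // next slot: ok
        assert!(!proposal_slot_is_valid(100, 100)); // must be strictly later
        assert!(!proposal_slot_is_valid(100, 164)); // at the 64-slot lookahead limit
    }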
@@ -11,6 +11,7 @@ mod block_id;
 mod block_packing_efficiency;
 mod block_rewards;
 mod build_block_contents;
+mod builder_states;
 mod database;
 mod metrics;
 mod proposer_duties;
@@ -33,6 +34,7 @@ use beacon_chain::{
 };
 use beacon_processor::BeaconProcessorSend;
 pub use block_id::BlockId;
+use builder_states::get_next_withdrawals;
 use bytes::Bytes;
 use directory::DEFAULT_ROOT_DIR;
 use eth2::types::{
@@ -2332,6 +2334,60 @@ pub fn serve<T: BeaconChainTypes>(
             },
         );

+    /*
+     * builder/states
+     */
+
+    let builder_states_path = eth_v1
+        .and(warp::path("builder"))
+        .and(warp::path("states"))
+        .and(chain_filter.clone());
+
+    // GET builder/states/{state_id}/expected_withdrawals
+    let get_expected_withdrawals = builder_states_path
+        .clone()
+        .and(task_spawner_filter.clone())
+        .and(warp::path::param::<StateId>())
+        .and(warp::path("expected_withdrawals"))
+        .and(warp::query::<api_types::ExpectedWithdrawalsQuery>())
+        .and(warp::path::end())
+        .and(warp::header::optional::<api_types::Accept>("accept"))
+        .then(
+            |chain: Arc<BeaconChain<T>>,
+             task_spawner: TaskSpawner<T::EthSpec>,
+             state_id: StateId,
+             query: api_types::ExpectedWithdrawalsQuery,
+             accept_header: Option<api_types::Accept>| {
+                task_spawner.blocking_response_task(Priority::P1, move || {
+                    let (state, execution_optimistic, finalized) = state_id.state(&chain)?;
+                    let proposal_slot = query.proposal_slot.unwrap_or(state.slot() + 1);
+                    let withdrawals =
+                        get_next_withdrawals::<T>(&chain, state, state_id, proposal_slot)?;
+
+                    match accept_header {
+                        Some(api_types::Accept::Ssz) => Response::builder()
+                            .status(200)
+                            .header("Content-Type", "application/octet-stream")
+                            .body(withdrawals.as_ssz_bytes().into())
+                            .map_err(|e| {
+                                warp_utils::reject::custom_server_error(format!(
+                                    "failed to create response: {}",
+                                    e
+                                ))
+                            }),
+                        _ => Ok(warp::reply::json(
+                            &api_types::ExecutionOptimisticFinalizedResponse {
+                                data: withdrawals,
+                                execution_optimistic: Some(execution_optimistic),
+                                finalized: Some(finalized),
+                            },
+                        )
+                        .into_response()),
+                    }
+                })
+            },
+        );
+
     /*
      * beacon/rewards
      */
@@ -4529,6 +4585,7 @@ pub fn serve<T: BeaconChainTypes>(
                 .uor(get_lighthouse_block_packing_efficiency)
                 .uor(get_lighthouse_merge_readiness)
                 .uor(get_events)
+                .uor(get_expected_withdrawals)
                 .uor(lighthouse_log_events.boxed())
                 .recover(warp_utils::reject::handle_rejection),
         )
@@ -28,6 +28,7 @@ use sensitive_url::SensitiveUrl;
 use slot_clock::SlotClock;
 use state_processing::per_block_processing::get_expected_withdrawals;
 use state_processing::per_slot_processing;
+use state_processing::state_advance::partial_state_advance;
 use std::convert::TryInto;
 use std::sync::Arc;
 use tokio::time::Duration;
@@ -4443,6 +4444,72 @@ impl ApiTester {
         self
     }

+    pub async fn test_get_expected_withdrawals_invalid_state(self) -> Self {
+        let state_id = CoreStateId::Root(Hash256::zero());
+
+        let result = self.client.get_expected_withdrawals(&state_id).await;
+
+        match result {
+            Err(e) => {
+                assert_eq!(e.status().unwrap(), 404);
+            }
+            _ => panic!("query did not fail correctly"),
+        }
+
+        self
+    }
+
+    pub async fn test_get_expected_withdrawals_capella(self) -> Self {
+        let slot = self.chain.slot().unwrap();
+        let state_id = CoreStateId::Slot(slot);
+
+        // calculate the expected withdrawals
+        let (mut state, _, _) = StateId(state_id).state(&self.chain).unwrap();
+        let proposal_slot = state.slot() + 1;
+        let proposal_epoch = proposal_slot.epoch(E::slots_per_epoch());
+        let (state_root, _, _) = StateId(state_id).root(&self.chain).unwrap();
+        if proposal_epoch != state.current_epoch() {
+            let _ = partial_state_advance(
+                &mut state,
+                Some(state_root),
+                proposal_slot,
+                &self.chain.spec,
+            );
+        }
+        let expected_withdrawals = get_expected_withdrawals(&state, &self.chain.spec).unwrap();
+
+        // fetch expected withdrawals from the client
+        let result = self.client.get_expected_withdrawals(&state_id).await;
+        match result {
+            Ok(withdrawal_response) => {
+                assert_eq!(withdrawal_response.execution_optimistic, Some(false));
+                assert_eq!(withdrawal_response.finalized, Some(false));
+                assert_eq!(withdrawal_response.data, expected_withdrawals.to_vec());
+            }
+            Err(e) => {
+                println!("{:?}", e);
+                panic!("query failed incorrectly");
+            }
+        }
+
+        self
+    }
+
+    pub async fn test_get_expected_withdrawals_pre_capella(self) -> Self {
+        let state_id = CoreStateId::Head;
+
+        let result = self.client.get_expected_withdrawals(&state_id).await;
+
+        match result {
+            Err(e) => {
+                assert_eq!(e.status().unwrap(), 400);
+            }
+            _ => panic!("query did not fail correctly"),
+        }
+
+        self
+    }
+
     pub async fn test_get_events_altair(self) -> Self {
         let topics = vec![EventTopic::ContributionAndProof];
         let mut events_future = self
@@ -5245,3 +5312,37 @@ async fn optimistic_responses() {
         .test_check_optimistic_responses()
         .await;
 }
+
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+async fn expected_withdrawals_invalid_pre_capella() {
+    let mut config = ApiTesterConfig::default();
+    config.spec.altair_fork_epoch = Some(Epoch::new(0));
+    ApiTester::new_from_config(config)
+        .await
+        .test_get_expected_withdrawals_pre_capella()
+        .await;
+}
+
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+async fn expected_withdrawals_invalid_state() {
+    let mut config = ApiTesterConfig::default();
+    config.spec.altair_fork_epoch = Some(Epoch::new(0));
+    config.spec.bellatrix_fork_epoch = Some(Epoch::new(0));
+    config.spec.capella_fork_epoch = Some(Epoch::new(0));
+    ApiTester::new_from_config(config)
+        .await
+        .test_get_expected_withdrawals_invalid_state()
+        .await;
+}
+
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+async fn expected_withdrawals_valid_capella() {
+    let mut config = ApiTesterConfig::default();
+    config.spec.altair_fork_epoch = Some(Epoch::new(0));
+    config.spec.bellatrix_fork_epoch = Some(Epoch::new(0));
+    config.spec.capella_fork_epoch = Some(Epoch::new(0));
+    ApiTester::new_from_config(config)
+        .await
+        .test_get_expected_withdrawals_capella()
+        .await;
+}
@@ -647,7 +647,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
             if subnet_queries.len() == MAX_SUBNETS_IN_QUERY || self.queued_queries.is_empty() {
                 // This query is for searching for peers of a particular subnet
                 // Drain subnet_queries so we can re-use it as we continue to process the queue
-                let grouped_queries: Vec<SubnetQuery> = subnet_queries.drain(..).collect();
+                let grouped_queries: Vec<SubnetQuery> = std::mem::take(&mut subnet_queries);
                 self.start_subnet_query(grouped_queries);
                 processed = true;
             }
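`std::mem::take` swaps the vector for `Vec::default()` and returns the old contents in O(1), whereas `drain(..).collect()` moved every element into a freshly allocated `Vec`. A sketch:

    // Sketch: `mem::take` replaces the value with its `Default` and hands back
    // the original, so the buffer's allocation moves out wholesale instead of
    // each element being drained into a newly collected `Vec`.
    fn main() {
        let mut queue = vec!["q1", "q2", "q3"];
        let grouped: Vec<&str> = std::mem::take(&mut queue);
        assert_eq!(grouped, ["q1", "q2", "q3"]);
        assert!(queue.is_empty()); // still usable for the next batch
    }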
@@ -981,6 +981,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {

         macro_rules! prune_peers {
             ($filter: expr) => {
+                let filter = $filter;
                 for (peer_id, info) in self
                     .network_globals
                     .peers
@@ -988,7 +989,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
                     .worst_connected_peers()
                     .iter()
                     .filter(|(_, info)| {
-                        !info.has_future_duty() && !info.is_trusted() && $filter(*info)
+                        !info.has_future_duty() && !info.is_trusted() && filter(*info)
                     })
                 {
                     if peers_to_prune.len()
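Binding `$filter` to a local before the loop evaluates the macro argument once, and a closure literal passed as `$filter` is no longer expanded into an immediately-invoked closure (which clippy flags as `redundant_closure_call`). The `field!` macro hunks further down apply the same fix. A sketch with a hypothetical macro:

    // Sketch of the macro change: bind the expression once, then call the
    // binding, instead of pasting `$filter(...)` inline (a closure literal
    // would otherwise expand to `(|x| ...)(x)`, which clippy flags).
    macro_rules! prune_if {
        ($filter:expr) => {{
            let filter = $filter; // evaluated once here
            (0..10).filter(|x| filter(*x)).collect::<Vec<u32>>()
        }};
    }

    fn main() {
        let pruned = prune_if!(|x: u32| x % 2 == 0);
        assert_eq!(pruned, [0, 2, 4, 6, 8]);
    }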
@@ -279,9 +279,10 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
         _seen_timestamp: Duration,
         process_type: BlockProcessType,
     ) {
-        let Some(slot) = blobs.iter().find_map(|blob|{
-            blob.as_ref().map(|blob| blob.slot)
-        }) else {
+        let Some(slot) = blobs
+            .iter()
+            .find_map(|blob| blob.as_ref().map(|blob| blob.slot))
+        else {
             return;
         };

@@ -149,12 +149,14 @@ pub trait RequestState<L: Lookup, T: BeaconChainTypes> {
             .copied()
             .map(PeerShouldHave::BlockAndBlobs);

-        let Some(peer_id) = available_peer_opt.or_else(||request_state
+        let Some(peer_id) = available_peer_opt.or_else(|| {
+            request_state
                 .potential_peers
                 .iter()
                 .choose(&mut rand::thread_rng())
                 .copied()
-                .map(PeerShouldHave::Neither)) else {
+                .map(PeerShouldHave::Neither)
+        }) else {
             return Err(LookupRequestError::NoPeers);
         };
         request_state.used_peers.insert(peer_id.to_peer_id());
@@ -597,7 +597,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
             if response.is_some() {
                 debug!(self.log, "Response for a parent lookup request that was not found"; "peer_id" => %peer_id);
             }
-            return
+            return;
         };

         match self.parent_lookup_response_inner::<R>(
@@ -781,7 +781,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
                 "peer_id" => %peer_id,
                 "error" => msg
             );
-            return
+            return;
         };
         R::request_state_mut(&mut parent_lookup.current_parent_request)
             .register_failure_downloading();
@@ -852,7 +852,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
         let request_state = R::request_state_mut(&mut lookup);

         let Ok(peer_id) = request_state.get_state().processing_peer() else {
-            return
+            return;
         };
         debug!(
             self.log,
@@ -1233,9 +1233,8 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
         mut parent_lookup: ParentLookup<T>,
     ) {
         // We should always have a block peer.
-        let Ok(block_peer_id) =
-            parent_lookup.block_processing_peer() else {
-            return
+        let Ok(block_peer_id) = parent_lookup.block_processing_peer() else {
+            return;
         };
         let block_peer_id = block_peer_id.to_peer_id();

@@ -1301,15 +1300,13 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
         let Some(id) = self
             .single_block_lookups
             .iter()
-            .find_map(|(id, req)|
-                (req.block_root() == chain_hash).then_some(*id)) else {
+            .find_map(|(id, req)| (req.block_root() == chain_hash).then_some(*id))
+        else {
             warn!(self.log, "No id found for single block lookup"; "chain_hash" => %chain_hash);
             return;
         };

-        let Some(lookup) = self
-            .single_block_lookups
-            .get_mut(&id) else {
+        let Some(lookup) = self.single_block_lookups.get_mut(&id) else {
             warn!(self.log, "No id found for single block lookup"; "chain_hash" => %chain_hash);
             return;
         };
@@ -136,7 +136,7 @@ impl<L: Lookup, T: BeaconChainTypes> SingleBlockLookup<L, T> {
     pub fn get_cached_child_block(&self) -> CachedChild<T::EthSpec> {
         if let Some(components) = self.cached_child_components.as_ref() {
             let Some(block) = components.downloaded_block.as_ref() else {
-                return CachedChild::DownloadIncomplete
+                return CachedChild::DownloadIncomplete;
             };

             if !self.missing_blob_ids().is_empty() {
@@ -299,7 +299,8 @@ macro_rules! field {
             }

             fn update_pattern(spec: &ChainSpec) -> UpdatePattern {
-                $update_pattern(spec)
+                let update_pattern = $update_pattern;
+                update_pattern(spec)
             }

             fn get_value(
@@ -307,7 +308,8 @@ macro_rules! field {
                 vindex: u64,
                 spec: &ChainSpec,
             ) -> Result<Self::Value, ChunkError> {
-                $get_value(state, vindex, spec)
+                let get_value = $get_value;
+                get_value(state, vindex, spec)
             }

             fn is_fixed_length() -> bool {
@@ -167,7 +167,7 @@ impl<E: EthSpec> KeyValueStore<E> for LevelDB<E> {
             )
         };

-        for (start_key, end_key) in vec![
+        for (start_key, end_key) in [
             endpoints(DBColumn::BeaconStateTemporary),
             endpoints(DBColumn::BeaconState),
         ] {
@@ -1289,6 +1289,23 @@ impl BeaconNodeHttpClient {
         Ok(())
     }

+    // GET builder/states/{state_id}/expected_withdrawals
+    pub async fn get_expected_withdrawals(
+        &self,
+        state_id: &StateId,
+    ) -> Result<ExecutionOptimisticFinalizedResponse<Vec<Withdrawal>>, Error> {
+        let mut path = self.eth_path(V1)?;
+
+        path.path_segments_mut()
+            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+            .push("builder")
+            .push("states")
+            .push(&state_id.to_string())
+            .push("expected_withdrawals");
+
+        self.get(path).await
+    }
+
     /// `POST validator/contribution_and_proofs`
     pub async fn post_validator_contribution_and_proofs<T: EthSpec>(
         &self,
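A hedged sketch of how a caller might exercise the new client method; the node URL, the timeout, and the printed fields are assumptions for illustration, not part of the diff:

    // Hypothetical caller of the new endpoint; localhost:5052 is an assumption.
    use eth2::types::StateId;
    use eth2::{BeaconNodeHttpClient, Timeouts};
    use sensitive_url::SensitiveUrl;
    use std::time::Duration;

    #[tokio::main]
    async fn main() -> Result<(), eth2::Error> {
        let url = SensitiveUrl::parse("http://localhost:5052").unwrap();
        let client = BeaconNodeHttpClient::new(url, Timeouts::set_all(Duration::from_secs(12)));

        // GET /eth/v1/builder/states/head/expected_withdrawals
        let response = client.get_expected_withdrawals(&StateId::Head).await?;
        for w in response.data {
            println!("validator {} -> {} gwei", w.validator_index, w.amount);
        }
        Ok(())
    }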
@@ -586,6 +586,11 @@ pub struct SyncingData {
     pub sync_distance: Slot,
 }

+#[derive(Serialize, Deserialize)]
+pub struct ExpectedWithdrawalsQuery {
+    pub proposal_slot: Option<Slot>,
+}
+
 #[derive(Clone, PartialEq, Debug, Deserialize)]
 #[serde(try_from = "String", bound = "T: FromStr")]
 pub struct QueryVec<T: FromStr> {
@@ -28,10 +28,10 @@ mod test {

     #[test]
     fn unsigned_sum_small() {
-        let v = vec![400u64, 401, 402, 403, 404, 405, 406];
+        let arr = [400u64, 401, 402, 403, 404, 405, 406];
         assert_eq!(
-            v.iter().copied().safe_sum().unwrap(),
-            v.iter().copied().sum()
+            arr.iter().copied().safe_sum().unwrap(),
+            arr.iter().copied().sum()
         );
     }

@@ -61,10 +61,10 @@ mod test {

     #[test]
     fn signed_sum_almost_overflow() {
-        let v = vec![i64::MIN, 1, -1i64, i64::MAX, i64::MAX, 1];
+        let arr = [i64::MIN, 1, -1i64, i64::MAX, i64::MAX, 1];
         assert_eq!(
-            v.iter().copied().safe_sum().unwrap(),
-            v.iter().copied().sum()
+            arr.iter().copied().safe_sum().unwrap(),
+            arr.iter().copied().sum()
         );
     }
 }
@@ -129,7 +129,10 @@ impl<T: EthSpec> BlobSidecar<T> {
         // Ensure that the blob is canonical by ensuring that
         // each field element contained in the blob is < BLS_MODULUS
         for i in 0..T::Kzg::FIELD_ELEMENTS_PER_BLOB {
-            let Some(byte) = blob_bytes.get_mut(i.checked_mul(T::Kzg::BYTES_PER_FIELD_ELEMENT).ok_or("overflow".to_string())?) else {
+            let Some(byte) = blob_bytes.get_mut(
+                i.checked_mul(T::Kzg::BYTES_PER_FIELD_ELEMENT)
+                    .ok_or("overflow".to_string())?,
+            ) else {
                 return Err(format!("blob byte index out of bounds: {:?}", i));
             };
             *byte = 0;
@@ -27,10 +27,7 @@ impl<Pub> Copy for GenericPublicKeyBytes<Pub> {}

 impl<Pub> Clone for GenericPublicKeyBytes<Pub> {
     fn clone(&self) -> Self {
-        Self {
-            bytes: self.bytes,
-            _phantom: PhantomData,
-        }
+        *self
     }
 }
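Since `GenericPublicKeyBytes` is `Copy` (visible in the hunk header), the canonical `Clone` body is simply `*self`; recent clippy releases nudge `Copy` types toward this form. A sketch with a hypothetical stand-in type, keeping the manual `Copy`/`Clone` impls that avoid imposing a bound on the phantom parameter:

    use std::marker::PhantomData;

    // Hypothetical stand-in for the real type; for a `Copy` type, `Clone`
    // can just copy via `*self`, regardless of the phantom parameter.
    struct Bytes48<P> {
        bytes: [u8; 48],
        _phantom: PhantomData<P>,
    }

    // Manual impls avoid the `P: Copy`/`P: Clone` bounds a derive would add.
    impl<P> Copy for Bytes48<P> {}

    impl<P> Clone for Bytes48<P> {
        fn clone(&self) -> Self {
            *self // canonical body for a `Copy` type
        }
    }

    fn main() {
        let a: Bytes48<()> = Bytes48 { bytes: [0u8; 48], _phantom: PhantomData };
        let b = a.clone();
        assert_eq!(a.bytes, b.bytes);
    }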