Appease Clippy 1.68 and refactor http_api (#4068)

## Proposed Changes

Two tiny updates to satisfy Clippy 1.68

Plus refactoring of the `http_api` into less complex types so the compiler can chew and digest them more easily.

Co-authored-by: Michael Sproul <michael@sigmaprime.io>
This commit is contained in:
Michael Sproul 2023-03-13 01:40:03 +00:00
parent 4a1c0c96be
commit 90cef1db86
17 changed files with 164 additions and 133 deletions

View File

@ -1,4 +1,3 @@
#![recursion_limit = "128"] // For lazy-static
pub mod attestation_rewards; pub mod attestation_rewards;
pub mod attestation_verification; pub mod attestation_verification;
mod attester_cache; mod attester_cache;

View File

@ -1,4 +1,3 @@
#![recursion_limit = "256"]
//! This crate contains a HTTP server which serves the endpoints listed here: //! This crate contains a HTTP server which serves the endpoints listed here:
//! //!
//! https://github.com/ethereum/beacon-APIs //! https://github.com/ethereum/beacon-APIs
@ -71,7 +70,8 @@ use warp::Reply;
use warp::{http::Response, Filter}; use warp::{http::Response, Filter};
use warp_utils::{ use warp_utils::{
query::multi_key_query, query::multi_key_query,
task::{blocking_json_task, blocking_task}, task::{blocking_json_task, blocking_response_task},
uor::UnifyingOrFilter,
}; };
const API_PREFIX: &str = "eth"; const API_PREFIX: &str = "eth";
@ -1125,7 +1125,7 @@ pub fn serve<T: BeaconChainTypes>(
log: Logger| async move { log: Logger| async move {
publish_blocks::publish_block(None, block, chain, &network_tx, log) publish_blocks::publish_block(None, block, chain, &network_tx, log)
.await .await
.map(|()| warp::reply()) .map(|()| warp::reply().into_response())
}, },
); );
@ -1149,7 +1149,7 @@ pub fn serve<T: BeaconChainTypes>(
log: Logger| async move { log: Logger| async move {
publish_blocks::publish_blinded_block(block, chain, &network_tx, log) publish_blocks::publish_blinded_block(block, chain, &network_tx, log)
.await .await
.map(|()| warp::reply()) .map(|()| warp::reply().into_response())
}, },
); );
@ -1255,7 +1255,7 @@ pub fn serve<T: BeaconChainTypes>(
|block_id: BlockId, |block_id: BlockId,
chain: Arc<BeaconChain<T>>, chain: Arc<BeaconChain<T>>,
accept_header: Option<api_types::Accept>| { accept_header: Option<api_types::Accept>| {
blocking_task(move || { blocking_response_task(move || {
let (block, execution_optimistic) = block_id.blinded_block(&chain)?; let (block, execution_optimistic) = block_id.blinded_block(&chain)?;
let fork_name = block let fork_name = block
.fork_name(&chain.spec) .fork_name(&chain.spec)
@ -1767,7 +1767,7 @@ pub fn serve<T: BeaconChainTypes>(
.and(eth1_service_filter.clone()) .and(eth1_service_filter.clone())
.and_then( .and_then(
|accept_header: Option<api_types::Accept>, eth1_service: eth1::Service| { |accept_header: Option<api_types::Accept>, eth1_service: eth1::Service| {
blocking_task(move || match accept_header { blocking_response_task(move || match accept_header {
Some(api_types::Accept::Json) | None => { Some(api_types::Accept::Json) | None => {
let snapshot = eth1_service.get_deposit_snapshot(); let snapshot = eth1_service.get_deposit_snapshot();
Ok( Ok(
@ -1986,7 +1986,7 @@ pub fn serve<T: BeaconChainTypes>(
state_id: StateId, state_id: StateId,
accept_header: Option<api_types::Accept>, accept_header: Option<api_types::Accept>,
chain: Arc<BeaconChain<T>>| { chain: Arc<BeaconChain<T>>| {
blocking_task(move || match accept_header { blocking_response_task(move || match accept_header {
Some(api_types::Accept::Ssz) => { Some(api_types::Accept::Ssz) => {
// We can ignore the optimistic status for the "fork" since it's a // We can ignore the optimistic status for the "fork" since it's a
// specification constant that doesn't change across competing heads of the // specification constant that doesn't change across competing heads of the
@ -1999,7 +1999,9 @@ pub fn serve<T: BeaconChainTypes>(
.status(200) .status(200)
.header("Content-Type", "application/octet-stream") .header("Content-Type", "application/octet-stream")
.body(state.as_ssz_bytes().into()) .body(state.as_ssz_bytes().into())
.map(|resp| add_consensus_version_header(resp, fork_name)) .map(|resp: warp::reply::Response| {
add_consensus_version_header(resp, fork_name)
})
.map_err(|e| { .map_err(|e| {
warp_utils::reject::custom_server_error(format!( warp_utils::reject::custom_server_error(format!(
"failed to create response: {}", "failed to create response: {}",
@ -2162,7 +2164,7 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::path::end()) .and(warp::path::end())
.and(network_globals.clone()) .and(network_globals.clone())
.and_then(|network_globals: Arc<NetworkGlobals<T::EthSpec>>| { .and_then(|network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
blocking_task(move || match *network_globals.sync_state.read() { blocking_response_task(move || match *network_globals.sync_state.read() {
SyncState::SyncingFinalized { .. } SyncState::SyncingFinalized { .. }
| SyncState::SyncingHead { .. } | SyncState::SyncingHead { .. }
| SyncState::SyncTransition | SyncState::SyncTransition
@ -2426,7 +2428,7 @@ pub fn serve<T: BeaconChainTypes>(
.map_err(inconsistent_fork_rejection)?; .map_err(inconsistent_fork_rejection)?;
fork_versioned_response(endpoint_version, fork_name, block) fork_versioned_response(endpoint_version, fork_name, block)
.map(|response| warp::reply::json(&response)) .map(|response| warp::reply::json(&response).into_response())
}, },
); );
@ -2483,7 +2485,7 @@ pub fn serve<T: BeaconChainTypes>(
// Pose as a V2 endpoint so we return the fork `version`. // Pose as a V2 endpoint so we return the fork `version`.
fork_versioned_response(V2, fork_name, block) fork_versioned_response(V2, fork_name, block)
.map(|response| warp::reply::json(&response)) .map(|response| warp::reply::json(&response).into_response())
}, },
); );
@ -2856,7 +2858,7 @@ pub fn serve<T: BeaconChainTypes>(
)) ))
})?; })?;
Ok::<_, warp::reject::Rejection>(warp::reply::json(&())) Ok::<_, warp::reject::Rejection>(warp::reply::json(&()).into_response())
}, },
); );
@ -2965,7 +2967,7 @@ pub fn serve<T: BeaconChainTypes>(
builder builder
.post_builder_validators(&filtered_registration_data) .post_builder_validators(&filtered_registration_data)
.await .await
.map(|resp| warp::reply::json(&resp)) .map(|resp| warp::reply::json(&resp).into_response())
.map_err(|e| { .map_err(|e| {
warn!( warn!(
log, log,
@ -3227,7 +3229,7 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::path::end()) .and(warp::path::end())
.and(chain_filter.clone()) .and(chain_filter.clone())
.and_then(|chain: Arc<BeaconChain<T>>| { .and_then(|chain: Arc<BeaconChain<T>>| {
blocking_task(move || { blocking_response_task(move || {
Ok::<_, warp::Rejection>(warp::reply::json(&api_types::GenericResponseRef::from( Ok::<_, warp::Rejection>(warp::reply::json(&api_types::GenericResponseRef::from(
chain chain
.canonical_head .canonical_head
@ -3346,7 +3348,7 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::path::end()) .and(warp::path::end())
.and(chain_filter.clone()) .and(chain_filter.clone())
.and_then(|state_id: StateId, chain: Arc<BeaconChain<T>>| { .and_then(|state_id: StateId, chain: Arc<BeaconChain<T>>| {
blocking_task(move || { blocking_response_task(move || {
// This debug endpoint provides no indication of optimistic status. // This debug endpoint provides no indication of optimistic status.
let (state, _execution_optimistic) = state_id.state(&chain)?; let (state, _execution_optimistic) = state_id.state(&chain)?;
Response::builder() Response::builder()
@ -3482,9 +3484,10 @@ pub fn serve<T: BeaconChainTypes>(
.and(chain_filter.clone()) .and(chain_filter.clone())
.and_then(|chain: Arc<BeaconChain<T>>| async move { .and_then(|chain: Arc<BeaconChain<T>>| async move {
let merge_readiness = chain.check_merge_readiness().await; let merge_readiness = chain.check_merge_readiness().await;
Ok::<_, warp::reject::Rejection>(warp::reply::json(&api_types::GenericResponse::from( Ok::<_, warp::reject::Rejection>(
merge_readiness, warp::reply::json(&api_types::GenericResponse::from(merge_readiness))
))) .into_response(),
)
}); });
let get_events = eth_v1 let get_events = eth_v1
@ -3495,7 +3498,7 @@ pub fn serve<T: BeaconChainTypes>(
.and_then( .and_then(
|topics_res: Result<api_types::EventQuery, warp::Rejection>, |topics_res: Result<api_types::EventQuery, warp::Rejection>,
chain: Arc<BeaconChain<T>>| { chain: Arc<BeaconChain<T>>| {
blocking_task(move || { blocking_response_task(move || {
let topics = topics_res?; let topics = topics_res?;
// for each topic subscribed spawn a new subscription // for each topic subscribed spawn a new subscription
let mut receivers = Vec::with_capacity(topics.topics.len()); let mut receivers = Vec::with_capacity(topics.topics.len());
@ -3562,108 +3565,110 @@ pub fn serve<T: BeaconChainTypes>(
); );
// Define the ultimate set of routes that will be provided to the server. // Define the ultimate set of routes that will be provided to the server.
// Use `uor` rather than `or` in order to simplify types (see `UnifyingOrFilter`).
let routes = warp::get() let routes = warp::get()
.and( .and(
get_beacon_genesis get_beacon_genesis
.boxed() .uor(get_beacon_state_root)
.or(get_beacon_state_root.boxed()) .uor(get_beacon_state_fork)
.or(get_beacon_state_fork.boxed()) .uor(get_beacon_state_finality_checkpoints)
.or(get_beacon_state_finality_checkpoints.boxed()) .uor(get_beacon_state_validator_balances)
.or(get_beacon_state_validator_balances.boxed()) .uor(get_beacon_state_validators_id)
.or(get_beacon_state_validators_id.boxed()) .uor(get_beacon_state_validators)
.or(get_beacon_state_validators.boxed()) .uor(get_beacon_state_committees)
.or(get_beacon_state_committees.boxed()) .uor(get_beacon_state_sync_committees)
.or(get_beacon_state_sync_committees.boxed()) .uor(get_beacon_state_randao)
.or(get_beacon_state_randao.boxed()) .uor(get_beacon_headers)
.or(get_beacon_headers.boxed()) .uor(get_beacon_headers_block_id)
.or(get_beacon_headers_block_id.boxed()) .uor(get_beacon_block)
.or(get_beacon_block.boxed()) .uor(get_beacon_block_attestations)
.or(get_beacon_block_attestations.boxed()) .uor(get_beacon_blinded_block)
.or(get_beacon_blinded_block.boxed()) .uor(get_beacon_block_root)
.or(get_beacon_block_root.boxed()) .uor(get_beacon_pool_attestations)
.or(get_beacon_pool_attestations.boxed()) .uor(get_beacon_pool_attester_slashings)
.or(get_beacon_pool_attester_slashings.boxed()) .uor(get_beacon_pool_proposer_slashings)
.or(get_beacon_pool_proposer_slashings.boxed()) .uor(get_beacon_pool_voluntary_exits)
.or(get_beacon_pool_voluntary_exits.boxed()) .uor(get_beacon_pool_bls_to_execution_changes)
.or(get_beacon_pool_bls_to_execution_changes.boxed()) .uor(get_beacon_deposit_snapshot)
.or(get_beacon_deposit_snapshot.boxed()) .uor(get_beacon_rewards_blocks)
.or(get_beacon_rewards_blocks.boxed()) .uor(get_config_fork_schedule)
.or(get_config_fork_schedule.boxed()) .uor(get_config_spec)
.or(get_config_spec.boxed()) .uor(get_config_deposit_contract)
.or(get_config_deposit_contract.boxed()) .uor(get_debug_beacon_states)
.or(get_debug_beacon_states.boxed()) .uor(get_debug_beacon_heads)
.or(get_debug_beacon_heads.boxed()) .uor(get_node_identity)
.or(get_node_identity.boxed()) .uor(get_node_version)
.or(get_node_version.boxed()) .uor(get_node_syncing)
.or(get_node_syncing.boxed()) .uor(get_node_health)
.or(get_node_health.boxed()) .uor(get_node_peers_by_id)
.or(get_node_peers_by_id.boxed()) .uor(get_node_peers)
.or(get_node_peers.boxed()) .uor(get_node_peer_count)
.or(get_node_peer_count.boxed()) .uor(get_validator_duties_proposer)
.or(get_validator_duties_proposer.boxed()) .uor(get_validator_blocks)
.or(get_validator_blocks.boxed()) .uor(get_validator_blinded_blocks)
.or(get_validator_blinded_blocks.boxed()) .uor(get_validator_attestation_data)
.or(get_validator_attestation_data.boxed()) .uor(get_validator_aggregate_attestation)
.or(get_validator_aggregate_attestation.boxed()) .uor(get_validator_sync_committee_contribution)
.or(get_validator_sync_committee_contribution.boxed()) .uor(get_lighthouse_health)
.or(get_lighthouse_health.boxed()) .uor(get_lighthouse_ui_health)
.or(get_lighthouse_ui_health.boxed()) .uor(get_lighthouse_ui_validator_count)
.or(get_lighthouse_ui_validator_count.boxed()) .uor(get_lighthouse_syncing)
.or(get_lighthouse_syncing.boxed()) .uor(get_lighthouse_nat)
.or(get_lighthouse_nat.boxed()) .uor(get_lighthouse_peers)
.or(get_lighthouse_peers.boxed()) .uor(get_lighthouse_peers_connected)
.or(get_lighthouse_peers_connected.boxed()) .uor(get_lighthouse_proto_array)
.or(get_lighthouse_proto_array.boxed()) .uor(get_lighthouse_validator_inclusion_global)
.or(get_lighthouse_validator_inclusion_global.boxed()) .uor(get_lighthouse_validator_inclusion)
.or(get_lighthouse_validator_inclusion.boxed()) .uor(get_lighthouse_eth1_syncing)
.or(get_lighthouse_eth1_syncing.boxed()) .uor(get_lighthouse_eth1_block_cache)
.or(get_lighthouse_eth1_block_cache.boxed()) .uor(get_lighthouse_eth1_deposit_cache)
.or(get_lighthouse_eth1_deposit_cache.boxed()) .uor(get_lighthouse_beacon_states_ssz)
.or(get_lighthouse_beacon_states_ssz.boxed()) .uor(get_lighthouse_staking)
.or(get_lighthouse_staking.boxed()) .uor(get_lighthouse_database_info)
.or(get_lighthouse_database_info.boxed()) .uor(get_lighthouse_block_rewards)
.or(get_lighthouse_block_rewards.boxed()) .uor(get_lighthouse_attestation_performance)
.or(get_lighthouse_attestation_performance.boxed()) .uor(get_lighthouse_block_packing_efficiency)
.or(get_lighthouse_block_packing_efficiency.boxed()) .uor(get_lighthouse_merge_readiness)
.or(get_lighthouse_merge_readiness.boxed()) .uor(get_events)
.or(get_events.boxed())
.recover(warp_utils::reject::handle_rejection), .recover(warp_utils::reject::handle_rejection),
) )
.boxed() .boxed()
.or(warp::post().and( .uor(
warp::post().and(
post_beacon_blocks post_beacon_blocks
.boxed() .uor(post_beacon_blinded_blocks)
.or(post_beacon_blinded_blocks.boxed()) .uor(post_beacon_pool_attestations)
.or(post_beacon_pool_attestations.boxed()) .uor(post_beacon_pool_attester_slashings)
.or(post_beacon_pool_attester_slashings.boxed()) .uor(post_beacon_pool_proposer_slashings)
.or(post_beacon_pool_proposer_slashings.boxed()) .uor(post_beacon_pool_voluntary_exits)
.or(post_beacon_pool_voluntary_exits.boxed()) .uor(post_beacon_pool_sync_committees)
.or(post_beacon_pool_sync_committees.boxed()) .uor(post_beacon_pool_bls_to_execution_changes)
.or(post_beacon_pool_bls_to_execution_changes.boxed()) .uor(post_beacon_rewards_attestations)
.or(post_beacon_rewards_attestations.boxed()) .uor(post_beacon_rewards_sync_committee)
.or(post_beacon_rewards_sync_committee.boxed()) .uor(post_validator_duties_attester)
.or(post_validator_duties_attester.boxed()) .uor(post_validator_duties_sync)
.or(post_validator_duties_sync.boxed()) .uor(post_validator_aggregate_and_proofs)
.or(post_validator_aggregate_and_proofs.boxed()) .uor(post_validator_contribution_and_proofs)
.or(post_validator_contribution_and_proofs.boxed()) .uor(post_validator_beacon_committee_subscriptions)
.or(post_validator_beacon_committee_subscriptions.boxed()) .uor(post_validator_sync_committee_subscriptions)
.or(post_validator_sync_committee_subscriptions.boxed()) .uor(post_validator_prepare_beacon_proposer)
.or(post_validator_prepare_beacon_proposer.boxed()) .uor(post_validator_register_validator)
.or(post_validator_register_validator.boxed()) .uor(post_lighthouse_liveness)
.or(post_lighthouse_liveness.boxed()) .uor(post_lighthouse_database_reconstruct)
.or(post_lighthouse_database_reconstruct.boxed()) .uor(post_lighthouse_database_historical_blocks)
.or(post_lighthouse_database_historical_blocks.boxed()) .uor(post_lighthouse_block_rewards)
.or(post_lighthouse_block_rewards.boxed()) .uor(post_lighthouse_ui_validator_metrics)
.or(post_lighthouse_ui_validator_metrics.boxed()) .uor(post_lighthouse_ui_validator_info)
.or(post_lighthouse_ui_validator_info.boxed())
.recover(warp_utils::reject::handle_rejection), .recover(warp_utils::reject::handle_rejection),
)) ),
)
.recover(warp_utils::reject::handle_rejection) .recover(warp_utils::reject::handle_rejection)
.with(slog_logging(log.clone())) .with(slog_logging(log.clone()))
.with(prometheus_metrics()) .with(prometheus_metrics())
// Add a `Server` header. // Add a `Server` header.
.map(|reply| warp::reply::with_header(reply, "Server", &version_with_platform())) .map(|reply| warp::reply::with_header(reply, "Server", &version_with_platform()))
.with(cors_builder.build()); .with(cors_builder.build())
.boxed();
let http_socket: SocketAddr = SocketAddr::new(config.listen_addr, config.listen_port); let http_socket: SocketAddr = SocketAddr::new(config.listen_addr, config.listen_port);
let http_server: HttpServer = match config.tls_config { let http_server: HttpServer = match config.tls_config {

View File

@ -4,7 +4,7 @@ use serde::Serialize;
use types::{ use types::{
ExecutionOptimisticForkVersionedResponse, ForkName, ForkVersionedResponse, InconsistentFork, ExecutionOptimisticForkVersionedResponse, ForkName, ForkVersionedResponse, InconsistentFork,
}; };
use warp::reply::{self, Reply, WithHeader}; use warp::reply::{self, Reply, Response};
pub const V1: EndpointVersion = EndpointVersion(1); pub const V1: EndpointVersion = EndpointVersion(1);
pub const V2: EndpointVersion = EndpointVersion(2); pub const V2: EndpointVersion = EndpointVersion(2);
@ -48,8 +48,8 @@ pub fn execution_optimistic_fork_versioned_response<T: Serialize>(
} }
/// Add the `Eth-Consensus-Version` header to a response. /// Add the `Eth-Consensus-Version` header to a response.
pub fn add_consensus_version_header<T: Reply>(reply: T, fork_name: ForkName) -> WithHeader<T> { pub fn add_consensus_version_header<T: Reply>(reply: T, fork_name: ForkName) -> Response {
reply::with_header(reply, CONSENSUS_VERSION_HEADER, fork_name.to_string()) reply::with_header(reply, CONSENSUS_VERSION_HEADER, fork_name.to_string()).into_response()
} }
pub fn inconsistent_fork_rejection(error: InconsistentFork) -> warp::reject::Rejection { pub fn inconsistent_fork_rejection(error: InconsistentFork) -> warp::reject::Rejection {

View File

@ -1,5 +1,4 @@
#![cfg(not(debug_assertions))] // Tests are too slow in debug. #![cfg(not(debug_assertions))] // Tests are too slow in debug.
#![recursion_limit = "256"]
pub mod common; pub mod common;
pub mod fork_tests; pub mod fork_tests;

View File

@ -1,5 +1,4 @@
#![cfg(test)] #![cfg(test)]
#![recursion_limit = "512"]
use beacon_chain::StateSkipConfig; use beacon_chain::StateSkipConfig;
use node_test_rig::{ use node_test_rig::{

View File

@ -1,4 +1,3 @@
#![recursion_limit = "256"]
extern crate proc_macro; extern crate proc_macro;
use proc_macro::TokenStream; use proc_macro::TokenStream;

View File

@ -6,3 +6,4 @@ pub mod metrics;
pub mod query; pub mod query;
pub mod reject; pub mod reject;
pub mod task; pub mod task;
pub mod uor;

View File

@ -1,4 +1,5 @@
use serde::Serialize; use serde::Serialize;
use warp::reply::{Reply, Response};
/// A convenience wrapper around `blocking_task`. /// A convenience wrapper around `blocking_task`.
pub async fn blocking_task<F, T>(func: F) -> Result<T, warp::Rejection> pub async fn blocking_task<F, T>(func: F) -> Result<T, warp::Rejection>
@ -8,16 +9,29 @@ where
{ {
tokio::task::spawn_blocking(func) tokio::task::spawn_blocking(func)
.await .await
.unwrap_or_else(|_| Err(warp::reject::reject())) // This should really be a 500 .unwrap_or_else(|_| Err(warp::reject::reject()))
}
/// A convenience wrapper around `blocking_task` that returns a `warp::reply::Response`.
///
/// Using this method consistently makes it possible to simplify types using `.unify()` or `.uor()`.
pub async fn blocking_response_task<F, T>(func: F) -> Result<Response, warp::Rejection>
where
F: FnOnce() -> Result<T, warp::Rejection> + Send + 'static,
T: Reply + Send + 'static,
{
blocking_task(func).await.map(Reply::into_response)
} }
/// A convenience wrapper around `blocking_task` for use with `warp` JSON responses. /// A convenience wrapper around `blocking_task` for use with `warp` JSON responses.
pub async fn blocking_json_task<F, T>(func: F) -> Result<warp::reply::Json, warp::Rejection> pub async fn blocking_json_task<F, T>(func: F) -> Result<Response, warp::Rejection>
where where
F: FnOnce() -> Result<T, warp::Rejection> + Send + 'static, F: FnOnce() -> Result<T, warp::Rejection> + Send + 'static,
T: Serialize + Send + 'static, T: Serialize + Send + 'static,
{ {
blocking_task(func) blocking_response_task(|| {
let response = func()?;
Ok(warp::reply::json(&response))
})
.await .await
.map(|resp| warp::reply::json(&resp))
} }

View File

@ -0,0 +1,25 @@
use warp::{filters::BoxedFilter, Filter, Rejection};
/// Mixin trait for `Filter` providing the unifying-or method.
///
/// Blanket-implemented (below) for every filter whose error type is
/// `Rejection` and whose extracted tuple is `Send`, so `.uor(...)` is
/// available anywhere a plain `.or(...)` would be.
pub trait UnifyingOrFilter: Filter<Error = Rejection> + Sized + Send + Sync + 'static
where
Self::Extract: Send,
{
/// Unifying `or`.
///
/// This is a shorthand for `self.or(other).unify().boxed()`, which is useful because it keeps
/// the filter type simple and prevents type-checker explosions.
///
/// Unlike plain `or`, both sides must extract exactly the same type
/// (`F::Extract = Self::Extract`) — that is what allows `unify()` — and
/// `boxed()` then erases the combined concrete type behind a
/// `BoxedFilter`, so long chains of routes do not blow up compile times.
fn uor<F>(self, other: F) -> BoxedFilter<Self::Extract>
where
F: Filter<Extract = Self::Extract, Error = Rejection> + Clone + Send + Sync + 'static,
{
self.or(other).unify().boxed()
}
}
// Blanket impl: the trait has no required methods, so any filter meeting the
// bounds gets `uor` for free and the impl body is empty.
impl<F> UnifyingOrFilter for F
where
F: Filter<Error = Rejection> + Sized + Send + Sync + 'static,
F::Extract: Send,
{
}

View File

@ -1695,7 +1695,6 @@ mod tests {
fn get_queued_attestations() -> Vec<QueuedAttestation> { fn get_queued_attestations() -> Vec<QueuedAttestation> {
(1..4) (1..4)
.into_iter()
.map(|i| QueuedAttestation { .map(|i| QueuedAttestation {
slot: Slot::new(i), slot: Slot::new(i),
attesting_indices: vec![], attesting_indices: vec![],

View File

@ -1,4 +1,3 @@
#![recursion_limit = "256"]
//! Provides procedural derive macros for the `Encode` and `Decode` traits of the `eth2_ssz` crate. //! Provides procedural derive macros for the `Encode` and `Decode` traits of the `eth2_ssz` crate.
//! //!
//! ## Attributes //! ## Attributes

View File

@ -1,4 +1,3 @@
#![recursion_limit = "256"]
use darling::FromDeriveInput; use darling::FromDeriveInput;
use proc_macro::TokenStream; use proc_macro::TokenStream;
use quote::quote; use quote::quote;

View File

@ -1,6 +1,4 @@
//! Ethereum 2.0 types //! Ethereum 2.0 types
// Required for big type-level numbers
#![recursion_limit = "128"]
// Clippy lint set up // Clippy lint set up
#![cfg_attr( #![cfg_attr(
not(test), not(test),

View File

@ -266,7 +266,7 @@ where
} }
/// Hashes the `self.serialize()` bytes. /// Hashes the `self.serialize()` bytes.
#[allow(clippy::derive_hash_xor_eq)] #[allow(clippy::derived_hash_with_manual_eq)]
impl<Pub, AggPub, Sig, AggSig> Hash for GenericAggregateSignature<Pub, AggPub, Sig, AggSig> impl<Pub, AggPub, Sig, AggSig> Hash for GenericAggregateSignature<Pub, AggPub, Sig, AggSig>
where where
Sig: TSignature<Pub>, Sig: TSignature<Pub>,

View File

@ -1,5 +1,3 @@
#![recursion_limit = "256"]
mod metrics; mod metrics;
use beacon_node::ProductionBeaconNode; use beacon_node::ProductionBeaconNode;

View File

@ -1,4 +1,3 @@
#![recursion_limit = "1024"]
/// This binary runs integration tests between Lighthouse and execution engines. /// This binary runs integration tests between Lighthouse and execution engines.
/// ///
/// It will first attempt to build any supported integration clients, then it will run tests. /// It will first attempt to build any supported integration clients, then it will run tests.

View File

@ -1,5 +1,3 @@
#![recursion_limit = "256"]
//! This crate provides a simulation that creates `n` beacon node and validator clients, each with //! This crate provides a simulation that creates `n` beacon node and validator clients, each with
//! `v` validators. A deposit contract is deployed at the start of the simulation using a local //! `v` validators. A deposit contract is deployed at the start of the simulation using a local
//! `ganache` instance (you must have `ganache` installed and available on your path). All //! `ganache` instance (you must have `ganache` installed and available on your path). All