Merge branch 'unstable' of https://github.com/sigp/lighthouse into merge-unstable-deneb-aug-24

realbigsean 2023-08-24 14:34:32 -04:00
commit f90b190d9a
No known key found for this signature in database
GPG Key ID: BE1B3DB104F6C788
8 changed files with 19 additions and 19 deletions

View File

@@ -841,7 +841,7 @@ mod tests {
             let mut store = $type::default();
             let max_cap = store.max_capacity();
-            let to_skip = vec![1_u64, 3, 4, 5];
+            let to_skip = [1_u64, 3, 4, 5];
             let periods = (0..max_cap * 3)
                 .into_iter()
                 .filter(|i| !to_skip.contains(i))
@@ -1012,7 +1012,7 @@ mod tests {
             let mut store = $type::default();
             let max_cap = store.max_capacity();
-            let to_skip = vec![1_u64, 3, 4, 5];
+            let to_skip = [1_u64, 3, 4, 5];
             let periods = (0..max_cap * 3)
                 .into_iter()
                 .filter(|i| !to_skip.contains(i))
@@ -1121,7 +1121,7 @@ mod tests {
             let mut store = $type::default();
             let max_cap = store.max_capacity();
-            let to_skip = vec![1_u64, 3, 4, 5];
+            let to_skip = [1_u64, 3, 4, 5];
            let periods = (0..max_cap * 3)
                 .into_iter()
                 .filter(|i| !to_skip.contains(i))
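
The three hunks above make the same change: a literal that is only iterated and probed with `contains` becomes a fixed-size array instead of a heap-allocated `Vec` (the pattern clippy's `useless_vec` lint suggests). A minimal standalone sketch of the idea, not Lighthouse code:

// `to_skip` only needs slice methods, so an array avoids the vec![] allocation.
fn main() {
    let to_skip = [1_u64, 3, 4, 5];
    let kept: Vec<u64> = (0..10u64).filter(|i| !to_skip.contains(i)).collect();
    assert_eq!(kept, vec![0, 2, 6, 7, 8, 9]);
}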

View File

@@ -39,7 +39,7 @@ pub fn genesis_deposits(
     Ok(deposit_data
         .into_iter()
-        .zip(proofs.into_iter())
+        .zip(proofs)
         .map(|(data, proof)| (data, proof.into()))
         .map(|(data, proof)| Deposit { proof, data })
         .collect())
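
For reference, `Iterator::zip` accepts any `IntoIterator`, so passing the collection directly is equivalent to calling `.into_iter()` on it first. A small self-contained sketch (hypothetical data, not the genesis code):

fn main() {
    let data = vec!["a", "b", "c"];
    let proofs = vec![1, 2, 3];
    // Same pairs as data.into_iter().zip(proofs.into_iter()), one call shorter.
    let pairs: Vec<(&str, i32)> = data.into_iter().zip(proofs).collect();
    assert_eq!(pairs, vec![("a", 1), ("b", 2), ("c", 3)]);
}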

View File

@@ -647,7 +647,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
             if subnet_queries.len() == MAX_SUBNETS_IN_QUERY || self.queued_queries.is_empty() {
                 // This query is for searching for peers of a particular subnet
                 // Drain subnet_queries so we can re-use it as we continue to process the queue
-                let grouped_queries: Vec<SubnetQuery> = subnet_queries.drain(..).collect();
+                let grouped_queries: Vec<SubnetQuery> = std::mem::take(&mut subnet_queries);
                 self.start_subnet_query(grouped_queries);
                 processed = true;
             }
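
`std::mem::take` swaps the vector with a fresh `Vec::default()` and hands back the old one, so the queued queries are moved out in one step rather than drained into a newly allocated `Vec`. A minimal sketch of the difference, with a hypothetical `queue` standing in for `subnet_queries`:

use std::mem;

fn main() {
    let mut queue = vec![10, 20, 30];
    // Replaces `queue.drain(..).collect::<Vec<_>>()`: no new allocation,
    // the whole buffer is moved out and an empty Vec is left behind.
    let grouped: Vec<i32> = mem::take(&mut queue);
    assert!(queue.is_empty());
    assert_eq!(grouped, vec![10, 20, 30]);
}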

View File

@@ -981,6 +981,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
         macro_rules! prune_peers {
             ($filter: expr) => {
+                let filter = $filter;
                 for (peer_id, info) in self
                     .network_globals
                     .peers
@@ -988,7 +989,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
                     .worst_connected_peers()
                     .iter()
                     .filter(|(_, info)| {
-                        !info.has_future_duty() && !info.is_trusted() && $filter(*info)
+                        !info.has_future_duty() && !info.is_trusted() && filter(*info)
                     })
                 {
                     if peers_to_prune.len()
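
Binding `$filter` to a local before the loop means the caller's expression is evaluated once per expansion, and a closure literal is no longer pasted directly in front of its argument, which would expand to `(|info| ...)(*info)` and trip clippy's `redundant_closure_call` lint. A standalone sketch of the same macro pattern (hypothetical `count_matching!`, not the peer manager):

macro_rules! count_matching {
    ($items:expr, $filter:expr) => {{
        let filter = $filter; // evaluate the caller's expression once
        $items.iter().filter(|x| filter(**x)).count()
    }};
}

fn main() {
    let scores = [1, 5, 9, 12];
    assert_eq!(count_matching!(scores, |s: i32| s > 8), 2);
}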

View File

@@ -299,7 +299,8 @@ macro_rules! field {
             }

             fn update_pattern(spec: &ChainSpec) -> UpdatePattern {
-                $update_pattern(spec)
+                let update_pattern = $update_pattern;
+                update_pattern(spec)
             }

             fn get_value(
@@ -307,7 +308,8 @@ macro_rules! field {
                 vindex: u64,
                 spec: &ChainSpec,
             ) -> Result<Self::Value, ChunkError> {
-                $get_value(state, vindex, spec)
+                let get_value = $get_value;
+                get_value(state, vindex, spec)
             }

             fn is_fixed_length() -> bool {

View File

@@ -167,7 +167,7 @@ impl<E: EthSpec> KeyValueStore<E> for LevelDB<E> {
             )
         };

-        for (start_key, end_key) in vec![
+        for (start_key, end_key) in [
             endpoints(DBColumn::BeaconStateTemporary),
             endpoints(DBColumn::BeaconState),
         ] {

View File

@@ -28,10 +28,10 @@ mod test {
     #[test]
     fn unsigned_sum_small() {
-        let v = vec![400u64, 401, 402, 403, 404, 405, 406];
+        let arr = [400u64, 401, 402, 403, 404, 405, 406];
         assert_eq!(
-            v.iter().copied().safe_sum().unwrap(),
-            v.iter().copied().sum()
+            arr.iter().copied().safe_sum().unwrap(),
+            arr.iter().copied().sum()
         );
     }
@@ -61,10 +61,10 @@ mod test {
     #[test]
     fn signed_sum_almost_overflow() {
-        let v = vec![i64::MIN, 1, -1i64, i64::MAX, i64::MAX, 1];
+        let arr = [i64::MIN, 1, -1i64, i64::MAX, i64::MAX, 1];
         assert_eq!(
-            v.iter().copied().safe_sum().unwrap(),
-            v.iter().copied().sum()
+            arr.iter().copied().safe_sum().unwrap(),
+            arr.iter().copied().sum()
         );
     }
 }

View File

@@ -27,10 +27,7 @@ impl<Pub> Copy for GenericPublicKeyBytes<Pub> {}
 impl<Pub> Clone for GenericPublicKeyBytes<Pub> {
     fn clone(&self) -> Self {
-        Self {
-            bytes: self.bytes,
-            _phantom: PhantomData,
-        }
+        *self
     }
 }
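
Because `GenericPublicKeyBytes<Pub>` also implements `Copy` (the impl visible in the hunk context), `clone` can simply return `*self`; recent clippy versions lint against a manual `Clone` body on a `Copy` type that does anything more than that. A self-contained sketch with a hypothetical `Wrapper<T>` mirroring the `PhantomData` layout:

use std::marker::PhantomData;

struct Wrapper<T> {
    bytes: [u8; 4],
    _phantom: PhantomData<T>,
}

// All fields are Copy, so the type can be Copy without requiring T: Copy.
impl<T> Copy for Wrapper<T> {}

impl<T> Clone for Wrapper<T> {
    fn clone(&self) -> Self {
        *self // Copy makes reconstructing each field unnecessary
    }
}

fn main() {
    let a: Wrapper<u32> = Wrapper { bytes: [1, 2, 3, 4], _phantom: PhantomData };
    let b = a.clone();
    assert_eq!(a.bytes, b.bytes);
}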