Clippy lints for Rust 1.66 (#3810)

## Issue Addressed
Fixes the new clippy lints introduced in Rust 1.66.

## Proposed Changes

Most of the changes come from the following lints (see the sketch after this list):
- [unnecessary_cast](https://rust-lang.github.io/rust-clippy/master/index.html#unnecessary_cast)
- [iter_kv_map](https://rust-lang.github.io/rust-clippy/master/index.html#iter_kv_map)
- [needless_borrow](https://rust-lang.github.io/rust-clippy/master/index.html#needless_borrow)
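
For context, here is a minimal before/after sketch of the pattern each of these lints flags. The function and variable names (`lint_examples`, `slot_map`, `count`, `path`) are hypothetical, chosen only for illustration; they are not taken from the diff below.

```rust
use std::collections::HashMap;
use std::fs::File;
use std::path::Path;

// Illustrative only: `slot_map`, `count` and `path` are made-up names.
fn lint_examples(slot_map: &HashMap<u64, Vec<u8>>, count: u64, path: &Path) {
    // unnecessary_cast: casting a value to the type it already has.
    let _before = count as u64; // redundant, `count` is already a `u64`
    let _after = count;

    // iter_kv_map: iterating `(key, value)` pairs only to discard one side.
    let _before: Vec<usize> = slot_map.iter().map(|(_, v)| v.len()).collect();
    let _after: Vec<usize> = slot_map.values().map(|v| v.len()).collect();

    // needless_borrow: adding a `&` where the value already satisfies the bound.
    let _before = File::open(&path); // `&path` is an extra, needless borrow
    let _after = File::open(path);
}
```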

## Additional Info

N/A
Commit ffbf70e2d9 (parent 63c74b37f4) by Divma, 2022-12-16 04:04:00 +00:00
31 changed files with 58 additions and 82 deletions


```diff
@@ -10,7 +10,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
 }
 
 pub fn cli_run(wallet_base_dir: PathBuf) -> Result<(), String> {
-    let mgr = WalletManager::open(&wallet_base_dir)
+    let mgr = WalletManager::open(wallet_base_dir)
         .map_err(|e| format!("Unable to open --{}: {:?}", WALLETS_DIR_FLAG, e))?;
 
     for (name, _uuid) in mgr
```


```diff
@@ -402,7 +402,7 @@ impl<T: AggregateMap> NaiveAggregationPool<T> {
     /// Returns the total number of items stored in `self`.
     pub fn num_items(&self) -> usize {
-        self.maps.iter().map(|(_, map)| map.len()).sum()
+        self.maps.values().map(T::len).sum()
     }
 
     /// Returns an aggregated `T::Value` with the given `T::Data`, if any.
@@ -448,11 +448,7 @@ impl<T: AggregateMap> NaiveAggregationPool<T> {
         // If we have too many maps, remove the lowest amount to ensure we only have
         // `SLOTS_RETAINED` left.
         if self.maps.len() > SLOTS_RETAINED {
-            let mut slots = self
-                .maps
-                .iter()
-                .map(|(slot, _map)| *slot)
-                .collect::<Vec<_>>();
+            let mut slots = self.maps.keys().copied().collect::<Vec<_>>();
             // Sort is generally pretty slow, however `SLOTS_RETAINED` is quite low so it should be
             // negligible.
             slots.sort_unstable();
```


```diff
@@ -1459,7 +1459,7 @@ where
         let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap();
 
         let signed_block = block.sign(
-            &self.validator_keypairs[proposer_index as usize].sk,
+            &self.validator_keypairs[proposer_index].sk,
             &state.fork(),
             state.genesis_validators_root(),
             &self.spec,
```


```diff
@@ -631,10 +631,7 @@ impl<T: EthSpec> ValidatorMonitor<T> {
     // Return the `id`'s of all monitored validators.
     pub fn get_all_monitored_validators(&self) -> Vec<String> {
-        self.validators
-            .iter()
-            .map(|(_, val)| val.id.clone())
-            .collect()
+        self.validators.values().map(|val| val.id.clone()).collect()
     }
 
     /// If `self.auto_register == true`, add the `validator_index` to `self.monitored_validators`.
```


```diff
@@ -675,7 +675,7 @@ pub mod tests {
     #[test]
     fn test_finalization_boundaries() {
         let n = 8;
-        let half = (n / 2) as usize;
+        let half = n / 2;
 
         let mut deposit_cache = get_cache_with_deposits(n as u64);
@@ -828,9 +828,9 @@ pub mod tests {
         // get_log(half+quarter) should return log with index `half+quarter`
         assert_eq!(
             q3_log_before_finalization.index,
-            (half + quarter) as u64,
+            half + quarter,
             "log index should be {}",
-            (half + quarter),
+            half + quarter,
         );
 
         // get lower quarter of deposits with max deposit count
```


```diff
@@ -27,7 +27,7 @@ impl From<jsonwebtoken::errors::Error> for Error {
 /// Provides wrapper around `[u8; JWT_SECRET_LENGTH]` that implements `Zeroize`.
 #[derive(Zeroize, Clone)]
 #[zeroize(drop)]
-pub struct JwtKey([u8; JWT_SECRET_LENGTH as usize]);
+pub struct JwtKey([u8; JWT_SECRET_LENGTH]);
 
 impl JwtKey {
     /// Wrap given slice in `Self`. Returns an error if slice.len() != `JWT_SECRET_LENGTH`.
```


```diff
@@ -2840,7 +2840,7 @@ pub fn serve<T: BeaconChainTypes>(
                         let is_live =
                             chain.validator_seen_at_epoch(index as usize, request_data.epoch);
                         api_types::LivenessResponseData {
-                            index: index as u64,
+                            index,
                             epoch: request_data.epoch,
                             is_live,
                         }
@@ -2876,7 +2876,7 @@ pub fn serve<T: BeaconChainTypes>(
         .and_then(
             |sysinfo, app_start: std::time::Instant, data_dir, network_globals| {
                 blocking_json_task(move || {
-                    let app_uptime = app_start.elapsed().as_secs() as u64;
+                    let app_uptime = app_start.elapsed().as_secs();
                     Ok(api_types::GenericResponse::from(observe_system_health_bn(
                         sysinfo,
                         data_dir,
```


```diff
@@ -186,14 +186,7 @@ impl RealScore {
     /// Add an f64 to the score abiding by the limits.
     fn add(&mut self, score: f64) {
-        let mut new_score = self.lighthouse_score + score;
-        if new_score > MAX_SCORE {
-            new_score = MAX_SCORE;
-        }
-        if new_score < MIN_SCORE {
-            new_score = MIN_SCORE;
-        }
+        let new_score = (self.lighthouse_score + score).clamp(MIN_SCORE, MAX_SCORE);
         self.set_lighthouse_score(new_score);
     }
```


```diff
@@ -443,7 +443,7 @@ fn handle_length(
     // Note: length-prefix of > 10 bytes(uint64) would be a decoding error
     match uvi_codec.decode(bytes).map_err(RPCError::from)? {
         Some(length) => {
-            *len = Some(length as usize);
+            *len = Some(length);
             Ok(Some(length))
         }
         None => Ok(None), // need more bytes to decode length
```


```diff
@@ -270,11 +270,11 @@ impl<TSpec: EthSpec> PeerScoreSettings<TSpec> {
         let modulo_smaller = max(
             1,
-            smaller_committee_size / self.target_aggregators_per_committee as usize,
+            smaller_committee_size / self.target_aggregators_per_committee,
         );
         let modulo_larger = max(
             1,
-            (smaller_committee_size + 1) / self.target_aggregators_per_committee as usize,
+            (smaller_committee_size + 1) / self.target_aggregators_per_committee,
         );
 
         Ok((
```


```diff
@@ -88,7 +88,7 @@ fn keypair_from_hex(hex_bytes: &str) -> error::Result<Keypair> {
         hex_bytes.to_string()
     };
 
-    hex::decode(&hex_bytes)
+    hex::decode(hex_bytes)
        .map_err(|e| format!("Failed to parse p2p secret key bytes: {:?}", e).into())
        .and_then(keypair_from_bytes)
 }
```


```diff
@@ -49,7 +49,7 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> {
         let indices = get_attesting_indices::<T>(committee.committee, &fresh_validators).ok()?;
         let fresh_validators_rewards: HashMap<u64, u64> = indices
             .iter()
-            .map(|i| *i as u64)
+            .copied()
             .flat_map(|validator_index| {
                 let reward = base::get_base_reward(
                     state,
```


```diff
@@ -801,7 +801,7 @@ mod test {
     fn needs_genesis_value_test_randao<F: Field<TestSpec>>(_: F) {
         let spec = &TestSpec::default_spec();
-        let max = TestSpec::slots_per_epoch() as u64 * (F::Length::to_u64() - 1);
+        let max = TestSpec::slots_per_epoch() * (F::Length::to_u64() - 1);
         for i in 0..max {
             assert!(
                 F::slot_needs_genesis_value(Slot::new(i), spec),
```


```diff
@@ -189,7 +189,7 @@ impl ValidatorDefinitions {
             .write(true)
             .read(true)
             .create_new(false)
-            .open(&config_path)
+            .open(config_path)
             .map_err(Error::UnableToOpenFile)?;
         serde_yaml::from_reader(file).map_err(Error::UnableToParseFile)
     }
```


```diff
@@ -196,7 +196,7 @@ impl<'a> Builder<'a> {
         if path.exists() {
             return Err(Error::DepositDataAlreadyExists(path));
         } else {
-            let hex = format!("0x{}", hex::encode(&deposit_data));
+            let hex = format!("0x{}", hex::encode(deposit_data));
             File::options()
                 .write(true)
                 .read(true)
```


```diff
@@ -63,15 +63,15 @@ mod test {
     #[test]
     fn encoding() {
         let bytes = vec![0, 255];
-        let hex = encode(&bytes);
+        let hex = encode(bytes);
         assert_eq!(hex.as_str(), "0x00ff");
 
         let bytes = vec![];
-        let hex = encode(&bytes);
+        let hex = encode(bytes);
         assert_eq!(hex.as_str(), "0x");
 
         let bytes = vec![1, 2, 3];
-        let hex = encode(&bytes);
+        let hex = encode(bytes);
         assert_eq!(hex.as_str(), "0x010203");
     }
 }
```


```diff
@@ -36,7 +36,7 @@ impl<'de> Visitor<'de> for QuantityVisitor {
         } else if stripped.starts_with('0') {
             Err(de::Error::custom("cannot have leading zero"))
         } else if stripped.len() % 2 != 0 {
-            hex::decode(&format!("0{}", stripped))
+            hex::decode(format!("0{}", stripped))
                 .map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e)))
         } else {
             hex::decode(stripped).map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e)))
```


```diff
@@ -52,10 +52,10 @@ pub fn process_sync_aggregate<T: EthSpec>(
         .zip(aggregate.sync_committee_bits.iter())
     {
         if participation_bit {
-            increase_balance(state, participant_index as usize, participant_reward)?;
+            increase_balance(state, participant_index, participant_reward)?;
             increase_balance(state, proposer_index as usize, proposer_reward)?;
         } else {
-            decrease_balance(state, participant_index as usize, participant_reward)?;
+            decrease_balance(state, participant_index, participant_reward)?;
         }
     }
```


```diff
@@ -76,7 +76,7 @@ pub fn get_flag_index_deltas<T: EthSpec>(
         let base_reward = get_base_reward(state, index, base_reward_per_increment, spec)?;
         let mut delta = Delta::default();
 
-        if unslashed_participating_indices.contains(index as usize)? {
+        if unslashed_participating_indices.contains(index)? {
             if !state.is_in_inactivity_leak(previous_epoch, spec) {
                 let reward_numerator = base_reward
                     .safe_mul(weight)?
@@ -89,8 +89,8 @@ pub fn get_flag_index_deltas<T: EthSpec>(
             delta.penalize(base_reward.safe_mul(weight)?.safe_div(WEIGHT_DENOMINATOR)?)?;
         }
         deltas
-            .get_mut(index as usize)
-            .ok_or(Error::DeltaOutOfBounds(index as usize))?
+            .get_mut(index)
+            .ok_or(Error::DeltaOutOfBounds(index))?
             .combine(delta)?;
     }
     Ok(())
```


```diff
@@ -235,7 +235,7 @@ fn get_inclusion_delay_delta(
         let max_attester_reward = base_reward.safe_sub(proposer_reward)?;
         delta.reward(max_attester_reward.safe_div(inclusion_info.delay)?)?;
 
-        let proposer_index = inclusion_info.proposer_index as usize;
+        let proposer_index = inclusion_info.proposer_index;
         Ok((delta, Some((proposer_index, proposer_delta))))
     } else {
         Ok((Delta::default(), None))
```


```diff
@@ -482,7 +482,7 @@ impl<T: EthSpec> BeaconState<T> {
     /// Spec v0.12.1
     pub fn get_committee_count_at_slot(&self, slot: Slot) -> Result<u64, Error> {
         let cache = self.committee_cache_at_slot(slot)?;
-        Ok(cache.committees_per_slot() as u64)
+        Ok(cache.committees_per_slot())
     }
 
     /// Compute the number of committees in an entire epoch.
```


```diff
@@ -144,7 +144,7 @@ impl CommitteeCache {
             self.committees_per_slot as usize,
             index as usize,
         );
 
-        let committee = self.compute_committee(committee_index as usize)?;
+        let committee = self.compute_committee(committee_index)?;
 
         Some(BeaconCommittee {
             slot,
```


```diff
@@ -344,12 +344,7 @@ mod committees {
         let cache_epoch = cache_epoch.into_epoch(state_epoch);
 
-        execute_committee_consistency_test(
-            new_head_state,
-            cache_epoch,
-            validator_count as usize,
-            spec,
-        );
+        execute_committee_consistency_test(new_head_state, cache_epoch, validator_count, spec);
     }
 
     async fn committee_consistency_test_suite<T: EthSpec>(cached_epoch: RelativeEpoch) {
@@ -361,18 +356,13 @@ mod committees {
             .mul(spec.target_committee_size)
             .add(1);
 
-        committee_consistency_test::<T>(validator_count as usize, Epoch::new(0), cached_epoch)
-            .await;
-
-        committee_consistency_test::<T>(
-            validator_count as usize,
-            T::genesis_epoch() + 4,
-            cached_epoch,
-        )
-        .await;
+        committee_consistency_test::<T>(validator_count, Epoch::new(0), cached_epoch).await;
+
+        committee_consistency_test::<T>(validator_count, T::genesis_epoch() + 4, cached_epoch)
+            .await;
 
         committee_consistency_test::<T>(
-            validator_count as usize,
+            validator_count,
             T::genesis_epoch()
                 + (T::slots_per_historical_root() as u64)
                     .mul(T::slots_per_epoch())
```


```diff
@@ -202,7 +202,7 @@ mod test {
     }
 
     fn preset_from_file<T: DeserializeOwned>(preset_name: &str, filename: &str) -> T {
-        let f = File::open(&presets_base_path().join(preset_name).join(filename))
+        let f = File::open(presets_base_path().join(preset_name).join(filename))
             .expect("preset file exists");
         serde_yaml::from_reader(f).unwrap()
     }
```


```diff
@@ -39,8 +39,8 @@ fn double_vote_multi_vals() {
 fn double_vote_some_vals() {
     let v1 = vec![0, 1, 2, 3, 4, 5, 6];
     let v2 = vec![0, 2, 4, 6];
-    let att1 = indexed_att(&v1, 0, 1, 0);
-    let att2 = indexed_att(&v2, 0, 1, 1);
+    let att1 = indexed_att(v1, 0, 1, 0);
+    let att2 = indexed_att(v2, 0, 1, 1);
     let slashings = hashset![att_slashing(&att1, &att2)];
     let attestations = vec![att1, att2];
     slasher_test_indiv(&attestations, &slashings, 1);
@@ -53,9 +53,9 @@ fn double_vote_some_vals_repeat() {
     let v1 = vec![0, 1, 2, 3, 4, 5, 6];
     let v2 = vec![0, 2, 4, 6];
     let v3 = vec![1, 3, 5];
-    let att1 = indexed_att(&v1, 0, 1, 0);
-    let att2 = indexed_att(&v2, 0, 1, 1);
-    let att3 = indexed_att(&v3, 0, 1, 0);
+    let att1 = indexed_att(v1, 0, 1, 0);
+    let att2 = indexed_att(v2, 0, 1, 1);
+    let att3 = indexed_att(v3, 0, 1, 0);
     let slashings = hashset![att_slashing(&att1, &att2)];
     let attestations = vec![att1, att2, att3];
     slasher_test_indiv(&attestations, &slashings, 1);
@@ -67,8 +67,8 @@ fn double_vote_some_vals_repeat() {
 fn no_double_vote_same_target() {
     let v1 = vec![0, 1, 2, 3, 4, 5, 6];
     let v2 = vec![0, 1, 2, 3, 4, 5, 7, 8];
-    let att1 = indexed_att(&v1, 0, 1, 0);
-    let att2 = indexed_att(&v2, 0, 1, 0);
+    let att1 = indexed_att(v1, 0, 1, 0);
+    let att2 = indexed_att(v2, 0, 1, 0);
     let attestations = vec![att1, att2];
     slasher_test_indiv(&attestations, &hashset! {}, 1);
     slasher_test_indiv(&attestations, &hashset! {}, 1000);
@@ -79,8 +79,8 @@ fn no_double_vote_same_target() {
 fn no_double_vote_distinct_vals() {
     let v1 = vec![0, 1, 2, 3];
     let v2 = vec![4, 5, 6, 7];
-    let att1 = indexed_att(&v1, 0, 1, 0);
-    let att2 = indexed_att(&v2, 0, 1, 1);
+    let att1 = indexed_att(v1, 0, 1, 0);
+    let att2 = indexed_att(v2, 0, 1, 1);
     let attestations = vec![att1, att2];
     slasher_test_indiv(&attestations, &hashset! {}, 1);
     slasher_test_indiv(&attestations, &hashset! {}, 1000);
@@ -89,7 +89,7 @@ fn no_double_vote_distinct_vals() {
 #[test]
 fn no_double_vote_repeated() {
     let v = vec![0, 1, 2, 3, 4];
-    let att1 = indexed_att(&v, 0, 1, 0);
+    let att1 = indexed_att(v, 0, 1, 0);
     let att2 = att1.clone();
     let attestations = vec![att1, att2];
     slasher_test_indiv(&attestations, &hashset! {}, 1);
```


```diff
@@ -76,7 +76,7 @@ impl GenericExecutionEngine for NethermindEngine {
     fn init_datadir() -> TempDir {
         let datadir = TempDir::new().unwrap();
         let genesis_json_path = datadir.path().join("genesis.json");
-        let mut file = File::create(&genesis_json_path).unwrap();
+        let mut file = File::create(genesis_json_path).unwrap();
         let json = nethermind_genesis_json();
         serde_json::to_writer(&mut file, &json).unwrap();
         datadir
```


```diff
@@ -231,7 +231,7 @@ impl<E: EthSpec> LocalExecutionNode<E> {
             .tempdir()
             .expect("should create temp directory for client datadir");
         let jwt_file_path = datadir.path().join("jwt.hex");
-        if let Err(e) = std::fs::write(&jwt_file_path, config.jwt_key.hex_string()) {
+        if let Err(e) = std::fs::write(jwt_file_path, config.jwt_key.hex_string()) {
             panic!("Failed to write jwt file {}", e);
         }
         Self {
```


```diff
@@ -441,7 +441,7 @@ impl DoppelgangerService {
         }
 
         // Get a list of indices to provide to the BN API.
-        let indices_only = indices_map.iter().map(|(index, _)| *index).collect();
+        let indices_only = indices_map.keys().copied().collect();
 
         // Pull the liveness responses from the BN.
         let request_epoch = request_slot.epoch(E::slots_per_epoch());
@@ -971,16 +971,16 @@ mod test {
         LivenessResponses {
             current_epoch_responses: detection_indices
                 .iter()
-                .map(|i| LivenessResponseData {
-                    index: *i as u64,
+                .map(|&index| LivenessResponseData {
+                    index,
                     epoch: current_epoch,
                     is_live: false,
                 })
                 .collect(),
             previous_epoch_responses: detection_indices
                 .iter()
-                .map(|i| LivenessResponseData {
-                    index: *i as u64,
+                .map(|&index| LivenessResponseData {
+                    index,
                     epoch: current_epoch - 1,
                     is_live: false,
                 })
```


```diff
@@ -331,7 +331,7 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
         .and(signer.clone())
         .and_then(|sysinfo, app_start: std::time::Instant, val_dir, signer| {
             blocking_signed_json_task(signer, move || {
-                let app_uptime = app_start.elapsed().as_secs() as u64;
+                let app_uptime = app_start.elapsed().as_secs();
                 Ok(api_types::GenericResponse::from(observe_system_health_vc(
                     sysinfo, val_dir, app_uptime,
                 )))
```


```diff
@@ -472,7 +472,7 @@ impl InitializedValidators {
     /// Iterate through all voting public keys in `self` that should be used when querying for duties.
     pub fn iter_voting_pubkeys(&self) -> impl Iterator<Item = &PublicKeyBytes> {
-        self.validators.iter().map(|(pubkey, _)| pubkey)
+        self.validators.keys()
     }
 
     /// Returns the voting `Keypair` for a given voting `PublicKey`, if all are true:
```


```diff
@@ -104,7 +104,7 @@ impl KeyCache {
         let file = File::options()
             .read(true)
             .create_new(false)
-            .open(&cache_path)
+            .open(cache_path)
             .map_err(Error::UnableToOpenFile)?;
         serde_json::from_reader(file).map_err(Error::UnableToParseFile)
     }
```