# 1.57.0 lints (#2850)

## Issue Addressed

New lints introduced by Rust 1.57.0.

## Proposed Changes

- Boxing some large enum variants (see the sketch below)
- Removing or underscore-prefixing some unused fields (is the validator lockfile unused? It seemed so to me)
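
To illustrate the boxing change, here is a minimal sketch with stand-in types (not the real `EventKind`/`Attestation` definitions, and presumably `clippy::large_enum_variant` at work): an enum is as large as its largest variant, so one oversized payload inflates every value of the type, and boxing it shrinks the enum to roughly pointer size.

```rust
use std::mem::size_of;

// Stand-in for a large payload such as `Attestation<T>`.
#[allow(dead_code)]
struct BigPayload([u8; 1024]);

// Before: every value reserves space for the largest variant, so even
// a tiny `Head` event occupies ~1 KiB.
#[allow(dead_code)]
enum EventKindUnboxed {
    Attestation(BigPayload),
    Head(u64),
}

// After: the large payload lives on the heap behind a `Box`, and the
// enum itself shrinks to roughly pointer size plus a tag.
#[allow(dead_code)]
enum EventKindBoxed {
    Attestation(Box<BigPayload>),
    Head(u64),
}

fn main() {
    println!(
        "unboxed: {} bytes, boxed: {} bytes",
        size_of::<EventKindUnboxed>(), // typically 1032 on a 64-bit target
        size_of::<EventKindBoxed>()    // typically 16 on a 64-bit target
    );
}
```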

## Additional Info

- Some error fields were flagged as dead code even though they are logged in places; these were kept and prefixed with an underscore rather than removed
- Left some dead fields in our ef-test code (underscore-prefixed, with a `serde` rename so the original keys still deserialize) because I assume they are useful for debugging; see the sketch after this list
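
A minimal sketch of the two "keep but silence" patterns used for those fields (simplified stand-ins, not the real Lighthouse types): an underscore prefix exempts a field from the `dead_code` lint, which matters when the field exists only for its `Drop` side effect, like the lockfile handle; and for deserialized test metadata, a `serde` rename keeps the original on-disk key working after the field is renamed.

```rust
use std::fs::File;

use serde::Deserialize;

// The handle is never read, but dropping the `File` would release the
// OS-level lock, so the field must stay alive. The `_` prefix exempts
// it from the dead_code lint without changing behaviour.
#[allow(dead_code)] // the struct itself is never built in this sketch
#[derive(Debug)]
pub struct Lockfile {
    _file: File,
}

// Renaming the field to `_description` would make serde expect an
// `_description` key in the input, so the rename attribute pins
// deserialization back to the original `description` key.
#[derive(Debug, Deserialize)]
pub struct Meta {
    #[serde(rename(deserialize = "description"))]
    _description: String,
}

fn main() {
    // Assumes the `serde` (with derive) and `serde_json` crates.
    let meta: Meta = serde_json::from_str(r#"{"description": "sanity check"}"#).unwrap();
    println!("{:?}", meta);
}
```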

Co-authored-by: realbigsean <seananderson33@gmail.com>

commit a80ccc3a33 (parent f3c237cfa0)
realbigsean, 2021-12-03 04:44:30 +00:00
22 changed files with 64 additions and 56 deletions

```diff
@@ -1602,7 +1602,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         // This method is called for API and gossip attestations, so this covers all unaggregated attestation events
         if let Some(event_handler) = self.event_handler.as_ref() {
             if event_handler.has_attestation_subscribers() {
-                event_handler.register(EventKind::Attestation(v.attestation().clone()));
+                event_handler
+                    .register(EventKind::Attestation(Box::new(v.attestation().clone())));
             }
         }
         metrics::inc_counter(&metrics::UNAGGREGATED_ATTESTATION_PROCESSING_SUCCESSES);
@@ -1638,7 +1639,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         // This method is called for API and gossip attestations, so this covers all aggregated attestation events
         if let Some(event_handler) = self.event_handler.as_ref() {
             if event_handler.has_attestation_subscribers() {
-                event_handler.register(EventKind::Attestation(v.attestation().clone()));
+                event_handler
+                    .register(EventKind::Attestation(Box::new(v.attestation().clone())));
             }
         }
         metrics::inc_counter(&metrics::AGGREGATED_ATTESTATION_PROCESSING_SUCCESSES);
```

```diff
@@ -41,9 +41,17 @@ const MAX_ADVANCE_DISTANCE: u64 = 4;
 enum Error {
     BeaconChain(BeaconChainError),
     HeadMissingFromSnapshotCache(Hash256),
-    MaxDistanceExceeded { current_slot: Slot, head_slot: Slot },
-    StateAlreadyAdvanced { block_root: Hash256 },
-    BadStateSlot { state_slot: Slot, block_slot: Slot },
+    MaxDistanceExceeded {
+        current_slot: Slot,
+        head_slot: Slot,
+    },
+    StateAlreadyAdvanced {
+        block_root: Hash256,
+    },
+    BadStateSlot {
+        _state_slot: Slot,
+        _block_slot: Slot,
+    },
 }

 impl From<BeaconChainError> for Error {
@@ -224,8 +232,8 @@ fn advance_head<T: BeaconChainTypes>(
         // Advancing more than one slot without storing the intermediate state would corrupt the
         // database. Future works might store temporary, intermediate states inside this function.
         return Err(Error::BadStateSlot {
-            block_slot: head_slot,
-            state_slot: state.slot(),
+            _block_slot: head_slot,
+            _state_slot: state.slot(),
         });
     };
```

```diff
@@ -243,8 +243,8 @@ enum Error {
     /// The file read from disk does not have a contiguous list of validator public keys. The file
     /// has become corrupted.
     InconsistentIndex {
-        expected: Option<usize>,
-        found: usize,
+        _expected: Option<usize>,
+        _found: usize,
     },
 }
@@ -296,8 +296,8 @@ impl ValidatorPubkeyCacheFile {
                 indices.insert(pubkey, index);
             } else {
                 return Err(Error::InconsistentIndex {
-                    expected,
-                    found: index,
+                    _expected: expected,
+                    _found: index,
                 });
             }
         }
```

```diff
@@ -2310,7 +2310,7 @@ impl ApiTester {
             self.attestations
                 .clone()
                 .into_iter()
-                .map(|attestation| EventKind::Attestation(attestation))
+                .map(|attestation| EventKind::Attestation(Box::new(attestation)))
                 .collect::<Vec<_>>()
                 .as_slice()
         );
```

```diff
@@ -63,7 +63,7 @@ use types::{
     SyncCommitteeMessage, SyncSubnetId,
 };
 use work_reprocessing_queue::{
-    spawn_reprocess_scheduler, QueuedAggregate, QueuedBlock, QueuedUnaggregate, ReadyWork,
+    spawn_reprocess_scheduler, QueuedAggregate, QueuedUnaggregate, ReadyWork,
 };
 use worker::{Toolbox, Worker};
@@ -72,6 +72,7 @@ mod tests;
 mod work_reprocessing_queue;
 mod worker;

+use crate::beacon_processor::work_reprocessing_queue::QueuedBlock;
 pub use worker::{GossipAggregatePackage, GossipAttestationPackage, ProcessId};

 /// The maximum size of the channel for work events to the `BeaconProcessor`.
@@ -574,7 +575,7 @@ impl<T: BeaconChainTypes> std::convert::From<ReadyWork<T>> for WorkEvent<T> {
                 drop_during_sync: false,
                 work: Work::DelayedImportBlock {
                     peer_id,
-                    block: Box::new(block),
+                    block,
                     seen_timestamp,
                 },
             },
```

```diff
@@ -91,7 +91,7 @@ pub struct QueuedAggregate<T: EthSpec> {
 /// A block that arrived early and has been queued for later import.
 pub struct QueuedBlock<T: BeaconChainTypes> {
     pub peer_id: PeerId,
-    pub block: GossipVerifiedBlock<T>,
+    pub block: Box<GossipVerifiedBlock<T>>,
     pub seen_timestamp: Duration,
 }
```

```diff
@@ -121,7 +121,6 @@ pub struct GossipAttestationPackage<E: EthSpec> {
     peer_id: PeerId,
     attestation: Box<Attestation<E>>,
     subnet_id: SubnetId,
-    beacon_block_root: Hash256,
     should_import: bool,
     seen_timestamp: Duration,
 }
@@ -138,7 +137,6 @@ impl<E: EthSpec> GossipAttestationPackage<E> {
         Self {
             message_id,
             peer_id,
-            beacon_block_root: attestation.data.beacon_block_root,
             attestation,
             subnet_id,
             should_import,
@@ -830,7 +828,7 @@ impl<T: BeaconChainTypes> Worker<T> {
         if reprocess_tx
             .try_send(ReprocessQueueMessage::EarlyBlock(QueuedBlock {
                 peer_id,
-                block: verified_block,
+                block: Box::new(verified_block),
                 seen_timestamp: seen_duration,
             }))
             .is_err()
```

```diff
@@ -226,7 +226,7 @@ pub fn get_config<E: EthSpec>(
         client_config.sync_eth1_chain = true;
         client_config.eth1.endpoints = endpoints
             .split(',')
-            .map(|s| SensitiveUrl::parse(s))
+            .map(SensitiveUrl::parse)
             .collect::<Result<_, _>>()
             .map_err(|e| format!("eth1-endpoints contains an invalid URL {:?}", e))?;
     }
@@ -245,7 +245,7 @@ pub fn get_config<E: EthSpec>(
         client_config.sync_eth1_chain = true;
         client_config.execution_endpoints = endpoints
             .split(',')
-            .map(|s| SensitiveUrl::parse(s))
+            .map(SensitiveUrl::parse)
             .collect::<Result<_, _>>()
             .map(Some)
             .map_err(|e| format!("execution-endpoints contains an invalid URL {:?}", e))?;
```

```diff
@@ -777,7 +777,7 @@ pub struct SseLateHead {
 #[derive(PartialEq, Debug, Serialize, Clone)]
 #[serde(bound = "T: EthSpec", untagged)]
 pub enum EventKind<T: EthSpec> {
-    Attestation(Attestation<T>),
+    Attestation(Box<Attestation<T>>),
     Block(SseBlock),
     FinalizedCheckpoint(SseFinalizedCheckpoint),
     Head(SseHead),
```

```diff
@@ -11,7 +11,7 @@ use std::path::{Path, PathBuf};
 /// outage) caused the lockfile not to be deleted.
 #[derive(Debug)]
 pub struct Lockfile {
-    file: File,
+    _file: File,
     path: PathBuf,
     file_existed: bool,
 }
@@ -43,7 +43,7 @@ impl Lockfile {
             _ => LockfileError::IoError(path.clone(), e),
         })?;
         Ok(Self {
-            file,
+            _file: file,
             path,
             file_existed,
         })
```

```diff
@@ -99,7 +99,7 @@ impl<'a> AlignedRecordDecorator<'a> {
 impl<'a> Write for AlignedRecordDecorator<'a> {
     fn write(&mut self, buf: &[u8]) -> Result<usize> {
-        if buf.iter().any(|c| is_ascii_control(c)) {
+        if buf.iter().any(u8::is_ascii_control) {
             let filtered = buf
                 .iter()
                 .cloned()
```

```diff
@@ -128,7 +128,7 @@ impl MonitoringHttpClient {
             Error::BeaconMetricsFailed("Beacon metrics require db path".to_string())
         })?;
-        let freezer_db_path = self.db_path.as_ref().ok_or_else(|| {
+        let freezer_db_path = self.freezer_db_path.as_ref().ok_or_else(|| {
             Error::BeaconMetricsFailed("Beacon metrics require freezer db path".to_string())
         })?;
         let metrics =
```

```diff
@@ -63,7 +63,7 @@ pub struct Eth1DepositData {
 pub struct ValidatorDir {
     dir: PathBuf,
     #[derivative(PartialEq = "ignore")]
-    lockfile: Lockfile,
+    _lockfile: Lockfile,
 }

 impl ValidatorDir {
@@ -85,7 +85,10 @@ impl ValidatorDir {
         let lockfile_path = dir.join(format!("{}.lock", VOTING_KEYSTORE_FILE));
         let lockfile = Lockfile::new(lockfile_path).map_err(Error::LockfileError)?;

-        Ok(Self { dir, lockfile })
+        Ok(Self {
+            dir,
+            _lockfile: lockfile,
+        })
     }

     /// Returns the `dir` provided to `Self::open`.
```

```diff
@@ -491,8 +491,8 @@ mod tests {
             subs.push(sub);
         }

-        for mut sub in subs.iter_mut() {
-            test_routine(arena, &mut sub);
+        for sub in subs.iter_mut() {
+            test_routine(arena, sub);
         }
     }
 }
```

```diff
@@ -113,7 +113,7 @@ impl<'a> SszEncoder<'a> {
         F: Fn(&mut Vec<u8>),
     {
         if is_ssz_fixed_len {
-            ssz_append(&mut self.buf);
+            ssz_append(self.buf);
         } else {
             self.buf
                 .extend_from_slice(&encode_length(self.offset + self.variable_bytes.len()));
@@ -129,7 +129,7 @@ impl<'a> SszEncoder<'a> {
     pub fn finalize(&mut self) -> &mut Vec<u8> {
         self.buf.append(&mut self.variable_bytes);

-        &mut self.buf
+        self.buf
     }
 }
```

```diff
@@ -187,14 +187,13 @@ fn valid_4_deposits() {
     let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT);
     let mut state = harness.get_current_state();

-    let (deposits, mut state) = harness.make_deposits(&mut state, 4, None, None);
+    let (deposits, state) = harness.make_deposits(&mut state, 4, None, None);
     let deposits = VariableList::from(deposits);

     let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0;
     *head_block.to_mut().body_mut().deposits_mut() = deposits;

-    let result =
-        process_operations::process_deposits(&mut state, head_block.body().deposits(), &spec);
+    let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec);

     // Expecting Ok because these are valid deposits.
     assert_eq!(result, Ok(()));
@@ -206,7 +205,7 @@ fn invalid_deposit_deposit_count_too_big() {
     let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT);
     let mut state = harness.get_current_state();

-    let (deposits, mut state) = harness.make_deposits(&mut state, 1, None, None);
+    let (deposits, state) = harness.make_deposits(&mut state, 1, None, None);
     let deposits = VariableList::from(deposits);

     let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0;
@@ -214,8 +213,7 @@ fn invalid_deposit_deposit_count_too_big() {
     let big_deposit_count = NUM_DEPOSITS + 1;
     state.eth1_data_mut().deposit_count = big_deposit_count;

-    let result =
-        process_operations::process_deposits(&mut state, head_block.body().deposits(), &spec);
+    let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec);

     // Expecting DepositCountInvalid because we incremented the deposit_count
     assert_eq!(
@@ -233,7 +231,7 @@ fn invalid_deposit_count_too_small() {
     let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT);
     let mut state = harness.get_current_state();

-    let (deposits, mut state) = harness.make_deposits(&mut state, 1, None, None);
+    let (deposits, state) = harness.make_deposits(&mut state, 1, None, None);
     let deposits = VariableList::from(deposits);

     let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0;
@@ -241,8 +239,7 @@ fn invalid_deposit_count_too_small() {
     let small_deposit_count = NUM_DEPOSITS - 1;
     state.eth1_data_mut().deposit_count = small_deposit_count;

-    let result =
-        process_operations::process_deposits(&mut state, head_block.body().deposits(), &spec);
+    let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec);

     // Expecting DepositCountInvalid because we decremented the deposit_count
     assert_eq!(
@@ -260,7 +257,7 @@ fn invalid_deposit_bad_merkle_proof() {
     let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT);
     let mut state = harness.get_current_state();

-    let (deposits, mut state) = harness.make_deposits(&mut state, 1, None, None);
+    let (deposits, state) = harness.make_deposits(&mut state, 1, None, None);
     let deposits = VariableList::from(deposits);

     let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0;
@@ -270,8 +267,7 @@ fn invalid_deposit_bad_merkle_proof() {
     // Manually offsetting deposit count and index to trigger bad merkle proof
     state.eth1_data_mut().deposit_count += 1;
     *state.eth1_deposit_index_mut() += 1;
-    let result =
-        process_operations::process_deposits(&mut state, head_block.body().deposits(), &spec);
+    let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec);

     // Expecting BadMerkleProof because the proofs were created with different indices
     assert_eq!(
@@ -289,15 +285,14 @@ fn invalid_deposit_wrong_sig() {
     let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT);
     let mut state = harness.get_current_state();

-    let (deposits, mut state) =
+    let (deposits, state) =
         harness.make_deposits(&mut state, 1, None, Some(SignatureBytes::empty()));
     let deposits = VariableList::from(deposits);

     let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0;
     *head_block.to_mut().body_mut().deposits_mut() = deposits;

-    let result =
-        process_operations::process_deposits(&mut state, head_block.body().deposits(), &spec);
+    let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec);

     // Expecting Ok(()) even though the block signature does not correspond to the correct public key
     assert_eq!(result, Ok(()));
 }
@@ -308,15 +303,14 @@ fn invalid_deposit_invalid_pub_key() {
     let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT);
     let mut state = harness.get_current_state();

-    let (deposits, mut state) =
+    let (deposits, state) =
         harness.make_deposits(&mut state, 1, Some(PublicKeyBytes::empty()), None);
     let deposits = VariableList::from(deposits);

     let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0;
     *head_block.to_mut().body_mut().deposits_mut() = deposits;

-    let result =
-        process_operations::process_deposits(&mut state, head_block.body().deposits(), &spec);
+    let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec);

     // Expecting Ok(()) even though we passed in invalid publickeybytes in the public key field of the deposit data.
     assert_eq!(result, Ok(()));
```

```diff
@@ -62,7 +62,8 @@ pub enum Step<B, A, P> {
 #[derive(Debug, Clone, Deserialize)]
 #[serde(deny_unknown_fields)]
 pub struct Meta {
-    description: String,
+    #[serde(rename(deserialize = "description"))]
+    _description: String,
 }

 #[derive(Debug)]
```

```diff
@@ -7,7 +7,8 @@ use types::{BeaconState, EthSpec, ForkName};
 #[derive(Debug, Clone, Deserialize)]
 pub struct Metadata {
-    description: String,
+    #[serde(rename(deserialize = "description"))]
+    _description: String,
 }

 #[derive(Debug, Clone, Deserialize)]
```

```diff
@@ -15,7 +15,8 @@ use types::{BitList, BitVector, FixedVector, ForkName, VariableList};
 #[derive(Debug, Clone, Deserialize)]
 struct Metadata {
     root: String,
-    signing_root: Option<String>,
+    #[serde(rename(deserialize = "signing_root"))]
+    _signing_root: Option<String>,
 }

 #[derive(Debug, Clone)]
```

```diff
@@ -10,7 +10,8 @@ use types::{BeaconBlock, BeaconState, ForkName, Hash256, SignedBeaconBlock};
 #[derive(Debug, Clone, Deserialize)]
 struct SszStaticRoots {
     root: String,
-    signing_root: Option<String>,
+    #[serde(rename(deserialize = "signing_root"))]
+    _signing_root: Option<String>,
 }

 /// Runner for types that implement `ssz::Decode`.
```

```diff
@@ -131,7 +131,7 @@ impl Config {
     if let Some(beacon_nodes) = parse_optional::<String>(cli_args, "beacon-nodes")? {
         config.beacon_nodes = beacon_nodes
             .split(',')
-            .map(|s| SensitiveUrl::parse(s))
+            .map(SensitiveUrl::parse)
             .collect::<Result<_, _>>()
             .map_err(|e| format!("Unable to parse beacon node URL: {:?}", e))?;
     }
```

```diff
@@ -84,7 +84,6 @@ pub struct ProductionValidatorClient<T: EthSpec> {
     doppelganger_service: Option<Arc<DoppelgangerService>>,
     validator_store: Arc<ValidatorStore<SystemTimeSlotClock, T>>,
     http_api_listen_addr: Option<SocketAddr>,
-    http_metrics_ctx: Option<Arc<http_metrics::Context<T>>>,
     config: Config,
 }
@@ -431,7 +430,6 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
             validator_store,
             config,
             http_api_listen_addr: None,
-            http_metrics_ctx,
         })
     }
```