Clippy 1.49.0 updates and DHT persistence test fix (#2156)
## Issue Addressed

`test_dht_persistence` was failing intermittently.

## Proposed Changes

Bind the result of `NetworkService::start` to an underscore-prefixed variable rather than to `_`. Binding to `_` caused the service to be dropped immediately. The test was failing roughly 5 times in 100 runs before this change, and I haven't been able to get it to fail after updating it.

Co-authored-by: realbigsean <seananderson33@gmail.com>
parent e5b1a37110
commit 7a71977987
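The underlying issue is Rust's drop semantics: `let _ = expr` is not a binding, so the value is dropped at the end of that statement, whereas an underscore-prefixed name like `_network_service` is a real binding that lives until the end of the enclosing scope. A minimal sketch of the difference (the `Service` type below is illustrative, not from this codebase):

```rust
struct Service;

impl Drop for Service {
    fn drop(&mut self) {
        println!("service dropped");
    }
}

fn main() {
    // `_` is a wildcard pattern, not a binding: the value is dropped
    // as soon as this statement finishes.
    let _ = Service;
    println!("after `let _ = Service`");

    // `_service` is a normal binding (the leading underscore only
    // silences the unused-variable warning), so the value lives
    // until the end of `main`.
    let _service = Service;
    println!("after `let _service = Service`");
} // `_service` dropped here
```

Dropping the service early shuts it down before the test's assertions run, which matches the intermittent failures described above.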
@@ -1,7 +1,6 @@
 use parking_lot::RwLock;
 use ssz_derive::{Decode, Encode};
 use std::collections::HashMap;
-use std::iter::FromIterator;
 use types::{Hash256, Slot};
 
 #[derive(Debug, PartialEq)]
@@ -61,13 +60,12 @@ impl HeadTracker {
                 slots_len,
             })
         } else {
-            let map = HashMap::from_iter(
-                ssz_container
-                    .roots
-                    .iter()
-                    .zip(ssz_container.slots.iter())
-                    .map(|(root, slot)| (*root, *slot)),
-            );
+            let map = ssz_container
+                .roots
+                .iter()
+                .zip(ssz_container.slots.iter())
+                .map(|(root, slot)| (*root, *slot))
+                .collect::<HashMap<_, _>>();
 
             Ok(Self(RwLock::new(map)))
         }
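This hunk (and several below) applies Clippy's `from_iter_instead_of_collect` lint: instead of calling `HashMap::from_iter`/`HashSet::from_iter` on an iterator, the iterator chain ends in `.collect()`, which also lets the `use std::iter::FromIterator;` imports be removed. A standalone sketch of the pattern with made-up values:

```rust
use std::collections::HashMap;

fn main() {
    let roots = ["root_a", "root_b"];
    let slots = [1_u64, 2];

    // Before: HashMap::from_iter(roots.iter().zip(slots.iter()).map(..))
    // After: the same iterator chain, finished with `.collect()`.
    let map: HashMap<&str, u64> = roots
        .iter()
        .zip(slots.iter())
        .map(|(root, slot)| (*root, *slot))
        .collect();

    assert_eq!(map["root_a"], 1);
}
```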
@@ -2,7 +2,6 @@ use derivative::Derivative;
 use smallvec::SmallVec;
 use state_processing::{SigVerifiedOp, VerifyOperation};
 use std::collections::HashSet;
-use std::iter::FromIterator;
 use std::marker::PhantomData;
 use types::{
     AttesterSlashing, BeaconState, ChainSpec, EthSpec, ProposerSlashing, SignedVoluntaryExit,
@@ -57,10 +56,18 @@ impl<E: EthSpec> ObservableOperation<E> for ProposerSlashing {
 
 impl<E: EthSpec> ObservableOperation<E> for AttesterSlashing<E> {
     fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> {
-        let attestation_1_indices =
-            HashSet::<u64>::from_iter(self.attestation_1.attesting_indices.iter().copied());
-        let attestation_2_indices =
-            HashSet::<u64>::from_iter(self.attestation_2.attesting_indices.iter().copied());
+        let attestation_1_indices = self
+            .attestation_1
+            .attesting_indices
+            .iter()
+            .copied()
+            .collect::<HashSet<u64>>();
+        let attestation_2_indices = self
+            .attestation_2
+            .attesting_indices
+            .iter()
+            .copied()
+            .collect::<HashSet<u64>>();
         attestation_1_indices
             .intersection(&attestation_2_indices)
             .copied()
@@ -878,7 +878,13 @@ impl Service {
                     // imported if any one of them cannot be parsed.
                     .collect::<Result<Vec<_>, _>>()?
                     .into_iter()
-                    .map(|deposit_log| {
+                    // Returns if a deposit is unable to be added to the cache.
+                    //
+                    // If this error occurs, the cache will no longer be guaranteed to hold either
+                    // none or all of the logs for each block (i.e., they may exist _some_ logs for
+                    // a block, but not _all_ logs for that block). This scenario can cause the
+                    // node to choose an invalid genesis state or propose an invalid block.
+                    .try_for_each(|deposit_log| {
                         if let DepositCacheInsertOutcome::Inserted = cache
                             .cache
                             .insert_log(deposit_log)
@@ -888,14 +894,7 @@ impl Service {
                         }
 
                         Ok(())
-                    })
-                    // Returns if a deposit is unable to be added to the cache.
-                    //
-                    // If this error occurs, the cache will no longer be guaranteed to hold either
-                    // none or all of the logs for each block (i.e., they may exist _some_ logs for
-                    // a block, but not _all_ logs for that block). This scenario can cause the
-                    // node to choose an invalid genesis state or propose an invalid block.
-                    .collect::<Result<_, _>>()?;
+                    })?;
 
                 debug!(
                     self.log,
@@ -70,16 +70,16 @@ impl<TSpec: EthSpec> PeerScoreSettings<TSpec> {
         enr_fork_id: &EnrForkId,
         current_slot: Slot,
     ) -> error::Result<PeerScoreParams> {
-        let mut params = PeerScoreParams::default();
-        params.decay_interval = self.decay_interval;
-        params.decay_to_zero = self.decay_to_zero;
-        params.retain_score = self.epoch * 100;
-        params.app_specific_weight = 1.0;
-        params.ip_colocation_factor_threshold = 3.0;
-        params.behaviour_penalty_threshold = 6.0;
-        params.behaviour_penalty_decay = self.score_parameter_decay(self.epoch * 10);
+        let mut params = PeerScoreParams {
+            decay_interval: self.decay_interval,
+            decay_to_zero: self.decay_to_zero,
+            retain_score: self.epoch * 100,
+            app_specific_weight: 1.0,
+            ip_colocation_factor_threshold: 3.0,
+            behaviour_penalty_threshold: 6.0,
+            behaviour_penalty_decay: self.score_parameter_decay(self.epoch * 10),
+            ..Default::default()
+        };
 
         let target_value = Self::decay_convergence(
             params.behaviour_penalty_decay,
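The rewrite above follows Clippy's `field_reassign_with_default` lint, one of the checks that started firing with the 1.49 toolchain: rather than creating a `Default` value and reassigning fields one by one, the struct is built once with the desired fields and `..Default::default()` for the rest. A small sketch with a hypothetical `Params` type:

```rust
#[derive(Debug, Default)]
struct Params {
    decay_interval: u64,
    retain_score: u64,
    app_specific_weight: f64,
    topics: Vec<String>,
}

fn main() {
    // Before (triggers clippy::field_reassign_with_default):
    //     let mut params = Params::default();
    //     params.decay_interval = 12;
    //     params.retain_score = 100;
    //     params.app_specific_weight = 1.0;

    // After: one struct expression, remaining fields taken from Default.
    let params = Params {
        decay_interval: 12,
        retain_score: 100,
        app_specific_weight: 1.0,
        ..Default::default()
    };
    println!("{:?}", params);
}
```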
@@ -97,7 +97,7 @@ impl<T: EthSpec> PeerInfo<T> {
     }
 
     /// Returns the seen IP addresses of the peer.
-    pub fn seen_addresses<'a>(&'a self) -> impl Iterator<Item = IpAddr> + 'a {
+    pub fn seen_addresses(&self) -> impl Iterator<Item = IpAddr> + '_ {
         self.seen_addresses
             .iter()
             .map(|socket_addr| socket_addr.ip())
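This change, and the similar ones to `peers`, `iter_temporary_state_roots`, `hash256_iter`, and `u64_iter` below, address Clippy's `needless_lifetimes`: when the only lifetime in a signature comes from `&self` or a single reference argument, it can be elided, and an `impl Trait` return type can borrow it via the anonymous `'_` lifetime. A compilable sketch with a stand-in type:

```rust
use std::net::{IpAddr, Ipv4Addr, SocketAddr};

struct PeerInfo {
    seen_addresses: Vec<SocketAddr>,
}

impl PeerInfo {
    // Before: fn seen_addresses<'a>(&'a self) -> impl Iterator<Item = IpAddr> + 'a
    // After: the input lifetime is elided and `'_` ties the returned
    // iterator's borrow to `&self`.
    fn seen_addresses(&self) -> impl Iterator<Item = IpAddr> + '_ {
        self.seen_addresses.iter().map(|socket_addr| socket_addr.ip())
    }
}

fn main() {
    let info = PeerInfo {
        seen_addresses: vec![SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9000)],
    };
    assert_eq!(info.seen_addresses().count(), 1);
}
```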
@@ -29,14 +29,12 @@ pub struct SyncInfo {
 
 impl std::cmp::PartialEq for PeerSyncStatus {
     fn eq(&self, other: &Self) -> bool {
-        match (self, other) {
-            (PeerSyncStatus::Synced { .. }, PeerSyncStatus::Synced { .. }) => true,
-            (PeerSyncStatus::Advanced { .. }, PeerSyncStatus::Advanced { .. }) => true,
-            (PeerSyncStatus::Behind { .. }, PeerSyncStatus::Behind { .. }) => true,
-            (PeerSyncStatus::IrrelevantPeer, PeerSyncStatus::IrrelevantPeer) => true,
-            (PeerSyncStatus::Unknown, PeerSyncStatus::Unknown) => true,
-            _ => false,
-        }
+        matches!((self, other),
+            (PeerSyncStatus::Synced { .. }, PeerSyncStatus::Synced { .. }) |
+            (PeerSyncStatus::Advanced { .. }, PeerSyncStatus::Advanced { .. }) |
+            (PeerSyncStatus::Behind { .. }, PeerSyncStatus::Behind { .. }) |
+            (PeerSyncStatus::IrrelevantPeer, PeerSyncStatus::IrrelevantPeer) |
+            (PeerSyncStatus::Unknown, PeerSyncStatus::Unknown))
     }
 }
 
@@ -23,14 +23,12 @@ pub enum SyncState {
 
 impl PartialEq for SyncState {
     fn eq(&self, other: &Self) -> bool {
-        match (self, other) {
-            (SyncState::SyncingFinalized { .. }, SyncState::SyncingFinalized { .. }) => true,
-            (SyncState::SyncingHead { .. }, SyncState::SyncingHead { .. }) => true,
-            (SyncState::Synced, SyncState::Synced) => true,
-            (SyncState::Stalled, SyncState::Stalled) => true,
-            (SyncState::SyncTransition, SyncState::SyncTransition) => true,
-            _ => false,
-        }
+        matches!((self, other),
+            (SyncState::SyncingFinalized { .. }, SyncState::SyncingFinalized { .. }) |
+            (SyncState::SyncingHead { .. }, SyncState::SyncingHead { .. }) |
+            (SyncState::Synced, SyncState::Synced) |
+            (SyncState::Stalled, SyncState::Stalled) |
+            (SyncState::SyncTransition, SyncState::SyncTransition))
     }
 }
 
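Both `PartialEq` implementations above are collapsed with the `matches!` macro (Clippy's `match_like_matches_macro` lint): a `match` whose arms only produce `true` or `false` becomes a single boolean expression with or-patterns. A standalone sketch:

```rust
enum SyncState {
    SyncingHead { start: u64 },
    Synced,
    Stalled,
}

// Equivalent to a `match` over `(a, b)` where matching variant pairs
// return `true` and the wildcard arm returns `false`.
fn same_variant(a: &SyncState, b: &SyncState) -> bool {
    matches!(
        (a, b),
        (SyncState::SyncingHead { .. }, SyncState::SyncingHead { .. })
            | (SyncState::Synced, SyncState::Synced)
            | (SyncState::Stalled, SyncState::Stalled)
    )
}

fn main() {
    assert!(same_variant(&SyncState::Synced, &SyncState::Synced));
    assert!(!same_variant(
        &SyncState::Synced,
        &SyncState::SyncingHead { start: 0 }
    ));
}
```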
@@ -70,11 +70,9 @@ mod tests {
             // Create a new network service which implicitly gets dropped at the
             // end of the block.
 
-            let _ = NetworkService::start(beacon_chain.clone(), &config, executor)
+            let _network_service = NetworkService::start(beacon_chain.clone(), &config, executor)
                 .await
                 .unwrap();
-            // Allow the network task to spawn on the executor before shutting down.
-            tokio::time::sleep(std::time::Duration::from_secs(1)).await;
             drop(signal);
         });
 
@@ -161,7 +161,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
     }
 
     /// Peers currently syncing this chain.
-    pub fn peers<'a>(&'a self) -> impl Iterator<Item = PeerId> + 'a {
+    pub fn peers(&self) -> impl Iterator<Item = PeerId> + '_ {
         self.peers.keys().cloned()
     }
 
@@ -26,9 +26,10 @@ pub fn get_config<E: EthSpec>(
     spec: &ChainSpec,
     log: Logger,
 ) -> Result<ClientConfig, String> {
-    let mut client_config = ClientConfig::default();
-    client_config.data_dir = get_data_dir(cli_args);
+    let mut client_config = ClientConfig {
+        data_dir: get_data_dir(cli_args),
+        ..Default::default()
+    };
 
     // If necessary, remove any existing database and configuration
    if client_config.data_dir.exists() && cli_args.is_present("purge-db") {
@@ -202,9 +202,7 @@ impl<E: EthSpec> HotColdDB<E, LevelDB<E>, LevelDB<E>> {
     }
 
     /// Return an iterator over the state roots of all temporary states.
-    pub fn iter_temporary_state_roots<'a>(
-        &'a self,
-    ) -> impl Iterator<Item = Result<Hash256, Error>> + 'a {
+    pub fn iter_temporary_state_roots(&self) -> impl Iterator<Item = Result<Hash256, Error>> + '_ {
         let column = DBColumn::BeaconStateTemporary;
         let start_key =
             BytesKey::from_vec(get_key_for_col(column.into(), Hash256::zero().as_bytes()));
@@ -12,7 +12,6 @@ use slog::{error, Logger};
 use std::collections::HashSet;
 use std::fs::{self, OpenOptions};
 use std::io;
-use std::iter::FromIterator;
 use std::path::{Path, PathBuf};
 use types::PublicKey;
 use validator_dir::VOTING_KEYSTORE_FILE;
@@ -154,13 +153,16 @@ impl ValidatorDefinitions {
         recursively_find_voting_keystores(validators_dir, &mut keystore_paths)
             .map_err(Error::UnableToSearchForKeystores)?;
 
-        let known_paths: HashSet<&PathBuf> =
-            HashSet::from_iter(self.0.iter().map(|def| match &def.signing_definition {
+        let known_paths: HashSet<&PathBuf> = self
+            .0
+            .iter()
+            .map(|def| match &def.signing_definition {
                 SigningDefinition::LocalKeystore {
                     voting_keystore_path,
                     ..
                 } => voting_keystore_path,
-            }));
+            })
+            .collect();
 
         let mut new_defs = keystore_paths
             .into_iter()
@@ -449,7 +449,7 @@ impl<T: FromStr> TryFrom<String> for QueryVec<T> {
     type Error = String;
 
     fn try_from(string: String) -> Result<Self, Self::Error> {
-        if string == "" {
+        if string.is_empty() {
             return Ok(Self(vec![]));
         }
 
@@ -24,15 +24,15 @@ pub fn u64_leaf_count(len: usize) -> usize {
     (len + vals_per_chunk - 1) / vals_per_chunk
 }
 
-pub fn hash256_iter<'a>(
-    values: &'a [Hash256],
-) -> impl Iterator<Item = [u8; BYTES_PER_CHUNK]> + ExactSizeIterator + 'a {
+pub fn hash256_iter(
+    values: &[Hash256],
+) -> impl Iterator<Item = [u8; BYTES_PER_CHUNK]> + ExactSizeIterator + '_ {
     values.iter().copied().map(Hash256::to_fixed_bytes)
 }
 
-pub fn u64_iter<'a>(
-    values: &'a [u64],
-) -> impl Iterator<Item = [u8; BYTES_PER_CHUNK]> + ExactSizeIterator + 'a {
+pub fn u64_iter(
+    values: &[u64],
+) -> impl Iterator<Item = [u8; BYTES_PER_CHUNK]> + ExactSizeIterator + '_ {
     let type_size = size_of::<u64>();
     let vals_per_chunk = BYTES_PER_CHUNK / type_size;
     values.chunks(vals_per_chunk).map(move |xs| {
@@ -4,7 +4,6 @@ use crate::{
 };
 use ssz_derive::{Decode, Encode};
 use std::collections::HashMap;
-use std::iter::FromIterator;
 use types::{Epoch, Hash256};
 
 #[derive(Encode, Decode)]
@@ -41,7 +40,7 @@ impl From<SszContainer> for ProtoArrayForkChoice {
             justified_epoch: from.justified_epoch,
             finalized_epoch: from.finalized_epoch,
             nodes: from.nodes,
-            indices: HashMap::from_iter(from.indices.into_iter()),
+            indices: from.indices.into_iter().collect::<HashMap<_, _>>(),
         };
 
         Self {
@@ -14,9 +14,7 @@ use syn::{parse_macro_input, DeriveInput};
 ///
 /// # Panics
 /// Any unnamed struct field (like in a tuple struct) will raise a panic at compile time.
-fn get_serializable_named_field_idents<'a>(
-    struct_data: &'a syn::DataStruct,
-) -> Vec<&'a syn::Ident> {
+fn get_serializable_named_field_idents(struct_data: &syn::DataStruct) -> Vec<&syn::Ident> {
     struct_data
         .fields
         .iter()
@@ -35,7 +33,7 @@ fn get_serializable_named_field_idents<'a>(
 
 /// Returns a Vec of `syn::Type` for each named field in the struct, whilst filtering out fields
 /// that should not be serialized.
-fn get_serializable_field_types<'a>(struct_data: &'a syn::DataStruct) -> Vec<&'a syn::Type> {
+fn get_serializable_field_types(struct_data: &syn::DataStruct) -> Vec<&syn::Type> {
     struct_data
         .fields
         .iter()
@@ -44,10 +44,10 @@ impl From<BeaconStateError> for Error {
 }
 
 /// Helper function to get a public key from a `state`.
-pub fn get_pubkey_from_state<'a, T>(
-    state: &'a BeaconState<T>,
+pub fn get_pubkey_from_state<T>(
+    state: &BeaconState<T>,
     validator_index: usize,
-) -> Option<Cow<'a, PublicKey>>
+) -> Option<Cow<PublicKey>>
 where
     T: EthSpec,
 {
@@ -391,7 +391,7 @@ fn invalid_attestation_wrong_justified_checkpoint() {
                 root: Hash256::zero(),
             },
             attestation: Checkpoint {
-                epoch: Epoch::from(0 as u64),
+                epoch: Epoch::from(0_u64),
                 root: Hash256::zero(),
             },
             is_current: true,
@@ -877,7 +877,7 @@ fn invalid_proposer_slashing_proposal_epoch_mismatch() {
         Err(BlockProcessingError::ProposerSlashingInvalid {
             index: 0,
             reason: ProposerSlashingInvalid::ProposalSlotMismatch(
-                Slot::from(0 as u64),
+                Slot::from(0_u64),
                 Slot::from(128 as u64)
             )
         })
@@ -10,7 +10,7 @@ use syn::{parse_macro_input, Attribute, DeriveInput, Meta};
 ///
 /// # Panics
 /// Any unnamed struct field (like in a tuple struct) will raise a panic at compile time.
-fn get_hashable_fields<'a>(struct_data: &'a syn::DataStruct) -> Vec<&'a syn::Ident> {
+fn get_hashable_fields(struct_data: &syn::DataStruct) -> Vec<&syn::Ident> {
     get_hashable_fields_and_their_caches(struct_data)
         .into_iter()
         .map(|(ident, _, _)| ident)
@@ -18,9 +18,9 @@ fn get_hashable_fields<'a>(struct_data: &'a syn::DataStruct) -> Vec<&'a syn::Ident> {
 }
 
 /// Return a Vec of the hashable fields of a struct, and each field's type and optional cache field.
-fn get_hashable_fields_and_their_caches<'a>(
-    struct_data: &'a syn::DataStruct,
-) -> Vec<(&'a syn::Ident, syn::Type, Option<syn::Ident>)> {
+fn get_hashable_fields_and_their_caches(
+    struct_data: &syn::DataStruct,
+) -> Vec<(&syn::Ident, syn::Type, Option<syn::Ident>)> {
     struct_data
         .fields
         .iter()
@@ -68,7 +68,7 @@ impl<T: EthSpec> BeaconBlock<T> {
         };
         let indexed_attestation: IndexedAttestation<T> = IndexedAttestation {
             attesting_indices: VariableList::new(vec![
-                0 as u64;
+                0_u64;
                 T::MaxValidatorsPerCommittee::to_usize()
             ])
             .unwrap(),
@@ -69,7 +69,7 @@ impl TestingAttestationDataBuilder {
             }
             AttestationTestTask::WrongJustifiedCheckpoint => {
                 source = Checkpoint {
-                    epoch: Epoch::from(0 as u64),
+                    epoch: Epoch::from(0_u64),
                     root: Hash256::zero(),
                 }
             }
@@ -35,6 +35,11 @@ pub const MOD_R_L: usize = 48;
 #[zeroize(drop)]
 pub struct DerivedKey(ZeroizeHash);
 
+#[derive(Debug, PartialEq)]
+pub enum Error {
+    EmptySeed,
+}
+
 impl DerivedKey {
     /// Instantiates `Self` from some secret seed bytes.
     ///
@@ -42,10 +47,10 @@ impl DerivedKey {
     ///
     /// ## Errors
     ///
-    /// Returns `Err(())` if `seed.is_empty()`, otherwise always returns `Ok(self)`.
-    pub fn from_seed(seed: &[u8]) -> Result<Self, ()> {
+    /// Returns `Err(Error::EmptySeed)` if `seed.is_empty()`, otherwise always returns `Ok(self)`.
+    pub fn from_seed(seed: &[u8]) -> Result<Self, Error> {
         if seed.is_empty() {
-            Err(())
+            Err(Error::EmptySeed)
         } else {
             Ok(Self(derive_master_sk(seed)))
         }
@@ -8,4 +8,5 @@ mod secret_bytes;
 
 pub use bls::ZeroizeHash;
 pub use derived_key::DerivedKey;
+pub use derived_key::Error as DerivedKeyError;
 pub use plain_text::PlainText;
@@ -5,17 +5,16 @@ use crate::{
     },
     KeyType, ValidatorPath,
 };
+pub use bip39::{Mnemonic, Seed as Bip39Seed};
+pub use eth2_key_derivation::{DerivedKey, DerivedKeyError};
 use eth2_keystore::{
     decrypt, default_kdf, encrypt, keypair_from_secret, Keystore, KeystoreBuilder, IV_SIZE,
     SALT_SIZE,
 };
+pub use eth2_keystore::{Error as KeystoreError, PlainText};
 use rand::prelude::*;
 use serde::{Deserialize, Serialize};
 use std::io::{Read, Write};
 
-pub use bip39::{Mnemonic, Seed as Bip39Seed};
-pub use eth2_key_derivation::DerivedKey;
-pub use eth2_keystore::{Error as KeystoreError, PlainText};
 pub use uuid::Uuid;
 
 #[derive(Debug, PartialEq)]
@@ -24,6 +23,7 @@ pub enum Error {
     PathExhausted,
     EmptyPassword,
     EmptySeed,
+    InvalidNextAccount { old: u32, new: u32 },
 }
 
 impl From<KeystoreError> for Error {
@@ -32,6 +32,14 @@ impl From<KeystoreError> for Error {
     }
 }
 
+impl From<DerivedKeyError> for Error {
+    fn from(e: DerivedKeyError) -> Error {
+        match e {
+            DerivedKeyError::EmptySeed => Error::EmptySeed,
+        }
+    }
+}
+
 /// Contains the two keystores required for an eth2 validator.
 pub struct ValidatorKeystores {
     /// Contains the secret key used for signing every-day consensus messages (blocks,
@@ -222,12 +230,15 @@ impl Wallet {
     ///
     /// Returns `Err(())` if `nextaccount` is less than `self.nextaccount()` without mutating
     /// `self`. This is to protect against duplicate validator generation.
-    pub fn set_nextaccount(&mut self, nextaccount: u32) -> Result<(), ()> {
+    pub fn set_nextaccount(&mut self, nextaccount: u32) -> Result<(), Error> {
         if nextaccount >= self.nextaccount() {
            self.json.nextaccount = nextaccount;
            Ok(())
        } else {
-            Err(())
+            Err(Error::InvalidNextAccount {
+                old: self.json.nextaccount,
+                new: nextaccount,
+            })
        }
     }
 
@@ -295,7 +306,7 @@ pub fn recover_validator_secret(
 ) -> Result<(PlainText, ValidatorPath), Error> {
     let path = ValidatorPath::new(index, key_type);
     let secret = wallet.decrypt_seed(wallet_password)?;
-    let master = DerivedKey::from_seed(secret.as_bytes()).map_err(|()| Error::EmptyPassword)?;
+    let master = DerivedKey::from_seed(secret.as_bytes()).map_err(Error::from)?;
 
     let destination = path.iter_nodes().fold(master, |dk, i| dk.child(*i));
 
@@ -311,7 +322,7 @@ pub fn recover_validator_secret_from_mnemonic(
     key_type: KeyType,
 ) -> Result<(PlainText, ValidatorPath), Error> {
     let path = ValidatorPath::new(index, key_type);
-    let master = DerivedKey::from_seed(secret).map_err(|()| Error::EmptyPassword)?;
+    let master = DerivedKey::from_seed(secret).map_err(Error::from)?;
 
     let destination = path.iter_nodes().fold(master, |dk, i| dk.child(*i));
 
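The wallet and key-derivation changes above replace `Result<_, ()>` with proper error enums (`DerivedKeyError`, `Error::InvalidNextAccount`) plus a `From` impl, so callers can use `?` and `map_err(Error::from)` instead of `map_err(|()| ...)`. A simplified, self-contained sketch of that shape (type and function names here are illustrative, not the crate's exact API):

```rust
#[derive(Debug, PartialEq)]
enum DerivedKeyError {
    EmptySeed,
}

#[derive(Debug, PartialEq)]
enum WalletError {
    EmptySeed,
    InvalidNextAccount { old: u32, new: u32 },
}

impl From<DerivedKeyError> for WalletError {
    fn from(e: DerivedKeyError) -> WalletError {
        match e {
            DerivedKeyError::EmptySeed => WalletError::EmptySeed,
        }
    }
}

// Stand-in for `DerivedKey::from_seed`: fails with a typed error on empty input.
fn derive_master(seed: &[u8]) -> Result<Vec<u8>, DerivedKeyError> {
    if seed.is_empty() {
        Err(DerivedKeyError::EmptySeed)
    } else {
        Ok(seed.to_vec())
    }
}

// Stand-in for `recover_validator_secret`: the `?` operator converts the
// inner error through the `From` impl, replacing the old `map_err(|()| ...)`.
fn recover(seed: &[u8]) -> Result<Vec<u8>, WalletError> {
    let master = derive_master(seed)?;
    Ok(master)
}

fn main() {
    assert_eq!(recover(&[]), Err(WalletError::EmptySeed));
    assert!(recover(&[1, 2, 3]).is_ok());
}
```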
@@ -25,10 +25,12 @@ pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> {
         ));
     }
 
-    let mut config = NetworkConfig::default();
-    config.enr_address = Some(ip);
-    config.enr_udp_port = Some(udp_port);
-    config.enr_tcp_port = Some(tcp_port);
+    let config = NetworkConfig {
+        enr_address: Some(ip),
+        enr_udp_port: Some(udp_port),
+        enr_tcp_port: Some(tcp_port),
+        ..Default::default()
+    };
 
     let local_keypair = Keypair::generate_secp256k1();
     let enr_key = CombinedKey::from_libp2p(&local_keypair)?;
@@ -1,7 +1,6 @@
 use slog::Logger;
 use sloggers::Build;
 use std::collections::HashSet;
-use std::iter::FromIterator;
 use types::{
     AggregateSignature, AttestationData, AttesterSlashing, BeaconBlockHeader, Checkpoint, Epoch,
     Hash256, IndexedAttestation, MainnetEthSpec, Signature, SignedBeaconBlockHeader, Slot,
@@ -59,8 +58,14 @@ pub fn hashset_intersection(
     attestation_1_indices: &[u64],
     attestation_2_indices: &[u64],
 ) -> HashSet<u64> {
-    &HashSet::from_iter(attestation_1_indices.iter().copied())
-        & &HashSet::from_iter(attestation_2_indices.iter().copied())
+    &attestation_1_indices
+        .iter()
+        .copied()
+        .collect::<HashSet<u64>>()
+        & &attestation_2_indices
+            .iter()
+            .copied()
+            .collect::<HashSet<u64>>()
 }
 
 pub fn slashed_validators_from_slashings(slashings: &HashSet<AttesterSlashing<E>>) -> HashSet<u64> {
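`hashset_intersection` relies on the standard library's `BitAnd` implementation for `&HashSet`: `&a & &b` allocates a new set containing the elements present in both. A small sketch of the idiom:

```rust
use std::collections::HashSet;

fn main() {
    let a: HashSet<u64> = [1, 2, 3].iter().copied().collect();
    let b: HashSet<u64> = [2, 3, 4].iter().copied().collect();

    // `&a & &b` uses `impl BitAnd for &HashSet` and returns a new
    // HashSet holding the intersection.
    let both = &a & &b;

    let expected: HashSet<u64> = [2, 3].iter().copied().collect();
    assert_eq!(both, expected);
}
```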
@@ -102,10 +102,7 @@ pub fn get_block<E: EthSpec>(seed: u64) -> BeaconBlock<E> {
         signature: Signature::empty(),
     };
     let indexed_attestation: IndexedAttestation<E> = IndexedAttestation {
-        attesting_indices: VariableList::new(vec![
-            0 as u64;
-            E::MaxValidatorsPerCommittee::to_usize()
-        ])
+        attesting_indices: VariableList::new(vec![0_u64; E::MaxValidatorsPerCommittee::to_usize()])
         .unwrap(),
         data: AttestationData::default(),
         signature: AggregateSignature::empty(),
@@ -1,6 +1,5 @@
 use serde_derive::{Deserialize, Serialize};
 use std::collections::HashSet;
-use std::iter::FromIterator;
 use types::{Epoch, Hash256, PublicKey, Slot};
 
 #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
@@ -60,8 +59,8 @@ impl Interchange {
 
     /// Do these two `Interchange`s contain the same data (ignoring ordering)?
     pub fn equiv(&self, other: &Self) -> bool {
-        let self_set = HashSet::<_>::from_iter(self.data.iter());
-        let other_set = HashSet::<_>::from_iter(other.data.iter());
+        let self_set = self.data.iter().collect::<HashSet<_>>();
+        let other_set = other.data.iter().collect::<HashSet<_>>();
         self.metadata == other.metadata && self_set == other_set
     }
 
@@ -43,8 +43,11 @@ pub async fn create_validators<P: AsRef<Path>, T: 'static + SlotClock, E: EthSpec>(
         })?;
 
     if let Some(nextaccount) = key_derivation_path_offset {
-        wallet.set_nextaccount(nextaccount).map_err(|()| {
-            warp_utils::reject::custom_server_error("unable to set wallet nextaccount".to_string())
+        wallet.set_nextaccount(nextaccount).map_err(|e| {
+            warp_utils::reject::custom_server_error(format!(
+                "unable to set wallet nextaccount: {:?}",
+                e
+            ))
         })?;
     }
 