Clippy 1.49.0 updates and dht persistence test fix (#2156)
## Issue Addressed

`test_dht_persistence` failing.

## Proposed Changes

Bind the result of `NetworkService::start` to an underscore-prefixed variable rather than `_`. Binding to `_` was causing it to be dropped immediately. The test was failing 5/100 times before this update, but I haven't been able to get it to fail after updating it.

Co-authored-by: realbigsean <seananderson33@gmail.com>
parent e5b1a37110 · commit 7a71977987
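For background on the `test_dht_persistence` fix above: in Rust, `let _ = value;` does not create a binding at all, so the value is dropped as soon as the statement ends, while an underscore-prefixed name such as `let _network_service = value;` keeps the value alive (and the unused-variable lint silent) until the end of the enclosing scope. Below is a minimal, self-contained sketch of that difference; the `Guard` type is hypothetical and not part of the Lighthouse codebase.

```rust
struct Guard(&'static str);

impl Drop for Guard {
    fn drop(&mut self) {
        println!("dropped: {}", self.0);
    }
}

fn main() {
    // `_` is a wildcard pattern, not a binding: the Guard is dropped
    // as soon as this statement finishes executing.
    let _ = Guard("wildcard");
    println!("after wildcard binding");

    // An underscore-prefixed name is a real binding: the Guard stays
    // alive until the end of `main`.
    let _guard = Guard("named");
    println!("after named binding");
}

// Expected output order:
//   dropped: wildcard
//   after wildcard binding
//   after named binding
//   dropped: named
```

Under the old `let _ = NetworkService::start(...)` binding, the service could therefore be torn down before the spawned network task ever ran, which lines up with the intermittent failures described above.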
@@ -1,7 +1,6 @@
 use parking_lot::RwLock;
 use ssz_derive::{Decode, Encode};
 use std::collections::HashMap;
-use std::iter::FromIterator;
 use types::{Hash256, Slot};

 #[derive(Debug, PartialEq)]
@@ -61,13 +60,12 @@ impl HeadTracker {
                 slots_len,
             })
         } else {
-            let map = HashMap::from_iter(
-                ssz_container
+            let map = ssz_container
                 .roots
                 .iter()
                 .zip(ssz_container.slots.iter())
-                .map(|(root, slot)| (*root, *slot)),
-            );
+                .map(|(root, slot)| (*root, *slot))
+                .collect::<HashMap<_, _>>();

             Ok(Self(RwLock::new(map)))
         }
@@ -2,7 +2,6 @@ use derivative::Derivative;
 use smallvec::SmallVec;
 use state_processing::{SigVerifiedOp, VerifyOperation};
 use std::collections::HashSet;
-use std::iter::FromIterator;
 use std::marker::PhantomData;
 use types::{
     AttesterSlashing, BeaconState, ChainSpec, EthSpec, ProposerSlashing, SignedVoluntaryExit,
@@ -57,10 +56,18 @@ impl<E: EthSpec> ObservableOperation<E> for ProposerSlashing {

 impl<E: EthSpec> ObservableOperation<E> for AttesterSlashing<E> {
     fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> {
-        let attestation_1_indices =
-            HashSet::<u64>::from_iter(self.attestation_1.attesting_indices.iter().copied());
-        let attestation_2_indices =
-            HashSet::<u64>::from_iter(self.attestation_2.attesting_indices.iter().copied());
+        let attestation_1_indices = self
+            .attestation_1
+            .attesting_indices
+            .iter()
+            .copied()
+            .collect::<HashSet<u64>>();
+        let attestation_2_indices = self
+            .attestation_2
+            .attesting_indices
+            .iter()
+            .copied()
+            .collect::<HashSet<u64>>();
         attestation_1_indices
             .intersection(&attestation_2_indices)
             .copied()
@@ -878,7 +878,13 @@ impl Service {
             // imported if any one of them cannot be parsed.
             .collect::<Result<Vec<_>, _>>()?
             .into_iter()
-            .map(|deposit_log| {
+            // Returns if a deposit is unable to be added to the cache.
+            //
+            // If this error occurs, the cache will no longer be guaranteed to hold either
+            // none or all of the logs for each block (i.e., they may exist _some_ logs for
+            // a block, but not _all_ logs for that block). This scenario can cause the
+            // node to choose an invalid genesis state or propose an invalid block.
+            .try_for_each(|deposit_log| {
                 if let DepositCacheInsertOutcome::Inserted = cache
                     .cache
                     .insert_log(deposit_log)
@@ -888,14 +894,7 @@ impl Service {
                 }

                 Ok(())
-            })
-            // Returns if a deposit is unable to be added to the cache.
-            //
-            // If this error occurs, the cache will no longer be guaranteed to hold either
-            // none or all of the logs for each block (i.e., they may exist _some_ logs for
-            // a block, but not _all_ logs for that block). This scenario can cause the
-            // node to choose an invalid genesis state or propose an invalid block.
-            .collect::<Result<_, _>>()?;
+            })?;

         debug!(
             self.log,
@@ -70,16 +70,16 @@ impl<TSpec: EthSpec> PeerScoreSettings<TSpec> {
         enr_fork_id: &EnrForkId,
         current_slot: Slot,
     ) -> error::Result<PeerScoreParams> {
-        let mut params = PeerScoreParams::default();
-
-        params.decay_interval = self.decay_interval;
-        params.decay_to_zero = self.decay_to_zero;
-        params.retain_score = self.epoch * 100;
-        params.app_specific_weight = 1.0;
-        params.ip_colocation_factor_threshold = 3.0;
-        params.behaviour_penalty_threshold = 6.0;
-
-        params.behaviour_penalty_decay = self.score_parameter_decay(self.epoch * 10);
+        let mut params = PeerScoreParams {
+            decay_interval: self.decay_interval,
+            decay_to_zero: self.decay_to_zero,
+            retain_score: self.epoch * 100,
+            app_specific_weight: 1.0,
+            ip_colocation_factor_threshold: 3.0,
+            behaviour_penalty_threshold: 6.0,
+            behaviour_penalty_decay: self.score_parameter_decay(self.epoch * 10),
+            ..Default::default()
+        };

         let target_value = Self::decay_convergence(
             params.behaviour_penalty_decay,
@@ -97,7 +97,7 @@ impl<T: EthSpec> PeerInfo<T> {
     }

     /// Returns the seen IP addresses of the peer.
-    pub fn seen_addresses<'a>(&'a self) -> impl Iterator<Item = IpAddr> + 'a {
+    pub fn seen_addresses(&self) -> impl Iterator<Item = IpAddr> + '_ {
         self.seen_addresses
             .iter()
             .map(|socket_addr| socket_addr.ip())
@@ -29,14 +29,12 @@ pub struct SyncInfo {

 impl std::cmp::PartialEq for PeerSyncStatus {
     fn eq(&self, other: &Self) -> bool {
-        match (self, other) {
-            (PeerSyncStatus::Synced { .. }, PeerSyncStatus::Synced { .. }) => true,
-            (PeerSyncStatus::Advanced { .. }, PeerSyncStatus::Advanced { .. }) => true,
-            (PeerSyncStatus::Behind { .. }, PeerSyncStatus::Behind { .. }) => true,
-            (PeerSyncStatus::IrrelevantPeer, PeerSyncStatus::IrrelevantPeer) => true,
-            (PeerSyncStatus::Unknown, PeerSyncStatus::Unknown) => true,
-            _ => false,
-        }
+        matches!((self, other),
+            (PeerSyncStatus::Synced { .. }, PeerSyncStatus::Synced { .. }) |
+            (PeerSyncStatus::Advanced { .. }, PeerSyncStatus::Advanced { .. }) |
+            (PeerSyncStatus::Behind { .. }, PeerSyncStatus::Behind { .. }) |
+            (PeerSyncStatus::IrrelevantPeer, PeerSyncStatus::IrrelevantPeer) |
+            (PeerSyncStatus::Unknown, PeerSyncStatus::Unknown))
     }
 }

@@ -23,14 +23,12 @@ pub enum SyncState {

 impl PartialEq for SyncState {
     fn eq(&self, other: &Self) -> bool {
-        match (self, other) {
-            (SyncState::SyncingFinalized { .. }, SyncState::SyncingFinalized { .. }) => true,
-            (SyncState::SyncingHead { .. }, SyncState::SyncingHead { .. }) => true,
-            (SyncState::Synced, SyncState::Synced) => true,
-            (SyncState::Stalled, SyncState::Stalled) => true,
-            (SyncState::SyncTransition, SyncState::SyncTransition) => true,
-            _ => false,
-        }
+        matches!((self, other),
+            (SyncState::SyncingFinalized { .. }, SyncState::SyncingFinalized { .. }) |
+            (SyncState::SyncingHead { .. }, SyncState::SyncingHead { .. }) |
+            (SyncState::Synced, SyncState::Synced) |
+            (SyncState::Stalled, SyncState::Stalled) |
+            (SyncState::SyncTransition, SyncState::SyncTransition))
     }
 }

@@ -70,11 +70,9 @@ mod tests {
             // Create a new network service which implicitly gets dropped at the
             // end of the block.

-            let _ = NetworkService::start(beacon_chain.clone(), &config, executor)
+            let _network_service = NetworkService::start(beacon_chain.clone(), &config, executor)
                 .await
                 .unwrap();
-            // Allow the network task to spawn on the executor before shutting down.
-            tokio::time::sleep(std::time::Duration::from_secs(1)).await;
            drop(signal);
         });

@@ -161,7 +161,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
     }

     /// Peers currently syncing this chain.
-    pub fn peers<'a>(&'a self) -> impl Iterator<Item = PeerId> + 'a {
+    pub fn peers(&self) -> impl Iterator<Item = PeerId> + '_ {
         self.peers.keys().cloned()
     }

@@ -26,9 +26,10 @@ pub fn get_config<E: EthSpec>(
     spec: &ChainSpec,
     log: Logger,
 ) -> Result<ClientConfig, String> {
-    let mut client_config = ClientConfig::default();
-
-    client_config.data_dir = get_data_dir(cli_args);
+    let mut client_config = ClientConfig {
+        data_dir: get_data_dir(cli_args),
+        ..Default::default()
+    };

     // If necessary, remove any existing database and configuration
     if client_config.data_dir.exists() && cli_args.is_present("purge-db") {
@@ -202,9 +202,7 @@ impl<E: EthSpec> HotColdDB<E, LevelDB<E>, LevelDB<E>> {
     }

     /// Return an iterator over the state roots of all temporary states.
-    pub fn iter_temporary_state_roots<'a>(
-        &'a self,
-    ) -> impl Iterator<Item = Result<Hash256, Error>> + 'a {
+    pub fn iter_temporary_state_roots(&self) -> impl Iterator<Item = Result<Hash256, Error>> + '_ {
         let column = DBColumn::BeaconStateTemporary;
         let start_key =
             BytesKey::from_vec(get_key_for_col(column.into(), Hash256::zero().as_bytes()));
@@ -12,7 +12,6 @@ use slog::{error, Logger};
 use std::collections::HashSet;
 use std::fs::{self, OpenOptions};
 use std::io;
-use std::iter::FromIterator;
 use std::path::{Path, PathBuf};
 use types::PublicKey;
 use validator_dir::VOTING_KEYSTORE_FILE;
@@ -154,13 +153,16 @@ impl ValidatorDefinitions {
         recursively_find_voting_keystores(validators_dir, &mut keystore_paths)
             .map_err(Error::UnableToSearchForKeystores)?;

-        let known_paths: HashSet<&PathBuf> =
-            HashSet::from_iter(self.0.iter().map(|def| match &def.signing_definition {
+        let known_paths: HashSet<&PathBuf> = self
+            .0
+            .iter()
+            .map(|def| match &def.signing_definition {
                 SigningDefinition::LocalKeystore {
                     voting_keystore_path,
                     ..
                 } => voting_keystore_path,
-            }));
+            })
+            .collect();

         let mut new_defs = keystore_paths
             .into_iter()
@@ -449,7 +449,7 @@ impl<T: FromStr> TryFrom<String> for QueryVec<T> {
     type Error = String;

     fn try_from(string: String) -> Result<Self, Self::Error> {
-        if string == "" {
+        if string.is_empty() {
             return Ok(Self(vec![]));
         }

@@ -24,15 +24,15 @@ pub fn u64_leaf_count(len: usize) -> usize {
     (len + vals_per_chunk - 1) / vals_per_chunk
 }

-pub fn hash256_iter<'a>(
-    values: &'a [Hash256],
-) -> impl Iterator<Item = [u8; BYTES_PER_CHUNK]> + ExactSizeIterator + 'a {
+pub fn hash256_iter(
+    values: &[Hash256],
+) -> impl Iterator<Item = [u8; BYTES_PER_CHUNK]> + ExactSizeIterator + '_ {
     values.iter().copied().map(Hash256::to_fixed_bytes)
 }

-pub fn u64_iter<'a>(
-    values: &'a [u64],
-) -> impl Iterator<Item = [u8; BYTES_PER_CHUNK]> + ExactSizeIterator + 'a {
+pub fn u64_iter(
+    values: &[u64],
+) -> impl Iterator<Item = [u8; BYTES_PER_CHUNK]> + ExactSizeIterator + '_ {
     let type_size = size_of::<u64>();
     let vals_per_chunk = BYTES_PER_CHUNK / type_size;
     values.chunks(vals_per_chunk).map(move |xs| {
@@ -4,7 +4,6 @@ use crate::{
 };
 use ssz_derive::{Decode, Encode};
 use std::collections::HashMap;
-use std::iter::FromIterator;
 use types::{Epoch, Hash256};

 #[derive(Encode, Decode)]
@@ -41,7 +40,7 @@ impl From<SszContainer> for ProtoArrayForkChoice {
             justified_epoch: from.justified_epoch,
             finalized_epoch: from.finalized_epoch,
             nodes: from.nodes,
-            indices: HashMap::from_iter(from.indices.into_iter()),
+            indices: from.indices.into_iter().collect::<HashMap<_, _>>(),
         };

         Self {
@@ -14,9 +14,7 @@ use syn::{parse_macro_input, DeriveInput};
 ///
 /// # Panics
 /// Any unnamed struct field (like in a tuple struct) will raise a panic at compile time.
-fn get_serializable_named_field_idents<'a>(
-    struct_data: &'a syn::DataStruct,
-) -> Vec<&'a syn::Ident> {
+fn get_serializable_named_field_idents(struct_data: &syn::DataStruct) -> Vec<&syn::Ident> {
     struct_data
         .fields
         .iter()
@@ -35,7 +33,7 @@ fn get_serializable_named_field_idents<'a>(

 /// Returns a Vec of `syn::Type` for each named field in the struct, whilst filtering out fields
 /// that should not be serialized.
-fn get_serializable_field_types<'a>(struct_data: &'a syn::DataStruct) -> Vec<&'a syn::Type> {
+fn get_serializable_field_types(struct_data: &syn::DataStruct) -> Vec<&syn::Type> {
     struct_data
         .fields
         .iter()
@@ -44,10 +44,10 @@ impl From<BeaconStateError> for Error {
 }

 /// Helper function to get a public key from a `state`.
-pub fn get_pubkey_from_state<'a, T>(
-    state: &'a BeaconState<T>,
+pub fn get_pubkey_from_state<T>(
+    state: &BeaconState<T>,
     validator_index: usize,
-) -> Option<Cow<'a, PublicKey>>
+) -> Option<Cow<PublicKey>>
 where
     T: EthSpec,
 {
@@ -391,7 +391,7 @@ fn invalid_attestation_wrong_justified_checkpoint() {
             root: Hash256::zero(),
         },
         attestation: Checkpoint {
-            epoch: Epoch::from(0 as u64),
+            epoch: Epoch::from(0_u64),
             root: Hash256::zero(),
         },
         is_current: true,
@@ -877,7 +877,7 @@ fn invalid_proposer_slashing_proposal_epoch_mismatch() {
         Err(BlockProcessingError::ProposerSlashingInvalid {
             index: 0,
             reason: ProposerSlashingInvalid::ProposalSlotMismatch(
-                Slot::from(0 as u64),
+                Slot::from(0_u64),
                 Slot::from(128 as u64)
             )
         })
@@ -10,7 +10,7 @@ use syn::{parse_macro_input, Attribute, DeriveInput, Meta};
 ///
 /// # Panics
 /// Any unnamed struct field (like in a tuple struct) will raise a panic at compile time.
-fn get_hashable_fields<'a>(struct_data: &'a syn::DataStruct) -> Vec<&'a syn::Ident> {
+fn get_hashable_fields(struct_data: &syn::DataStruct) -> Vec<&syn::Ident> {
     get_hashable_fields_and_their_caches(struct_data)
         .into_iter()
         .map(|(ident, _, _)| ident)
@@ -18,9 +18,9 @@ fn get_hashable_fields<'a>(struct_data: &'a syn::DataStruct) -> Vec<&'a syn::Ide
 }

 /// Return a Vec of the hashable fields of a struct, and each field's type and optional cache field.
-fn get_hashable_fields_and_their_caches<'a>(
-    struct_data: &'a syn::DataStruct,
-) -> Vec<(&'a syn::Ident, syn::Type, Option<syn::Ident>)> {
+fn get_hashable_fields_and_their_caches(
+    struct_data: &syn::DataStruct,
+) -> Vec<(&syn::Ident, syn::Type, Option<syn::Ident>)> {
     struct_data
         .fields
         .iter()
@@ -68,7 +68,7 @@ impl<T: EthSpec> BeaconBlock<T> {
         };
         let indexed_attestation: IndexedAttestation<T> = IndexedAttestation {
             attesting_indices: VariableList::new(vec![
-                0 as u64;
+                0_u64;
                 T::MaxValidatorsPerCommittee::to_usize()
             ])
             .unwrap(),
@@ -69,7 +69,7 @@ impl TestingAttestationDataBuilder {
             }
             AttestationTestTask::WrongJustifiedCheckpoint => {
                 source = Checkpoint {
-                    epoch: Epoch::from(0 as u64),
+                    epoch: Epoch::from(0_u64),
                     root: Hash256::zero(),
                 }
             }
@@ -35,6 +35,11 @@ pub const MOD_R_L: usize = 48;
 #[zeroize(drop)]
 pub struct DerivedKey(ZeroizeHash);

+#[derive(Debug, PartialEq)]
+pub enum Error {
+    EmptySeed,
+}
+
 impl DerivedKey {
     /// Instantiates `Self` from some secret seed bytes.
     ///
@@ -42,10 +47,10 @@ impl DerivedKey {
     ///
     /// ## Errors
     ///
-    /// Returns `Err(())` if `seed.is_empty()`, otherwise always returns `Ok(self)`.
-    pub fn from_seed(seed: &[u8]) -> Result<Self, ()> {
+    /// Returns `Err(Error::EmptySeed)` if `seed.is_empty()`, otherwise always returns `Ok(self)`.
+    pub fn from_seed(seed: &[u8]) -> Result<Self, Error> {
         if seed.is_empty() {
-            Err(())
+            Err(Error::EmptySeed)
         } else {
             Ok(Self(derive_master_sk(seed)))
         }
@@ -8,4 +8,5 @@ mod secret_bytes;

 pub use bls::ZeroizeHash;
 pub use derived_key::DerivedKey;
+pub use derived_key::Error as DerivedKeyError;
 pub use plain_text::PlainText;
@@ -5,17 +5,16 @@ use crate::{
     },
     KeyType, ValidatorPath,
 };
+pub use bip39::{Mnemonic, Seed as Bip39Seed};
+pub use eth2_key_derivation::{DerivedKey, DerivedKeyError};
 use eth2_keystore::{
     decrypt, default_kdf, encrypt, keypair_from_secret, Keystore, KeystoreBuilder, IV_SIZE,
     SALT_SIZE,
 };
+pub use eth2_keystore::{Error as KeystoreError, PlainText};
 use rand::prelude::*;
 use serde::{Deserialize, Serialize};
 use std::io::{Read, Write};
-
-pub use bip39::{Mnemonic, Seed as Bip39Seed};
-pub use eth2_key_derivation::DerivedKey;
-pub use eth2_keystore::{Error as KeystoreError, PlainText};
 pub use uuid::Uuid;

 #[derive(Debug, PartialEq)]
@@ -24,6 +23,7 @@ pub enum Error {
     PathExhausted,
     EmptyPassword,
     EmptySeed,
+    InvalidNextAccount { old: u32, new: u32 },
 }

 impl From<KeystoreError> for Error {
@@ -32,6 +32,14 @@ impl From<KeystoreError> for Error {
     }
 }

+impl From<DerivedKeyError> for Error {
+    fn from(e: DerivedKeyError) -> Error {
+        match e {
+            DerivedKeyError::EmptySeed => Error::EmptySeed,
+        }
+    }
+}
+
 /// Contains the two keystores required for an eth2 validator.
 pub struct ValidatorKeystores {
     /// Contains the secret key used for signing every-day consensus messages (blocks,
@@ -222,12 +230,15 @@ impl Wallet {
     ///
     /// Returns `Err(())` if `nextaccount` is less than `self.nextaccount()` without mutating
     /// `self`. This is to protect against duplicate validator generation.
-    pub fn set_nextaccount(&mut self, nextaccount: u32) -> Result<(), ()> {
+    pub fn set_nextaccount(&mut self, nextaccount: u32) -> Result<(), Error> {
         if nextaccount >= self.nextaccount() {
             self.json.nextaccount = nextaccount;
             Ok(())
         } else {
-            Err(())
+            Err(Error::InvalidNextAccount {
+                old: self.json.nextaccount,
+                new: nextaccount,
+            })
         }
     }

@@ -295,7 +306,7 @@ pub fn recover_validator_secret(
 ) -> Result<(PlainText, ValidatorPath), Error> {
     let path = ValidatorPath::new(index, key_type);
     let secret = wallet.decrypt_seed(wallet_password)?;
-    let master = DerivedKey::from_seed(secret.as_bytes()).map_err(|()| Error::EmptyPassword)?;
+    let master = DerivedKey::from_seed(secret.as_bytes()).map_err(Error::from)?;

     let destination = path.iter_nodes().fold(master, |dk, i| dk.child(*i));

@@ -311,7 +322,7 @@ pub fn recover_validator_secret_from_mnemonic(
     key_type: KeyType,
 ) -> Result<(PlainText, ValidatorPath), Error> {
     let path = ValidatorPath::new(index, key_type);
-    let master = DerivedKey::from_seed(secret).map_err(|()| Error::EmptyPassword)?;
+    let master = DerivedKey::from_seed(secret).map_err(Error::from)?;

     let destination = path.iter_nodes().fold(master, |dk, i| dk.child(*i));

@@ -25,10 +25,12 @@ pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> {
         ));
     }

-    let mut config = NetworkConfig::default();
-    config.enr_address = Some(ip);
-    config.enr_udp_port = Some(udp_port);
-    config.enr_tcp_port = Some(tcp_port);
+    let config = NetworkConfig {
+        enr_address: Some(ip),
+        enr_udp_port: Some(udp_port),
+        enr_tcp_port: Some(tcp_port),
+        ..Default::default()
+    };

     let local_keypair = Keypair::generate_secp256k1();
     let enr_key = CombinedKey::from_libp2p(&local_keypair)?;
@@ -1,7 +1,6 @@
 use slog::Logger;
 use sloggers::Build;
 use std::collections::HashSet;
-use std::iter::FromIterator;
 use types::{
     AggregateSignature, AttestationData, AttesterSlashing, BeaconBlockHeader, Checkpoint, Epoch,
     Hash256, IndexedAttestation, MainnetEthSpec, Signature, SignedBeaconBlockHeader, Slot,
@@ -59,8 +58,14 @@ pub fn hashset_intersection(
     attestation_1_indices: &[u64],
     attestation_2_indices: &[u64],
 ) -> HashSet<u64> {
-    &HashSet::from_iter(attestation_1_indices.iter().copied())
-        & &HashSet::from_iter(attestation_2_indices.iter().copied())
+    &attestation_1_indices
+        .iter()
+        .copied()
+        .collect::<HashSet<u64>>()
+        & &attestation_2_indices
+            .iter()
+            .copied()
+            .collect::<HashSet<u64>>()
 }

 pub fn slashed_validators_from_slashings(slashings: &HashSet<AttesterSlashing<E>>) -> HashSet<u64> {
@@ -102,10 +102,7 @@ pub fn get_block<E: EthSpec>(seed: u64) -> BeaconBlock<E> {
         signature: Signature::empty(),
     };
     let indexed_attestation: IndexedAttestation<E> = IndexedAttestation {
-        attesting_indices: VariableList::new(vec![
-            0 as u64;
-            E::MaxValidatorsPerCommittee::to_usize()
-        ])
+        attesting_indices: VariableList::new(vec![0_u64; E::MaxValidatorsPerCommittee::to_usize()])
         .unwrap(),
         data: AttestationData::default(),
         signature: AggregateSignature::empty(),
@@ -1,6 +1,5 @@
 use serde_derive::{Deserialize, Serialize};
 use std::collections::HashSet;
-use std::iter::FromIterator;
 use types::{Epoch, Hash256, PublicKey, Slot};

 #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
@@ -60,8 +59,8 @@ impl Interchange {

     /// Do these two `Interchange`s contain the same data (ignoring ordering)?
     pub fn equiv(&self, other: &Self) -> bool {
-        let self_set = HashSet::<_>::from_iter(self.data.iter());
-        let other_set = HashSet::<_>::from_iter(other.data.iter());
+        let self_set = self.data.iter().collect::<HashSet<_>>();
+        let other_set = other.data.iter().collect::<HashSet<_>>();
         self.metadata == other.metadata && self_set == other_set
     }

@@ -43,8 +43,11 @@ pub async fn create_validators<P: AsRef<Path>, T: 'static + SlotClock, E: EthSpe
         })?;

     if let Some(nextaccount) = key_derivation_path_offset {
-        wallet.set_nextaccount(nextaccount).map_err(|()| {
-            warp_utils::reject::custom_server_error("unable to set wallet nextaccount".to_string())
+        wallet.set_nextaccount(nextaccount).map_err(|e| {
+            warp_utils::reject::custom_server_error(format!(
+                "unable to set wallet nextaccount: {:?}",
+                e
+            ))
         })?;
     }
