Fix clippy warnings (#1385)
## Issue Addressed

NA

## Proposed Changes

Fixes most clippy warnings and ignores the rest of them; see issue #1388.
This commit is contained in:
parent ba10c80633
commit 23a8f31f83
@@ -53,7 +53,7 @@ test-full: cargo-fmt test-release test-debug test-ef
 # Lints the code for bad style and potentially unsafe arithmetic using Clippy.
 # Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints.
 lint:
-	cargo clippy --all -- -A clippy::all --D clippy::perf --D clippy::correctness
+	cargo clippy --all -- -D warnings

 # Runs the makefile in the `ef_tests` repo.
 #
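For context: the old invocation allowed every clippy lint (`-A clippy::all`) and then re-denied only the `perf` and `correctness` groups, while the new `-D warnings` flag promotes every compiler and clippy warning to a hard error. A minimal sketch (hypothetical crate) of code that would now fail `make lint`:

```rust
fn main() {
    // rustc's `unused_variables` lint is normally only a warning; under
    // `cargo clippy --all -- -D warnings` it becomes an error and fails the build.
    let unused = 42;
}
```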
@@ -112,7 +112,7 @@ fn upgrade_keypair<P: AsRef<Path>>(
     let validator_dir = validator_dir.as_ref();
     let secrets_dir = secrets_dir.as_ref();

-    let keypair: Keypair = load_unencrypted_keypair(validator_dir.join(input_filename))?.into();
+    let keypair: Keypair = load_unencrypted_keypair(validator_dir.join(input_filename))?;

     let password = rand::thread_rng()
         .sample_iter(&Alphanumeric)
@@ -136,7 +136,7 @@ fn upgrade_keypair<P: AsRef<Path>>(
         .to_json_writer(&mut file)
         .map_err(|e| format!("Cannot write keystore to {:?}: {:?}", keystore_path, e))?;

-    let password_path = secrets_dir.join(format!("{}", keypair.pk.as_hex_string()));
+    let password_path = secrets_dir.join(keypair.pk.as_hex_string());

     if password_path.exists() {
         return Err(format!("{:?} already exists", password_path));
@@ -1,6 +1,5 @@
 use crate::VALIDATOR_DIR_FLAG;
 use clap::{App, Arg, ArgMatches};
-use clap_utils;
 use deposit_contract::DEPOSIT_GAS;
 use environment::Environment;
 use futures::compat::Future01CompatExt;
@@ -28,11 +28,9 @@ pub fn cli_run<T: EthSpec>(matches: &ArgMatches, env: Environment<T>) -> Result<
     match matches.subcommand() {
        (create::CMD, Some(matches)) => create::cli_run::<T>(matches, env, base_wallet_dir),
        (deposit::CMD, Some(matches)) => deposit::cli_run::<T>(matches, env),
-        (unknown, _) => {
-            return Err(format!(
-                "{} does not have a {} command. See --help",
-                CMD, unknown
-            ));
-        }
+        (unknown, _) => Err(format!(
+            "{} does not have a {} command. See --help",
+            CMD, unknown
+        )),
     }
 }
@@ -117,24 +117,24 @@ pub fn cli_run(matches: &ArgMatches, base_dir: PathBuf) -> Result<(), String> {
     }

     println!("Your wallet's 12-word BIP-39 mnemonic is:");
-    println!("");
+    println!();
     println!("\t{}", mnemonic.phrase());
-    println!("");
+    println!();
     println!("This mnemonic can be used to fully restore your wallet, should ");
     println!("you lose the JSON file or your password. ");
-    println!("");
+    println!();
     println!("It is very important that you DO NOT SHARE this mnemonic as it will ");
     println!("reveal the private keys of all validators and keys generated with ");
     println!("this wallet. That would be catastrophic.");
-    println!("");
+    println!();
     println!("It is also important to store a backup of this mnemonic so you can ");
     println!("recover your private keys in the case of data loss. Writing it on ");
     println!("a piece of paper and storing it in a safe place would be prudent.");
-    println!("");
+    println!();
     println!("Your wallet's UUID is:");
-    println!("");
+    println!();
     println!("\t{}", wallet.wallet().uuid());
-    println!("");
+    println!();
     println!("You do not need to backup your UUID or keep it secret.");

     Ok(())
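The repeated `println!("")` → `println!()` changes address clippy's `println_empty_string` lint: an empty format string is needless when the macro already emits the trailing newline. A minimal sketch (the strings are hypothetical):

```rust
fn main() {
    println!("Your wallet's 12-word BIP-39 mnemonic is:");
    // clippy::println_empty_string: print a blank line with `println!()`.
    println!();
    println!("\t<mnemonic would go here>");
}
```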
@@ -30,11 +30,9 @@ pub fn cli_run(matches: &ArgMatches) -> Result<(), String> {
     match matches.subcommand() {
        (create::CMD, Some(matches)) => create::cli_run(matches, base_dir),
        (list::CMD, Some(_)) => list::cli_run(base_dir),
-        (unknown, _) => {
-            return Err(format!(
-                "{} does not have a {} command. See --help",
-                CMD, unknown
-            ));
-        }
+        (unknown, _) => Err(format!(
+            "{} does not have a {} command. See --help",
+            CMD, unknown
+        )),
     }
 }
@@ -287,7 +287,7 @@ impl<T: BeaconChainTypes> VerifiedAggregatedAttestation<T> {
         if chain
             .observed_aggregators
             .observe_validator(&attestation, aggregator_index as usize)
-            .map_err(|e| BeaconChainError::from(e))?
+            .map_err(BeaconChainError::from)?
         {
             return Err(Error::PriorAttestationKnown {
                 validator_index: aggregator_index,
@@ -370,7 +370,7 @@ impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> {
         if chain
             .observed_attesters
             .validator_has_been_observed(&attestation, validator_index as usize)
-            .map_err(|e| BeaconChainError::from(e))?
+            .map_err(BeaconChainError::from)?
         {
             return Err(Error::PriorAttestationKnown {
                 validator_index,
@@ -390,7 +390,7 @@ impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> {
         if chain
             .observed_attesters
             .observe_validator(&attestation, validator_index as usize)
-            .map_err(|e| BeaconChainError::from(e))?
+            .map_err(BeaconChainError::from)?
         {
             return Err(Error::PriorAttestationKnown {
                 validator_index,
@@ -504,7 +504,7 @@ pub fn verify_attestation_signature<T: BeaconChainTypes>(
         .canonical_head
         .try_read_for(HEAD_LOCK_TIMEOUT)
         .ok_or_else(|| BeaconChainError::CanonicalHeadLockTimeout)
-        .map(|head| head.beacon_state.fork.clone())?;
+        .map(|head| head.beacon_state.fork)?;

     let signature_set = indexed_attestation_signature_set_from_pubkeys(
         |validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed),
@@ -559,7 +559,7 @@ pub fn verify_signed_aggregate_signatures<T: BeaconChainTypes>(
         .canonical_head
         .try_read_for(HEAD_LOCK_TIMEOUT)
         .ok_or_else(|| BeaconChainError::CanonicalHeadLockTimeout)
-        .map(|head| head.beacon_state.fork.clone())?;
+        .map(|head| head.beacon_state.fork)?;

     let signature_sets = vec![
         signed_aggregate_selection_proof_signature_set(
@@ -694,7 +694,7 @@ where
         // The state roots are not useful for the shuffling, so there's no need to
         // compute them.
         per_slot_processing(&mut state, Some(Hash256::zero()), &chain.spec)
-            .map_err(|e| BeaconChainError::from(e))?;
+            .map_err(BeaconChainError::from)?;
     }

     metrics::stop_timer(state_skip_timer);
@@ -706,11 +706,11 @@ where

     state
         .build_committee_cache(relative_epoch, &chain.spec)
-        .map_err(|e| BeaconChainError::from(e))?;
+        .map_err(BeaconChainError::from)?;

     let committee_cache = state
         .committee_cache(relative_epoch)
-        .map_err(|e| BeaconChainError::from(e))?;
+        .map_err(BeaconChainError::from)?;

     chain
         .shuffling_cache
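The `map_err` rewrites above all target clippy's `redundant_closure` lint: a closure that only forwards its argument to a function can be replaced by the function path itself. A self-contained sketch with a hypothetical error type:

```rust
#[derive(Debug)]
struct MyError(String);

impl From<std::num::ParseIntError> for MyError {
    fn from(e: std::num::ParseIntError) -> Self {
        MyError(e.to_string())
    }
}

fn parse(s: &str) -> Result<i32, MyError> {
    // `|e| MyError::from(e)` is a redundant closure; pass `MyError::from` directly.
    s.parse::<i32>().map_err(MyError::from)
}

fn main() {
    println!("{:?}", parse("42"));
    println!("{:?}", parse("nope"));
}
```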
@@ -199,6 +199,8 @@ pub struct BeaconChain<T: BeaconChainTypes> {
     pub genesis_block_root: Hash256,
     /// The root of the list of genesis validators, used during syncing.
     pub genesis_validators_root: Hash256,
+
+    #[allow(clippy::type_complexity)]
     /// A state-machine that is updated with information from the network and chooses a canonical
     /// head block.
     pub fork_choice: RwLock<
@@ -493,9 +495,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             slot: head.beacon_block.slot(),
             block_root: head.beacon_block_root,
             state_root: head.beacon_state_root,
-            current_justified_checkpoint: head.beacon_state.current_justified_checkpoint.clone(),
-            finalized_checkpoint: head.beacon_state.finalized_checkpoint.clone(),
-            fork: head.beacon_state.fork.clone(),
+            current_justified_checkpoint: head.beacon_state.current_justified_checkpoint,
+            finalized_checkpoint: head.beacon_state.finalized_checkpoint,
+            fork: head.beacon_state.fork,
             genesis_time: head.beacon_state.genesis_time,
             genesis_validators_root: head.beacon_state.genesis_validators_root,
         })
@@ -853,8 +855,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             data: AttestationData {
                 slot,
                 index,
-                beacon_block_root: beacon_block_root,
-                source: state.current_justified_checkpoint.clone(),
+                beacon_block_root,
+                source: state.current_justified_checkpoint,
                 target: Checkpoint {
                     epoch,
                     root: target_root,
@@ -986,8 +988,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             .try_read_for(HEAD_LOCK_TIMEOUT)
             .ok_or_else(|| Error::CanonicalHeadLockTimeout)?
             .beacon_state
-            .fork
-            .clone();
+            .fork;

         self.op_pool
             .insert_attestation(
@@ -1348,7 +1349,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         };

         // Verify and import the block.
-        let result = match import_block(unverified_block) {
+        match import_block(unverified_block) {
             // The block was successfully verified and imported. Yay.
             Ok(block_root) => {
                 trace!(
@@ -1362,7 +1363,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 metrics::inc_counter(&metrics::BLOCK_PROCESSING_SUCCESSES);

                 let _ = self.event_handler.register(EventKind::BeaconBlockImported {
-                    block_root: block_root,
+                    block_root,
                     block: Box::new(block),
                 });

@@ -1399,9 +1400,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

                 Err(other)
             }
-        };
-
-        result
+        }
     }

     /// Accepts a fully-verified block and imports it into the chain without performing any
@@ -1642,7 +1641,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             body: BeaconBlockBody {
                 randao_reveal,
                 eth1_data,
-                graffiti: self.graffiti.clone(),
+                graffiti: self.graffiti,
                 proposer_slashings: proposer_slashings.into(),
                 attester_slashings: attester_slashings.into(),
                 attestations: self
@@ -1718,7 +1717,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             .snapshot_cache
             .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
             .and_then(|snapshot_cache| snapshot_cache.get_cloned(beacon_block_root))
-            .map::<Result<_, Error>, _>(|snapshot| Ok(snapshot))
+            .map::<Result<_, Error>, _>(Ok)
             .unwrap_or_else(|| {
                 let beacon_block = self
                     .get_block(&beacon_block_root)?
@@ -2010,8 +2009,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let mut finalized_blocks: HashSet<Hash256> = HashSet::new();

         let genesis_block_hash = Hash256::zero();
-        write!(output, "digraph beacon {{\n").unwrap();
-        write!(output, "\t_{:?}[label=\"genesis\"];\n", genesis_block_hash).unwrap();
+        writeln!(output, "digraph beacon {{").unwrap();
+        writeln!(output, "\t_{:?}[label=\"genesis\"];", genesis_block_hash).unwrap();

         // Canonical head needs to be processed first as otherwise finalized blocks aren't detected
         // properly.
@@ -2045,36 +2044,36 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         }

         if block_hash == canonical_head_hash {
-            write!(
+            writeln!(
                 output,
-                "\t_{:?}[label=\"{} ({})\" shape=box3d];\n",
+                "\t_{:?}[label=\"{} ({})\" shape=box3d];",
                 block_hash,
                 block_hash,
                 signed_beacon_block.slot()
             )
             .unwrap();
         } else if finalized_blocks.contains(&block_hash) {
-            write!(
+            writeln!(
                 output,
-                "\t_{:?}[label=\"{} ({})\" shape=Msquare];\n",
+                "\t_{:?}[label=\"{} ({})\" shape=Msquare];",
                 block_hash,
                 block_hash,
                 signed_beacon_block.slot()
             )
             .unwrap();
         } else {
-            write!(
+            writeln!(
                 output,
-                "\t_{:?}[label=\"{} ({})\" shape=box];\n",
+                "\t_{:?}[label=\"{} ({})\" shape=box];",
                 block_hash,
                 block_hash,
                 signed_beacon_block.slot()
             )
             .unwrap();
         }
-        write!(
+        writeln!(
             output,
-            "\t_{:?} -> _{:?};\n",
+            "\t_{:?} -> _{:?};",
             block_hash,
             signed_beacon_block.parent_root()
         )
@@ -2082,7 +2081,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             }
         }

-        write!(output, "}}\n").unwrap();
+        writeln!(output, "}}").unwrap();
     }

     // Used for debugging
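Swapping `write!` with a trailing `\n` for `writeln!` is clippy's `write_with_newline` lint; `writeln!` appends the newline itself. A minimal sketch of the DOT-graph pattern above, writing into a plain `String`:

```rust
use std::fmt::Write;

fn main() {
    let mut output = String::new();
    // `write!(output, "digraph beacon {{\n")` trips write_with_newline;
    // `writeln!` emits the same line plus the newline.
    writeln!(output, "digraph beacon {{").unwrap();
    writeln!(output, "}}").unwrap();
    print!("{}", output);
}
```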
@@ -2135,7 +2134,7 @@ impl From<BeaconStateError> for Error {
 }

 impl ChainSegmentResult {
-    pub fn to_block_error(self) -> Result<(), BlockError> {
+    pub fn into_block_error(self) -> Result<(), BlockError> {
         match self {
             ChainSegmentResult::Failed { error, .. } => Err(error),
             ChainSegmentResult::Successful { .. } => Ok(()),
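The `to_block_error` → `into_block_error` rename follows clippy's `wrong_self_convention`: a conversion method that consumes `self` should be prefixed `into_`, while `to_` conventionally signals a borrowing, non-consuming conversion. A simplified sketch (`SegmentResult` is a hypothetical stand-in for `ChainSegmentResult`):

```rust
enum SegmentResult {
    Successful,
    Failed(String),
}

impl SegmentResult {
    // Takes `self` by value and converts it, so `into_` is the conventional prefix.
    fn into_error(self) -> Result<(), String> {
        match self {
            SegmentResult::Failed(e) => Err(e),
            SegmentResult::Successful => Ok(()),
        }
    }
}

fn main() {
    println!("{:?}", SegmentResult::Successful.into_error());
    println!("{:?}", SegmentResult::Failed("bad block".into()).into_error());
}
```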
@@ -584,8 +584,9 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> {
                 state_root
             };

-            per_slot_processing(&mut state, Some(state_root), &chain.spec)?
-                .map(|summary| summaries.push(summary));
+            if let Some(summary) = per_slot_processing(&mut state, Some(state_root), &chain.spec)? {
+                summaries.push(summary)
+            }
         }

         expose_participation_metrics(&summaries);
@@ -93,6 +93,7 @@ where
 ///
 /// See the tests for an example of a complete working example.
 pub struct BeaconChainBuilder<T: BeaconChainTypes> {
+    #[allow(clippy::type_complexity)]
     store: Option<Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>>,
     store_migrator: Option<T::StoreMigrator>,
     canonical_head: Option<BeaconSnapshot<T::EthSpec>>,
@@ -461,13 +462,10 @@ where
             .pubkey_cache_path
             .ok_or_else(|| "Cannot build without a pubkey cache path".to_string())?;

-        let validator_pubkey_cache = self
-            .validator_pubkey_cache
-            .map(|cache| Ok(cache))
-            .unwrap_or_else(|| {
-                ValidatorPubkeyCache::new(&canonical_head.beacon_state, pubkey_cache_path)
-                    .map_err(|e| format!("Unable to init validator pubkey cache: {:?}", e))
-            })?;
+        let validator_pubkey_cache = self.validator_pubkey_cache.map(Ok).unwrap_or_else(|| {
+            ValidatorPubkeyCache::new(&canonical_head.beacon_state, pubkey_cache_path)
+                .map_err(|e| format!("Unable to init validator pubkey cache: {:?}", e))
+        })?;

         let persisted_fork_choice = store
             .get_item::<PersistedForkChoice>(&Hash256::from_slice(&FORK_CHOICE_DB_KEY))
@@ -331,7 +331,7 @@ impl<T: EthSpec> Eth1ChainBackend<T> for CachingEth1Backend<T> {
             //
             // Here we choose the eth1_data corresponding to the latest block in our voting window.
             // If no votes exist, choose `state.eth1_data` as default vote.
-            let default_vote = votes_to_consider
+            votes_to_consider
                 .iter()
                 .max_by(|(_, x), (_, y)| x.cmp(y))
                 .map(|vote| {
@@ -355,8 +355,7 @@ impl<T: EthSpec> Eth1ChainBackend<T> for CachingEth1Backend<T> {
                 );
                 metrics::inc_counter(&metrics::DEFAULT_ETH1_VOTES);
                 vote
-            });
-            default_vote
+            })
         };

         debug!(
@@ -37,7 +37,7 @@ impl<T: EthSpec> ServerSentEvents<T> {
         let arc = Arc::new(mutex);
         let this = Self {
             head_changed_queue: arc.clone(),
-            log: log,
+            log,
             _phantom: PhantomData,
         };
         (this, arc)
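Changes like `log: log` → `log` here (and `block_root: block_root`, `websockets_handler: websockets_handler` in nearby hunks) fix clippy's `redundant_field_names` lint: when a struct field is initialized from a variable of the same name, the shorthand form is preferred. A minimal sketch:

```rust
struct Config {
    log: String,
}

fn main() {
    let log = String::from("debug");
    // `Config { log: log }` triggers clippy::redundant_field_names;
    // the shorthand initializes the field from the same-named variable.
    let cfg = Config { log };
    println!("{}", cfg.log);
}
```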
@@ -52,7 +52,10 @@ impl<T: EthSpec> EventHandler<T> for ServerSentEvents<T> {
                 ..
             } => {
                 let mut guard = self.head_changed_queue.lock();
-                if let Err(_) = guard.try_broadcast(current_head_beacon_block_root.into()) {
+                if guard
+                    .try_broadcast(current_head_beacon_block_root.into())
+                    .is_err()
+                {
                     error!(
                         self.log,
                         "Head change streaming queue full";
@@ -75,14 +78,15 @@ pub struct TeeEventHandler<E: EthSpec> {
 }

 impl<E: EthSpec> TeeEventHandler<E> {
+    #[allow(clippy::type_complexity)]
     pub fn new(
         log: Logger,
         websockets_handler: WebSocketSender<E>,
     ) -> Result<(Self, Arc<Mutex<Bus<SignedBeaconBlockHash>>>), String> {
         let (sse_handler, bus) = ServerSentEvents::new(log);
         let result = Self {
-            websockets_handler: websockets_handler,
-            sse_handler: sse_handler,
+            websockets_handler,
+            sse_handler,
         };
         Ok((result, bus))
     }
@@ -154,7 +154,7 @@ pub trait Migrate<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>:

         let batch: Vec<StoreOp<E>> = abandoned_blocks
             .into_iter()
-            .map(|block_hash| StoreOp::DeleteBlock(block_hash))
+            .map(StoreOp::DeleteBlock)
             .chain(
                 abandoned_states
                     .into_iter()
@@ -296,6 +296,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
         finality_distance > max_finality_distance
     }

+    #[allow(clippy::type_complexity)]
     /// Spawn a new child thread to run the migration process.
     ///
     /// Return a channel handle for sending new finalized states to the thread.
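The `if let Err(_) = …` → `.is_err()` change above is clippy's `redundant_pattern_matching` lint: matching a pattern only to discard its binding is better expressed with the dedicated predicate method. A minimal sketch:

```rust
fn main() {
    let result: Result<i32, String> = Err("queue full".into());
    // `if let Err(_) = result { ... }` trips clippy::redundant_pattern_matching;
    // `is_err()` states the intent without a pattern match.
    if result.is_err() {
        eprintln!("broadcast failed");
    }
}
```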
@@ -88,9 +88,9 @@ impl Item for EpochBitfield {
             .unwrap_or_else(|| {
                 self.bitfield
                     .resize(validator_index.saturating_add(1), false);
-                self.bitfield
-                    .get_mut(validator_index)
-                    .map(|mut bit| *bit = true);
+                if let Some(mut bit) = self.bitfield.get_mut(validator_index) {
+                    *bit = true;
+                }
                 false
             })
     }
@@ -393,6 +393,7 @@ where
         (block_root.into(), new_state)
     }

+    #[allow(clippy::type_complexity)]
     /// `add_block()` repeated `num_blocks` times.
     pub fn add_blocks(
         &self,
@@ -422,6+423,7 @@ where
         (blocks, states, slot, head_hash, state)
     }

+    #[allow(clippy::type_complexity)]
     /// A wrapper on `add_blocks()` to avoid passing enums explicitly.
     pub fn add_canonical_chain_blocks(
         &self,
@@ -446,6 +448,7 @@ where
         )
     }

+    #[allow(clippy::type_complexity)]
     /// A wrapper on `add_blocks()` to avoid passing enums explicitly.
     pub fn add_stray_blocks(
         &self,
@@ -138,8 +138,8 @@ struct ValidatorPubkeyCacheFile(File);

 #[derive(Debug)]
 enum Error {
-    IoError(io::Error),
-    SszError(DecodeError),
+    Io(io::Error),
+    Ssz(DecodeError),
     /// The file read from disk does not have a contiguous list of validator public keys. The file
     /// has become corrupted.
     InconsistentIndex {
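Renaming `IoError`/`SszError` to `Io`/`Ssz` addresses clippy's `enum_variant_names` family of lints, which flag variants that repeat the enclosing enum's name (`Error::IoError` stutters; `Error::Io` does not). A sketch with a stand-in payload type for the SSZ error:

```rust
use std::io;

#[derive(Debug)]
enum Error {
    // `IoError` / `SszError` inside `enum Error` repeat the enum's name;
    // the short forms read naturally at the call site as `Error::Io`.
    Io(io::Error),
    Ssz(String), // hypothetical stand-in for the SSZ DecodeError
}

fn read() -> Result<(), Error> {
    std::fs::read("missing-file").map_err(Error::Io)?;
    Ok(())
}

fn main() {
    let _ = Error::Ssz("bad ssz".into());
    println!("{:?}", read());
}
```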
@@ -162,7 +162,7 @@ impl ValidatorPubkeyCacheFile {
             .write(true)
             .open(path)
             .map(Self)
-            .map_err(Error::IoError)
+            .map_err(Error::Io)
     }

     /// Opens an existing file for reading and writing.
@@ -174,7 +174,7 @@ impl ValidatorPubkeyCacheFile {
             .append(true)
             .open(path)
             .map(Self)
-            .map_err(Error::IoError)
+            .map_err(Error::Io)
     }

     /// Append a public key to file.
@@ -188,10 +188,9 @@ impl ValidatorPubkeyCacheFile {
     /// Creates a `ValidatorPubkeyCache` by reading and parsing the underlying file.
     pub fn into_cache(mut self) -> Result<ValidatorPubkeyCache, Error> {
         let mut bytes = vec![];
-        self.0.read_to_end(&mut bytes).map_err(Error::IoError)?;
+        self.0.read_to_end(&mut bytes).map_err(Error::Io)?;

-        let list: Vec<(usize, PublicKeyBytes)> =
-            Vec::from_ssz_bytes(&bytes).map_err(Error::SszError)?;
+        let list: Vec<(usize, PublicKeyBytes)> = Vec::from_ssz_bytes(&bytes).map_err(Error::Ssz)?;

         let mut last = None;
         let mut pubkeys = Vec::with_capacity(list.len());
@@ -201,7 +200,7 @@ impl ValidatorPubkeyCacheFile {
             let expected = last.map(|n| n + 1);
             if expected.map_or(true, |expected| index == expected) {
                 last = Some(index);
-                pubkeys.push((&pubkey).try_into().map_err(Error::SszError)?);
+                pubkeys.push((&pubkey).try_into().map_err(Error::Ssz)?);
                 indices.insert(pubkey, index);
             } else {
                 return Err(Error::InconsistentIndex {
@@ -225,7 +224,7 @@ fn append_to_file(file: &mut File, index: usize, pubkey: &PublicKeyBytes) -> Res
     index.ssz_append(&mut line);
     pubkey.ssz_append(&mut line);

-    file.write_all(&mut line).map_err(Error::IoError)
+    file.write_all(&line).map_err(Error::Io)
 }

 #[cfg(test)]
@@ -126,13 +126,13 @@ fn chain_segment_full_segment() {
     harness
         .chain
         .process_chain_segment(vec![])
-        .to_block_error()
+        .into_block_error()
         .expect("should import empty chain segment");

     harness
         .chain
         .process_chain_segment(blocks.clone())
-        .to_block_error()
+        .into_block_error()
         .expect("should import chain segment");

     harness.chain.fork_choice().expect("should run fork choice");
@@ -163,7 +163,7 @@ fn chain_segment_varying_chunk_size() {
         harness
             .chain
             .process_chain_segment(chunk.to_vec())
-            .to_block_error()
+            .into_block_error()
             .expect(&format!(
                 "should import chain segment of len {}",
                 chunk_size
@@ -203,7 +203,7 @@ fn chain_segment_non_linear_parent_roots() {
             harness
                 .chain
                 .process_chain_segment(blocks.clone())
-                .to_block_error(),
+                .into_block_error(),
             Err(BlockError::NonLinearParentRoots)
         ),
         "should not import chain with missing parent"
@@ -220,7 +220,7 @@ fn chain_segment_non_linear_parent_roots() {
             harness
                 .chain
                 .process_chain_segment(blocks.clone())
-                .to_block_error(),
+                .into_block_error(),
             Err(BlockError::NonLinearParentRoots)
         ),
         "should not import chain with a broken parent root link"
@@ -247,7 +247,7 @@ fn chain_segment_non_linear_slots() {
             harness
                 .chain
                 .process_chain_segment(blocks.clone())
-                .to_block_error(),
+                .into_block_error(),
             Err(BlockError::NonLinearSlots)
         ),
         "should not import chain with a parent that has a lower slot than its child"
@@ -265,7 +265,7 @@ fn chain_segment_non_linear_slots() {
             harness
                 .chain
                 .process_chain_segment(blocks.clone())
-                .to_block_error(),
+                .into_block_error(),
             Err(BlockError::NonLinearSlots)
         ),
         "should not import chain with a parent that has an equal slot to its child"
@@ -292,7 +292,7 @@ fn invalid_signatures() {
     harness
         .chain
         .process_chain_segment(ancestor_blocks)
-        .to_block_error()
+        .into_block_error()
         .expect("should import all blocks prior to the one being tested");

     // For the given snapshots, test the following:
@@ -312,7 +312,10 @@ fn invalid_signatures() {
         // Ensure the block will be rejected if imported in a chain segment.
         assert!(
             matches!(
-                harness.chain.process_chain_segment(blocks).to_block_error(),
+                harness
+                    .chain
+                    .process_chain_segment(blocks)
+                    .into_block_error(),
                 Err(BlockError::InvalidSignature)
             ),
             "should not import chain segment with an invalid {} signature",
@@ -351,7 +354,10 @@ fn invalid_signatures() {
         // Ensure the block will be rejected if imported in a chain segment.
         assert!(
             matches!(
-                harness.chain.process_chain_segment(blocks).to_block_error(),
+                harness
+                    .chain
+                    .process_chain_segment(blocks)
+                    .into_block_error(),
                 Err(BlockError::InvalidSignature)
             ),
             "should not import chain segment with an invalid gossip signature",
@@ -489,7 +495,10 @@ fn invalid_signatures() {
         .collect();
     assert!(
         !matches!(
-            harness.chain.process_chain_segment(blocks).to_block_error(),
+            harness
+                .chain
+                .process_chain_segment(blocks)
+                .into_block_error(),
             Err(BlockError::InvalidSignature)
         ),
         "should not throw an invalid signature error for a bad deposit signature"
@@ -50,6 +50,7 @@ pub const ETH1_GENESIS_UPDATE_INTERVAL_MILLIS: u64 = 7_000;
 /// `self.memory_store(..)` has been called.
 pub struct ClientBuilder<T: BeaconChainTypes> {
     slot_clock: Option<T::SlotClock>,
+    #[allow(clippy::type_complexity)]
     store: Option<Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>>,
     store_migrator: Option<T::StoreMigrator>,
     runtime_context: Option<RuntimeContext<T::EthSpec>>,
@@ -134,7 +135,7 @@ where
         let eth_spec_instance = self.eth_spec_instance.clone();
         let data_dir = config.data_dir.clone();
         let disabled_forks = config.disabled_forks.clone();
-        let graffiti = config.graffiti.clone();
+        let graffiti = config.graffiti;

         let store =
             store.ok_or_else(|| "beacon_chain_start_method requires a store".to_string())?;
@@ -452,6 +453,7 @@ where
     THotStore: ItemStore<TEthSpec> + 'static,
     TColdStore: ItemStore<TEthSpec> + 'static,
 {
+    #[allow(clippy::type_complexity)]
     /// Specifies that the `BeaconChain` should publish events using the WebSocket server.
     pub fn tee_event_handler(
         mut self,
@@ -1,5 +1,3 @@
-use network;
-
 use error_chain::error_chain;

 error_chain! {
@@ -7,7 +7,6 @@ use slog::{debug, error, info, warn};
 use slot_clock::SlotClock;
 use std::sync::Arc;
 use std::time::{Duration, Instant};
-use time;
 use tokio::time::delay_for;
 use types::{EthSpec, Slot};

@@ -64,7 +63,7 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
     }

     // Perform post-genesis logging.
-    while let Some(_) = interval.next().await {
+    while interval.next().await.is_some() {
         let connected_peer_count = network.connected_peers();
         let sync_state = network.sync_state();

@@ -131,34 +130,32 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
                 "speed" => sync_speed_pretty(speedo.slots_per_second()),
                 "est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(current_slot)),
             );
-        } else {
-            if sync_state.is_synced() {
-                let block_info = if current_slot > head_slot {
-                    format!(" … empty")
-                } else {
-                    format!("{}", head_root)
-                };
-                info!(
-                    log,
-                    "Synced";
-                    "peers" => peer_count_pretty(connected_peer_count),
-                    "finalized_root" => format!("{}", finalized_root),
-                    "finalized_epoch" => finalized_epoch,
-                    "epoch" => current_epoch,
-                    "block" => block_info,
-                    "slot" => current_slot,
-                );
-            } else {
-                info!(
-                    log,
-                    "Searching for peers";
-                    "peers" => peer_count_pretty(connected_peer_count),
-                    "finalized_root" => format!("{}", finalized_root),
-                    "finalized_epoch" => finalized_epoch,
-                    "head_slot" => head_slot,
-                    "current_slot" => current_slot,
-                );
-            }
+        } else if sync_state.is_synced() {
+            let block_info = if current_slot > head_slot {
+                " … empty".to_string()
+            } else {
+                head_root.to_string()
+            };
+            info!(
+                log,
+                "Synced";
+                "peers" => peer_count_pretty(connected_peer_count),
+                "finalized_root" => format!("{}", finalized_root),
+                "finalized_epoch" => finalized_epoch,
+                "epoch" => current_epoch,
+                "block" => block_info,
+                "slot" => current_slot,
+            );
+        } else {
+            info!(
+                log,
+                "Searching for peers";
+                "peers" => peer_count_pretty(connected_peer_count),
+                "finalized_root" => format!("{}", finalized_root),
+                "finalized_epoch" => finalized_epoch,
+                "head_slot" => head_slot,
+                "current_slot" => current_slot,
+            );
         }
         Ok::<(), ()>(())
@@ -434,7 +434,7 @@ impl Service {
         for (block_range, log_chunk) in logs.iter() {
             let mut cache = self.deposits().write();
             log_chunk
-                .into_iter()
+                .iter()
                 .map(|raw_log| {
                     DepositLog::from_log(&raw_log, self.inner.spec()).map_err(|error| {
                         Error::FailedToParseDepositLog {
@@ -188,10 +188,10 @@ impl<TSpec: EthSpec> ProtocolsHandler for DelegatingHandler<TSpec> {
             // Identify
             (
                 EitherOutput::Second(EitherOutput::Second(protocol)),
-                EitherOutput::Second(EitherOutput::Second(info)),
+                EitherOutput::Second(EitherOutput::Second(())),
             ) => self
                 .identify_handler
-                .inject_fully_negotiated_outbound(protocol, info),
+                .inject_fully_negotiated_outbound(protocol, ()),
             // Reaching here means we got a protocol and info for different behaviours
             _ => unreachable!("output and protocol don't match"),
         }
@@ -201,7 +201,7 @@ impl<TSpec: EthSpec> ProtocolsHandler for DelegatingHandler<TSpec> {
         match event {
             DelegateIn::Gossipsub(ev) => self.gossip_handler.inject_event(ev),
             DelegateIn::RPC(ev) => self.rpc_handler.inject_event(ev),
-            DelegateIn::Identify(ev) => self.identify_handler.inject_event(ev),
+            DelegateIn::Identify(()) => self.identify_handler.inject_event(()),
         }
     }

@@ -263,23 +263,23 @@ impl<TSpec: EthSpec> ProtocolsHandler for DelegatingHandler<TSpec> {
                 }
             },
             // Identify
-            EitherOutput::Second(EitherOutput::Second(info)) => match error {
+            EitherOutput::Second(EitherOutput::Second(())) => match error {
                 ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) => {
                     self.identify_handler.inject_dial_upgrade_error(
-                        info,
+                        (),
                         ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)),
                     )
                 }
                 ProtocolsHandlerUpgrErr::Timer => self
                     .identify_handler
-                    .inject_dial_upgrade_error(info, ProtocolsHandlerUpgrErr::Timer),
+                    .inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timer),
                 ProtocolsHandlerUpgrErr::Timeout => self
                     .identify_handler
-                    .inject_dial_upgrade_error(info, ProtocolsHandlerUpgrErr::Timeout),
+                    .inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout),
                 ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::B(
                     EitherError::B(err),
                 ))) => self.identify_handler.inject_dial_upgrade_error(
-                    info,
+                    (),
                     ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)),
                 ),
                 ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(_)) => {
@@ -296,6 +296,7 @@ impl<TSpec: EthSpec> ProtocolsHandler for DelegatingHandler<TSpec> {
             .max(self.identify_handler.connection_keep_alive())
     }

+    #[allow(clippy::type_complexity)]
     fn poll(
         &mut self,
         cx: &mut Context,
@@ -350,10 +351,10 @@ impl<TSpec: EthSpec> ProtocolsHandler for DelegatingHandler<TSpec> {
             Poll::Ready(ProtocolsHandlerEvent::Close(event)) => {
                 return Poll::Ready(ProtocolsHandlerEvent::Close(DelegateError::Identify(event)));
             }
-            Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info }) => {
+            Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: () }) => {
                 return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest {
                     protocol: protocol.map_upgrade(|u| EitherUpgrade::B(EitherUpgrade::B(u))),
-                    info: EitherOutput::Second(EitherOutput::Second(info)),
+                    info: EitherOutput::Second(EitherOutput::Second(())),
                 });
             }
             Poll::Pending => (),
@@ -100,6 +100,7 @@ impl<TSpec: EthSpec> ProtocolsHandler for BehaviourHandler<TSpec> {
         KeepAlive::Yes
     }

+    #[allow(clippy::type_complexity)]
     fn poll(
         &mut self,
         cx: &mut Context,
@@ -397,7 +397,7 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
         if self.seen_gossip_messages.put(id.clone(), ()).is_none() {
             match PubsubMessage::decode(&gs_msg.topics, &gs_msg.data) {
                 Err(e) => {
-                    debug!(self.log, "Could not decode gossipsub message"; "error" => format!("{}", e))
+                    debug!(self.log, "Could not decode gossipsub message"; "error" => e)
                 }
                 Ok(msg) => {
                     // if this message isn't a duplicate, notify the network
@@ -412,7 +412,7 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
         } else {
             match PubsubMessage::<TSpec>::decode(&gs_msg.topics, &gs_msg.data) {
                 Err(e) => {
-                    debug!(self.log, "Could not decode gossipsub message"; "error" => format!("{}", e))
+                    debug!(self.log, "Could not decode gossipsub message"; "error" => e)
                 }
                 Ok(msg) => {
                     debug!(self.log, "A duplicate gossipsub message was received"; "message_source" => format!("{}", gs_msg.source), "propagated_peer" => format!("{}",propagation_source), "message" => format!("{}", msg));
@@ -17,9 +17,9 @@ use std::str::FromStr;
 use types::{EnrForkId, EthSpec};

 /// The ENR field specifying the fork id.
-pub const ETH2_ENR_KEY: &'static str = "eth2";
+pub const ETH2_ENR_KEY: &str = "eth2";
 /// The ENR field specifying the subnet bitfield.
-pub const BITFIELD_ENR_KEY: &'static str = "attnets";
+pub const BITFIELD_ENR_KEY: &str = "attnets";

 /// Extension trait for ENR's within Eth2.
 pub trait Eth2Enr {
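Dropping `'static` from these constants is clippy's `redundant_static_lifetimes` lint: in `const` and `static` items the `'static` lifetime is already implied by the language. A minimal sketch:

```rust
// `pub const ETH2_ENR_KEY: &'static str = "eth2";` would trip
// clippy::redundant_static_lifetimes; the lifetime is implied here.
pub const ETH2_ENR_KEY: &str = "eth2";

fn main() {
    println!("{}", ETH2_ENR_KEY);
}
```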
@@ -197,7 +197,7 @@ pub fn peer_id_to_node_id(peer_id: &PeerId) -> Result<discv5::enr::NodeId, Strin
             let mut hasher = Keccak::v256();
             hasher.update(&uncompressed_key_bytes);
             hasher.finalize(&mut output);
-            return Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length"));
+            Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length"))
         }
         PublicKey::Ed25519(pk) => {
             let uncompressed_key_bytes = pk.encode();
@@ -205,9 +205,9 @@ pub fn peer_id_to_node_id(peer_id: &PeerId) -> Result<discv5::enr::NodeId, Strin
             let mut hasher = Keccak::v256();
             hasher.update(&uncompressed_key_bytes);
             hasher.finalize(&mut output);
-            return Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length"));
+            Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length"))
         }
-        _ => return Err("Unsupported public key".into()),
+        _ => Err("Unsupported public key".into()),
     }
 }

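Turning these trailing `return` statements into tail expressions is clippy's `needless_return` lint: when a value is the last expression of a block, the explicit `return` is redundant. A minimal sketch:

```rust
fn double(x: i32) -> i32 {
    // `return x * 2;` as the final statement would trip clippy::needless_return;
    // the bare tail expression is the idiomatic way to yield the value.
    x * 2
}

fn main() {
    println!("{}", double(21));
}
```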
@@ -194,10 +194,8 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
         // Update the PeerDB state.
         if let Some(peer_id) = ban_peer.take() {
             self.network_globals.peers.write().ban(&peer_id);
-        } else {
-            if let Some(peer_id) = unban_peer.take() {
-                self.network_globals.peers.write().unban(&peer_id);
-            }
+        } else if let Some(peer_id) = unban_peer.take() {
+            self.network_globals.peers.write().unban(&peer_id);
         }
     }

@@ -9,7 +9,7 @@ use serde::Serialize;
 use std::time::Instant;

 lazy_static! {
-    static ref HALFLIFE_DECAY: f64 = -2.0f64.ln() / SCORE_HALFLIFE;
+    static ref HALFLIFE_DECAY: f64 = -(2.0f64.ln()) / SCORE_HALFLIFE;
 }

 /// The default score for new peers.
@@ -196,10 +196,8 @@ impl<TSpec: EthSpec> Decoder for SSZSnappyInboundCodec<TSpec> {
             Err(e) => match e.kind() {
                 // Haven't received enough bytes to decode yet
                 // TODO: check if this is the only Error variant where we return `Ok(None)`
-                ErrorKind::UnexpectedEof => {
-                    return Ok(None);
-                }
-                _ => return Err(e).map_err(RPCError::from),
+                ErrorKind::UnexpectedEof => Ok(None),
+                _ => Err(e).map_err(RPCError::from),
             },
         }
     }
@@ -368,10 +366,8 @@ impl<TSpec: EthSpec> Decoder for SSZSnappyOutboundCodec<TSpec> {
             Err(e) => match e.kind() {
                 // Haven't received enough bytes to decode yet
                 // TODO: check if this is the only Error variant where we return `Ok(None)`
-                ErrorKind::UnexpectedEof => {
-                    return Ok(None);
-                }
-                _ => return Err(e).map_err(RPCError::from),
+                ErrorKind::UnexpectedEof => Ok(None),
+                _ => Err(e).map_err(RPCError::from),
             },
         }
     }
@@ -412,10 +408,8 @@ impl<TSpec: EthSpec> OutboundCodec<RPCRequest<TSpec>> for SSZSnappyOutboundCodec
             Err(e) => match e.kind() {
                 // Haven't received enough bytes to decode yet
                 // TODO: check if this is the only Error variant where we return `Ok(None)`
-                ErrorKind::UnexpectedEof => {
-                    return Ok(None);
-                }
-                _ => return Err(e).map_err(RPCError::from),
+                ErrorKind::UnexpectedEof => Ok(None),
+                _ => Err(e).map_err(RPCError::from),
             },
         }
     }
@@ -321,16 +321,13 @@ where
         };

         // If the response we are sending is an error, report back for handling
-        match response {
-            RPCCodedResponse::Error(ref code, ref reason) => {
-                let err = HandlerErr::Inbound {
-                    id: inbound_id,
-                    proto: inbound_info.protocol,
-                    error: RPCError::ErrorResponse(*code, reason.to_string()),
-                };
-                self.pending_errors.push(err);
-            }
-            _ => {} // not an error, continue.
+        if let RPCCodedResponse::Error(ref code, ref reason) = response {
+            let err = HandlerErr::Inbound {
+                id: inbound_id,
+                proto: inbound_info.protocol,
+                error: RPCError::ErrorResponse(*code, reason.to_string()),
+            };
+            self.pending_errors.push(err);
         }

         if matches!(self.state, HandlerState::Deactivated) {
@@ -661,13 +658,13 @@ where
                     // if we can't close right now, put the substream back and try again later
                     Poll::Pending => info.state = InboundState::Idle(substream),
                     Poll::Ready(res) => {
-                        substreams_to_remove.push(id.clone());
+                        substreams_to_remove.push(*id);
                         if let Some(ref delay_key) = info.delay_key {
                             self.inbound_substreams_delay.remove(delay_key);
                         }
                         if let Err(error) = res {
                             self.pending_errors.push(HandlerErr::Inbound {
-                                id: id.clone(),
+                                id: *id,
                                 error,
                                 proto: info.protocol,
                             });
@@ -697,7 +694,7 @@ where
                     })
                 }
                 if remove {
-                    substreams_to_remove.push(id.clone());
+                    substreams_to_remove.push(*id);
                     if let Some(ref delay_key) = info.delay_key {
                         self.inbound_substreams_delay.remove(delay_key);
                     }
@@ -808,7 +805,7 @@ where
                     //trace!(self.log, "RPC Response - stream closed by remote");
                     // drop the stream
                     let delay_key = &entry.get().delay_key;
-                    let request_id = *&entry.get().req_id;
+                    let request_id = entry.get().req_id;
                     self.outbound_substreams_delay.remove(delay_key);
                     entry.remove_entry();
                     self.update_keep_alive();
@@ -182,7 +182,7 @@ impl ssz::Decode for GoodbyeReason {
     }

     fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, ssz::DecodeError> {
-        u64::from_ssz_bytes(bytes).and_then(|n| Ok(n.into()))
+        u64::from_ssz_bytes(bytes).map(|n| n.into())
     }
 }

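`and_then(|n| Ok(n.into()))` never produces a new error, so plain `map` expresses the same computation; clippy reports this pattern (in current releases as `bind_instead_of_map`). A minimal sketch:

```rust
fn main() {
    let n: Result<u64, String> = Ok(7);
    // `n.and_then(|v| Ok(v + 1))` can never introduce an error,
    // so `map` is the right combinator.
    let m: Result<u64, String> = n.map(|v| v + 1);
    println!("{:?}", m);
}
```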
@@ -2,22 +2,25 @@
 //! given time. It schedules subscriptions to shard subnets, requests peer discoveries and
 //! determines whether attestations should be aggregated and/or passed to the beacon node.

-use crate::metrics;
-use beacon_chain::{BeaconChain, BeaconChainTypes};
-use eth2_libp2p::{types::GossipKind, NetworkGlobals};
-use futures::prelude::*;
-use hashset_delay::HashSetDelay;
-use rand::seq::SliceRandom;
-use rest_types::ValidatorSubscription;
-use slog::{crit, debug, error, o, trace, warn};
-use slot_clock::SlotClock;
 use std::collections::VecDeque;
 use std::pin::Pin;
 use std::sync::Arc;
 use std::task::{Context, Poll};
 use std::time::{Duration, Instant};

+use futures::prelude::*;
+use rand::seq::SliceRandom;
+use slog::{crit, debug, error, o, trace, warn};
+
+use beacon_chain::{BeaconChain, BeaconChainTypes};
+use eth2_libp2p::{types::GossipKind, NetworkGlobals};
+use hashset_delay::HashSetDelay;
+use rest_types::ValidatorSubscription;
+use slot_clock::SlotClock;
 use types::{Attestation, EthSpec, Slot, SubnetId};

+use crate::metrics;
+
 mod tests;

 /// The minimum number of slots ahead that we attempt to discover peers for a subscription. If the
@@ -276,7 +279,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
         attestation: &Attestation<T::EthSpec>,
     ) -> bool {
         let exact_subnet = ExactSubnet {
-            subnet_id: subnet.clone(),
+            subnet_id: subnet,
             slot: attestation.data.slot,
         };
         self.aggregate_validators_on_subnet.contains(&exact_subnet)
@@ -360,35 +363,33 @@ impl<T: BeaconChainTypes> AttestationService<T> {
         let mut is_duplicate = false;

         self.events.iter_mut().for_each(|event| {
-            match event {
-                AttServiceMessage::DiscoverPeers {
-                    subnet_id: other_subnet_id,
-                    min_ttl: other_min_ttl,
-                } => {
-                    if subnet_id == *other_subnet_id {
-                        let other_min_ttl_clone = other_min_ttl.clone();
-                        match (min_ttl, other_min_ttl_clone) {
-                            (Some(min_ttl_instant), Some(other_min_ttl_instant)) =>
-                            // only update the min_ttl if it is greater than the existing min_ttl and a DURATION_DIFFERENCE padding
-                            {
-                                if min_ttl_instant.saturating_duration_since(other_min_ttl_instant)
-                                    > DURATION_DIFFERENCE
-                                {
-                                    *other_min_ttl = min_ttl;
-                                }
-                            }
-                            (None, Some(_)) => {} // Keep the current one as it has an actual min_ttl
-                            (Some(min_ttl), None) => {
-                                // Update the request to include a min_ttl.
-                                *other_min_ttl = Some(min_ttl);
-                            }
-                            (None, None) => {} // Duplicate message, do nothing.
-                        }
-                        is_duplicate = true;
-                        return;
-                    }
-                }
-                _ => {}
-            };
+            if let AttServiceMessage::DiscoverPeers {
+                subnet_id: other_subnet_id,
+                min_ttl: other_min_ttl,
+            } = event
+            {
+                if subnet_id == *other_subnet_id {
+                    let other_min_ttl_clone = *other_min_ttl;
+                    match (min_ttl, other_min_ttl_clone) {
+                        (Some(min_ttl_instant), Some(other_min_ttl_instant)) =>
+                        // only update the min_ttl if it is greater than the existing min_ttl and a DURATION_DIFFERENCE padding
+                        {
+                            if min_ttl_instant.saturating_duration_since(other_min_ttl_instant)
+                                > DURATION_DIFFERENCE
+                            {
+                                *other_min_ttl = min_ttl;
+                            }
+                        }
+                        (None, Some(_)) => {} // Keep the current one as it has an actual min_ttl
+                        (Some(min_ttl), None) => {
+                            // Update the request to include a min_ttl.
+                            *other_min_ttl = Some(min_ttl);
+                        }
+                        (None, None) => {} // Duplicate message, do nothing.
+                    }
+                    is_duplicate = true;
+                    return;
+                }
+            }
         });
         if !is_duplicate {
@@ -542,8 +543,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
             .gossipsub_subscriptions
             .read()
             .iter()
-            .find(|topic| topic.kind() == topic_kind)
-            .is_some();
+            .any(|topic| topic.kind() == topic_kind);

         if !already_subscribed {
             // send a discovery request and a subscription
@@ -735,7 +735,7 @@ impl<T: BeaconChainTypes> Stream for AttestationService<T> {
         match self.discover_peers.poll_next_unpin(cx) {
             Poll::Ready(Some(Ok(exact_subnet))) => self.handle_discover_peers(exact_subnet),
             Poll::Ready(Some(Err(e))) => {
-                error!(self.log, "Failed to check for peer discovery requests"; "error"=> format!("{}", e));
+                error!(self.log, "Failed to check for peer discovery requests"; "error"=> e);
             }
             Poll::Ready(None) | Poll::Pending => {}
         }
@@ -744,7 +744,7 @@ impl<T: BeaconChainTypes> Stream for AttestationService<T> {
         match self.subscriptions.poll_next_unpin(cx) {
             Poll::Ready(Some(Ok(exact_subnet))) => self.handle_subscriptions(exact_subnet),
             Poll::Ready(Some(Err(e))) => {
-                error!(self.log, "Failed to check for subnet subscription times"; "error"=> format!("{}", e));
+                error!(self.log, "Failed to check for subnet subscription times"; "error"=> e);
             }
             Poll::Ready(None) | Poll::Pending => {}
         }
@@ -753,7 +753,7 @@ impl<T: BeaconChainTypes> Stream for AttestationService<T> {
         match self.unsubscriptions.poll_next_unpin(cx) {
             Poll::Ready(Some(Ok(exact_subnet))) => self.handle_unsubscriptions(exact_subnet),
             Poll::Ready(Some(Err(e))) => {
-                error!(self.log, "Failed to check for subnet unsubscription times"; "error"=> format!("{}", e));
+                error!(self.log, "Failed to check for subnet unsubscription times"; "error"=> e);
             }
             Poll::Ready(None) | Poll::Pending => {}
         }
@@ -762,7 +762,7 @@ impl<T: BeaconChainTypes> Stream for AttestationService<T> {
         match self.random_subnets.poll_next_unpin(cx) {
             Poll::Ready(Some(Ok(subnet))) => self.handle_random_subnet_expiry(subnet),
             Poll::Ready(Some(Err(e))) => {
-                error!(self.log, "Failed to check for random subnet cycles"; "error"=> format!("{}", e));
+                error!(self.log, "Failed to check for random subnet cycles"; "error"=> e);
             }
             Poll::Ready(None) | Poll::Pending => {}
         }
@@ -773,13 +773,13 @@ impl<T: BeaconChainTypes> Stream for AttestationService<T> {
                 let _ = self.handle_known_validator_expiry();
             }
             Poll::Ready(Some(Err(e))) => {
-                error!(self.log, "Failed to check for random subnet cycles"; "error"=> format!("{}", e));
+                error!(self.log, "Failed to check for random subnet cycles"; "error"=> e);
             }
             Poll::Ready(None) | Poll::Pending => {}
         }
         // poll to remove entries on expiration, no need to act on expiration events
         if let Poll::Ready(Some(Err(e))) = self.aggregate_validators_on_subnet.poll_next_unpin(cx) {
-            error!(self.log, "Failed to check for aggregate validator on subnet expirations"; "error"=> format!("{}", e));
+            error!(self.log, "Failed to check for aggregate validator on subnet expirations"; "error"=> e);
         }

         // process any generated events
@@ -1,5 +1,4 @@
 use eth2_libp2p::Enr;
-use rlp;
 use std::sync::Arc;
 use store::{DBColumn, Error as StoreError, HotColdDB, ItemStore, StoreItem};
 use types::{EthSpec, Hash256};
@@ -92,6 +92,7 @@ pub struct NetworkService<T: BeaconChainTypes> {
 }

 impl<T: BeaconChainTypes> NetworkService<T> {
+    #[allow(clippy::type_complexity)]
     pub fn start(
         beacon_chain: Arc<BeaconChain<T>>,
         config: &NetworkConfig,
@@ -315,7 +315,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
         if let Some(block_request) = self.single_block_lookups.get_mut(&request_id) {
             // update the state of the lookup indicating a block was received from the peer
             block_request.block_returned = true;
-            single_block_hash = Some(block_request.hash.clone());
+            single_block_hash = Some(block_request.hash);
         }
         if let Some(block_hash) = single_block_hash {
             self.single_block_lookup_response(peer_id, block, block_hash);
@@ -498,8 +498,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
         if self
             .single_block_lookups
             .values()
-            .find(|single_block_request| single_block_request.hash == block_hash)
-            .is_some()
+            .any(|single_block_request| single_block_request.hash == block_hash)
         {
             return;
         }
@@ -598,6 +597,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
     // These functions are called in the main poll function to transition the state of the sync
     // manager

+    #[allow(clippy::needless_return)]
    /// A new block has been received for a parent lookup query, process it.
    fn process_parent_request(&mut self, mut parent_request: ParentRequests<T::EthSpec>) {
        // verify the last added block is the parent of the last requested block
@@ -102,12 +102,7 @@ impl PeerSyncInfo {
     /// than SLOT_IMPORT_TOLERANCE of our current head.
     /// 2) The peer has a greater finalized slot/epoch than our own.
     fn is_advanced_peer(&self, remote: &PeerSyncInfo) -> bool {
-        if remote.head_slot.sub(self.head_slot).as_usize() > SLOT_IMPORT_TOLERANCE
+        remote.head_slot.sub(self.head_slot).as_usize() > SLOT_IMPORT_TOLERANCE
             || self.finalized_epoch < remote.finalized_epoch
-        {
-            true
-        } else {
-            false
-        }
     }
 }
@@ -104,6 +104,7 @@ pub enum ChainSyncingState {
 }

 impl<T: BeaconChainTypes> SyncingChain<T> {
+    #[allow(clippy::too_many_arguments)]
     pub fn new(
         id: u64,
         start_epoch: Epoch,
@@ -257,7 +258,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
     /// Sends a batch to the batch processor.
     fn process_batch(&mut self, mut batch: Batch<T::EthSpec>) {
         let downloaded_blocks = std::mem::replace(&mut batch.downloaded_blocks, Vec::new());
-        let process_id = ProcessId::RangeBatchId(self.id.clone(), batch.id.clone());
+        let process_id = ProcessId::RangeBatchId(self.id, batch.id);
         self.current_processing_batch = Some(batch);
         spawn_block_processor(
             Arc::downgrade(&self.chain.clone()),
@@ -557,9 +557,7 @@ pub async fn attester_slashing<T: BeaconChainTypes>(
                 format!("Error while importing attester slashing: {:?}", e)
             })
         } else {
-            Err(format!(
-                "Attester slashing only covers already slashed indices"
-            ))
+            Err("Attester slashing only covers already slashed indices".to_string())
         }
     })
     .map_err(ApiError::BadRequest)
@@ -2,7 +2,6 @@ use crate::{ApiError, ApiResult, NetworkChannel};
 use beacon_chain::{BeaconChain, BeaconChainTypes, StateSkipConfig};
 use bls::PublicKeyBytes;
 use eth2_libp2p::PubsubMessage;
-use hex;
 use http::header;
 use hyper::{Body, Request};
 use itertools::process_results;
@@ -5,7 +5,6 @@ use hyper::{Body, Request};
 use rest_types::{Health, SyncingResponse, SyncingStatus};
 use std::sync::Arc;
 use types::{EthSpec, Slot};
-use version;

 /// Read the version string from the current Lighthouse build.
 pub fn get_version(req: Request<Body>) -> ApiResult {
@@ -43,7 +42,7 @@ pub fn syncing<T: EthSpec>(
 }

 pub fn get_health(req: Request<Body>) -> ApiResult {
-    let health = Health::observe().map_err(|e| ApiError::ServerError(e))?;
+    let health = Health::observe().map_err(ApiError::ServerError)?;

     ResponseBuilder::new(&req)?.body_no_ssz(&health)
 }
@@ -4,7 +4,6 @@ use clap_utils::BAD_TESTNET_DIR_MESSAGE;
 use client::{config::DEFAULT_DATADIR, ClientConfig, ClientGenesis};
 use eth2_libp2p::{Enr, Multiaddr};
 use eth2_testnet_config::Eth2TestnetConfig;
-use hyper;
 use slog::{crit, info, Logger};
 use ssz::Encode;
 use std::fs;
@@ -68,7 +67,7 @@ pub fn get_config<E: EthSpec>(
     let mut log_dir = client_config.data_dir.clone();
     // remove /beacon from the end
     log_dir.pop();
-    info!(log, "Data directory initialised"; "datadir" => format!("{}",log_dir.into_os_string().into_string().expect("Datadir should be a valid os string")));
+    info!(log, "Data directory initialised"; "datadir" => log_dir.into_os_string().into_string().expect("Datadir should be a valid os string"));

     client_config.spec_constants = spec_constants.into();
     client_config.testnet_dir = get_testnet_dir(cli_args);
@@ -181,7 +180,7 @@ pub fn get_config<E: EthSpec>(
         resolved_addrs
             .next()
             .map(|a| a.ip())
-            .ok_or_else(|| format!("Resolved dns addr contains no entries"))?
+            .ok_or_else(|| "Resolved dns addr contains no entries".to_string())?
     } else {
         return Err(format!("Failed to parse enr-address: {}", enr_address));
     };
@@ -406,7 +405,7 @@ pub fn get_eth2_testnet_config<E: EthSpec>(
     } else {
         Eth2TestnetConfig::hard_coded()
             .map_err(|e| format!("Error parsing hardcoded testnet: {}", e))?
-            .ok_or_else(|| format!("{}", BAD_TESTNET_DIR_MESSAGE))
+            .ok_or_else(|| BAD_TESTNET_DIR_MESSAGE.to_string())
     }
 }

@@ -126,7 +126,7 @@ impl<E: EthSpec> ProductionBeaconNode<E> {

     let builder = builder
         .build_beacon_chain()?
-        .network(&mut client_config.network)?
+        .network(&client_config.network)?
         .notifier()?;

     let builder = if client_config.rest_api.enabled {
@@ -381,6 +381,7 @@ pub fn store_updated_vector<F: Field<E>, E: EthSpec, S: KeyValueStore<E>>(
     Ok(())
 }

+#[allow(clippy::too_many_arguments)]
 fn store_range<F, E, S, I>(
     _: F,
     range: I,
@@ -78,7 +78,7 @@ impl SimpleForwardsBlockRootsIterator {
                 .collect::<Vec<_>>()
             },
         )?;
-        Ok(Self { values: values })
+        Ok(Self { values })
     }
 }

@@ -77,7 +77,7 @@ impl<E: EthSpec> KeyValueStore<E> for LevelDB<E> {
     }

     fn sync(&self) -> Result<(), Error> {
-        self.put_bytes_sync("sync", "sync".as_bytes(), "sync".as_bytes())
+        self.put_bytes_sync("sync", b"sync", b"sync")
     }

     /// Retrieve some bytes in `column` with `key`.
@@ -108,7 +108,7 @@ impl<E: EthSpec> KeyValueStore<E> for LevelDB<E> {
         self.db
             .get(self.read_options(), BytesKey::from_vec(column_key))
             .map_err(Into::into)
-            .and_then(|val| Ok(val.is_some()))
+            .map(|val| val.is_some())
     }

     /// Removes `key` from `column`.
@@ -75,7 +75,7 @@ impl<T: EthSpec> PartialBeaconState<T> {
             genesis_time: s.genesis_time,
             genesis_validators_root: s.genesis_validators_root,
             slot: s.slot,
-            fork: s.fork.clone(),
+            fork: s.fork,

             // History
             latest_block_header: s.latest_block_header.clone(),
@@ -107,9 +107,9 @@ impl<T: EthSpec> PartialBeaconState<T> {

             // Finality
             justification_bits: s.justification_bits.clone(),
-            previous_justified_checkpoint: s.previous_justified_checkpoint.clone(),
-            current_justified_checkpoint: s.current_justified_checkpoint.clone(),
-            finalized_checkpoint: s.finalized_checkpoint.clone(),
+            previous_justified_checkpoint: s.previous_justified_checkpoint,
+            current_justified_checkpoint: s.current_justified_checkpoint,
+            finalized_checkpoint: s.finalized_checkpoint,
         }
     }

@@ -21,13 +21,13 @@ impl TryFrom<&ArgMatches<'_>> for BootNodeConfig {
         .value_of("listen-address")
         .expect("required parameter")
         .parse::<IpAddr>()
-        .map_err(|_| format!("Invalid listening address"))?;
+        .map_err(|_| "Invalid listening address".to_string())?;

     let listen_port = matches
         .value_of("port")
         .expect("required parameter")
         .parse::<u16>()
-        .map_err(|_| format!("Invalid listening port"))?;
+        .map_err(|_| "Invalid listening port".to_string())?;

     let boot_nodes = {
         if let Some(boot_nodes) = matches.value_of("boot-nodes") {
@@ -43,7 +43,7 @@ impl TryFrom<&ArgMatches<'_>> for BootNodeConfig {
         let enr_port = {
             if let Some(port) = matches.value_of("boot-node-enr-port") {
                 port.parse::<u16>()
-                    .map_err(|_| format!("Invalid ENR port"))?
+                    .map_err(|_| "Invalid ENR port".to_string())?
             } else {
                 listen_port
             }
@@ -59,7 +59,7 @@ impl TryFrom<&ArgMatches<'_>> for BootNodeConfig {
         let auto_update = matches.is_present("enable-enr_auto_update");

         // the address to listen on
-        let listen_socket = SocketAddr::new(listen_address.into(), enr_port);
+        let listen_socket = SocketAddr::new(listen_address, enr_port);

         // Generate a new key and build a new ENR
         let local_key = CombinedKey::generate_secp256k1();
@@ -95,7 +95,7 @@ fn resolve_address(address_string: String, port: u16) -> Result<IpAddr, String>
     resolved_addrs
         .next()
         .map(|a| a.ip())
-        .ok_or_else(|| format!("Resolved dns addr contains no entries")))
+        .ok_or_else(|| "Resolved dns addr contains no entries".to_string()))
        .map_err(|_| format!("Failed to parse enr-address: {}", address_string))?
 }
|
@ -1,6 +1,5 @@
//! Creates a simple DISCV5 server which can be used to bootstrap an Eth2 network.
use clap::ArgMatches;
use slog;
use slog::{o, Drain, Level, Logger};

use std::convert::TryFrom;
|
@ -2,7 +2,6 @@

use clap::ArgMatches;
use eth2_testnet_config::Eth2TestnetConfig;
use hex;
use ssz::Decode;
use std::path::PathBuf;
use std::str::FromStr;
|
@ -3,7 +3,6 @@
//!
//! These files are required for some `include_bytes` calls used in this crate.

use hex;
use serde_json::Value;
use sha2::{Digest, Sha256};
use std::env;
|
@ -87,7 +86,7 @@ pub fn download_deposit_contract(

let abi = contract
.get("abi")
.ok_or(format!("Response does not contain key: abi"))?
.ok_or_else(|| "Response does not contain key: abi".to_string())?
.to_string();

verify_checksum(abi.as_bytes(), abi_checksum);
|
@ -98,7 +97,7 @@ pub fn download_deposit_contract(

let bytecode = contract
.get("bytecode")
.ok_or(format!("Response does not contain key: bytecode"))?
.ok_or_else(|| "Response does not contain key: bytecode".to_string())?
.to_string();

verify_checksum(bytecode.as_bytes(), bytecode_checksum);
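`ok_or(format!(...))` builds the error string even on the success path; `ok_or_else` defers that work to the error path (clippy's `or_fun_call`). A sketch using `serde_json` as the diff does, with a hypothetical `get_abi` helper:

    use serde_json::{json, Value};

    fn get_abi(contract: &Value) -> Result<String, String> {
        contract
            .get("abi")
            // The closure only runs when the key is missing, so no String is
            // allocated on the happy path.
            .ok_or_else(|| "Response does not contain key: abi".to_string())
            .map(|abi| abi.to_string())
    }

    fn main() {
        assert!(get_abi(&json!({ "abi": [] })).is_ok());
        assert!(get_abi(&json!({})).is_err());
    }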
|
@ -1,6 +1,5 @@
//! Downloads a testnet configuration from Github.

use reqwest;
use std::env;
use std::fs::File;
use std::io::Write;
|
@ -123,7 +123,7 @@ impl WalletManager {
}

let wallet = WalletBuilder::from_mnemonic(mnemonic, password, name)?.build()?;
let uuid = wallet.uuid().clone();
let uuid = *wallet.uuid();

let wallet_dir = self.dir.join(format!("{}", uuid));
|
@ -94,6 +94,11 @@ where
self.entries.len()
}

/// Checks if the mapping is empty.
pub fn is_empty(&self) -> bool {
self.entries.is_empty()
}

/// Updates the timeout for a given key. Returns true if the key existed, false otherwise.
///
/// Panics if the duration is too far in the future.
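The new `is_empty` satisfies clippy's `len_without_is_empty`, which expects the pair wherever a public `len` exists. A sketch with a hypothetical `Mapping` wrapper:

    use std::collections::HashMap;

    struct Mapping(HashMap<String, u64>);

    impl Mapping {
        pub fn len(&self) -> usize {
            self.0.len()
        }

        /// Pairs with `len` to satisfy clippy::len_without_is_empty.
        pub fn is_empty(&self) -> bool {
            self.0.is_empty()
        }
    }

    fn main() {
        let m = Mapping(HashMap::new());
        assert!(m.is_empty());
        assert_eq!(m.len(), 0);
    }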
|
@ -153,7 +153,7 @@ async fn error_for_status(response: Response) -> Result<Response, Error> {
let status = response.status();

if status.is_success() {
return Ok(response);
Ok(response)
} else {
let text_result = response.text().await;
match text_result {
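The dropped `return` addresses clippy's `needless_return`: the branch is already the tail expression of the function. A sketch with a hypothetical `check_status`:

    fn check_status(status: u16) -> Result<u16, String> {
        if (200..300).contains(&status) {
            Ok(status) // tail expression of the branch; no `return` needed
        } else {
            Err(format!("bad status: {}", status))
        }
    }

    fn main() {
        assert!(check_status(200).is_ok());
        assert!(check_status(404).is_err());
    }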
|
@ -86,18 +86,18 @@ impl Health {
psutil::host::loadavg().map_err(|e| format!("Unable to get loadavg: {:?}", e))?;

Ok(Self {
pid: process.pid().into(),
pid: process.pid(),
pid_num_threads: stat.num_threads,
pid_mem_resident_set_size: process_mem.rss().into(),
pid_mem_virtual_memory_size: process_mem.vms().into(),
sys_virt_mem_total: vm.total().into(),
sys_virt_mem_available: vm.available().into(),
sys_virt_mem_used: vm.used().into(),
sys_virt_mem_free: vm.free().into(),
sys_virt_mem_percent: vm.percent().into(),
sys_loadavg_1: loadavg.one.into(),
sys_loadavg_5: loadavg.five.into(),
sys_loadavg_15: loadavg.fifteen.into(),
pid_mem_resident_set_size: process_mem.rss(),
pid_mem_virtual_memory_size: process_mem.vms(),
sys_virt_mem_total: vm.total(),
sys_virt_mem_available: vm.available(),
sys_virt_mem_used: vm.used(),
sys_virt_mem_free: vm.free(),
sys_virt_mem_percent: vm.percent(),
sys_loadavg_1: loadavg.one,
sys_loadavg_5: loadavg.five,
sys_loadavg_15: loadavg.fifteen,
})
}
}
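The removed `.into()` calls were identity conversions (the source and target types already match), which clippy flags as `useless_conversion`. A tiny sketch:

    fn main() {
        let pid: u32 = 42;
        // `let p: u32 = pid.into();` compiles (From<T> is implemented for T)
        // but converts a type into itself, which clippy::useless_conversion flags.
        let p: u32 = pid;
        assert_eq!(p, 42);
    }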
|
@ -18,10 +18,10 @@ pub struct ManualSlotClock {
impl Clone for ManualSlotClock {
fn clone(&self) -> Self {
ManualSlotClock {
genesis_slot: self.genesis_slot.clone(),
genesis_duration: self.genesis_duration.clone(),
current_time: RwLock::new(self.current_time.read().clone()),
slot_duration: self.slot_duration.clone(),
genesis_slot: self.genesis_slot,
genesis_duration: self.genesis_duration,
current_time: RwLock::new(*self.current_time.read()),
slot_duration: self.slot_duration,
}
}
}
|
@ -98,7 +98,7 @@ impl SlotClock for ManualSlotClock {

Self {
genesis_slot,
current_time: RwLock::new(genesis_duration.clone()),
current_time: RwLock::new(genesis_duration),
genesis_duration,
slot_duration,
}
|
@ -264,7 +264,7 @@ fn unlock_keypair<P: AsRef<Path>>(
.as_ref()
.join(format!("0x{}", keystore.pubkey()));
let password: PlainText = read(&password_path)
.map_err(|_| Error::UnableToReadPassword(password_path.into()))?
.map_err(|_| Error::UnableToReadPassword(password_path))?
.into();

keystore
|
@ -1,11 +1,14 @@
use crate::ForkChoiceStore;
use std::marker::PhantomData;

use proto_array::{Block as ProtoBlock, ProtoArrayForkChoice};
use ssz_derive::{Decode, Encode};
use std::marker::PhantomData;
use types::{
BeaconBlock, BeaconState, BeaconStateError, Epoch, EthSpec, Hash256, IndexedAttestation, Slot,
};

use crate::ForkChoiceStore;
use std::cmp::Ordering;

/// Defined here:
///
/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#configuration
|
@ -301,21 +304,22 @@ where
.get_block(&block_root)
.ok_or_else(|| Error::MissingProtoArrayBlock(block_root))?;

if block.slot > ancestor_slot {
Ok(self
match block.slot.cmp(&ancestor_slot) {
Ordering::Greater => Ok(self
.proto_array
.core_proto_array()
.iter_block_roots(&block_root)
// Search for a slot that is **less than or equal to** the target slot. We check
// for lower slots to account for skip slots.
.find(|(_, slot)| *slot <= ancestor_slot)
.map(|(root, _)| root))
} else if block.slot == ancestor_slot {
Ok(Some(block_root))
} else {
// Root is older than queried slot, thus a skip slot. Return most recent root prior to
// slot.
Ok(Some(block_root))
.map(|(root, _)| root)),
Ordering::Less => Ok(Some(block_root)),
Ordering::Equal =>
// Root is older than queried slot, thus a skip slot. Return most recent root prior
// to slot.
{
Ok(Some(block_root))
}
}
}
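Chained `>` / `==` / `else` on the same two operands is clippy's `comparison_chain`; `Ord::cmp` with a `match` covers all three cases exhaustively. A sketch with hypothetical slot values:

    use std::cmp::Ordering;

    fn classify(slot: u64, ancestor_slot: u64) -> &'static str {
        // One comparison instead of the `if`/`else if`/`else` chain.
        match slot.cmp(&ancestor_slot) {
            Ordering::Greater => "descendant: walk the block roots backwards",
            Ordering::Equal => "exact match: return this root",
            Ordering::Less => "skip slot: return the most recent prior root",
        }
    }

    fn main() {
        assert_eq!(classify(3, 2), "descendant: walk the block roots backwards");
    }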
|
@ -331,17 +335,14 @@ where

let store = &mut self.fc_store;

let result = self
.proto_array
self.proto_array
.find_head(
store.justified_checkpoint().epoch,
store.justified_checkpoint().root,
store.finalized_checkpoint().epoch,
store.justified_balances(),
)
.map_err(Into::into);

result
.map_err(Into::into)
}

/// Returns `true` if the given `store` should be updated to set
|
@ -496,17 +497,16 @@ where
// information:
//
// https://github.com/ethereum/eth2.0-specs/pull/1880
if *self.fc_store.justified_checkpoint() != state.current_justified_checkpoint {
if state.current_justified_checkpoint.epoch
if *self.fc_store.justified_checkpoint() != state.current_justified_checkpoint
&& (state.current_justified_checkpoint.epoch
> self.fc_store.justified_checkpoint().epoch
|| self
.get_ancestor(self.fc_store.justified_checkpoint().root, finalized_slot)?
!= Some(self.fc_store.finalized_checkpoint().root)
{
self.fc_store
.set_justified_checkpoint(state.current_justified_checkpoint)
.map_err(Error::UnableToSetJustifiedCheckpoint)?;
}
!= Some(self.fc_store.finalized_checkpoint().root))
{
self.fc_store
.set_justified_checkpoint(state.current_justified_checkpoint)
.map_err(Error::UnableToSetJustifiedCheckpoint)?;
}
}
|
@ -557,13 +557,13 @@ where
//
// This is not in the specification, however it should be transparent to other nodes. We
// return early here to avoid wasting precious resources verifying the rest of it.
if indexed_attestation.attesting_indices.len() == 0 {
if indexed_attestation.attesting_indices.is_empty() {
return Err(InvalidAttestation::EmptyAggregationBitfield);
}

let slot_now = self.fc_store.get_current_slot();
let epoch_now = slot_now.epoch(E::slots_per_epoch());
let target = indexed_attestation.data.target.clone();
let target = indexed_attestation.data.target;

// Attestation must be from the current or previous epoch.
if target.epoch > epoch_now {
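`len() == 0` is clippy's `len_zero`; `is_empty` names the intent directly. A sketch with a hypothetical `validate`:

    fn validate(attesting_indices: &[u64]) -> Result<(), &'static str> {
        // `is_empty()` replaces the `len() == 0` comparison.
        if attesting_indices.is_empty() {
            return Err("empty aggregation bitfield");
        }
        Ok(())
    }

    fn main() {
        assert!(validate(&[]).is_err());
        assert!(validate(&[1]).is_ok());
    }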
|
@ -822,9 +822,10 @@ pub struct PersistedForkChoice {

#[cfg(test)]
mod tests {
use super::*;
use types::{EthSpec, MainnetEthSpec};

use super::*;

type E = MainnetEthSpec;

#[test]
|
@ -1,5 +1,4 @@
use proto_array::fork_choice_test_definition::*;
use serde_yaml;
use std::fs::File;

fn main() {
|
@ -358,14 +358,12 @@ impl ProtoArray {
}
}
}
} else if child_leads_to_viable_head {
// There is no current best-child and the child is viable.
change_to_child
} else {
if child_leads_to_viable_head {
// There is no current best-child and the child is viable.
change_to_child
} else {
// There is no current best-child but the child is not viable.
no_change
}
// There is no current best-child but the child is not viable.
no_change
};

let parent = self
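An `else` whose body is a single `if` collapses into `else if` (clippy's `collapsible_if` family), dropping one indentation level. A sketch with hypothetical booleans mirroring the best-child logic:

    fn decide(has_best_child: bool, child_is_viable: bool) -> &'static str {
        if has_best_child {
            "keep the current best-child"
        } else if child_is_viable {
            // was `else { if child_is_viable { ... } else { ... } }`
            "adopt the child"
        } else {
            "no change"
        }
    }

    fn main() {
        assert_eq!(decide(false, true), "adopt the child");
    }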
|
@ -95,7 +95,7 @@ impl ProtoArrayForkChoice {
.map_err(|e| format!("Failed to add finalized block to proto_array: {:?}", e))?;

Ok(Self {
proto_array: proto_array,
proto_array,
votes: ElasticList::default(),
balances: vec![],
})
|
@ -171,6 +171,10 @@ impl ProtoArrayForkChoice {
self.proto_array.nodes.len()
}

pub fn is_empty(&self) -> bool {
self.proto_array.nodes.is_empty()
}

pub fn contains_block(&self, block_root: &Hash256) -> bool {
self.proto_array.indices.contains_key(block_root)
}
|
@ -45,7 +45,7 @@ impl From<SszContainer> for ProtoArrayForkChoice {
};

Self {
proto_array: proto_array,
proto_array,
votes: ElasticList(from.votes),
balances: from.balances,
}
|
@ -1,5 +1,6 @@
use super::*;
use smallvec::{smallvec, SmallVec};
use std::cmp::Ordering;

type SmallVec8<T> = SmallVec<[T; 8]>;
|
@ -182,10 +183,12 @@ impl<'a> SszDecoderBuilder<'a> {
if let Some(first_offset) = self.offsets.first().map(|o| o.offset) {
// Check to ensure the first offset points to the byte immediately following the
// fixed-length bytes.
if first_offset < self.items_index {
return Err(DecodeError::OffsetIntoFixedPortion(first_offset));
} else if first_offset > self.items_index {
return Err(DecodeError::OffsetSkipsVariableBytes(first_offset));
match first_offset.cmp(&self.items_index) {
Ordering::Less => return Err(DecodeError::OffsetIntoFixedPortion(first_offset)),
Ordering::Greater => {
return Err(DecodeError::OffsetSkipsVariableBytes(first_offset))
}
Ordering::Equal => (),
}

// Iterate through each pair of offsets, grabbing the slice between each of the offsets.
|
@ -261,8 +261,7 @@ where
}
})
} else {
ssz::decode_list_of_variable_length_items(bytes, Some(fixed_len))
.and_then(|vec| Ok(vec.into()))
ssz::decode_list_of_variable_length_items(bytes, Some(fixed_len)).map(|vec| vec.into())
}
}
}
|
@ -301,7 +301,7 @@ pub fn process_proposer_slashings<T: EthSpec>(
// We have to verify in series because an invalid block may contain multiple slashings
// for the same validator, and we need to correctly detect and reject that.
proposer_slashings
.into_iter()
.iter()
.enumerate()
.try_for_each(|(i, proposer_slashing)| {
verify_proposer_slashing(proposer_slashing, &state, verify_signatures, spec)
|
@ -508,7 +508,7 @@ pub fn process_exits<T: EthSpec>(
) -> Result<(), BlockProcessingError> {
// Verify and apply each exit in series. We iterate in series because higher-index exits may
// become invalid due to the application of lower-index ones.
for (i, exit) in voluntary_exits.into_iter().enumerate() {
for (i, exit) in voluntary_exits.iter().enumerate() {
verify_exit(&state, exit, verify_signatures, spec).map_err(|e| e.into_with_index(i))?;

initiate_validator_exit(state, exit.message.validator_index as usize, spec)?;
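Calling `into_iter()` on a reference is clippy's `into_iter_on_ref`: ownership cannot be taken through `&`, so `iter()` states what actually happens. A sketch over a hypothetical slice of slashing indices:

    fn first_invalid(slashings: &[u64]) -> Option<(usize, &u64)> {
        // On a slice reference, `into_iter()` and `iter()` are the same iterator.
        slashings.iter().enumerate().find(|(_, s)| **s == 0)
    }

    fn main() {
        assert_eq!(first_invalid(&[3, 0, 7]), Some((1, &0)));
    }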
|
@ -129,7 +129,7 @@ impl<'a, T: EthSpec> BlockProcessingBuilder<'a, T> {
signature: AggregateSignature::new(),
};

for (i, &validator_index) in committee.committee.into_iter().enumerate() {
for (i, &validator_index) in committee.committee.iter().enumerate() {
if should_sign(i, validator_index) {
attestation
.sign(
|
@ -83,7 +83,7 @@ where
/// add signatures, and the `verify`
pub fn new(state: &'a BeaconState<T>, get_pubkey: F, spec: &'a ChainSpec) -> Self {
Self {
get_pubkey: get_pubkey,
get_pubkey,
state,
spec,
sets: vec![],
|
@ -129,7 +129,7 @@ where
.sets
.into_par_iter()
.chunks(num_chunks)
.map(|chunk| verify_signature_sets(chunk))
.map(verify_signature_sets)
.reduce(|| true, |current, this| current && this);

if result {
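`|chunk| verify_signature_sets(chunk)` is clippy's `redundant_closure`: the function already has the signature the combinator wants and can be passed by name. A sketch with a hypothetical `square`:

    fn square(x: i32) -> i32 {
        x * x
    }

    fn main() {
        // `.map(square)` replaces `.map(|x| square(x))`.
        let squares: Vec<i32> = (1..=4).map(square).collect();
        assert_eq!(squares, vec![1, 4, 9, 16]);
    }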
|
@ -314,8 +314,8 @@ pub fn deposit_pubkey_signature_message(

/// Returns the signature set for some set of deposit signatures, made with
/// `deposit_pubkey_signature_message`.
pub fn deposit_signature_set<'a>(
pubkey_signature_message: &'a (PublicKey, Signature, Vec<u8>),
pub fn deposit_signature_set(
pubkey_signature_message: &(PublicKey, Signature, Vec<u8>),
) -> SignatureSet {
let (pubkey, signature, message) = pubkey_signature_message;

|
@ -93,8 +93,8 @@ fn verify_casper_ffg_vote<T: EthSpec>(
verify!(
data.source == state.current_justified_checkpoint,
Invalid::WrongJustifiedCheckpoint {
state: state.current_justified_checkpoint.clone(),
attestation: data.source.clone(),
state: state.current_justified_checkpoint,
attestation: data.source,
is_current: true,
}
);
|
@ -103,8 +103,8 @@ fn verify_casper_ffg_vote<T: EthSpec>(
verify!(
data.source == state.previous_justified_checkpoint,
Invalid::WrongJustifiedCheckpoint {
state: state.previous_justified_checkpoint.clone(),
attestation: data.source.clone(),
state: state.previous_justified_checkpoint,
attestation: data.source,
is_current: false,
}
);
|
@ -91,11 +91,11 @@ pub fn process_justification_and_finalization<T: EthSpec>(
let previous_epoch = state.previous_epoch();
let current_epoch = state.current_epoch();

let old_previous_justified_checkpoint = state.previous_justified_checkpoint.clone();
let old_current_justified_checkpoint = state.current_justified_checkpoint.clone();
let old_previous_justified_checkpoint = state.previous_justified_checkpoint;
let old_current_justified_checkpoint = state.current_justified_checkpoint;

// Process justifications
state.previous_justified_checkpoint = state.current_justified_checkpoint.clone();
state.previous_justified_checkpoint = state.current_justified_checkpoint;
state.justification_bits.shift_up(1)?;

if total_balances
|
@ -21,11 +21,12 @@ pub fn per_slot_processing<T: EthSpec>(
) -> Result<Option<EpochProcessingSummary>, Error> {
cache_state(state, state_root)?;

let mut summary = None;

if state.slot > spec.genesis_slot && (state.slot + 1) % T::slots_per_epoch() == 0 {
summary = Some(per_epoch_processing(state, spec)?);
}
let summary = if state.slot > spec.genesis_slot && (state.slot + 1) % T::slots_per_epoch() == 0
{
Some(per_epoch_processing(state, spec)?)
} else {
None
};

state.slot += 1;
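The `summary` rewrite turns declare-then-conditionally-assign into a single `let` bound to an `if`/`else` expression, so the binding never needs `mut`. A sketch with a hypothetical `advance` mirroring the slot logic:

    fn advance(slot: &mut u64, slots_per_epoch: u64) -> Option<&'static str> {
        // One immutable binding replaces `let mut summary = None; if ... { summary = ... }`.
        let summary = if (*slot + 1) % slots_per_epoch == 0 {
            Some("epoch boundary: run per-epoch processing")
        } else {
            None
        };
        *slot += 1;
        summary
    }

    fn main() {
        let mut slot = 31;
        assert!(advance(&mut slot, 32).is_some());
        assert_eq!(slot, 32);
    }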
|
@ -1073,7 +1073,7 @@ impl<T: EthSpec> BeaconState<T> {
genesis_time: self.genesis_time,
genesis_validators_root: self.genesis_validators_root,
slot: self.slot,
fork: self.fork.clone(),
fork: self.fork,
latest_block_header: self.latest_block_header.clone(),
block_roots: self.block_roots.clone(),
state_roots: self.state_roots.clone(),
|
@ -1088,9 +1088,9 @@ impl<T: EthSpec> BeaconState<T> {
previous_epoch_attestations: self.previous_epoch_attestations.clone(),
current_epoch_attestations: self.current_epoch_attestations.clone(),
justification_bits: self.justification_bits.clone(),
previous_justified_checkpoint: self.previous_justified_checkpoint.clone(),
current_justified_checkpoint: self.current_justified_checkpoint.clone(),
finalized_checkpoint: self.finalized_checkpoint.clone(),
previous_justified_checkpoint: self.previous_justified_checkpoint,
current_justified_checkpoint: self.current_justified_checkpoint,
finalized_checkpoint: self.finalized_checkpoint,
committee_caches: if config.committee_caches {
self.committee_caches.clone()
} else {
|
@ -5,6 +5,7 @@ use crate::{BeaconState, EthSpec, Hash256, Unsigned, Validator};
use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, TreeHashCache};
use rayon::prelude::*;
use ssz_derive::{Decode, Encode};
use std::cmp::Ordering;
use tree_hash::{mix_in_length, MerkleHasher, TreeHash};

/// The number of fields on a beacon state.
|
@ -270,8 +271,8 @@ impl ParallelValidatorTreeHash {
/// This function makes assumptions that the `validators` list will only change in accordance
/// with valid per-block/per-slot state transitions.
fn leaves(&mut self, validators: &[Validator]) -> Result<Vec<Vec<Hash256>>, Error> {
if self.len() < validators.len() {
validators.iter().skip(self.len()).for_each(|v| {
match self.len().cmp(&validators.len()) {
Ordering::Less => validators.iter().skip(self.len()).for_each(|v| {
if self
.arenas
.last()
|
@ -287,9 +288,11 @@ impl ParallelValidatorTreeHash {
.expect("Cannot reach this block if arenas is empty.");
caches.push(v.new_tree_hash_cache(arena))
}
})
} else if validators.len() < self.len() {
return Err(Error::ValidatorRegistryShrunk);
}),
Ordering::Greater => {
return Err(Error::ValidatorRegistryShrunk);
}
Ordering::Equal => (),
}

self.arenas
|
@ -24,9 +24,9 @@ impl TestingAttestationDataBuilder {
let is_previous_epoch = slot.epoch(T::slots_per_epoch()) != current_epoch;

let mut source = if is_previous_epoch {
state.previous_justified_checkpoint.clone()
state.previous_justified_checkpoint
} else {
state.current_justified_checkpoint.clone()
state.current_justified_checkpoint
};

let mut target = if is_previous_epoch {
|
@ -42,7 +42,7 @@ impl TestingAttesterSlashingBuilder {
slot,
index,
beacon_block_root: hash_1,
source: checkpoint_1.clone(),
source: checkpoint_1,
target: checkpoint_1,
};

|
@ -60,7 +60,7 @@ type VerifySet<'a> = (
);

#[cfg(not(feature = "fake_crypto"))]
pub fn verify_signature_sets<'a>(sets: Vec<SignatureSet>) -> bool {
pub fn verify_signature_sets(sets: Vec<SignatureSet>) -> bool {
let rng = &mut rand::thread_rng();
let verify_set: Vec<VerifySet> = sets
.iter()
|
@ -81,7 +81,7 @@ fn derive_child_sk(parent_sk: &[u8], index: u32) -> SecretHash {
///
/// Equivalent to `HKDF_mod_r` in EIP-2333.
fn hkdf_mod_r(ikm: &[u8]) -> SecretHash {
let prk = hkdf_extract("BLS-SIG-KEYGEN-SALT-".as_bytes(), ikm);
let prk = hkdf_extract(b"BLS-SIG-KEYGEN-SALT-", ikm);
let okm = &hkdf_expand(prk, MOD_R_L);
mod_r(okm.as_bytes())
}
|
@ -16,6 +16,11 @@ impl PlainText {
self.0.len()
}

/// Checks whether `self` is empty
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}

/// Returns a reference to the underlying bytes.
pub fn as_bytes(&self) -> &[u8] {
&self.0
|
@ -14,6 +14,10 @@ impl HexBytes {
pub fn len(&self) -> usize {
self.0.len()
}

pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
}

impl From<Vec<u8>> for HexBytes {
|
@ -66,7 +66,7 @@ pub enum Prf {
impl Prf {
pub fn mac(&self, password: &[u8]) -> impl Mac {
match &self {
_hmac_sha256 => {
Prf::HmacSha256 => {
Hmac::<Sha256>::new_varkey(password).expect("Could not derive HMAC using SHA256.")
}
}
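The old `_hmac_sha256 => ...` arm was a catch-all binding, not a match on the variant; the behavior happens to be the same while `Prf` has a single variant, but naming `Prf::HmacSha256` keeps the `match` honest if variants are ever added. A sketch with a hypothetical single-variant enum:

    enum Prf {
        HmacSha256,
    }

    fn name(prf: &Prf) -> &'static str {
        match prf {
            // A lowercase identifier here would be a wildcard binding that
            // matches *anything*; the explicit variant path matches only it.
            Prf::HmacSha256 => "HMAC-SHA-256",
        }
    }

    fn main() {
        assert_eq!(name(&Prf::HmacSha256), "HMAC-SHA-256");
    }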
|
@ -409,10 +409,10 @@ fn derive_key(password: &[u8], kdf: &Kdf) -> Result<DerivedKey, Error> {
password,
params.salt.as_bytes(),
&ScryptParams::new(log2_int(params.n) as u8, params.r, params.p)
.map_err(|e| Error::ScryptInvalidParams(e))?,
.map_err(Error::ScryptInvalidParams)?,
dk.as_mut_bytes(),
)
.map_err(|e| Error::ScryptInvaidOutputLen(e))?;
.map_err(Error::ScryptInvaidOutputLen)?;
}
}

|
@ -1,5 +1,4 @@
use clap::ArgMatches;
use clap_utils;
use deposit_contract::{
testnet::{ABI, BYTECODE},
CONTRACT_DEPLOY_GAS,
|
@ -17,13 +17,11 @@ pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> {

let overwrite_files = matches.is_present("force");

if testnet_dir_path.exists() {
if !overwrite_files {
return Err(format!(
"{:?} already exists, will not overwrite. Use --force to overwrite",
testnet_dir_path
));
}
if testnet_dir_path.exists() && !overwrite_files {
return Err(format!(
"{:?} already exists, will not overwrite. Use --force to overwrite",
testnet_dir_path
));
}

let mut spec = T::default_spec();
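Nested `if`s with no other statements merge into one `&&` condition (clippy's `collapsible_if`). A sketch with a hypothetical `guard`:

    use std::path::Path;

    fn guard(path: &Path, overwrite: bool) -> Result<(), String> {
        // One condition replaces the `if` inside an `if`.
        if path.exists() && !overwrite {
            return Err(format!("{:?} already exists", path));
        }
        Ok(())
    }

    fn main() {
        assert!(guard(Path::new("/"), false).is_err());
    }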
|
@ -79,7 +79,7 @@ impl TaskExecutor {
/// This function generates prometheus metrics on number of tasks and task duration.
pub fn spawn_blocking<F>(&self, task: F, name: &'static str)
where
F: FnOnce() -> () + Send + 'static,
F: FnOnce() + Send + 'static,
{
let exit = self.exit.clone();
let log = self.log.clone();
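`FnOnce() -> ()` spells out the unit return type, which clippy's `unused_unit` flags; `FnOnce()` is equivalent. A sketch with a hypothetical `spawn_blocking`:

    fn spawn_blocking<F>(task: F) -> std::thread::JoinHandle<()>
    where
        F: FnOnce() + Send + 'static, // `-> ()` is implied
    {
        std::thread::spawn(task)
    }

    fn main() {
        spawn_blocking(|| println!("ran")).join().unwrap();
    }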
|
@ -3,7 +3,6 @@ extern crate clap;

use beacon_node::ProductionBeaconNode;
use clap::{App, Arg, ArgMatches};
use clap_utils;
use env_logger::{Builder, Env};
use environment::EnvironmentBuilder;
use eth2_testnet_config::HARDCODED_TESTNET;
|
@ -272,5 +271,6 @@ fn run<E: EthSpec>(
drop(validator_client);

// Shutdown the environment once all tasks have completed.
Ok(environment.shutdown_on_idle())
environment.shutdown_on_idle();
Ok(())
}
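`Ok(environment.shutdown_on_idle())` passed the `()` return value straight into `Ok`, which clippy's `unit_arg` flags; sequencing the call and then returning `Ok(())` reads plainly. A sketch with a hypothetical `shutdown_on_idle`:

    fn shutdown_on_idle() {}

    fn run() -> Result<(), String> {
        // Call for the side effect, then build the result separately.
        shutdown_on_idle();
        Ok(())
    }

    fn main() {
        assert!(run().is_ok());
    }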
|
@ -17,10 +17,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
.expect("missing validators_per_node default");
let speed_up_factor =
value_t!(matches, "speed_up_factor", u64).expect("missing speed_up_factor default");
let mut end_after_checks = true;
if matches.is_present("end_after_checks") {
end_after_checks = false;
}
let end_after_checks = !matches.is_present("end_after_checks");

println!("Beacon Chain Simulator:");
println!(" nodes:{}", node_count);
|
@ -83,7 +80,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
// Start a timer that produces eth1 blocks on an interval.
tokio::spawn(async move {
let mut interval = tokio::time::interval(eth1_block_time);
while let Some(_) = interval.next().await {
while interval.next().await.is_some() {
let _ = ganache.evm_mine().await;
}
});
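`while let Some(_) = ...` binds nothing, so clippy's `redundant_pattern_matching` suggests `.is_some()`. A sketch over a plain iterator (the diff applies the same shape to a stream of interval ticks):

    fn main() {
        let mut ticks = vec![1, 2, 3].into_iter();
        let mut count = 0;
        // Equivalent to `while let Some(_) = ticks.next()`, minus the unused binding.
        while ticks.next().is_some() {
            count += 1;
        }
        assert_eq!(count, 3);
    }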
|
@ -198,5 +195,6 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
Ok::<(), String>(())
};

Ok(env.runtime().block_on(main_future).unwrap())
env.runtime().block_on(main_future).unwrap();
Ok(())
}
|
@ -17,10 +17,7 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
.expect("missing validators_per_node default");
let speed_up_factor =
value_t!(matches, "speed_up_factor", u64).expect("missing speed_up_factor default");
let mut end_after_checks = true;
if matches.is_present("end_after_checks") {
end_after_checks = false;
}
let end_after_checks = !matches.is_present("end_after_checks");

println!("Beacon Chain Simulator:");
println!(" nodes:{}", node_count);
|
@ -165,5 +162,6 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
Ok::<(), String>(())
};

Ok(env.runtime().block_on(main_future).unwrap())
env.runtime().block_on(main_future).unwrap();
Ok(())
}
|
@ -209,7 +209,7 @@ pub async fn verify_one_node_sync<E: EthSpec>(
// limited to at most `sync_timeout` epochs
let mut interval = tokio::time::interval(epoch_duration);
let mut count = 0;
while let Some(_) = interval.next().await {
while interval.next().await.is_some() {
if count >= sync_timeout || !check_still_syncing(&network_c).await? {
break;
}
|
@ -246,7 +246,7 @@ pub async fn verify_two_nodes_sync<E: EthSpec>(
// limited to at most `sync_timeout` epochs
let mut interval = tokio::time::interval(epoch_duration);
let mut count = 0;
while let Some(_) = interval.next().await {
while interval.next().await.is_some() {
if count >= sync_timeout || !check_still_syncing(&network_c).await? {
break;
}
|
@ -294,7 +294,7 @@ pub async fn verify_in_between_sync<E: EthSpec>(
// limited to at most `sync_timeout` epochs
let mut interval = tokio::time::interval(epoch_duration);
let mut count = 0;
while let Some(_) = interval.next().await {
while interval.next().await.is_some() {
if count >= sync_timeout || !check_still_syncing(&network_c).await? {
break;
}
|
@ -190,7 +190,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
.into_iter()
.fold(HashMap::new(), |mut map, duty_and_proof| {
if let Some(committee_index) = duty_and_proof.duty.attestation_committee_index {
let validator_duties = map.entry(committee_index).or_insert_with(|| vec![]);
let validator_duties = map.entry(committee_index).or_insert_with(Vec::new);

validator_duties.push(duty_and_proof);
}
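`or_insert_with(|| vec![])` wraps a constructor in a closure; `Vec::new` already has the signature `or_insert_with` wants, so the closure can go. A sketch:

    use std::collections::HashMap;

    fn main() {
        let mut duties: HashMap<u64, Vec<&str>> = HashMap::new();
        // The function is passed by name instead of `|| vec![]`.
        duties.entry(1).or_insert_with(Vec::new).push("attest");
        assert_eq!(duties[&1], vec!["attest"]);
    }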
|
@ -151,9 +151,9 @@ enum InsertOutcome {

impl InsertOutcome {
/// Returns `true` if the outcome indicates that the validator _might_ require a subscription.
pub fn is_subscription_candidate(self) -> bool {
pub fn is_subscription_candidate(&self) -> bool {
match self {
InsertOutcome::Replaced { should_resubscribe } => should_resubscribe,
InsertOutcome::Replaced { should_resubscribe } => *should_resubscribe,
InsertOutcome::NewValidator | InsertOutcome::NewEpoch => true,
InsertOutcome::Identical | InsertOutcome::Invalid | InsertOutcome::NewProposalSlots => {
false
|
@ -95,7 +95,7 @@ impl<T, E: EthSpec> Deref for ForkService<T, E> {
impl<T: SlotClock + 'static, E: EthSpec> ForkService<T, E> {
/// Returns the last fork downloaded from the beacon node, if any.
pub fn fork(&self) -> Option<Fork> {
self.fork.read().clone()
*self.fork.read()
}

/// Starts the service that periodically polls for the `Fork`.
|
@ -345,11 +345,14 @@ impl InitializedValidators {
voting_public_key: &PublicKey,
enabled: bool,
) -> Result<(), Error> {
self.definitions
if let Some(def) = self
.definitions
.as_mut_slice()
.iter_mut()
.find(|def| def.voting_public_key == *voting_public_key)
.map(|def| def.enabled = enabled);
{
def.enabled = enabled;
}

self.update_validators()?;

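Using `Option::map` purely for a side effect is clippy's `option_map_unit_fn`; `if let` makes the mutation a statement rather than a discarded value. A sketch with a hypothetical `Definition` list:

    struct Definition {
        key: u32,
        enabled: bool,
    }

    fn set_enabled(defs: &mut [Definition], key: u32, enabled: bool) {
        // `if let` replaces `.find(...).map(|def| def.enabled = enabled);`.
        if let Some(def) = defs.iter_mut().find(|def| def.key == key) {
            def.enabled = enabled;
        }
    }

    fn main() {
        let mut defs = vec![Definition { key: 7, enabled: false }];
        set_enabled(&mut defs, 7, true);
        assert!(defs[0].enabled);
    }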
|
@ -6,7 +6,6 @@
use account_utils::{create_with_600_perms, default_keystore_password_path, ZeroizeString};
use eth2_keystore::Keystore;
use serde_derive::{Deserialize, Serialize};
use serde_yaml;
use slog::{error, Logger};
use std::collections::HashSet;
use std::fs::{self, OpenOptions};
|
@ -230,14 +229,13 @@ pub fn recursively_find_voting_keystores<P: AsRef<Path>>(
let file_type = dir_entry.file_type()?;
if file_type.is_dir() {
recursively_find_voting_keystores(dir_entry.path(), matches)?
} else if file_type.is_file() {
if dir_entry
} else if file_type.is_file()
&& dir_entry
.file_name()
.to_str()
.map_or(false, |filename| filename == VOTING_KEYSTORE_FILE)
{
matches.push(dir_entry.path())
}
{
matches.push(dir_entry.path())
}
Ok(())
})