Fix new clippy lints (#2036)

## Issue Addressed

NA

## Proposed Changes

Fixes new clippy lints in the whole project (mainly [manual_strip](https://rust-lang.github.io/rust-clippy/master/index.html#manual_strip) and [unnecessary_lazy_evaluations](https://rust-lang.github.io/rust-clippy/master/index.html#unnecessary_lazy_evaluations)). Furthermore, removes `to_string()` calls on literals when used with the `?`-operator.
This commit is contained in:
blacktemplar 2020-12-03 01:10:26 +00:00
parent d3f0a21436
commit d8cda2d86e
71 changed files with 314 additions and 364 deletions

View File

@ -133,7 +133,7 @@ pub fn cli_run<T: EthSpec>(
};
let deposit_gwei = clap_utils::parse_optional(matches, DEPOSIT_GWEI_FLAG)?
.unwrap_or_else(|| spec.max_effective_balance);
.unwrap_or(spec.max_effective_balance);
let count: Option<usize> = clap_utils::parse_optional(matches, COUNT_FLAG)?;
let at_most: Option<usize> = clap_utils::parse_optional(matches, AT_MOST_FLAG)?;

View File

@ -125,7 +125,7 @@ async fn publish_voluntary_exit<E: EthSpec>(
let keypair = load_voting_keypair(keystore_path, password_file_path, stdin_inputs)?;
let epoch = get_current_epoch::<E>(genesis_data.genesis_time, spec)
.ok_or_else(|| "Failed to get current epoch. Please check your system time".to_string())?;
.ok_or("Failed to get current epoch. Please check your system time")?;
let validator_index = get_validator_index_for_exit(client, &keypair.pk, epoch, spec).await?;
let fork = get_beacon_state_fork(client).await?;
@ -248,7 +248,7 @@ async fn get_beacon_state_fork(client: &BeaconNodeHttpClient) -> Result<Fork, St
.get_beacon_states_fork(StateId::Head)
.await
.map_err(|e| format!("Failed to get fork: {:?}", e))?
.ok_or_else(|| "Failed to get fork, state not found".to_string())?
.ok_or("Failed to get fork, state not found")?
.data)
}

View File

@ -49,7 +49,7 @@ pub fn cli_run<T: EthSpec>(
let testnet_config = env
.testnet
.ok_or_else(|| "Unable to get testnet configuration from the environment".to_string())?;
.ok_or("Unable to get testnet configuration from the environment")?;
let genesis_validators_root = testnet_config
.beacon_state::<T>()

View File

@ -648,7 +648,7 @@ impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> {
let validator_index = *indexed_attestation
.attesting_indices
.first()
.ok_or_else(|| Error::NotExactlyOneAggregationBitSet(0))?;
.ok_or(Error::NotExactlyOneAggregationBitSet(0))?;
/*
* The attestation is the first valid attestation received for the participating validator
@ -838,7 +838,7 @@ pub fn verify_propagation_slot_range<T: BeaconChainTypes>(
let latest_permissible_slot = chain
.slot_clock
.now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
.ok_or_else(|| BeaconChainError::UnableToReadSlot)?;
.ok_or(BeaconChainError::UnableToReadSlot)?;
if attestation_slot > latest_permissible_slot {
return Err(Error::FutureSlot {
attestation_slot,
@ -850,7 +850,7 @@ pub fn verify_propagation_slot_range<T: BeaconChainTypes>(
let earliest_permissible_slot = chain
.slot_clock
.now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
.ok_or_else(|| BeaconChainError::UnableToReadSlot)?
.ok_or(BeaconChainError::UnableToReadSlot)?
- T::EthSpec::slots_per_epoch();
if attestation_slot < earliest_permissible_slot {
return Err(Error::PastSlot {
@ -873,12 +873,12 @@ pub fn verify_attestation_signature<T: BeaconChainTypes>(
let pubkey_cache = chain
.validator_pubkey_cache
.try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
.ok_or_else(|| BeaconChainError::ValidatorPubkeyCacheLockTimeout)?;
.ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?;
let fork = chain
.canonical_head
.try_read_for(HEAD_LOCK_TIMEOUT)
.ok_or_else(|| BeaconChainError::CanonicalHeadLockTimeout)
.ok_or(BeaconChainError::CanonicalHeadLockTimeout)
.map(|head| head.beacon_state.fork)?;
let signature_set = indexed_attestation_signature_set_from_pubkeys(
@ -974,7 +974,7 @@ pub fn verify_signed_aggregate_signatures<T: BeaconChainTypes>(
let pubkey_cache = chain
.validator_pubkey_cache
.try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
.ok_or_else(|| BeaconChainError::ValidatorPubkeyCacheLockTimeout)?;
.ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?;
let aggregator_index = signed_aggregate.message.aggregator_index;
if aggregator_index >= pubkey_cache.len() as u64 {
@ -984,7 +984,7 @@ pub fn verify_signed_aggregate_signatures<T: BeaconChainTypes>(
let fork = chain
.canonical_head
.try_read_for(HEAD_LOCK_TIMEOUT)
.ok_or_else(|| BeaconChainError::CanonicalHeadLockTimeout)
.ok_or(BeaconChainError::CanonicalHeadLockTimeout)
.map(|head| head.beacon_state.fork)?;
let signature_sets = vec![

View File

@ -318,7 +318,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// The slot might be unavailable due to an error with the system clock, or if the present time
/// is before genesis (i.e., a negative slot).
pub fn slot(&self) -> Result<Slot, Error> {
self.slot_clock.now().ok_or_else(|| Error::UnableToReadSlot)
self.slot_clock.now().ok_or(Error::UnableToReadSlot)
}
/// Returns the epoch _right now_ according to `self.slot_clock`. Returns `Err` if the epoch is
@ -386,7 +386,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
) -> Result<impl Iterator<Item = Result<(Hash256, Slot), Error>>, Error> {
let block = self
.get_block(&block_root)?
.ok_or_else(|| Error::MissingBeaconBlock(block_root))?;
.ok_or(Error::MissingBeaconBlock(block_root))?;
let state = self
.get_state(&block.state_root(), Some(block.slot()))?
.ok_or_else(|| Error::MissingBeaconState(block.state_root()))?;
@ -531,7 +531,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let head_lock = self
.canonical_head
.try_read_for(HEAD_LOCK_TIMEOUT)
.ok_or_else(|| Error::CanonicalHeadLockTimeout)?;
.ok_or(Error::CanonicalHeadLockTimeout)?;
f(&head_lock)
}
@ -660,11 +660,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.find(|(_, current_slot)| *current_slot == slot)
.map(|(root, _slot)| root)
})?
.ok_or_else(|| Error::NoStateForSlot(slot))?;
.ok_or(Error::NoStateForSlot(slot))?;
Ok(self
.get_state(&state_root, Some(slot))?
.ok_or_else(|| Error::NoStateForSlot(slot))?)
.ok_or(Error::NoStateForSlot(slot))?)
}
}
}
@ -686,7 +686,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
self.canonical_head
.try_read_for(HEAD_LOCK_TIMEOUT)
.map(|head| head.beacon_block.slot())
.ok_or_else(|| Error::CanonicalHeadLockTimeout)
.ok_or(Error::CanonicalHeadLockTimeout)
}
/// Returns the validator index (if any) for the given public key.
@ -705,7 +705,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let pubkey_cache = self
.validator_pubkey_cache
.try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
.ok_or_else(|| Error::ValidatorPubkeyCacheLockTimeout)?;
.ok_or(Error::ValidatorPubkeyCacheLockTimeout)?;
Ok(pubkey_cache.get_index(pubkey))
}
@ -726,7 +726,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let pubkey_cache = self
.validator_pubkey_cache
.try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
.ok_or_else(|| Error::ValidatorPubkeyCacheLockTimeout)?;
.ok_or(Error::ValidatorPubkeyCacheLockTimeout)?;
Ok(pubkey_cache.get(validator_index).cloned())
}
@ -848,7 +848,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let head = self
.canonical_head
.try_read_for(HEAD_LOCK_TIMEOUT)
.ok_or_else(|| Error::CanonicalHeadLockTimeout)?;
.ok_or(Error::CanonicalHeadLockTimeout)?;
if slot >= head.beacon_block.slot() {
self.produce_unaggregated_attestation_for_block(
@ -879,7 +879,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let mut state = self
.get_state(&state_root, Some(slot))?
.ok_or_else(|| Error::MissingBeaconState(state_root))?;
.ok_or(Error::MissingBeaconState(state_root))?;
state.build_committee_cache(RelativeEpoch::Current, &self.spec)?;
@ -1068,7 +1068,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let fork = self
.canonical_head
.try_read_for(HEAD_LOCK_TIMEOUT)
.ok_or_else(|| Error::CanonicalHeadLockTimeout)?
.ok_or(Error::CanonicalHeadLockTimeout)?
.beacon_state
.fork;
@ -1607,7 +1607,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// known to fork choice. This ordering ensure that the pubkey cache is always up-to-date.
self.validator_pubkey_cache
.try_write_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
.ok_or_else(|| Error::ValidatorPubkeyCacheLockTimeout)?
.ok_or(Error::ValidatorPubkeyCacheLockTimeout)?
.import_new_pubkeys(&state)?;
// For the current and next epoch of this state, ensure we have the shuffling from this
@ -1618,7 +1618,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let shuffling_is_cached = self
.shuffling_cache
.try_read_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
.ok_or_else(|| Error::AttestationCacheLockTimeout)?
.ok_or(Error::AttestationCacheLockTimeout)?
.contains(&shuffling_id);
if !shuffling_is_cached {
@ -1626,7 +1626,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let committee_cache = state.committee_cache(*relative_epoch)?;
self.shuffling_cache
.try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
.ok_or_else(|| Error::AttestationCacheLockTimeout)?
.ok_or(Error::AttestationCacheLockTimeout)?
.insert(shuffling_id, committee_cache);
}
}
@ -1790,7 +1790,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let eth1_chain = self
.eth1_chain
.as_ref()
.ok_or_else(|| BlockProductionError::NoEth1ChainConnection)?;
.ok_or(BlockProductionError::NoEth1ChainConnection)?;
// If required, transition the new state to the present slot.
//
@ -1947,12 +1947,12 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.unwrap_or_else(|| {
let beacon_block = self
.get_block(&beacon_block_root)?
.ok_or_else(|| Error::MissingBeaconBlock(beacon_block_root))?;
.ok_or(Error::MissingBeaconBlock(beacon_block_root))?;
let beacon_state_root = beacon_block.state_root();
let beacon_state: BeaconState<T::EthSpec> = self
.get_state(&beacon_state_root, Some(beacon_block.slot()))?
.ok_or_else(|| Error::MissingBeaconState(beacon_state_root))?;
.ok_or(Error::MissingBeaconState(beacon_state_root))?;
Ok(BeaconSnapshot {
beacon_block,
@ -2038,7 +2038,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
*self
.canonical_head
.try_write_for(HEAD_LOCK_TIMEOUT)
.ok_or_else(|| Error::CanonicalHeadLockTimeout)? = new_head;
.ok_or(Error::CanonicalHeadLockTimeout)? = new_head;
metrics::stop_timer(update_head_timer);
@ -2065,7 +2065,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let head = self
.canonical_head
.try_read_for(HEAD_LOCK_TIMEOUT)
.ok_or_else(|| Error::CanonicalHeadLockTimeout)?;
.ok_or(Error::CanonicalHeadLockTimeout)?;
// State root of the finalized state on the epoch boundary, NOT the state
// of the finalized block. We need to use an iterator in case the state is beyond
@ -2087,7 +2087,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
})
},
)?
.ok_or_else(|| Error::MissingFinalizedStateRoot(new_finalized_slot))?;
.ok_or(Error::MissingFinalizedStateRoot(new_finalized_slot))?;
self.after_finalization(&head.beacon_state, new_finalized_state_root)?;
}
@ -2250,7 +2250,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.fork_choice
.read()
.get_block(&head_block_root)
.ok_or_else(|| Error::MissingBeaconBlock(head_block_root))?;
.ok_or(Error::MissingBeaconBlock(head_block_root))?;
let shuffling_id = BlockShufflingIds {
current: head_block.current_epoch_shuffling_id.clone(),
@ -2270,7 +2270,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let mut shuffling_cache = self
.shuffling_cache
.try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
.ok_or_else(|| Error::AttestationCacheLockTimeout)?;
.ok_or(Error::AttestationCacheLockTimeout)?;
metrics::stop_timer(cache_wait_timer);
@ -2297,7 +2297,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
&head_block.state_root,
Some(head_block.slot),
)?
.ok_or_else(|| Error::MissingBeaconState(head_block.state_root))?;
.ok_or(Error::MissingBeaconState(head_block.state_root))?;
metrics::stop_timer(state_read_timer);
let state_skip_timer =
@ -2326,7 +2326,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
self.shuffling_cache
.try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
.ok_or_else(|| Error::AttestationCacheLockTimeout)?
.ok_or(Error::AttestationCacheLockTimeout)?
.insert(shuffling_id, committee_cache);
metrics::stop_timer(committee_building_timer);
@ -2396,7 +2396,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
pub fn enr_fork_id(&self) -> EnrForkId {
// If we are unable to read the slot clock we assume that it is prior to genesis and
// therefore use the genesis slot.
let slot = self.slot().unwrap_or_else(|_| self.spec.genesis_slot);
let slot = self.slot().unwrap_or(self.spec.genesis_slot);
self.spec.enr_fork_id(slot, self.genesis_validators_root)
}
@ -2412,7 +2412,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let canonical_head_hash = self
.canonical_head
.try_read_for(HEAD_LOCK_TIMEOUT)
.ok_or_else(|| Error::CanonicalHeadLockTimeout)
.ok_or(Error::CanonicalHeadLockTimeout)
.unwrap()
.beacon_block_root;
let mut visited: HashSet<Hash256> = HashSet::new();

View File

@ -320,14 +320,14 @@ where
.store
.get_item::<SignedBeaconBlock<E>>(&self.justified_checkpoint.root)
.map_err(Error::FailedToReadBlock)?
.ok_or_else(|| Error::MissingBlock(self.justified_checkpoint.root))?
.ok_or(Error::MissingBlock(self.justified_checkpoint.root))?
.message;
self.justified_balances = self
.store
.get_state(&justified_block.state_root, Some(justified_block.slot))
.map_err(Error::FailedToReadState)?
.ok_or_else(|| Error::MissingState(justified_block.state_root))?
.ok_or(Error::MissingState(justified_block.state_root))?
.balances
.into();
}

View File

@ -452,7 +452,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
let present_slot_with_tolerance = chain
.slot_clock
.now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
.ok_or_else(|| BeaconChainError::UnableToReadSlot)?;
.ok_or(BeaconChainError::UnableToReadSlot)?;
if block.slot() > present_slot_with_tolerance {
return Err(BlockError::FutureSlot {
present_slot: present_slot_with_tolerance,
@ -513,7 +513,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
let pubkey_cache = get_validator_pubkey_cache(chain)?;
let pubkey = pubkey_cache
.get(block.message.proposer_index as usize)
.ok_or_else(|| BlockError::UnknownValidator(block.message.proposer_index))?;
.ok_or(BlockError::UnknownValidator(block.message.proposer_index))?;
block.verify_signature(
Some(block_root),
pubkey,
@ -1180,7 +1180,7 @@ fn get_validator_pubkey_cache<T: BeaconChainTypes>(
chain
.validator_pubkey_cache
.try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
.ok_or_else(|| BeaconChainError::ValidatorPubkeyCacheLockTimeout)
.ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)
.map_err(BlockError::BeaconChainError)
}
@ -1220,7 +1220,7 @@ fn verify_header_signature<T: BeaconChainTypes>(
let proposer_pubkey = get_validator_pubkey_cache(chain)?
.get(header.message.proposer_index as usize)
.cloned()
.ok_or_else(|| BlockError::UnknownValidator(header.message.proposer_index))?;
.ok_or(BlockError::UnknownValidator(header.message.proposer_index))?;
let (fork, genesis_validators_root) = chain
.with_head(|head| {
Ok((

View File

@ -211,7 +211,7 @@ where
let store = self
.store
.clone()
.ok_or_else(|| "get_persisted_eth1_backend requires a store.".to_string())?;
.ok_or("get_persisted_eth1_backend requires a store.")?;
store
.get_item::<SszEth1>(&ETH1_CACHE_DB_KEY)
@ -223,7 +223,7 @@ where
let store = self
.store
.clone()
.ok_or_else(|| "store_contains_beacon_chain requires a store.".to_string())?;
.ok_or("store_contains_beacon_chain requires a store.")?;
Ok(store
.get_item::<PersistedBeaconChain>(&BEACON_CHAIN_DB_KEY)
@ -235,15 +235,12 @@ where
///
/// May initialize several components; including the op_pool and finalized checkpoints.
pub fn resume_from_db(mut self) -> Result<Self, String> {
let log = self
.log
.as_ref()
.ok_or_else(|| "resume_from_db requires a log".to_string())?;
let log = self.log.as_ref().ok_or("resume_from_db requires a log")?;
let pubkey_cache_path = self
.pubkey_cache_path
.as_ref()
.ok_or_else(|| "resume_from_db requires a data_dir".to_string())?;
.ok_or("resume_from_db requires a data_dir")?;
info!(
log,
@ -254,7 +251,7 @@ where
let store = self
.store
.clone()
.ok_or_else(|| "resume_from_db requires a store.".to_string())?;
.ok_or("resume_from_db requires a store.")?;
let chain = store
.get_item::<PersistedBeaconChain>(&BEACON_CHAIN_DB_KEY)
@ -267,7 +264,7 @@ where
let persisted_fork_choice = store
.get_item::<PersistedForkChoice>(&FORK_CHOICE_DB_KEY)
.map_err(|e| format!("DB error when reading persisted fork choice: {:?}", e))?
.ok_or_else(|| "No persisted fork choice present in database.".to_string())?;
.ok_or("No persisted fork choice present in database.")?;
let fc_store = BeaconForkChoiceStore::from_persisted(
persisted_fork_choice.fork_choice_store,
@ -282,11 +279,11 @@ where
let genesis_block = store
.get_item::<SignedBeaconBlock<TEthSpec>>(&chain.genesis_block_root)
.map_err(|e| format!("DB error when reading genesis block: {:?}", e))?
.ok_or_else(|| "Genesis block not found in store".to_string())?;
.ok_or("Genesis block not found in store")?;
let genesis_state = store
.get_state(&genesis_block.state_root(), Some(genesis_block.slot()))
.map_err(|e| format!("DB error when reading genesis state: {:?}", e))?
.ok_or_else(|| "Genesis block not found in store".to_string())?;
.ok_or("Genesis block not found in store")?;
self.genesis_time = Some(genesis_state.genesis_time);
@ -318,10 +315,7 @@ where
mut self,
mut beacon_state: BeaconState<TEthSpec>,
) -> Result<Self, String> {
let store = self
.store
.clone()
.ok_or_else(|| "genesis_state requires a store")?;
let store = self.store.clone().ok_or("genesis_state requires a store")?;
let beacon_block = genesis_block(&mut beacon_state, &self.spec)?;
@ -436,35 +430,28 @@ where
>,
String,
> {
let log = self
.log
.ok_or_else(|| "Cannot build without a logger".to_string())?;
let log = self.log.ok_or("Cannot build without a logger")?;
let slot_clock = self
.slot_clock
.ok_or_else(|| "Cannot build without a slot_clock.".to_string())?;
let store = self
.store
.clone()
.ok_or_else(|| "Cannot build without a store.".to_string())?;
.ok_or("Cannot build without a slot_clock.")?;
let store = self.store.clone().ok_or("Cannot build without a store.")?;
let mut fork_choice = self
.fork_choice
.ok_or_else(|| "Cannot build without fork choice.".to_string())?;
.ok_or("Cannot build without fork choice.")?;
let genesis_block_root = self
.genesis_block_root
.ok_or_else(|| "Cannot build without a genesis block root".to_string())?;
.ok_or("Cannot build without a genesis block root")?;
let genesis_state_root = self
.genesis_state_root
.ok_or_else(|| "Cannot build without a genesis state root".to_string())?;
.ok_or("Cannot build without a genesis state root")?;
let current_slot = if slot_clock
.is_prior_to_genesis()
.ok_or_else(|| "Unable to read slot clock".to_string())?
.ok_or("Unable to read slot clock")?
{
self.spec.genesis_slot
} else {
slot_clock
.now()
.ok_or_else(|| "Unable to read slot".to_string())?
slot_clock.now().ok_or("Unable to read slot")?
};
let head_block_root = fork_choice
@ -474,12 +461,12 @@ where
let head_block = store
.get_item::<SignedBeaconBlock<TEthSpec>>(&head_block_root)
.map_err(|e| format!("DB error when reading head block: {:?}", e))?
.ok_or_else(|| "Head block not found in store".to_string())?;
.ok_or("Head block not found in store")?;
let head_state_root = head_block.state_root();
let head_state = store
.get_state(&head_state_root, Some(head_block.slot()))
.map_err(|e| format!("DB error when reading head state: {:?}", e))?
.ok_or_else(|| "Head state not found in store".to_string())?;
.ok_or("Head state not found in store")?;
let mut canonical_head = BeaconSnapshot {
beacon_block_root: head_block_root,
@ -520,7 +507,7 @@ where
let pubkey_cache_path = self
.pubkey_cache_path
.ok_or_else(|| "Cannot build without a pubkey cache path".to_string())?;
.ok_or("Cannot build without a pubkey cache path")?;
let validator_pubkey_cache = self.validator_pubkey_cache.map(Ok).unwrap_or_else(|| {
ValidatorPubkeyCache::new(&canonical_head.beacon_state, pubkey_cache_path)
@ -541,9 +528,7 @@ where
store,
store_migrator,
slot_clock,
op_pool: self
.op_pool
.ok_or_else(|| "Cannot build without op pool".to_string())?,
op_pool: self.op_pool.ok_or("Cannot build without op pool")?,
// TODO: allow for persisting and loading the pool from disk.
naive_aggregation_pool: <_>::default(),
// TODO: allow for persisting and loading the pool from disk.
@ -566,7 +551,7 @@ where
fork_choice: RwLock::new(fork_choice),
event_handler: self
.event_handler
.ok_or_else(|| "Cannot build without an event handler".to_string())?,
.ok_or("Cannot build without an event handler")?,
head_tracker: Arc::new(self.head_tracker.unwrap_or_default()),
snapshot_cache: TimeoutRwLock::new(SnapshotCache::new(
DEFAULT_SNAPSHOT_CACHE_SIZE,
@ -577,7 +562,7 @@ where
disabled_forks: self.disabled_forks,
shutdown_sender: self
.shutdown_sender
.ok_or_else(|| "Cannot build without a shutdown sender.".to_string())?,
.ok_or("Cannot build without a shutdown sender.")?,
log: log.clone(),
graffiti: self.graffiti,
slasher: self.slasher.clone(),
@ -648,7 +633,7 @@ where
let log = self
.log
.as_ref()
.ok_or_else(|| "dummy_eth1_backend requires a log".to_string())?;
.ok_or("dummy_eth1_backend requires a log")?;
let backend =
CachingEth1Backend::new(Eth1Config::default(), log.clone(), self.spec.clone());
@ -676,7 +661,7 @@ where
pub fn testing_slot_clock(self, slot_duration: Duration) -> Result<Self, String> {
let genesis_time = self
.genesis_time
.ok_or_else(|| "testing_slot_clock requires an initialized state")?;
.ok_or("testing_slot_clock requires an initialized state")?;
let slot_clock = TestingSlotClock::new(
Slot::new(0),

View File

@ -83,7 +83,7 @@ impl<E: EthSpec> AggregatedAttestationMap<E> {
let committee_index = set_bits
.first()
.copied()
.ok_or_else(|| Error::NoAggregationBitsSet)?;
.ok_or(Error::NoAggregationBitsSet)?;
if set_bits.len() > 1 {
return Err(Error::MoreThanOneAggregationBitSet(set_bits.len()));

View File

@ -144,7 +144,7 @@ impl<E: EthSpec> ObservedAttestations<E> {
self.sets
.get_mut(index)
.ok_or_else(|| Error::InvalidSetIndex(index))
.ok_or(Error::InvalidSetIndex(index))
.and_then(|set| set.observe_attestation(a, root))
}
@ -156,7 +156,7 @@ impl<E: EthSpec> ObservedAttestations<E> {
self.sets
.get(index)
.ok_or_else(|| Error::InvalidSetIndex(index))
.ok_or(Error::InvalidSetIndex(index))
.and_then(|set| set.is_known(a, root))
}

View File

@ -137,13 +137,11 @@ where
let chain_config = config.chain.clone();
let graffiti = config.graffiti;
let store =
store.ok_or_else(|| "beacon_chain_start_method requires a store".to_string())?;
let store = store.ok_or("beacon_chain_start_method requires a store")?;
let context = runtime_context
.ok_or_else(|| "beacon_chain_start_method requires a runtime context".to_string())?
.ok_or("beacon_chain_start_method requires a runtime context")?
.service_context("beacon".into());
let spec = chain_spec
.ok_or_else(|| "beacon_chain_start_method requires a chain spec".to_string())?;
let spec = chain_spec.ok_or("beacon_chain_start_method requires a chain spec")?;
let builder = BeaconChainBuilder::new(eth_spec_instance)
.logger(context.log().clone())
@ -160,9 +158,7 @@ where
builder
};
let chain_exists = builder
.store_contains_beacon_chain()
.unwrap_or_else(|_| false);
let chain_exists = builder.store_contains_beacon_chain().unwrap_or(false);
// If the client is expect to resume but there's no beacon chain in the database,
// use the `DepositContract` method. This scenario is quite common when the client
@ -310,11 +306,11 @@ where
let beacon_chain = self
.beacon_chain
.clone()
.ok_or_else(|| "network requires a beacon chain")?;
.ok_or("network requires a beacon chain")?;
let context = self
.runtime_context
.as_ref()
.ok_or_else(|| "network requires a runtime_context")?
.ok_or("network requires a runtime_context")?
.clone();
let (network_globals, network_send) =
@ -333,16 +329,16 @@ where
let context = self
.runtime_context
.as_ref()
.ok_or_else(|| "node timer requires a runtime_context")?
.ok_or("node timer requires a runtime_context")?
.service_context("node_timer".into());
let beacon_chain = self
.beacon_chain
.clone()
.ok_or_else(|| "node timer requires a beacon chain")?;
.ok_or("node timer requires a beacon chain")?;
let milliseconds_per_slot = self
.chain_spec
.as_ref()
.ok_or_else(|| "node timer requires a chain spec".to_string())?
.ok_or("node timer requires a chain spec")?
.milliseconds_per_slot;
spawn_timer(context.executor, beacon_chain, milliseconds_per_slot)
@ -370,16 +366,16 @@ where
let context = self
.runtime_context
.as_ref()
.ok_or_else(|| "slasher requires a runtime_context")?
.ok_or("slasher requires a runtime_context")?
.service_context("slasher_server_ctxt".into());
let slasher = self
.slasher
.clone()
.ok_or_else(|| "slasher server requires a slasher")?;
.ok_or("slasher server requires a slasher")?;
let slot_clock = self
.slot_clock
.clone()
.ok_or_else(|| "slasher server requires a slot clock")?;
.ok_or("slasher server requires a slot clock")?;
SlasherServer::run(slasher, slot_clock, &context.executor);
Ok(())
}
@ -389,20 +385,20 @@ where
let context = self
.runtime_context
.as_ref()
.ok_or_else(|| "slot_notifier requires a runtime_context")?
.ok_or("slot_notifier requires a runtime_context")?
.service_context("slot_notifier".into());
let beacon_chain = self
.beacon_chain
.clone()
.ok_or_else(|| "slot_notifier requires a beacon chain")?;
.ok_or("slot_notifier requires a beacon chain")?;
let network_globals = self
.network_globals
.clone()
.ok_or_else(|| "slot_notifier requires a libp2p network")?;
.ok_or("slot_notifier requires a libp2p network")?;
let milliseconds_per_slot = self
.chain_spec
.as_ref()
.ok_or_else(|| "slot_notifier requires a chain spec".to_string())?
.ok_or("slot_notifier requires a chain spec")?
.milliseconds_per_slot;
spawn_notifier(
@ -430,7 +426,7 @@ where
let runtime_context = self
.runtime_context
.as_ref()
.ok_or_else(|| "build requires a runtime context".to_string())?;
.ok_or("build requires a runtime context")?;
let log = runtime_context.log().clone();
let http_api_listen_addr = if self.http_api_config.enabled {
@ -518,20 +514,20 @@ where
let context = self
.runtime_context
.as_ref()
.ok_or_else(|| "beacon_chain requires a runtime context")?
.ok_or("beacon_chain requires a runtime context")?
.clone();
let chain = self
.beacon_chain_builder
.ok_or_else(|| "beacon_chain requires a beacon_chain_builder")?
.ok_or("beacon_chain requires a beacon_chain_builder")?
.event_handler(
self.event_handler
.ok_or_else(|| "beacon_chain requires an event handler")?,
.ok_or("beacon_chain requires an event handler")?,
)
.slot_clock(
self.slot_clock
.clone()
.ok_or_else(|| "beacon_chain requires a slot clock")?,
.ok_or("beacon_chain requires a slot clock")?,
)
.shutdown_sender(context.executor.shutdown_sender())
.build()
@ -573,7 +569,7 @@ where
let context = self
.runtime_context
.as_ref()
.ok_or_else(|| "tee_event_handler requires a runtime_context")?
.ok_or("tee_event_handler requires a runtime_context")?
.service_context("ws".into());
let log = context.log().clone();
@ -619,12 +615,12 @@ where
let context = self
.runtime_context
.as_ref()
.ok_or_else(|| "disk_store requires a log".to_string())?
.ok_or("disk_store requires a log")?
.service_context("freezer_db".into());
let spec = self
.chain_spec
.clone()
.ok_or_else(|| "disk_store requires a chain spec".to_string())?;
.ok_or("disk_store requires a chain spec")?;
self.db_path = Some(hot_path.into());
self.freezer_db_path = Some(cold_path.into());
@ -661,15 +657,15 @@ where
let context = self
.runtime_context
.as_ref()
.ok_or_else(|| "caching_eth1_backend requires a runtime_context")?
.ok_or("caching_eth1_backend requires a runtime_context")?
.service_context("eth1_rpc".into());
let beacon_chain_builder = self
.beacon_chain_builder
.ok_or_else(|| "caching_eth1_backend requires a beacon_chain_builder")?;
.ok_or("caching_eth1_backend requires a beacon_chain_builder")?;
let spec = self
.chain_spec
.clone()
.ok_or_else(|| "caching_eth1_backend requires a chain spec".to_string())?;
.ok_or("caching_eth1_backend requires a chain spec")?;
let backend = if let Some(eth1_service_from_genesis) = self.eth1_service {
eth1_service_from_genesis.update_config(config)?;
@ -720,7 +716,7 @@ where
pub fn no_eth1_backend(mut self) -> Result<Self, String> {
let beacon_chain_builder = self
.beacon_chain_builder
.ok_or_else(|| "caching_eth1_backend requires a beacon_chain_builder")?;
.ok_or("caching_eth1_backend requires a beacon_chain_builder")?;
self.beacon_chain_builder = Some(beacon_chain_builder.no_eth1_backend());
@ -739,7 +735,7 @@ where
pub fn dummy_eth1_backend(mut self) -> Result<Self, String> {
let beacon_chain_builder = self
.beacon_chain_builder
.ok_or_else(|| "caching_eth1_backend requires a beacon_chain_builder")?;
.ok_or("caching_eth1_backend requires a beacon_chain_builder")?;
self.beacon_chain_builder = Some(beacon_chain_builder.dummy_eth1_backend()?);
@ -763,16 +759,16 @@ where
let beacon_chain_builder = self
.beacon_chain_builder
.as_ref()
.ok_or_else(|| "system_time_slot_clock requires a beacon_chain_builder")?;
.ok_or("system_time_slot_clock requires a beacon_chain_builder")?;
let genesis_time = beacon_chain_builder
.genesis_time
.ok_or_else(|| "system_time_slot_clock requires an initialized beacon state")?;
.ok_or("system_time_slot_clock requires an initialized beacon state")?;
let spec = self
.chain_spec
.clone()
.ok_or_else(|| "system_time_slot_clock requires a chain spec".to_string())?;
.ok_or("system_time_slot_clock requires a chain spec")?;
let slot_clock = SystemTimeSlotClock::new(
spec.genesis_slot,

View File

@ -101,7 +101,7 @@ impl Config {
pub fn create_db_path(&self) -> Result<PathBuf, String> {
let db_path = self
.get_db_path()
.ok_or_else(|| "Unable to locate user home directory")?;
.ok_or("Unable to locate user home directory")?;
ensure_dir_exists(db_path)
}
@ -125,7 +125,7 @@ impl Config {
pub fn create_freezer_db_path(&self) -> Result<PathBuf, String> {
let freezer_db_path = self
.get_freezer_db_path()
.ok_or_else(|| "Unable to locate user home directory")?;
.ok_or("Unable to locate user home directory")?;
ensure_dir_exists(freezer_db_path)
}
@ -142,7 +142,7 @@ impl Config {
pub fn create_data_dir(&self) -> Result<PathBuf, String> {
let path = self
.get_data_dir()
.ok_or_else(|| "Unable to locate user home directory".to_string())?;
.ok_or("Unable to locate user home directory")?;
ensure_dir_exists(path)
}
}

View File

@ -31,7 +31,7 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
let duration_to_next_slot = beacon_chain
.slot_clock
.duration_to_next_slot()
.ok_or_else(|| "slot_notifier unable to determine time to next slot")?;
.ok_or("slot_notifier unable to determine time to next slot")?;
// Run this half way through each slot.
let start_instant = tokio::time::Instant::now() + duration_to_next_slot + (slot_duration / 2);
@ -94,7 +94,7 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
metrics::set_gauge(
&metrics::SYNC_SLOTS_PER_SECOND,
speedo.slots_per_second().unwrap_or_else(|| 0_f64) as i64,
speedo.slots_per_second().unwrap_or(0_f64) as i64,
);
// The next two lines take advantage of saturating subtraction on `Slot`.

View File

@ -25,19 +25,19 @@ impl Log {
let pubkey = bytes
.get(PUBKEY_START..PUBKEY_START + PUBKEY_LEN)
.ok_or_else(|| "Insufficient bytes for pubkey".to_string())?;
.ok_or("Insufficient bytes for pubkey")?;
let withdrawal_credentials = bytes
.get(CREDS_START..CREDS_START + CREDS_LEN)
.ok_or_else(|| "Insufficient bytes for withdrawal credential".to_string())?;
.ok_or("Insufficient bytes for withdrawal credential")?;
let amount = bytes
.get(AMOUNT_START..AMOUNT_START + AMOUNT_LEN)
.ok_or_else(|| "Insufficient bytes for amount".to_string())?;
.ok_or("Insufficient bytes for amount")?;
let signature = bytes
.get(SIG_START..SIG_START + SIG_LEN)
.ok_or_else(|| "Insufficient bytes for signature".to_string())?;
.ok_or("Insufficient bytes for signature")?;
let index = bytes
.get(INDEX_START..INDEX_START + INDEX_LEN)
.ok_or_else(|| "Insufficient bytes for index".to_string())?;
.ok_or("Insufficient bytes for index")?;
let deposit_data = DepositData {
pubkey: PublicKeyBytes::from_ssz_bytes(pubkey)

View File

@ -83,9 +83,9 @@ pub async fn get_network_id(endpoint: &str, timeout: Duration) -> Result<Eth1Id,
let response_body = send_rpc_request(endpoint, "net_version", json!([]), timeout).await?;
Eth1Id::from_str(
response_result(&response_body)?
.ok_or_else(|| "No result was returned for network id".to_string())?
.ok_or("No result was returned for network id")?
.as_str()
.ok_or_else(|| "Data was not string")?,
.ok_or("Data was not string")?,
)
}
@ -94,9 +94,9 @@ pub async fn get_chain_id(endpoint: &str, timeout: Duration) -> Result<Eth1Id, S
let response_body = send_rpc_request(endpoint, "eth_chainId", json!([]), timeout).await?;
hex_to_u64_be(
response_result(&response_body)?
.ok_or_else(|| "No result was returned for chain id".to_string())?
.ok_or("No result was returned for chain id")?
.as_str()
.ok_or_else(|| "Data was not string")?,
.ok_or("Data was not string")?,
)
.map(Into::into)
}
@ -115,9 +115,9 @@ pub async fn get_block_number(endpoint: &str, timeout: Duration) -> Result<u64,
let response_body = send_rpc_request(endpoint, "eth_blockNumber", json!([]), timeout).await?;
hex_to_u64_be(
response_result(&response_body)?
.ok_or_else(|| "No result field was returned for block number".to_string())?
.ok_or("No result field was returned for block number")?
.as_str()
.ok_or_else(|| "Data was not string")?,
.ok_or("Data was not string")?,
)
.map_err(|e| format!("Failed to get block number: {}", e))
}
@ -142,11 +142,11 @@ pub async fn get_block(
let response_body = send_rpc_request(endpoint, "eth_getBlockByNumber", params, timeout).await?;
let hash = hex_to_bytes(
response_result(&response_body)?
.ok_or_else(|| "No result field was returned for block".to_string())?
.ok_or("No result field was returned for block")?
.get("hash")
.ok_or_else(|| "No hash for block")?
.ok_or("No hash for block")?
.as_str()
.ok_or_else(|| "Block hash was not string")?,
.ok_or("Block hash was not string")?,
)?;
let hash = if hash.len() == 32 {
Ok(Hash256::from_slice(&hash))
@ -156,20 +156,20 @@ pub async fn get_block(
let timestamp = hex_to_u64_be(
response_result(&response_body)?
.ok_or_else(|| "No result field was returned for timestamp".to_string())?
.ok_or("No result field was returned for timestamp")?
.get("timestamp")
.ok_or_else(|| "No timestamp for block")?
.ok_or("No timestamp for block")?
.as_str()
.ok_or_else(|| "Block timestamp was not string")?,
.ok_or("Block timestamp was not string")?,
)?;
let number = hex_to_u64_be(
response_result(&response_body)?
.ok_or_else(|| "No result field was returned for number".to_string())?
.ok_or("No result field was returned for number")?
.get("number")
.ok_or_else(|| "No number for block")?
.ok_or("No number for block")?
.as_str()
.ok_or_else(|| "Block number was not string")?,
.ok_or("Block number was not string")?,
)?;
if number <= usize::max_value() as u64 {
@ -287,7 +287,7 @@ async fn call(
let hex = result
.as_str()
.map(|s| s.to_string())
.ok_or_else(|| "'result' value was not a string".to_string())?;
.ok_or("'result' value was not a string")?;
Ok(Some(hex_to_bytes(&hex)?))
}
@ -322,23 +322,23 @@ pub async fn get_deposit_logs_in_range(
let response_body = send_rpc_request(endpoint, "eth_getLogs", params, timeout).await?;
response_result(&response_body)?
.ok_or_else(|| "No result field was returned for deposit logs".to_string())?
.ok_or("No result field was returned for deposit logs")?
.as_array()
.cloned()
.ok_or_else(|| "'result' value was not an array".to_string())?
.ok_or("'result' value was not an array")?
.into_iter()
.map(|value| {
let block_number = value
.get("blockNumber")
.ok_or_else(|| "No block number field in log")?
.ok_or("No block number field in log")?
.as_str()
.ok_or_else(|| "Block number was not string")?;
.ok_or("Block number was not string")?;
let data = value
.get("data")
.ok_or_else(|| "No block number field in log")?
.ok_or("No block number field in log")?
.as_str()
.ok_or_else(|| "Data was not string")?;
.ok_or("Data was not string")?;
Ok(Log {
block_number: hex_to_u64_be(&block_number)?,
@ -389,7 +389,7 @@ pub async fn send_rpc_request(
let encoding = response
.headers()
.get(CONTENT_TYPE)
.ok_or_else(|| "No content-type header in response".to_string())?
.ok_or("No content-type header in response")?
.to_str()
.map(|s| s.to_string())
.map_err(|e| format!("Failed to parse content-type header: {}", e))?;
@ -443,8 +443,8 @@ fn hex_to_bytes(hex: &str) -> Result<Vec<u8>, String> {
/// Removes the `0x` prefix from some bytes. Returns an error if the prefix is not present.
fn strip_prefix(hex: &str) -> Result<&str, String> {
if hex.starts_with("0x") {
Ok(&hex[2..])
if let Some(stripped) = hex.strip_prefix("0x") {
Ok(stripped)
} else {
Err("Hex string did not start with `0x`".to_string())
}

View File

@ -808,8 +808,8 @@ impl Service {
.chunks(blocks_per_log_query)
.take(max_log_requests_per_update)
.map(|vec| {
let first = vec.first().cloned().unwrap_or_else(|| 0);
let last = vec.last().map(|n| n + 1).unwrap_or_else(|| 0);
let first = vec.first().cloned().unwrap_or(0);
let last = vec.last().map(|n| n + 1).unwrap_or(0);
first..last
})
.collect::<Vec<Range<u64>>>()
@ -894,7 +894,7 @@ impl Service {
metrics::set_gauge(&metrics::DEPOSIT_CACHE_LEN, cache.cache.len() as i64);
metrics::set_gauge(
&metrics::HIGHEST_PROCESSED_DEPOSIT_BLOCK,
cache.last_processed_block.unwrap_or_else(|| 0) as i64,
cache.last_processed_block.unwrap_or(0) as i64,
);
}
@ -1035,7 +1035,7 @@ impl Service {
.block_cache
.read()
.latest_block_timestamp()
.unwrap_or_else(|| 0) as i64,
.unwrap_or(0) as i64,
);
blocks_imported += 1;

View File

@ -34,16 +34,14 @@ impl Eth2Enr for Enr {
fn bitfield<TSpec: EthSpec>(&self) -> Result<EnrBitfield<TSpec>, &'static str> {
let bitfield_bytes = self
.get(BITFIELD_ENR_KEY)
.ok_or_else(|| "ENR bitfield non-existent")?;
.ok_or("ENR bitfield non-existent")?;
BitVector::<TSpec::SubnetBitfieldLength>::from_ssz_bytes(bitfield_bytes)
.map_err(|_| "Could not decode the ENR SSZ bitfield")
}
fn eth2(&self) -> Result<EnrForkId, &'static str> {
let eth2_bytes = self
.get(ETH2_ENR_KEY)
.ok_or_else(|| "ENR has no eth2 field")?;
let eth2_bytes = self.get(ETH2_ENR_KEY).ok_or("ENR has no eth2 field")?;
EnrForkId::from_ssz_bytes(eth2_bytes).map_err(|_| "Could not decode EnrForkId")
}
@ -79,7 +77,7 @@ pub fn use_or_load_enr(
// same node id, different configuration - update the sequence number
// Note: local_enr is generated with default(0) attnets value,
// so a non default value in persisted enr will also update sequence number.
let new_seq_no = disk_enr.seq().checked_add(1).ok_or_else(|| "ENR sequence number on file is too large. Remove it to generate a new NodeId")?;
let new_seq_no = disk_enr.seq().checked_add(1).ok_or("ENR sequence number on file is too large. Remove it to generate a new NodeId")?;
local_enr.set_seq(new_seq_no, enr_key).map_err(|e| {
format!("Could not update ENR sequence number: {:?}", e)
})?;
@ -133,7 +131,7 @@ pub fn create_enr_builder_from_config<T: EnrKey>(
}
// we always give it our listening tcp port
if enable_tcp {
let tcp_port = config.enr_tcp_port.unwrap_or_else(|| config.libp2p_port);
let tcp_port = config.enr_tcp_port.unwrap_or(config.libp2p_port);
builder.tcp(tcp_port);
}
builder

View File

@ -83,10 +83,7 @@ impl<T: EthSpec> PeerInfo<T> {
/// Returns if the peer is subscribed to a given `SubnetId`
pub fn on_subnet(&self, subnet_id: SubnetId) -> bool {
if let Some(meta_data) = &self.meta_data {
return meta_data
.attnets
.get(*subnet_id as usize)
.unwrap_or_else(|_| false);
return meta_data.attnets.get(*subnet_id as usize).unwrap_or(false);
}
false
}

View File

@ -313,8 +313,7 @@ impl PartialOrd for Score {
impl Ord for Score {
fn cmp(&self, other: &Score) -> std::cmp::Ordering {
self.partial_cmp(other)
.unwrap_or_else(|| std::cmp::Ordering::Equal)
self.partial_cmp(other).unwrap_or(std::cmp::Ordering::Equal)
}
}

View File

@ -375,8 +375,8 @@ fn build_transport(
// Useful helper functions for debugging. Currently not used in the client.
#[allow(dead_code)]
fn keypair_from_hex(hex_bytes: &str) -> error::Result<Keypair> {
let hex_bytes = if hex_bytes.starts_with("0x") {
hex_bytes[2..].to_string()
let hex_bytes = if let Some(stripped) = hex_bytes.strip_prefix("0x") {
stripped.to_string()
} else {
hex_bytes.to_string()
};

View File

@ -30,7 +30,7 @@ impl BeaconProposerCache {
.fork_choice
.read()
.get_block(&head_root)
.ok_or_else(|| BeaconChainError::MissingBeaconBlock(head_root))?;
.ok_or(BeaconChainError::MissingBeaconBlock(head_root))?;
// If the head epoch is more than `EPOCHS_TO_SKIP` in the future, just build the cache at
// the epoch of the head. This prevents doing a massive amount of skip slots when starting
@ -63,7 +63,7 @@ impl BeaconProposerCache {
let mut head_state = chain
.get_state(&head_block.state_root, Some(head_block.slot))?
.ok_or_else(|| BeaconChainError::MissingBeaconState(head_block.state_root))?;
.ok_or(BeaconChainError::MissingBeaconState(head_block.state_root))?;
let decision_block_root = Self::decision_block_root(current_epoch, head_root, &head_state)?;
@ -85,7 +85,7 @@ impl BeaconProposerCache {
.and_then(|i| {
let pubkey = chain
.validator_pubkey(i)?
.ok_or_else(|| BeaconChainError::ValidatorPubkeyCacheIncomplete(i))?;
.ok_or(BeaconChainError::ValidatorPubkeyCacheIncomplete(i))?;
Ok(ProposerData {
pubkey: PublicKeyBytes::from(pubkey),
@ -168,7 +168,7 @@ impl BeaconProposerCache {
.fork_choice
.read()
.get_block(&head_block_root)
.ok_or_else(|| BeaconChainError::MissingBeaconBlock(head_block_root))
.ok_or(BeaconChainError::MissingBeaconBlock(head_block_root))
.map_err(warp_utils::reject::beacon_chain_error)?;
// Rebuild the cache if this call causes a cache-miss.

View File

@ -315,7 +315,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
.beacon_chain
.slot_clock
.now()
.ok_or_else(|| "Could not get the current slot")?;
.ok_or("Could not get the current slot")?;
let discovery_subnets: Vec<SubnetDiscovery> = exact_subnets
.filter_map(|exact_subnet| {
@ -363,7 +363,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
.beacon_chain
.slot_clock
.now()
.ok_or_else(|| "Could not get the current slot")?;
.ok_or("Could not get the current slot")?;
// Calculate the duration to the unsubscription event.
// There are two main cases. Attempting to subscribe to the current slot and all others.
@ -371,7 +371,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
self.beacon_chain
.slot_clock
.duration_to_next_slot()
.ok_or_else(|| "Unable to determine duration to next slot")?
.ok_or("Unable to determine duration to next slot")?
} else {
let slot_duration = self.beacon_chain.slot_clock.slot_duration();
@ -380,7 +380,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
self.beacon_chain
.slot_clock
.duration_to_slot(exact_subnet.slot)
.ok_or_else(|| "Unable to determine duration to subscription slot")?
.ok_or("Unable to determine duration to subscription slot")?
+ slot_duration
};

View File

@ -33,18 +33,14 @@ pub fn get_config<E: EthSpec>(
// If necessary, remove any existing database and configuration
if client_config.data_dir.exists() && cli_args.is_present("purge-db") {
// Remove the chain_db.
fs::remove_dir_all(
client_config
.get_db_path()
.ok_or_else(|| "Failed to get db_path".to_string())?,
)
.map_err(|err| format!("Failed to remove chain_db: {}", err))?;
fs::remove_dir_all(client_config.get_db_path().ok_or("Failed to get db_path")?)
.map_err(|err| format!("Failed to remove chain_db: {}", err))?;
// Remove the freezer db.
fs::remove_dir_all(
client_config
.get_freezer_db_path()
.ok_or_else(|| "Failed to get freezer db path".to_string())?,
.ok_or("Failed to get freezer db path")?,
)
.map_err(|err| format!("Failed to remove chain_db: {}", err))?;
@ -319,10 +315,10 @@ pub fn get_config<E: EthSpec>(
let mut split = wss_checkpoint.split(':');
let root_str = split
.next()
.ok_or_else(|| "Improperly formatted weak subjectivity checkpoint".to_string())?;
.ok_or("Improperly formatted weak subjectivity checkpoint")?;
let epoch_str = split
.next()
.ok_or_else(|| "Improperly formatted weak subjectivity checkpoint".to_string())?;
.ok_or("Improperly formatted weak subjectivity checkpoint")?;
if !root_str.starts_with("0x") {
return Err(
@ -555,7 +551,7 @@ pub fn set_network_config(
resolved_addrs
.next()
.map(|a| a.ip())
.ok_or_else(|| "Resolved dns addr contains no entries".to_string())?
.ok_or("Resolved dns addr contains no entries")?
} else {
return Err(format!("Failed to parse enr-address: {}", enr_address));
};

View File

@ -394,9 +394,9 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
// boundary state in the hot DB.
let state = self
.load_hot_state(&epoch_boundary_state_root, BlockReplay::Accurate)?
.ok_or_else(|| {
HotColdDBError::MissingEpochBoundaryState(epoch_boundary_state_root)
})?;
.ok_or(HotColdDBError::MissingEpochBoundaryState(
epoch_boundary_state_root,
))?;
Ok(Some(state))
} else {
// Try the cold DB
@ -553,10 +553,9 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
epoch_boundary_state_root,
}) = self.load_hot_state_summary(state_root)?
{
let boundary_state = get_full_state(&self.hot_db, &epoch_boundary_state_root)?
.ok_or_else(|| {
HotColdDBError::MissingEpochBoundaryState(epoch_boundary_state_root)
})?;
let boundary_state = get_full_state(&self.hot_db, &epoch_boundary_state_root)?.ok_or(
HotColdDBError::MissingEpochBoundaryState(epoch_boundary_state_root),
)?;
// Optimization to avoid even *thinking* about replaying blocks if we're already
// on an epoch boundary.
@ -682,8 +681,9 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
let high_restore_point = if high_restore_point_idx * self.config.slots_per_restore_point
>= split.slot.as_u64()
{
self.get_state(&split.state_root, Some(split.slot))?
.ok_or_else(|| HotColdDBError::MissingSplitState(split.state_root, split.slot))?
self.get_state(&split.state_root, Some(split.slot))?.ok_or(
HotColdDBError::MissingSplitState(split.state_root, split.slot),
)?
} else {
self.load_restore_point_by_index(high_restore_point_idx)?
};
@ -1019,7 +1019,7 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
if slot % store.config.slots_per_restore_point == 0 {
let state: BeaconState<E> = get_full_state(&store.hot_db, &state_root)?
.ok_or_else(|| HotColdDBError::MissingStateToFreeze(state_root))?;
.ok_or(HotColdDBError::MissingStateToFreeze(state_root))?;
store.store_cold_state(&state_root, &state, &mut cold_db_ops)?;
}

View File

@ -21,7 +21,7 @@ pub fn spawn_timer<T: BeaconChainTypes>(
+ beacon_chain
.slot_clock
.duration_to_next_slot()
.ok_or_else(|| "slot_notifier unable to determine time to next slot")?;
.ok_or("slot_notifier unable to determine time to next slot")?;
// Warning: `interval_at` panics if `milliseconds_per_slot` = 0.
let mut interval = interval_at(start_instant, Duration::from_millis(milliseconds_per_slot));

View File

@ -84,9 +84,9 @@ impl<T: EthSpec> TryFrom<&ArgMatches<'_>> for BootNodeConfig<T> {
let spec = config
.yaml_config
.as_ref()
.ok_or_else(|| "The testnet directory must contain a spec config".to_string())?
.ok_or("The testnet directory must contain a spec config")?
.apply_to_chain_spec::<T>(&T::default_spec())
.ok_or_else(|| "The loaded config is not compatible with the current spec")?;
.ok_or("The loaded config is not compatible with the current spec")?;
if config.beacon_state_is_known() {
let genesis_state = config.beacon_state::<T>()?;

View File

@ -86,14 +86,12 @@ impl ValidatorDefinition {
let voting_keystore_path = voting_keystore_path.as_ref().into();
let keystore =
Keystore::from_json_file(&voting_keystore_path).map_err(Error::UnableToOpenKeystore)?;
let voting_public_key = keystore
.public_key()
.ok_or_else(|| Error::InvalidKeystorePubkey)?;
let voting_public_key = keystore.public_key().ok_or(Error::InvalidKeystorePubkey)?;
Ok(ValidatorDefinition {
enabled: true,
voting_public_key,
description: keystore.description().unwrap_or_else(|| "").to_string(),
description: keystore.description().unwrap_or("").to_string(),
signing_definition: SigningDefinition::LocalKeystore {
voting_keystore_path,
voting_keystore_password_path: None,
@ -214,7 +212,7 @@ impl ValidatorDefinitions {
Some(ValidatorDefinition {
enabled: true,
voting_public_key,
description: keystore.description().unwrap_or_else(|| "").to_string(),
description: keystore.description().unwrap_or("").to_string(),
signing_definition: SigningDefinition::LocalKeystore {
voting_keystore_path,
voting_keystore_password_path,

View File

@ -101,8 +101,8 @@ pub fn parse_ssz_optional<T: Decode>(
matches
.value_of(name)
.map(|val| {
if val.starts_with("0x") {
let vec = hex::decode(&val[2..])
if let Some(stripped) = val.strip_prefix("0x") {
let vec = hex::decode(stripped)
.map_err(|e| format!("Unable to parse {} as hex: {:?}", name, e))?;
T::from_ssz_bytes(&vec)

View File

@ -86,7 +86,7 @@ pub fn download_deposit_contract(
let abi = contract
.get("abi")
.ok_or_else(|| "Response does not contain key: abi".to_string())?
.ok_or("Response does not contain key: abi")?
.to_string();
verify_checksum(abi.as_bytes(), abi_checksum);
@ -97,7 +97,7 @@ pub fn download_deposit_contract(
let bytecode = contract
.get("bytecode")
.ok_or_else(|| "Response does not contain key: bytecode".to_string())?
.ok_or("Response does not contain key: bytecode")?
.to_string();
verify_checksum(bytecode.as_bytes(), bytecode_checksum);

View File

@ -55,8 +55,7 @@ pub fn decode_eth1_tx_data(
) -> Result<(DepositData, Hash256), DecodeError> {
let abi = Contract::load(ABI)?;
let function = abi.function("deposit")?;
let mut tokens =
function.decode_input(bytes.get(4..).ok_or_else(|| DecodeError::InadequateBytes)?)?;
let mut tokens = function.decode_input(bytes.get(4..).ok_or(DecodeError::InadequateBytes)?)?;
macro_rules! decode_token {
($type: ty, $to_fn: ident) => {

View File

@ -79,7 +79,7 @@ impl ValidatorClientHttpClient {
let sig = response
.headers()
.get("Signature")
.ok_or_else(|| Error::MissingSignatureHeader)?
.ok_or(Error::MissingSignatureHeader)?
.to_str()
.map_err(|_| Error::InvalidSignatureHeader)?
.to_string();
@ -96,7 +96,7 @@ impl ValidatorClientHttpClient {
Some(secp256k1::verify(&message, &sig, &self.server_pubkey))
})
.filter(|is_valid| *is_valid)
.ok_or_else(|| Error::InvalidSignatureHeader)?;
.ok_or(Error::InvalidSignatureHeader)?;
Ok(body)
}

View File

@ -106,8 +106,8 @@ impl TryInto<Keypair> for YamlKeypair {
}
fn string_to_bytes(string: &str) -> Result<Vec<u8>, String> {
let string = if string.starts_with("0x") {
&string[2..]
let string = if let Some(stripped) = string.strip_prefix("0x") {
stripped
} else {
string
};

View File

@ -122,7 +122,7 @@ impl Eth2TestnetConfig {
let genesis_state_bytes = self
.genesis_state_bytes
.as_ref()
.ok_or_else(|| "Genesis state is unknown".to_string())?;
.ok_or("Genesis state is unknown")?;
BeaconState::from_ssz_bytes(genesis_state_bytes)
.map_err(|e| format!("Genesis state SSZ bytes are invalid: {:?}", e))
@ -156,8 +156,8 @@ impl Eth2TestnetConfig {
//
// This allows us to play nice with other clients that are expecting
// plain-text, not YAML.
let no_doc_header = if yaml.starts_with("---\n") {
&yaml[4..]
let no_doc_header = if let Some(stripped) = yaml.strip_prefix("---\n") {
stripped
} else {
&yaml
};

View File

@ -138,7 +138,7 @@ impl<'a> Builder<'a> {
pub fn build(self) -> Result<ValidatorDir, Error> {
let (voting_keystore, voting_password) = self
.voting_keystore
.ok_or_else(|| Error::UninitializedVotingKeystore)?;
.ok_or(Error::UninitializedVotingKeystore)?;
let dir = self
.base_validators_dir

View File

@ -167,8 +167,8 @@ impl ValidatorDir {
.map_err(Error::UnableToReadDepositData)
.and_then(|hex_bytes| {
let hex = std::str::from_utf8(&hex_bytes).map_err(|_| Error::DepositDataNotUtf8)?;
if hex.starts_with("0x") {
hex::decode(&hex[2..]).map_err(Error::DepositDataInvalidHex)
if let Some(stripped) = hex.strip_prefix("0x") {
hex::decode(stripped).map_err(Error::DepositDataInvalidHex)
} else {
Err(Error::DepositDataMissing0xPrefix)
}

View File

@ -115,7 +115,7 @@ impl TreeHashCache {
let left = self.layers[depth]
.get(arena, left_idx)?
.ok_or_else(|| Error::MissingLeftIdx(left_idx))?;
.ok_or(Error::MissingLeftIdx(left_idx))?;
let right = self.layers[depth]
.get(arena, right_idx)?
.copied()

View File

@ -55,9 +55,7 @@ impl<T: Encode + Decode> CacheArena<T> {
.iter_mut()
.skip(alloc_id + 1)
.try_for_each(|offset| {
*offset = offset
.checked_add(grow_by)
.ok_or_else(|| Error::OffsetOverflow)?;
*offset = offset.checked_add(grow_by).ok_or(Error::OffsetOverflow)?;
Ok(())
})
@ -75,7 +73,7 @@ impl<T: Encode + Decode> CacheArena<T> {
.try_for_each(|offset| {
*offset = offset
.checked_sub(shrink_by)
.ok_or_else(|| Error::OffsetUnderflow)?;
.ok_or(Error::OffsetUnderflow)?;
Ok(())
})
@ -99,15 +97,12 @@ impl<T: Encode + Decode> CacheArena<T> {
let offset = *self
.offsets
.get(alloc_id)
.ok_or_else(|| Error::UnknownAllocId(alloc_id))?;
.ok_or(Error::UnknownAllocId(alloc_id))?;
let start = range
.start
.checked_add(offset)
.ok_or_else(|| Error::RangeOverFlow)?;
let end = range
.end
.checked_add(offset)
.ok_or_else(|| Error::RangeOverFlow)?;
.ok_or(Error::RangeOverFlow)?;
let end = range.end.checked_add(offset).ok_or(Error::RangeOverFlow)?;
let prev_len = self.backing.len();
@ -127,7 +122,7 @@ impl<T: Encode + Decode> CacheArena<T> {
let start = self
.offsets
.get(alloc_id)
.ok_or_else(|| Error::UnknownAllocId(alloc_id))?;
.ok_or(Error::UnknownAllocId(alloc_id))?;
let end = self
.offsets
.get(alloc_id + 1)
@ -143,7 +138,7 @@ impl<T: Encode + Decode> CacheArena<T> {
let offset = self
.offsets
.get(alloc_id)
.ok_or_else(|| Error::UnknownAllocId(alloc_id))?;
.ok_or(Error::UnknownAllocId(alloc_id))?;
Ok(self.backing.get(i + offset))
} else {
Ok(None)
@ -156,7 +151,7 @@ impl<T: Encode + Decode> CacheArena<T> {
let offset = self
.offsets
.get(alloc_id)
.ok_or_else(|| Error::UnknownAllocId(alloc_id))?;
.ok_or(Error::UnknownAllocId(alloc_id))?;
Ok(self.backing.get_mut(i + offset))
} else {
Ok(None)
@ -168,7 +163,7 @@ impl<T: Encode + Decode> CacheArena<T> {
let start = *self
.offsets
.get(alloc_id)
.ok_or_else(|| Error::UnknownAllocId(alloc_id))?;
.ok_or(Error::UnknownAllocId(alloc_id))?;
let end = self
.offsets
.get(alloc_id + 1)

View File

@ -312,7 +312,7 @@ where
let block = self
.proto_array
.get_block(&block_root)
.ok_or_else(|| Error::MissingProtoArrayBlock(block_root))?;
.ok_or(Error::MissingProtoArrayBlock(block_root))?;
match block.slot.cmp(&ancestor_slot) {
Ordering::Greater => Ok(self
@ -618,7 +618,7 @@ where
let block = self
.proto_array
.get_block(&indexed_attestation.data.beacon_block_root)
.ok_or_else(|| InvalidAttestation::UnknownHeadBlock {
.ok_or(InvalidAttestation::UnknownHeadBlock {
beacon_block_root: indexed_attestation.data.beacon_block_root,
})?;

View File

@ -77,7 +77,7 @@ impl ProtoArray {
let node = self
.nodes
.get_mut(node_index)
.ok_or_else(|| Error::InvalidNodeIndex(node_index))?;
.ok_or(Error::InvalidNodeIndex(node_index))?;
// There is no need to adjust the balances or manage parent of the zero hash since it
// is an alias to the genesis block. The weight applied to the genesis block is
@ -89,7 +89,7 @@ impl ProtoArray {
let node_delta = deltas
.get(node_index)
.copied()
.ok_or_else(|| Error::InvalidNodeDelta(node_index))?;
.ok_or(Error::InvalidNodeDelta(node_index))?;
// Apply the delta to the node.
if node_delta < 0 {
@ -105,19 +105,19 @@ impl ProtoArray {
node.weight = node
.weight
.checked_sub(node_delta.abs() as u64)
.ok_or_else(|| Error::DeltaOverflow(node_index))?;
.ok_or(Error::DeltaOverflow(node_index))?;
} else {
node.weight = node
.weight
.checked_add(node_delta as u64)
.ok_or_else(|| Error::DeltaOverflow(node_index))?;
.ok_or(Error::DeltaOverflow(node_index))?;
}
// If the node has a parent, try to update its best-child and best-descendant.
if let Some(parent_index) = node.parent {
let parent_delta = deltas
.get_mut(parent_index)
.ok_or_else(|| Error::InvalidParentDelta(parent_index))?;
.ok_or(Error::InvalidParentDelta(parent_index))?;
// Back-propagate the nodes delta to its parent.
*parent_delta += node_delta;
@ -185,16 +185,14 @@ impl ProtoArray {
let justified_node = self
.nodes
.get(justified_index)
.ok_or_else(|| Error::InvalidJustifiedIndex(justified_index))?;
.ok_or(Error::InvalidJustifiedIndex(justified_index))?;
let best_descendant_index = justified_node
.best_descendant
.unwrap_or_else(|| justified_index);
let best_descendant_index = justified_node.best_descendant.unwrap_or(justified_index);
let best_node = self
.nodes
.get(best_descendant_index)
.ok_or_else(|| Error::InvalidBestDescendant(best_descendant_index))?;
.ok_or(Error::InvalidBestDescendant(best_descendant_index))?;
// Perform a sanity check that the node is indeed valid to be the head.
if !self.node_is_viable_for_head(&best_node) {
@ -228,7 +226,7 @@ impl ProtoArray {
let finalized_index = *self
.indices
.get(&finalized_root)
.ok_or_else(|| Error::FinalizedNodeUnknown(finalized_root))?;
.ok_or(Error::FinalizedNodeUnknown(finalized_root))?;
if finalized_index < self.prune_threshold {
// Pruning at small numbers incurs more cost than benefit.
@ -240,7 +238,7 @@ impl ProtoArray {
let root = &self
.nodes
.get(node_index)
.ok_or_else(|| Error::InvalidNodeIndex(node_index))?
.ok_or(Error::InvalidNodeIndex(node_index))?
.root;
self.indices.remove(root);
}
@ -252,7 +250,7 @@ impl ProtoArray {
for (_root, index) in self.indices.iter_mut() {
*index = index
.checked_sub(finalized_index)
.ok_or_else(|| Error::IndexOverflow("indices"))?;
.ok_or(Error::IndexOverflow("indices"))?;
}
// Iterate through all the existing nodes and adjust their indices to match the new layout
@ -266,14 +264,14 @@ impl ProtoArray {
node.best_child = Some(
best_child
.checked_sub(finalized_index)
.ok_or_else(|| Error::IndexOverflow("best_child"))?,
.ok_or(Error::IndexOverflow("best_child"))?,
);
}
if let Some(best_descendant) = node.best_descendant {
node.best_descendant = Some(
best_descendant
.checked_sub(finalized_index)
.ok_or_else(|| Error::IndexOverflow("best_descendant"))?,
.ok_or(Error::IndexOverflow("best_descendant"))?,
);
}
}
@ -301,12 +299,12 @@ impl ProtoArray {
let child = self
.nodes
.get(child_index)
.ok_or_else(|| Error::InvalidNodeIndex(child_index))?;
.ok_or(Error::InvalidNodeIndex(child_index))?;
let parent = self
.nodes
.get(parent_index)
.ok_or_else(|| Error::InvalidNodeIndex(parent_index))?;
.ok_or(Error::InvalidNodeIndex(parent_index))?;
let child_leads_to_viable_head = self.node_leads_to_viable_head(&child)?;
@ -335,7 +333,7 @@ impl ProtoArray {
let best_child = self
.nodes
.get(best_child_index)
.ok_or_else(|| Error::InvalidBestDescendant(best_child_index))?;
.ok_or(Error::InvalidBestDescendant(best_child_index))?;
let best_child_leads_to_viable_head =
self.node_leads_to_viable_head(&best_child)?;
@ -373,7 +371,7 @@ impl ProtoArray {
let parent = self
.nodes
.get_mut(parent_index)
.ok_or_else(|| Error::InvalidNodeIndex(parent_index))?;
.ok_or(Error::InvalidNodeIndex(parent_index))?;
parent.best_child = new_best_child;
parent.best_descendant = new_best_descendant;
@ -389,7 +387,7 @@ impl ProtoArray {
let best_descendant = self
.nodes
.get(best_descendant_index)
.ok_or_else(|| Error::InvalidBestDescendant(best_descendant_index))?;
.ok_or(Error::InvalidBestDescendant(best_descendant_index))?;
self.node_is_viable_for_head(best_descendant)
} else {

View File

@ -287,14 +287,14 @@ fn compute_deltas(
// If the validator was not included in the _old_ balances (i.e., it did not exist yet)
// then say its balance was zero.
let old_balance = old_balances.get(val_index).copied().unwrap_or_else(|| 0);
let old_balance = old_balances.get(val_index).copied().unwrap_or(0);
// If the validators vote is not known in the _new_ balances, then use a balance of zero.
//
// It is possible that there is a vote for an unknown validator if we change our justified
// state to a new state with a higher epoch that is on a different fork because that fork may have
// on-boarded less validators than the prior fork.
let new_balance = new_balances.get(val_index).copied().unwrap_or_else(|| 0);
let new_balance = new_balances.get(val_index).copied().unwrap_or(0);
if vote.current_root != vote.next_root || old_balance != new_balance {
// We ignore the vote if it is not known in `indices`. We assume that it is outside
@ -302,9 +302,9 @@ fn compute_deltas(
if let Some(current_delta_index) = indices.get(&vote.current_root).copied() {
let delta = deltas
.get(current_delta_index)
.ok_or_else(|| Error::InvalidNodeDelta(current_delta_index))?
.ok_or(Error::InvalidNodeDelta(current_delta_index))?
.checked_sub(old_balance as i64)
.ok_or_else(|| Error::DeltaOverflow(current_delta_index))?;
.ok_or(Error::DeltaOverflow(current_delta_index))?;
// Array access safe due to check on previous line.
deltas[current_delta_index] = delta;
@ -315,9 +315,9 @@ fn compute_deltas(
if let Some(next_delta_index) = indices.get(&vote.next_root).copied() {
let delta = deltas
.get(next_delta_index)
.ok_or_else(|| Error::InvalidNodeDelta(next_delta_index))?
.ok_or(Error::InvalidNodeDelta(next_delta_index))?
.checked_add(new_balance as i64)
.ok_or_else(|| Error::DeltaOverflow(next_delta_index))?;
.ok_or(Error::DeltaOverflow(next_delta_index))?;
// Array access safe due to check on previous line.
deltas[next_delta_index] = delta;

View File

@ -13,8 +13,8 @@ pub fn encode<T: AsRef<[u8]>>(data: T) -> String {
/// Decode `data` from a 0x-prefixed hex string.
pub fn decode(s: &str) -> Result<Vec<u8>, String> {
if s.starts_with("0x") {
hex::decode(&s[2..]).map_err(|e| format!("invalid hex: {:?}", e))
if let Some(stripped) = s.strip_prefix("0x") {
hex::decode(stripped).map_err(|e| format!("invalid hex: {:?}", e))
} else {
Err("hex must have 0x prefix".to_string())
}
@ -33,8 +33,8 @@ impl<'de> Visitor<'de> for PrefixedHexVisitor {
where
E: de::Error,
{
if value.starts_with("0x") {
Ok(hex::decode(&value[2..])
if let Some(stripped) = value.strip_prefix("0x") {
Ok(hex::decode(stripped)
.map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e)))?)
} else {
Err(de::Error::custom("missing 0x prefix"))

View File

@ -450,7 +450,7 @@ pub fn decode_list_of_variable_length_items<T: Decode>(
bytes.get(start..offset)
};
let slice = slice_option.ok_or_else(|| DecodeError::OutOfBoundsByte { i: offset })?;
let slice = slice_option.ok_or(DecodeError::OutOfBoundsByte { i: offset })?;
values.push(T::from_ssz_bytes(slice)?);
}

View File

@ -170,7 +170,7 @@ impl<N: Unsigned + Clone> Bitfield<Variable<N>> {
let len = initial_bitfield
.highest_set_bit()
.ok_or_else(|| Error::MissingLengthInformation)?;
.ok_or(Error::MissingLengthInformation)?;
// The length bit should be in the last byte, or else it means we have too many bytes.
if len / 8 + 1 != bytes_len {
@ -286,7 +286,7 @@ impl<T: BitfieldBehaviour> Bitfield<T> {
let byte = self
.bytes
.get_mut(i / 8)
.ok_or_else(|| Error::OutOfBounds { i, len })?;
.ok_or(Error::OutOfBounds { i, len })?;
if value {
*byte |= 1 << (i % 8)
@ -308,7 +308,7 @@ impl<T: BitfieldBehaviour> Bitfield<T> {
let byte = self
.bytes
.get(i / 8)
.ok_or_else(|| Error::OutOfBounds { i, len: self.len })?;
.ok_or(Error::OutOfBounds { i, len: self.len })?;
Ok(*byte & 1 << (i % 8) > 0)
} else {

View File

@ -236,7 +236,7 @@ where
let num_items = bytes
.len()
.checked_div(T::ssz_fixed_len())
.ok_or_else(|| ssz::DecodeError::ZeroLengthItem)?;
.ok_or(ssz::DecodeError::ZeroLengthItem)?;
if num_items != fixed_len {
return Err(ssz::DecodeError::BytesInvalid(format!(

View File

@ -236,7 +236,7 @@ where
let num_items = bytes
.len()
.checked_div(T::ssz_fixed_len())
.ok_or_else(|| ssz::DecodeError::ZeroLengthItem)?;
.ok_or(ssz::DecodeError::ZeroLengthItem)?;
if num_items > max_len {
return Err(ssz::DecodeError::BytesInvalid(format!(

View File

@ -206,8 +206,7 @@ where
.attesting_indices
.into_iter()
.map(|&validator_idx| {
Ok(get_pubkey(validator_idx as usize)
.ok_or_else(|| Error::ValidatorUnknown(validator_idx))?)
Ok(get_pubkey(validator_idx as usize).ok_or(Error::ValidatorUnknown(validator_idx))?)
})
.collect::<Result<_>>()?;
@ -241,8 +240,7 @@ where
.attesting_indices
.into_iter()
.map(|&validator_idx| {
Ok(get_pubkey(validator_idx as usize)
.ok_or_else(|| Error::ValidatorUnknown(validator_idx))?)
Ok(get_pubkey(validator_idx as usize).ok_or(Error::ValidatorUnknown(validator_idx))?)
})
.collect::<Result<_>>()?;
@ -355,8 +353,7 @@ where
Ok(SignatureSet::single_pubkey(
signature,
get_pubkey(validator_index as usize)
.ok_or_else(|| Error::ValidatorUnknown(validator_index))?,
get_pubkey(validator_index as usize).ok_or(Error::ValidatorUnknown(validator_index))?,
message,
))
}
@ -391,8 +388,7 @@ where
Ok(SignatureSet::single_pubkey(
signature,
get_pubkey(validator_index as usize)
.ok_or_else(|| Error::ValidatorUnknown(validator_index))?,
get_pubkey(validator_index as usize).ok_or(Error::ValidatorUnknown(validator_index))?,
message,
))
}

View File

@ -884,7 +884,7 @@ impl<T: EthSpec> BeaconState<T> {
self.eth1_data
.deposit_count
.checked_sub(self.eth1_deposit_index)
.ok_or_else(|| Error::InvalidDepositState {
.ok_or(Error::InvalidDepositState {
deposit_count: self.eth1_data.deposit_count,
deposit_index: self.eth1_deposit_index,
})

View File

@ -56,7 +56,7 @@ impl CommitteeCache {
&seed[..],
false,
)
.ok_or_else(|| Error::UnableToShuffle)?;
.ok_or(Error::UnableToShuffle)?;
// The use of `NonZeroUsize` reduces the maximum number of possible validators by one.
if state.validators.len() == usize::max_value() {
@ -148,7 +148,7 @@ impl CommitteeCache {
pub fn get_all_beacon_committees(&self) -> Result<Vec<BeaconCommittee>, Error> {
let initialized_epoch = self
.initialized_epoch
.ok_or_else(|| Error::CommitteeCacheUninitialized(None))?;
.ok_or(Error::CommitteeCacheUninitialized(None))?;
initialized_epoch.slot_iter(self.slots_per_epoch).try_fold(
Vec::with_capacity(self.slots_per_epoch as usize),

View File

@ -419,7 +419,7 @@ impl ParallelValidatorTreeHash {
let validator = validators
.get(val_index)
.ok_or_else(|| Error::TreeHashCacheInconsistent)?;
.ok_or(Error::TreeHashCacheInconsistent)?;
validator
.recalculate_tree_hash_root(arena, cache)

View File

@ -95,8 +95,8 @@ macro_rules! impl_from_str {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s.starts_with("0x") {
let bytes = hex::decode(&s[2..]).map_err(|e| e.to_string())?;
if let Some(stripped) = s.strip_prefix("0x") {
let bytes = hex::decode(stripped).map_err(|e| e.to_string())?;
Self::deserialize(&bytes[..]).map_err(|e| format!("{:?}", e))
} else {
Err("must start with 0x".to_string())

View File

@ -202,7 +202,7 @@ impl Wallet {
.json
.nextaccount
.checked_add(1)
.ok_or_else(|| Error::PathExhausted)?;
.ok_or(Error::PathExhausted)?;
Ok(keystores)
}

View File

@ -8,13 +8,13 @@ use types::{BeaconState, EthSpec};
pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> {
let path = matches
.value_of("ssz-state")
.ok_or_else(|| "ssz-state not specified")?
.ok_or("ssz-state not specified")?
.parse::<PathBuf>()
.map_err(|e| format!("Unable to parse ssz-state: {}", e))?;
let genesis_time = matches
.value_of("genesis-time")
.ok_or_else(|| "genesis-time not specified")?
.ok_or("genesis-time not specified")?
.parse::<u64>()
.map_err(|e| format!("Unable to parse genesis-time: {}", e))?;

View File

@ -24,7 +24,7 @@ pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches<'_>) -> Res
let testnet_dir = matches
.value_of("testnet-dir")
.ok_or_else(|| ())
.ok_or(())
.and_then(|dir| dir.parse::<PathBuf>().map_err(|_| ()))
.unwrap_or_else(|_| {
dirs::home_dir()
@ -37,7 +37,7 @@ pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches<'_>) -> Res
let spec = eth2_testnet_config
.yaml_config
.as_ref()
.ok_or_else(|| "The testnet directory must contain a spec config".to_string())?
.ok_or("The testnet directory must contain a spec config")?
.apply_to_chain_spec::<T>(&env.core_context().eth2_config.spec)
.ok_or_else(|| {
format!(

View File

@ -11,7 +11,7 @@ use types::{test_utils::generate_deterministic_keypairs, EthSpec};
pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches) -> Result<(), String> {
let validator_count = matches
.value_of("validator-count")
.ok_or_else(|| "validator-count not specified")?
.ok_or("validator-count not specified")?
.parse::<usize>()
.map_err(|e| format!("Unable to parse validator-count: {}", e))?;
@ -28,7 +28,7 @@ pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches) -> Result<
let testnet_dir = matches
.value_of("testnet-dir")
.ok_or_else(|| ())
.ok_or(())
.and_then(|dir| dir.parse::<PathBuf>().map_err(|_| ()))
.unwrap_or_else(|_| {
dirs::home_dir()
@ -41,7 +41,7 @@ pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches) -> Result<
let mut spec = eth2_testnet_config
.yaml_config
.as_ref()
.ok_or_else(|| "The testnet directory must contain a spec config".to_string())?
.ok_or("The testnet directory must contain a spec config")?
.apply_to_chain_spec::<T>(&env.core_context().eth2_config.spec)
.ok_or_else(|| {
format!(

View File

@ -4,12 +4,10 @@ use ssz::Decode;
use types::{BeaconBlock, BeaconState, EthSpec};
pub fn run_parse_hex<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> {
let type_str = matches
.value_of("type")
.ok_or_else(|| "No type supplied".to_string())?;
let type_str = matches.value_of("type").ok_or("No type supplied")?;
let mut hex: String = matches
.value_of("hex_ssz")
.ok_or_else(|| "No hex ssz supplied".to_string())?
.ok_or("No hex ssz supplied")?
.to_string();
if hex.starts_with("0x") {

View File

@ -9,7 +9,7 @@ use types::{EthSpec, SignedBeaconBlock};
pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> {
let type_str = matches
.value_of("type")
.ok_or_else(|| "No type supplied".to_string())?;
.ok_or("No type supplied")?;
let path = parse_path(matches, "path")?;
info!("Type: {:?}", type_str);

View File

@ -10,19 +10,19 @@ use types::{BeaconState, EthSpec};
pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> {
let pre_state_path = matches
.value_of("pre-state")
.ok_or_else(|| "No pre-state file supplied".to_string())?
.ok_or("No pre-state file supplied")?
.parse::<PathBuf>()
.map_err(|e| format!("Failed to parse pre-state path: {}", e))?;
let slots = matches
.value_of("slots")
.ok_or_else(|| "No slots supplied".to_string())?
.ok_or("No slots supplied")?
.parse::<usize>()
.map_err(|e| format!("Failed to parse slots: {}", e))?;
let output_path = matches
.value_of("output")
.ok_or_else(|| "No output file supplied".to_string())?
.ok_or("No output file supplied")?
.parse::<PathBuf>()
.map_err(|e| format!("Failed to parse output path: {}", e))?;

View File

@ -9,19 +9,19 @@ use types::{BeaconState, EthSpec, SignedBeaconBlock};
pub fn run_transition_blocks<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> {
let pre_state_path = matches
.value_of("pre-state")
.ok_or_else(|| "No pre-state file supplied".to_string())?
.ok_or("No pre-state file supplied")?
.parse::<PathBuf>()
.map_err(|e| format!("Failed to parse pre-state path: {}", e))?;
let block_path = matches
.value_of("block")
.ok_or_else(|| "No block file supplied".to_string())?
.ok_or("No block file supplied")?
.parse::<PathBuf>()
.map_err(|e| format!("Failed to parse block path: {}", e))?;
let output_path = matches
.value_of("output")
.ok_or_else(|| "No output file supplied".to_string())?
.ok_or("No output file supplied")?
.parse::<PathBuf>()
.map_err(|e| format!("Failed to parse output path: {}", e))?;

View File

@ -161,9 +161,9 @@ impl<E: EthSpec> EnvironmentBuilder<E> {
.as_secs();
let file_stem = path
.file_stem()
.ok_or_else(|| "Invalid file name".to_string())?
.ok_or("Invalid file name")?
.to_str()
.ok_or_else(|| "Failed to create str from filename".to_string())?;
.ok_or("Failed to create str from filename")?;
let file_ext = path.extension().unwrap_or_else(|| OsStr::new(""));
let backup_name = format!("{}_backup_{}", file_stem, timestamp);
let backup_path = path.with_file_name(backup_name).with_extension(file_ext);
@ -229,7 +229,7 @@ impl<E: EthSpec> EnvironmentBuilder<E> {
self.eth2_config.spec = eth2_testnet_config
.yaml_config
.as_ref()
.ok_or_else(|| "The testnet directory must contain a spec config".to_string())?
.ok_or("The testnet directory must contain a spec config")?
.apply_to_chain_spec::<E>(&self.eth2_config.spec)
.ok_or_else(|| {
format!(
@ -262,14 +262,12 @@ impl<E: EthSpec> EnvironmentBuilder<E> {
Ok(Environment {
runtime: self
.runtime
.ok_or_else(|| "Cannot build environment without runtime".to_string())?,
.ok_or("Cannot build environment without runtime")?,
signal_tx,
signal_rx: Some(signal_rx),
signal: Some(signal),
exit,
log: self
.log
.ok_or_else(|| "Cannot build environment without log".to_string())?,
log: self.log.ok_or("Cannot build environment without log")?,
eth_spec_instance: self.eth_spec_instance,
eth2_config: self.eth2_config,
testnet: self.testnet,

View File

@ -200,7 +200,7 @@ fn run<E: EthSpec>(
let debug_level = matches
.value_of("debug-level")
.ok_or_else(|| "Expected --debug-level flag".to_string())?;
.ok_or("Expected --debug-level flag")?;
let log_format = matches.value_of("log-format");

View File

@ -47,7 +47,7 @@ impl Chunk {
self.data
.get(cell_index)
.map(|distance| epoch + u64::from(*distance))
.ok_or_else(|| Error::ChunkIndexOutOfBounds(cell_index))
.ok_or(Error::ChunkIndexOutOfBounds(cell_index))
}
pub fn set_target(
@ -75,7 +75,7 @@ impl Chunk {
let cell = self
.data
.get_mut(cell_index)
.ok_or_else(|| Error::ChunkIndexOutOfBounds(cell_index))?;
.ok_or(Error::ChunkIndexOutOfBounds(cell_index))?;
*cell = target_distance;
Ok(())
}

View File

@ -342,7 +342,7 @@ impl<E: EthSpec> SlasherDB<E> {
let bytes = txn
.get(self.indexed_attestation_db, &key)
.optional()?
.ok_or_else(|| Error::MissingIndexedAttestation {
.ok_or(Error::MissingIndexedAttestation {
root: indexed_attestation_hash,
})?;
Ok(IndexedAttestation::from_ssz_bytes(bytes)?)
@ -400,7 +400,7 @@ impl<E: EthSpec> SlasherDB<E> {
) -> Result<IndexedAttestation<E>, Error> {
let record = self
.get_attester_record(txn, validator_index, target_epoch)?
.ok_or_else(|| Error::MissingAttesterRecord {
.ok_or(Error::MissingAttesterRecord {
validator_index,
target_epoch,
})?;
@ -512,7 +512,7 @@ impl<E: EthSpec> SlasherDB<E> {
let key_bytes = cursor
.get(None, None, lmdb_sys::MDB_GET_CURRENT)?
.0
.ok_or_else(|| Error::MissingProposerKey)?;
.ok_or(Error::MissingProposerKey)?;
let (slot, _) = ProposerKey::parse(key_bytes)?;
if slot < min_slot {
@ -558,7 +558,7 @@ impl<E: EthSpec> SlasherDB<E> {
let key_bytes = cursor
.get(None, None, lmdb_sys::MDB_GET_CURRENT)?
.0
.ok_or_else(|| Error::MissingAttesterKey)?;
.ok_or(Error::MissingAttesterKey)?;
let (target_epoch, _) = AttesterKey::parse(key_bytes)?;
@ -605,7 +605,7 @@ impl<E: EthSpec> SlasherDB<E> {
let key_bytes = cursor
.get(None, None, lmdb_sys::MDB_GET_CURRENT)?
.0
.ok_or_else(|| Error::MissingAttesterKey)?;
.ok_or(Error::MissingAttesterKey)?;
let (target_epoch, _) = IndexedAttestationKey::parse(key_bytes)?;

View File

@ -35,7 +35,7 @@ impl GanacheInstance {
) -> Result<Self, String> {
let stdout = child
.stdout
.ok_or_else(|| "Unable to get stdout for ganache child process")?;
.ok_or("Unable to get stdout for ganache child process")?;
let start = Instant::now();
let mut reader = BufReader::new(stdout);

View File

@ -64,7 +64,7 @@ impl<E: EthSpec> LocalBeaconNode<E> {
let listen_addr = self
.client
.http_api_listen_addr()
.ok_or_else(|| "A remote beacon node must have a http server".to_string())?;
.ok_or("A remote beacon node must have a http server")?;
let beacon_node_url: Url = format!("http://{}:{}", listen_addr.ip(), listen_addr.port())
.parse()

View File

@ -69,19 +69,19 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationServiceBuilder<T, E> {
inner: Arc::new(Inner {
duties_service: self
.duties_service
.ok_or_else(|| "Cannot build AttestationService without duties_service")?,
.ok_or("Cannot build AttestationService without duties_service")?,
validator_store: self
.validator_store
.ok_or_else(|| "Cannot build AttestationService without validator_store")?,
.ok_or("Cannot build AttestationService without validator_store")?,
slot_clock: self
.slot_clock
.ok_or_else(|| "Cannot build AttestationService without slot_clock")?,
.ok_or("Cannot build AttestationService without slot_clock")?,
beacon_node: self
.beacon_node
.ok_or_else(|| "Cannot build AttestationService without beacon_node")?,
.ok_or("Cannot build AttestationService without beacon_node")?,
context: self
.context
.ok_or_else(|| "Cannot build AttestationService without runtime_context")?,
.ok_or("Cannot build AttestationService without runtime_context")?,
}),
})
}
@ -130,7 +130,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
let duration_to_next_slot = self
.slot_clock
.duration_to_next_slot()
.ok_or_else(|| "Unable to determine duration to next slot".to_string())?;
.ok_or("Unable to determine duration to next slot")?;
info!(
log,
@ -174,14 +174,11 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
/// For each each required attestation, spawn a new task that downloads, signs and uploads the
/// attestation to the beacon node.
fn spawn_attestation_tasks(&self, slot_duration: Duration) -> Result<(), String> {
let slot = self
.slot_clock
.now()
.ok_or_else(|| "Failed to read slot clock".to_string())?;
let slot = self.slot_clock.now().ok_or("Failed to read slot clock")?;
let duration_to_next_slot = self
.slot_clock
.duration_to_next_slot()
.ok_or_else(|| "Unable to determine duration to next slot".to_string())?;
.ok_or("Unable to determine duration to next slot")?;
// If a validator needs to publish an aggregate attestation, they must do so at 2/3
// through the slot. This delay triggers at this time
@ -336,7 +333,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
let current_epoch = self
.slot_clock
.now()
.ok_or_else(|| "Unable to determine current slot from clock".to_string())?
.ok_or("Unable to determine current slot from clock")?
.epoch(E::slots_per_epoch());
let attestation_data = self

View File

@ -59,16 +59,16 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> {
inner: Arc::new(Inner {
validator_store: self
.validator_store
.ok_or_else(|| "Cannot build BlockService without validator_store")?,
.ok_or("Cannot build BlockService without validator_store")?,
slot_clock: self
.slot_clock
.ok_or_else(|| "Cannot build BlockService without slot_clock")?,
.ok_or("Cannot build BlockService without slot_clock")?,
beacon_node: self
.beacon_node
.ok_or_else(|| "Cannot build BlockService without beacon_node")?,
.ok_or("Cannot build BlockService without beacon_node")?,
context: self
.context
.ok_or_else(|| "Cannot build BlockService without runtime_context")?,
.ok_or("Cannot build BlockService without runtime_context")?,
graffiti: self.graffiti,
}),
})
@ -217,12 +217,12 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
let current_slot = self
.slot_clock
.now()
.ok_or_else(|| "Unable to determine current slot from clock".to_string())?;
.ok_or("Unable to determine current slot from clock")?;
let randao_reveal = self
.validator_store
.randao_reveal(&validator_pubkey, slot.epoch(E::slots_per_epoch()))
.ok_or_else(|| "Unable to produce randao reveal".to_string())?;
.ok_or("Unable to produce randao reveal")?;
let block = self
.beacon_node
@ -234,7 +234,7 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
let signed_block = self
.validator_store
.sign_block(&validator_pubkey, block, current_slot)
.ok_or_else(|| "Unable to sign block".to_string())?;
.ok_or("Unable to sign block")?;
self.beacon_node
.post_beacon_blocks(&signed_block)

View File

@ -57,7 +57,7 @@ impl DutyAndProof {
let selection_proof = validator_store
.produce_selection_proof(&self.duty.validator_pubkey, slot)
.ok_or_else(|| "Failed to produce selection proof".to_string())?;
.ok_or("Failed to produce selection proof")?;
self.selection_proof = selection_proof
.is_aggregator(committee_length, spec)
@ -375,16 +375,16 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesServiceBuilder<T, E> {
store: Arc::new(DutiesStore::default()),
validator_store: self
.validator_store
.ok_or_else(|| "Cannot build DutiesService without validator_store")?,
.ok_or("Cannot build DutiesService without validator_store")?,
slot_clock: self
.slot_clock
.ok_or_else(|| "Cannot build DutiesService without slot_clock")?,
.ok_or("Cannot build DutiesService without slot_clock")?,
beacon_node: self
.beacon_node
.ok_or_else(|| "Cannot build DutiesService without beacon_node")?,
.ok_or("Cannot build DutiesService without beacon_node")?,
context: self
.context
.ok_or_else(|| "Cannot build DutiesService without runtime_context")?,
.ok_or("Cannot build DutiesService without runtime_context")?,
allow_unsynced_beacon_node: self.allow_unsynced_beacon_node,
}),
})
@ -466,7 +466,7 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> {
let duration_to_next_slot = self
.slot_clock
.duration_to_next_slot()
.ok_or_else(|| "Unable to determine duration to next slot".to_string())?;
.ok_or("Unable to determine duration to next slot")?;
let mut interval = {
let slot_duration = Duration::from_millis(spec.milliseconds_per_slot);

View File

@ -54,13 +54,13 @@ impl<T: SlotClock + 'static> ForkServiceBuilder<T> {
fork: RwLock::new(self.fork),
slot_clock: self
.slot_clock
.ok_or_else(|| "Cannot build ForkService without slot_clock")?,
.ok_or("Cannot build ForkService without slot_clock")?,
beacon_node: self
.beacon_node
.ok_or_else(|| "Cannot build ForkService without beacon_node")?,
.ok_or("Cannot build ForkService without beacon_node")?,
log: self
.log
.ok_or_else(|| "Cannot build ForkService without logger")?
.ok_or("Cannot build ForkService without logger")?
.clone(),
}),
})
@ -131,7 +131,7 @@ impl<T: SlotClock + 'static> ForkService<T> {
let duration_to_next_epoch = self
.slot_clock
.duration_to_next_epoch(E::slots_per_epoch())
.ok_or_else(|| "Unable to determine duration to next epoch".to_string())?;
.ok_or("Unable to determine duration to next epoch")?;
let mut interval = {
let slot_duration = Duration::from_millis(spec.milliseconds_per_slot);

View File

@ -98,8 +98,8 @@ impl ApiSecret {
.and_then(|bytes| {
let hex =
String::from_utf8(bytes).map_err(|_| format!("{} is not utf8", SK_FILENAME))?;
if hex.starts_with(PK_PREFIX) {
serde_utils::hex::decode(&hex[PK_PREFIX.len()..])
if let Some(stripped) = hex.strip_prefix(PK_PREFIX) {
serde_utils::hex::decode(stripped)
.map_err(|_| format!("{} should be 0x-prefixed hex", SK_FILENAME))
} else {
Err(format!("unable to parse {}", SK_FILENAME))

View File

@ -490,8 +490,8 @@ impl InitializedValidators {
// Create a lock file for the cache
let key_cache_path = KeyCache::cache_file_path(&self.validators_dir);
let cache_lockfile_path = get_lockfile_path(&key_cache_path)
.ok_or_else(|| Error::BadKeyCachePath(key_cache_path))?;
let cache_lockfile_path =
get_lockfile_path(&key_cache_path).ok_or(Error::BadKeyCachePath(key_cache_path))?;
let _cache_lockfile = Lockfile::new(cache_lockfile_path)?;
let cache =

View File

@ -16,7 +16,7 @@ pub fn spawn_notifier<T: EthSpec>(client: &ProductionValidatorClient<T>) -> Resu
let duration_to_next_slot = duties_service
.slot_clock
.duration_to_next_slot()
.ok_or_else(|| "slot_notifier unable to determine time to next slot")?;
.ok_or("slot_notifier unable to determine time to next slot")?;
// Run the notifier half way through each slot.
let start_instant = Instant::now() + duration_to_next_slot + (slot_duration / 2);