From d8cda2d86eb3f69185a16d0474b987fbf0b8eb6b Mon Sep 17 00:00:00 2001
From: blacktemplar
Date: Thu, 3 Dec 2020 01:10:26 +0000
Subject: [PATCH] Fix new clippy lints (#2036)

## Issue Addressed

NA

## Proposed Changes

Fixes new clippy lints in the whole project (mainly [manual_strip](https://rust-lang.github.io/rust-clippy/master/index.html#manual_strip) and [unnecessary_lazy_evaluations](https://rust-lang.github.io/rust-clippy/master/index.html#unnecessary_lazy_evaluations)). Furthermore, removes `to_string()` calls on literals when used with the `?`-operator.
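For illustration, a minimal before/after sketch of the three kinds of changes (hypothetical snippets written for this description; the function names and messages are made up and do not appear in the diff below):

```rust
// `manual_strip`: prefer `strip_prefix` over `starts_with` + manual slicing.
fn strip_0x(hex: &str) -> Result<&str, String> {
    // Before: if hex.starts_with("0x") { Ok(&hex[2..]) } else { ... }
    if let Some(stripped) = hex.strip_prefix("0x") {
        Ok(stripped)
    } else {
        Err("Hex string did not start with `0x`".to_string())
    }
}

// `unnecessary_lazy_evaluations`: no closure is needed when the fallback value
// is already available, e.g. `unwrap_or`/`ok_or` instead of
// `unwrap_or_else`/`ok_or_else`.
fn fallback(value: Option<u64>, default: u64) -> u64 {
    // Before: value.unwrap_or_else(|| default)
    value.unwrap_or(default)
}

// Dropping `to_string()` on a literal used with `?`: a `&'static str` error is
// enough, because `?` converts it via `From<&str> for String` anyway.
fn require(value: Option<u64>) -> Result<u64, String> {
    // Before: value.ok_or_else(|| "value is missing".to_string())?
    Ok(value.ok_or("value is missing")?)
}
```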
---
 account_manager/src/validator/create.rs | 2 +-
 account_manager/src/validator/exit.rs | 4 +-
 .../src/validator/slashing_protection.rs | 2 +-
 .../src/attestation_verification.rs | 14 ++--
 beacon_node/beacon_chain/src/beacon_chain.rs | 52 +++++++-------
 .../src/beacon_fork_choice_store.rs | 4 +-
 .../beacon_chain/src/block_verification.rs | 8 +--
 beacon_node/beacon_chain/src/builder.rs | 65 +++++++-----------
 .../src/naive_aggregation_pool.rs | 2 +-
 .../beacon_chain/src/observed_attestations.rs | 4 +-
 beacon_node/client/src/builder.rs | 68 +++++++++----------
 beacon_node/client/src/config.rs | 6 +-
 beacon_node/client/src/notifier.rs | 4 +-
 beacon_node/eth1/src/deposit_log.rs | 10 +--
 beacon_node/eth1/src/http.rs | 50 +++++++-------
 beacon_node/eth1/src/service.rs | 8 +--
 beacon_node/eth2_libp2p/src/discovery/enr.rs | 10 ++-
 .../eth2_libp2p/src/peer_manager/peer_info.rs | 5 +-
 .../eth2_libp2p/src/peer_manager/score.rs | 3 +-
 beacon_node/eth2_libp2p/src/service.rs | 4 +-
 .../http_api/src/beacon_proposer_cache.rs | 8 +--
 .../network/src/attestation_service/mod.rs | 8 +--
 beacon_node/src/config.rs | 16 ++---
 beacon_node/store/src/hot_cold_store.rs | 20 +++---
 beacon_node/timer/src/lib.rs | 2 +-
 boot_node/src/config.rs | 4 +-
 .../src/validator_definitions.rs | 8 +--
 common/clap_utils/src/lib.rs | 4 +-
 common/deposit_contract/build.rs | 4 +-
 common/deposit_contract/src/lib.rs | 3 +-
 common/eth2/src/lighthouse_vc/http_client.rs | 4 +-
 common/eth2_interop_keypairs/src/lib.rs | 4 +-
 common/eth2_testnet_config/src/lib.rs | 6 +-
 common/validator_dir/src/builder.rs | 2 +-
 common/validator_dir/src/validator_dir.rs | 4 +-
 consensus/cached_tree_hash/src/cache.rs | 2 +-
 consensus/cached_tree_hash/src/cache_arena.rs | 23 +++----
 consensus/fork_choice/src/fork_choice.rs | 4 +-
 consensus/proto_array/src/proto_array.rs | 38 +++++------
 .../src/proto_array_fork_choice.rs | 12 ++--
 consensus/serde_utils/src/hex.rs | 8 +--
 consensus/ssz/src/decode/impls.rs | 2 +-
 consensus/ssz_types/src/bitfield.rs | 6 +-
 consensus/ssz_types/src/fixed_vector.rs | 2 +-
 consensus/ssz_types/src/variable_list.rs | 2 +-
 .../per_block_processing/signature_sets.rs | 12 ++--
 consensus/types/src/beacon_state.rs | 2 +-
 .../types/src/beacon_state/committee_cache.rs | 4 +-
 .../types/src/beacon_state/tree_hash_cache.rs | 2 +-
 crypto/bls/src/macros.rs | 4 +-
 crypto/eth2_wallet/src/wallet.rs | 2 +-
 lcli/src/change_genesis_time.rs | 4 +-
 lcli/src/eth1_genesis.rs | 4 +-
 lcli/src/interop_genesis.rs | 6 +-
 lcli/src/parse_hex.rs | 6 +-
 lcli/src/parse_ssz.rs | 2 +-
 lcli/src/skip_slots.rs | 6 +-
 lcli/src/transition_blocks.rs | 6 +-
 lighthouse/environment/src/lib.rs | 12 ++--
 lighthouse/src/main.rs | 2 +-
 slasher/src/array.rs | 4 +-
 slasher/src/database.rs | 10 +--
 testing/eth1_test_rig/src/ganache.rs | 2 +-
 testing/node_test_rig/src/lib.rs | 2 +-
 validator_client/src/attestation_service.rs | 21 +++---
 validator_client/src/block_service.rs | 14 ++--
 validator_client/src/duties_service.rs | 12 ++--
 validator_client/src/fork_service.rs | 8 +--
 validator_client/src/http_api/api_secret.rs | 4 +-
 .../src/initialized_validators.rs | 4 +-
 validator_client/src/notifier.rs | 2 +-
 71 files changed, 314 insertions(+), 364 deletions(-)

diff --git a/account_manager/src/validator/create.rs b/account_manager/src/validator/create.rs index 024865123..85e332458 100644 --- a/account_manager/src/validator/create.rs +++ b/account_manager/src/validator/create.rs @@ -133,7 +133,7 @@ pub fn cli_run( }; let deposit_gwei = clap_utils::parse_optional(matches, DEPOSIT_GWEI_FLAG)? - .unwrap_or_else(|| spec.max_effective_balance); + .unwrap_or(spec.max_effective_balance); let count: Option = clap_utils::parse_optional(matches, COUNT_FLAG)?; let at_most: Option = clap_utils::parse_optional(matches, AT_MOST_FLAG)?;
diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index 65c62786d..3cf8900af 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -125,7 +125,7 @@ async fn publish_voluntary_exit( let keypair = load_voting_keypair(keystore_path, password_file_path, stdin_inputs)?; let epoch = get_current_epoch::(genesis_data.genesis_time, spec) - .ok_or_else(|| "Failed to get current epoch. Please check your system time".to_string())?; + .ok_or("Failed to get current epoch. Please check your system time")?; let validator_index = get_validator_index_for_exit(client, &keypair.pk, epoch, spec).await?; let fork = get_beacon_state_fork(client).await?; @@ -248,7 +248,7 @@ async fn get_beacon_state_fork(client: &BeaconNodeHttpClient) -> Result( let testnet_config = env .testnet - .ok_or_else(|| "Unable to get testnet configuration from the environment".to_string())?; + .ok_or("Unable to get testnet configuration from the environment")?; let genesis_validators_root = testnet_config .beacon_state::()
diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index d6fddf188..bdcee1846 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -648,7 +648,7 @@ impl VerifiedUnaggregatedAttestation { let validator_index = *indexed_attestation .attesting_indices .first() - .ok_or_else(|| Error::NotExactlyOneAggregationBitSet(0))?; + .ok_or(Error::NotExactlyOneAggregationBitSet(0))?; /* * The attestation is the first valid attestation received for the participating validator @@ -838,7 +838,7 @@ pub fn verify_propagation_slot_range( let latest_permissible_slot = chain .slot_clock .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) - .ok_or_else(|| BeaconChainError::UnableToReadSlot)?; + .ok_or(BeaconChainError::UnableToReadSlot)?; if attestation_slot > latest_permissible_slot { return Err(Error::FutureSlot { attestation_slot, @@ -850,7 +850,7 @@ let earliest_permissible_slot = chain .slot_clock .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) - .ok_or_else(|| BeaconChainError::UnableToReadSlot)? + .ok_or(BeaconChainError::UnableToReadSlot)?
- T::EthSpec::slots_per_epoch(); if attestation_slot < earliest_permissible_slot { return Err(Error::PastSlot { @@ -873,12 +873,12 @@ pub fn verify_attestation_signature( let pubkey_cache = chain .validator_pubkey_cache .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or_else(|| BeaconChainError::ValidatorPubkeyCacheLockTimeout)?; + .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?; let fork = chain .canonical_head .try_read_for(HEAD_LOCK_TIMEOUT) - .ok_or_else(|| BeaconChainError::CanonicalHeadLockTimeout) + .ok_or(BeaconChainError::CanonicalHeadLockTimeout) .map(|head| head.beacon_state.fork)?; let signature_set = indexed_attestation_signature_set_from_pubkeys( @@ -974,7 +974,7 @@ pub fn verify_signed_aggregate_signatures( let pubkey_cache = chain .validator_pubkey_cache .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or_else(|| BeaconChainError::ValidatorPubkeyCacheLockTimeout)?; + .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?; let aggregator_index = signed_aggregate.message.aggregator_index; if aggregator_index >= pubkey_cache.len() as u64 { @@ -984,7 +984,7 @@ pub fn verify_signed_aggregate_signatures( let fork = chain .canonical_head .try_read_for(HEAD_LOCK_TIMEOUT) - .ok_or_else(|| BeaconChainError::CanonicalHeadLockTimeout) + .ok_or(BeaconChainError::CanonicalHeadLockTimeout) .map(|head| head.beacon_state.fork)?; let signature_sets = vec![ diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index b1eec7c10..aa744ad35 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -318,7 +318,7 @@ impl BeaconChain { /// The slot might be unavailable due to an error with the system clock, or if the present time /// is before genesis (i.e., a negative slot). pub fn slot(&self) -> Result { - self.slot_clock.now().ok_or_else(|| Error::UnableToReadSlot) + self.slot_clock.now().ok_or(Error::UnableToReadSlot) } /// Returns the epoch _right now_ according to `self.slot_clock`. Returns `Err` if the epoch is @@ -386,7 +386,7 @@ impl BeaconChain { ) -> Result>, Error> { let block = self .get_block(&block_root)? - .ok_or_else(|| Error::MissingBeaconBlock(block_root))?; + .ok_or(Error::MissingBeaconBlock(block_root))?; let state = self .get_state(&block.state_root(), Some(block.slot()))? .ok_or_else(|| Error::MissingBeaconState(block.state_root()))?; @@ -531,7 +531,7 @@ impl BeaconChain { let head_lock = self .canonical_head .try_read_for(HEAD_LOCK_TIMEOUT) - .ok_or_else(|| Error::CanonicalHeadLockTimeout)?; + .ok_or(Error::CanonicalHeadLockTimeout)?; f(&head_lock) } @@ -660,11 +660,11 @@ impl BeaconChain { .find(|(_, current_slot)| *current_slot == slot) .map(|(root, _slot)| root) })? - .ok_or_else(|| Error::NoStateForSlot(slot))?; + .ok_or(Error::NoStateForSlot(slot))?; Ok(self .get_state(&state_root, Some(slot))? - .ok_or_else(|| Error::NoStateForSlot(slot))?) + .ok_or(Error::NoStateForSlot(slot))?) } } } @@ -686,7 +686,7 @@ impl BeaconChain { self.canonical_head .try_read_for(HEAD_LOCK_TIMEOUT) .map(|head| head.beacon_block.slot()) - .ok_or_else(|| Error::CanonicalHeadLockTimeout) + .ok_or(Error::CanonicalHeadLockTimeout) } /// Returns the validator index (if any) for the given public key. 
@@ -705,7 +705,7 @@ impl BeaconChain { let pubkey_cache = self .validator_pubkey_cache .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or_else(|| Error::ValidatorPubkeyCacheLockTimeout)?; + .ok_or(Error::ValidatorPubkeyCacheLockTimeout)?; Ok(pubkey_cache.get_index(pubkey)) } @@ -726,7 +726,7 @@ impl BeaconChain { let pubkey_cache = self .validator_pubkey_cache .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or_else(|| Error::ValidatorPubkeyCacheLockTimeout)?; + .ok_or(Error::ValidatorPubkeyCacheLockTimeout)?; Ok(pubkey_cache.get(validator_index).cloned()) } @@ -848,7 +848,7 @@ impl BeaconChain { let head = self .canonical_head .try_read_for(HEAD_LOCK_TIMEOUT) - .ok_or_else(|| Error::CanonicalHeadLockTimeout)?; + .ok_or(Error::CanonicalHeadLockTimeout)?; if slot >= head.beacon_block.slot() { self.produce_unaggregated_attestation_for_block( @@ -879,7 +879,7 @@ impl BeaconChain { let mut state = self .get_state(&state_root, Some(slot))? - .ok_or_else(|| Error::MissingBeaconState(state_root))?; + .ok_or(Error::MissingBeaconState(state_root))?; state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; @@ -1068,7 +1068,7 @@ impl BeaconChain { let fork = self .canonical_head .try_read_for(HEAD_LOCK_TIMEOUT) - .ok_or_else(|| Error::CanonicalHeadLockTimeout)? + .ok_or(Error::CanonicalHeadLockTimeout)? .beacon_state .fork; @@ -1607,7 +1607,7 @@ impl BeaconChain { // known to fork choice. This ordering ensure that the pubkey cache is always up-to-date. self.validator_pubkey_cache .try_write_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or_else(|| Error::ValidatorPubkeyCacheLockTimeout)? + .ok_or(Error::ValidatorPubkeyCacheLockTimeout)? .import_new_pubkeys(&state)?; // For the current and next epoch of this state, ensure we have the shuffling from this @@ -1618,7 +1618,7 @@ impl BeaconChain { let shuffling_is_cached = self .shuffling_cache .try_read_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .ok_or_else(|| Error::AttestationCacheLockTimeout)? + .ok_or(Error::AttestationCacheLockTimeout)? .contains(&shuffling_id); if !shuffling_is_cached { @@ -1626,7 +1626,7 @@ impl BeaconChain { let committee_cache = state.committee_cache(*relative_epoch)?; self.shuffling_cache .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .ok_or_else(|| Error::AttestationCacheLockTimeout)? + .ok_or(Error::AttestationCacheLockTimeout)? .insert(shuffling_id, committee_cache); } } @@ -1790,7 +1790,7 @@ impl BeaconChain { let eth1_chain = self .eth1_chain .as_ref() - .ok_or_else(|| BlockProductionError::NoEth1ChainConnection)?; + .ok_or(BlockProductionError::NoEth1ChainConnection)?; // If required, transition the new state to the present slot. // @@ -1947,12 +1947,12 @@ impl BeaconChain { .unwrap_or_else(|| { let beacon_block = self .get_block(&beacon_block_root)? - .ok_or_else(|| Error::MissingBeaconBlock(beacon_block_root))?; + .ok_or(Error::MissingBeaconBlock(beacon_block_root))?; let beacon_state_root = beacon_block.state_root(); let beacon_state: BeaconState = self .get_state(&beacon_state_root, Some(beacon_block.slot()))? - .ok_or_else(|| Error::MissingBeaconState(beacon_state_root))?; + .ok_or(Error::MissingBeaconState(beacon_state_root))?; Ok(BeaconSnapshot { beacon_block, @@ -2038,7 +2038,7 @@ impl BeaconChain { *self .canonical_head .try_write_for(HEAD_LOCK_TIMEOUT) - .ok_or_else(|| Error::CanonicalHeadLockTimeout)? = new_head; + .ok_or(Error::CanonicalHeadLockTimeout)? 
= new_head; metrics::stop_timer(update_head_timer); @@ -2065,7 +2065,7 @@ impl BeaconChain { let head = self .canonical_head .try_read_for(HEAD_LOCK_TIMEOUT) - .ok_or_else(|| Error::CanonicalHeadLockTimeout)?; + .ok_or(Error::CanonicalHeadLockTimeout)?; // State root of the finalized state on the epoch boundary, NOT the state // of the finalized block. We need to use an iterator in case the state is beyond @@ -2087,7 +2087,7 @@ impl BeaconChain { }) }, )? - .ok_or_else(|| Error::MissingFinalizedStateRoot(new_finalized_slot))?; + .ok_or(Error::MissingFinalizedStateRoot(new_finalized_slot))?; self.after_finalization(&head.beacon_state, new_finalized_state_root)?; } @@ -2250,7 +2250,7 @@ impl BeaconChain { .fork_choice .read() .get_block(&head_block_root) - .ok_or_else(|| Error::MissingBeaconBlock(head_block_root))?; + .ok_or(Error::MissingBeaconBlock(head_block_root))?; let shuffling_id = BlockShufflingIds { current: head_block.current_epoch_shuffling_id.clone(), @@ -2270,7 +2270,7 @@ impl BeaconChain { let mut shuffling_cache = self .shuffling_cache .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .ok_or_else(|| Error::AttestationCacheLockTimeout)?; + .ok_or(Error::AttestationCacheLockTimeout)?; metrics::stop_timer(cache_wait_timer); @@ -2297,7 +2297,7 @@ impl BeaconChain { &head_block.state_root, Some(head_block.slot), )? - .ok_or_else(|| Error::MissingBeaconState(head_block.state_root))?; + .ok_or(Error::MissingBeaconState(head_block.state_root))?; metrics::stop_timer(state_read_timer); let state_skip_timer = @@ -2326,7 +2326,7 @@ impl BeaconChain { self.shuffling_cache .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .ok_or_else(|| Error::AttestationCacheLockTimeout)? + .ok_or(Error::AttestationCacheLockTimeout)? .insert(shuffling_id, committee_cache); metrics::stop_timer(committee_building_timer); @@ -2396,7 +2396,7 @@ impl BeaconChain { pub fn enr_fork_id(&self) -> EnrForkId { // If we are unable to read the slot clock we assume that it is prior to genesis and // therefore use the genesis slot. - let slot = self.slot().unwrap_or_else(|_| self.spec.genesis_slot); + let slot = self.slot().unwrap_or(self.spec.genesis_slot); self.spec.enr_fork_id(slot, self.genesis_validators_root) } @@ -2412,7 +2412,7 @@ impl BeaconChain { let canonical_head_hash = self .canonical_head .try_read_for(HEAD_LOCK_TIMEOUT) - .ok_or_else(|| Error::CanonicalHeadLockTimeout) + .ok_or(Error::CanonicalHeadLockTimeout) .unwrap() .beacon_block_root; let mut visited: HashSet = HashSet::new(); diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 0ddedda06..104cb5ed0 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -320,14 +320,14 @@ where .store .get_item::>(&self.justified_checkpoint.root) .map_err(Error::FailedToReadBlock)? - .ok_or_else(|| Error::MissingBlock(self.justified_checkpoint.root))? + .ok_or(Error::MissingBlock(self.justified_checkpoint.root))? .message; self.justified_balances = self .store .get_state(&justified_block.state_root, Some(justified_block.slot)) .map_err(Error::FailedToReadState)? - .ok_or_else(|| Error::MissingState(justified_block.state_root))? + .ok_or(Error::MissingState(justified_block.state_root))? 
.balances .into(); } diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index d3a5af218..71d34b626 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -452,7 +452,7 @@ impl GossipVerifiedBlock { let present_slot_with_tolerance = chain .slot_clock .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) - .ok_or_else(|| BeaconChainError::UnableToReadSlot)?; + .ok_or(BeaconChainError::UnableToReadSlot)?; if block.slot() > present_slot_with_tolerance { return Err(BlockError::FutureSlot { present_slot: present_slot_with_tolerance, @@ -513,7 +513,7 @@ impl GossipVerifiedBlock { let pubkey_cache = get_validator_pubkey_cache(chain)?; let pubkey = pubkey_cache .get(block.message.proposer_index as usize) - .ok_or_else(|| BlockError::UnknownValidator(block.message.proposer_index))?; + .ok_or(BlockError::UnknownValidator(block.message.proposer_index))?; block.verify_signature( Some(block_root), pubkey, @@ -1180,7 +1180,7 @@ fn get_validator_pubkey_cache( chain .validator_pubkey_cache .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or_else(|| BeaconChainError::ValidatorPubkeyCacheLockTimeout) + .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout) .map_err(BlockError::BeaconChainError) } @@ -1220,7 +1220,7 @@ fn verify_header_signature( let proposer_pubkey = get_validator_pubkey_cache(chain)? .get(header.message.proposer_index as usize) .cloned() - .ok_or_else(|| BlockError::UnknownValidator(header.message.proposer_index))?; + .ok_or(BlockError::UnknownValidator(header.message.proposer_index))?; let (fork, genesis_validators_root) = chain .with_head(|head| { Ok(( diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 10e5cd0fc..0311755df 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -211,7 +211,7 @@ where let store = self .store .clone() - .ok_or_else(|| "get_persisted_eth1_backend requires a store.".to_string())?; + .ok_or("get_persisted_eth1_backend requires a store.")?; store .get_item::(Ð1_CACHE_DB_KEY) @@ -223,7 +223,7 @@ where let store = self .store .clone() - .ok_or_else(|| "store_contains_beacon_chain requires a store.".to_string())?; + .ok_or("store_contains_beacon_chain requires a store.")?; Ok(store .get_item::(&BEACON_CHAIN_DB_KEY) @@ -235,15 +235,12 @@ where /// /// May initialize several components; including the op_pool and finalized checkpoints. pub fn resume_from_db(mut self) -> Result { - let log = self - .log - .as_ref() - .ok_or_else(|| "resume_from_db requires a log".to_string())?; + let log = self.log.as_ref().ok_or("resume_from_db requires a log")?; let pubkey_cache_path = self .pubkey_cache_path .as_ref() - .ok_or_else(|| "resume_from_db requires a data_dir".to_string())?; + .ok_or("resume_from_db requires a data_dir")?; info!( log, @@ -254,7 +251,7 @@ where let store = self .store .clone() - .ok_or_else(|| "resume_from_db requires a store.".to_string())?; + .ok_or("resume_from_db requires a store.")?; let chain = store .get_item::(&BEACON_CHAIN_DB_KEY) @@ -267,7 +264,7 @@ where let persisted_fork_choice = store .get_item::(&FORK_CHOICE_DB_KEY) .map_err(|e| format!("DB error when reading persisted fork choice: {:?}", e))? 
- .ok_or_else(|| "No persisted fork choice present in database.".to_string())?; + .ok_or("No persisted fork choice present in database.")?; let fc_store = BeaconForkChoiceStore::from_persisted( persisted_fork_choice.fork_choice_store, @@ -282,11 +279,11 @@ where let genesis_block = store .get_item::>(&chain.genesis_block_root) .map_err(|e| format!("DB error when reading genesis block: {:?}", e))? - .ok_or_else(|| "Genesis block not found in store".to_string())?; + .ok_or("Genesis block not found in store")?; let genesis_state = store .get_state(&genesis_block.state_root(), Some(genesis_block.slot())) .map_err(|e| format!("DB error when reading genesis state: {:?}", e))? - .ok_or_else(|| "Genesis block not found in store".to_string())?; + .ok_or("Genesis block not found in store")?; self.genesis_time = Some(genesis_state.genesis_time); @@ -318,10 +315,7 @@ where mut self, mut beacon_state: BeaconState, ) -> Result { - let store = self - .store - .clone() - .ok_or_else(|| "genesis_state requires a store")?; + let store = self.store.clone().ok_or("genesis_state requires a store")?; let beacon_block = genesis_block(&mut beacon_state, &self.spec)?; @@ -436,35 +430,28 @@ where >, String, > { - let log = self - .log - .ok_or_else(|| "Cannot build without a logger".to_string())?; + let log = self.log.ok_or("Cannot build without a logger")?; let slot_clock = self .slot_clock - .ok_or_else(|| "Cannot build without a slot_clock.".to_string())?; - let store = self - .store - .clone() - .ok_or_else(|| "Cannot build without a store.".to_string())?; + .ok_or("Cannot build without a slot_clock.")?; + let store = self.store.clone().ok_or("Cannot build without a store.")?; let mut fork_choice = self .fork_choice - .ok_or_else(|| "Cannot build without fork choice.".to_string())?; + .ok_or("Cannot build without fork choice.")?; let genesis_block_root = self .genesis_block_root - .ok_or_else(|| "Cannot build without a genesis block root".to_string())?; + .ok_or("Cannot build without a genesis block root")?; let genesis_state_root = self .genesis_state_root - .ok_or_else(|| "Cannot build without a genesis state root".to_string())?; + .ok_or("Cannot build without a genesis state root")?; let current_slot = if slot_clock .is_prior_to_genesis() - .ok_or_else(|| "Unable to read slot clock".to_string())? + .ok_or("Unable to read slot clock")? { self.spec.genesis_slot } else { - slot_clock - .now() - .ok_or_else(|| "Unable to read slot".to_string())? + slot_clock.now().ok_or("Unable to read slot")? }; let head_block_root = fork_choice @@ -474,12 +461,12 @@ where let head_block = store .get_item::>(&head_block_root) .map_err(|e| format!("DB error when reading head block: {:?}", e))? - .ok_or_else(|| "Head block not found in store".to_string())?; + .ok_or("Head block not found in store")?; let head_state_root = head_block.state_root(); let head_state = store .get_state(&head_state_root, Some(head_block.slot())) .map_err(|e| format!("DB error when reading head state: {:?}", e))? 
- .ok_or_else(|| "Head state not found in store".to_string())?; + .ok_or("Head state not found in store")?; let mut canonical_head = BeaconSnapshot { beacon_block_root: head_block_root, @@ -520,7 +507,7 @@ where let pubkey_cache_path = self .pubkey_cache_path - .ok_or_else(|| "Cannot build without a pubkey cache path".to_string())?; + .ok_or("Cannot build without a pubkey cache path")?; let validator_pubkey_cache = self.validator_pubkey_cache.map(Ok).unwrap_or_else(|| { ValidatorPubkeyCache::new(&canonical_head.beacon_state, pubkey_cache_path) @@ -541,9 +528,7 @@ where store, store_migrator, slot_clock, - op_pool: self - .op_pool - .ok_or_else(|| "Cannot build without op pool".to_string())?, + op_pool: self.op_pool.ok_or("Cannot build without op pool")?, // TODO: allow for persisting and loading the pool from disk. naive_aggregation_pool: <_>::default(), // TODO: allow for persisting and loading the pool from disk. @@ -566,7 +551,7 @@ where fork_choice: RwLock::new(fork_choice), event_handler: self .event_handler - .ok_or_else(|| "Cannot build without an event handler".to_string())?, + .ok_or("Cannot build without an event handler")?, head_tracker: Arc::new(self.head_tracker.unwrap_or_default()), snapshot_cache: TimeoutRwLock::new(SnapshotCache::new( DEFAULT_SNAPSHOT_CACHE_SIZE, @@ -577,7 +562,7 @@ where disabled_forks: self.disabled_forks, shutdown_sender: self .shutdown_sender - .ok_or_else(|| "Cannot build without a shutdown sender.".to_string())?, + .ok_or("Cannot build without a shutdown sender.")?, log: log.clone(), graffiti: self.graffiti, slasher: self.slasher.clone(), @@ -648,7 +633,7 @@ where let log = self .log .as_ref() - .ok_or_else(|| "dummy_eth1_backend requires a log".to_string())?; + .ok_or("dummy_eth1_backend requires a log")?; let backend = CachingEth1Backend::new(Eth1Config::default(), log.clone(), self.spec.clone()); @@ -676,7 +661,7 @@ where pub fn testing_slot_clock(self, slot_duration: Duration) -> Result { let genesis_time = self .genesis_time - .ok_or_else(|| "testing_slot_clock requires an initialized state")?; + .ok_or("testing_slot_clock requires an initialized state")?; let slot_clock = TestingSlotClock::new( Slot::new(0), diff --git a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs index 247f613a9..79c439751 100644 --- a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs +++ b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs @@ -83,7 +83,7 @@ impl AggregatedAttestationMap { let committee_index = set_bits .first() .copied() - .ok_or_else(|| Error::NoAggregationBitsSet)?; + .ok_or(Error::NoAggregationBitsSet)?; if set_bits.len() > 1 { return Err(Error::MoreThanOneAggregationBitSet(set_bits.len())); diff --git a/beacon_node/beacon_chain/src/observed_attestations.rs b/beacon_node/beacon_chain/src/observed_attestations.rs index 0bea39ff8..358a50346 100644 --- a/beacon_node/beacon_chain/src/observed_attestations.rs +++ b/beacon_node/beacon_chain/src/observed_attestations.rs @@ -144,7 +144,7 @@ impl ObservedAttestations { self.sets .get_mut(index) - .ok_or_else(|| Error::InvalidSetIndex(index)) + .ok_or(Error::InvalidSetIndex(index)) .and_then(|set| set.observe_attestation(a, root)) } @@ -156,7 +156,7 @@ impl ObservedAttestations { self.sets .get(index) - .ok_or_else(|| Error::InvalidSetIndex(index)) + .ok_or(Error::InvalidSetIndex(index)) .and_then(|set| set.is_known(a, root)) } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 5e9a2551b..0b6d2b0aa 
100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -137,13 +137,11 @@ where let chain_config = config.chain.clone(); let graffiti = config.graffiti; - let store = - store.ok_or_else(|| "beacon_chain_start_method requires a store".to_string())?; + let store = store.ok_or("beacon_chain_start_method requires a store")?; let context = runtime_context - .ok_or_else(|| "beacon_chain_start_method requires a runtime context".to_string())? + .ok_or("beacon_chain_start_method requires a runtime context")? .service_context("beacon".into()); - let spec = chain_spec - .ok_or_else(|| "beacon_chain_start_method requires a chain spec".to_string())?; + let spec = chain_spec.ok_or("beacon_chain_start_method requires a chain spec")?; let builder = BeaconChainBuilder::new(eth_spec_instance) .logger(context.log().clone()) @@ -160,9 +158,7 @@ where builder }; - let chain_exists = builder - .store_contains_beacon_chain() - .unwrap_or_else(|_| false); + let chain_exists = builder.store_contains_beacon_chain().unwrap_or(false); // If the client is expect to resume but there's no beacon chain in the database, // use the `DepositContract` method. This scenario is quite common when the client @@ -310,11 +306,11 @@ where let beacon_chain = self .beacon_chain .clone() - .ok_or_else(|| "network requires a beacon chain")?; + .ok_or("network requires a beacon chain")?; let context = self .runtime_context .as_ref() - .ok_or_else(|| "network requires a runtime_context")? + .ok_or("network requires a runtime_context")? .clone(); let (network_globals, network_send) = @@ -333,16 +329,16 @@ where let context = self .runtime_context .as_ref() - .ok_or_else(|| "node timer requires a runtime_context")? + .ok_or("node timer requires a runtime_context")? .service_context("node_timer".into()); let beacon_chain = self .beacon_chain .clone() - .ok_or_else(|| "node timer requires a beacon chain")?; + .ok_or("node timer requires a beacon chain")?; let milliseconds_per_slot = self .chain_spec .as_ref() - .ok_or_else(|| "node timer requires a chain spec".to_string())? + .ok_or("node timer requires a chain spec")? .milliseconds_per_slot; spawn_timer(context.executor, beacon_chain, milliseconds_per_slot) @@ -370,16 +366,16 @@ where let context = self .runtime_context .as_ref() - .ok_or_else(|| "slasher requires a runtime_context")? + .ok_or("slasher requires a runtime_context")? .service_context("slasher_server_ctxt".into()); let slasher = self .slasher .clone() - .ok_or_else(|| "slasher server requires a slasher")?; + .ok_or("slasher server requires a slasher")?; let slot_clock = self .slot_clock .clone() - .ok_or_else(|| "slasher server requires a slot clock")?; + .ok_or("slasher server requires a slot clock")?; SlasherServer::run(slasher, slot_clock, &context.executor); Ok(()) } @@ -389,20 +385,20 @@ where let context = self .runtime_context .as_ref() - .ok_or_else(|| "slot_notifier requires a runtime_context")? + .ok_or("slot_notifier requires a runtime_context")? .service_context("slot_notifier".into()); let beacon_chain = self .beacon_chain .clone() - .ok_or_else(|| "slot_notifier requires a beacon chain")?; + .ok_or("slot_notifier requires a beacon chain")?; let network_globals = self .network_globals .clone() - .ok_or_else(|| "slot_notifier requires a libp2p network")?; + .ok_or("slot_notifier requires a libp2p network")?; let milliseconds_per_slot = self .chain_spec .as_ref() - .ok_or_else(|| "slot_notifier requires a chain spec".to_string())? + .ok_or("slot_notifier requires a chain spec")? 
.milliseconds_per_slot; spawn_notifier( @@ -430,7 +426,7 @@ where let runtime_context = self .runtime_context .as_ref() - .ok_or_else(|| "build requires a runtime context".to_string())?; + .ok_or("build requires a runtime context")?; let log = runtime_context.log().clone(); let http_api_listen_addr = if self.http_api_config.enabled { @@ -518,20 +514,20 @@ where let context = self .runtime_context .as_ref() - .ok_or_else(|| "beacon_chain requires a runtime context")? + .ok_or("beacon_chain requires a runtime context")? .clone(); let chain = self .beacon_chain_builder - .ok_or_else(|| "beacon_chain requires a beacon_chain_builder")? + .ok_or("beacon_chain requires a beacon_chain_builder")? .event_handler( self.event_handler - .ok_or_else(|| "beacon_chain requires an event handler")?, + .ok_or("beacon_chain requires an event handler")?, ) .slot_clock( self.slot_clock .clone() - .ok_or_else(|| "beacon_chain requires a slot clock")?, + .ok_or("beacon_chain requires a slot clock")?, ) .shutdown_sender(context.executor.shutdown_sender()) .build() @@ -573,7 +569,7 @@ where let context = self .runtime_context .as_ref() - .ok_or_else(|| "tee_event_handler requires a runtime_context")? + .ok_or("tee_event_handler requires a runtime_context")? .service_context("ws".into()); let log = context.log().clone(); @@ -619,12 +615,12 @@ where let context = self .runtime_context .as_ref() - .ok_or_else(|| "disk_store requires a log".to_string())? + .ok_or("disk_store requires a log")? .service_context("freezer_db".into()); let spec = self .chain_spec .clone() - .ok_or_else(|| "disk_store requires a chain spec".to_string())?; + .ok_or("disk_store requires a chain spec")?; self.db_path = Some(hot_path.into()); self.freezer_db_path = Some(cold_path.into()); @@ -661,15 +657,15 @@ where let context = self .runtime_context .as_ref() - .ok_or_else(|| "caching_eth1_backend requires a runtime_context")? + .ok_or("caching_eth1_backend requires a runtime_context")? 
.service_context("eth1_rpc".into()); let beacon_chain_builder = self .beacon_chain_builder - .ok_or_else(|| "caching_eth1_backend requires a beacon_chain_builder")?; + .ok_or("caching_eth1_backend requires a beacon_chain_builder")?; let spec = self .chain_spec .clone() - .ok_or_else(|| "caching_eth1_backend requires a chain spec".to_string())?; + .ok_or("caching_eth1_backend requires a chain spec")?; let backend = if let Some(eth1_service_from_genesis) = self.eth1_service { eth1_service_from_genesis.update_config(config)?; @@ -720,7 +716,7 @@ where pub fn no_eth1_backend(mut self) -> Result { let beacon_chain_builder = self .beacon_chain_builder - .ok_or_else(|| "caching_eth1_backend requires a beacon_chain_builder")?; + .ok_or("caching_eth1_backend requires a beacon_chain_builder")?; self.beacon_chain_builder = Some(beacon_chain_builder.no_eth1_backend()); @@ -739,7 +735,7 @@ where pub fn dummy_eth1_backend(mut self) -> Result { let beacon_chain_builder = self .beacon_chain_builder - .ok_or_else(|| "caching_eth1_backend requires a beacon_chain_builder")?; + .ok_or("caching_eth1_backend requires a beacon_chain_builder")?; self.beacon_chain_builder = Some(beacon_chain_builder.dummy_eth1_backend()?); @@ -763,16 +759,16 @@ where let beacon_chain_builder = self .beacon_chain_builder .as_ref() - .ok_or_else(|| "system_time_slot_clock requires a beacon_chain_builder")?; + .ok_or("system_time_slot_clock requires a beacon_chain_builder")?; let genesis_time = beacon_chain_builder .genesis_time - .ok_or_else(|| "system_time_slot_clock requires an initialized beacon state")?; + .ok_or("system_time_slot_clock requires an initialized beacon state")?; let spec = self .chain_spec .clone() - .ok_or_else(|| "system_time_slot_clock requires a chain spec".to_string())?; + .ok_or("system_time_slot_clock requires a chain spec")?; let slot_clock = SystemTimeSlotClock::new( spec.genesis_slot, diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 3649a3da3..de24f5b64 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -101,7 +101,7 @@ impl Config { pub fn create_db_path(&self) -> Result { let db_path = self .get_db_path() - .ok_or_else(|| "Unable to locate user home directory")?; + .ok_or("Unable to locate user home directory")?; ensure_dir_exists(db_path) } @@ -125,7 +125,7 @@ impl Config { pub fn create_freezer_db_path(&self) -> Result { let freezer_db_path = self .get_freezer_db_path() - .ok_or_else(|| "Unable to locate user home directory")?; + .ok_or("Unable to locate user home directory")?; ensure_dir_exists(freezer_db_path) } @@ -142,7 +142,7 @@ impl Config { pub fn create_data_dir(&self) -> Result { let path = self .get_data_dir() - .ok_or_else(|| "Unable to locate user home directory".to_string())?; + .ok_or("Unable to locate user home directory")?; ensure_dir_exists(path) } } diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 6f814557a..2f2f6faa6 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -31,7 +31,7 @@ pub fn spawn_notifier( let duration_to_next_slot = beacon_chain .slot_clock .duration_to_next_slot() - .ok_or_else(|| "slot_notifier unable to determine time to next slot")?; + .ok_or("slot_notifier unable to determine time to next slot")?; // Run this half way through each slot. 
let start_instant = tokio::time::Instant::now() + duration_to_next_slot + (slot_duration / 2); @@ -94,7 +94,7 @@ pub fn spawn_notifier( metrics::set_gauge( &metrics::SYNC_SLOTS_PER_SECOND, - speedo.slots_per_second().unwrap_or_else(|| 0_f64) as i64, + speedo.slots_per_second().unwrap_or(0_f64) as i64, ); // The next two lines take advantage of saturating subtraction on `Slot`. diff --git a/beacon_node/eth1/src/deposit_log.rs b/beacon_node/eth1/src/deposit_log.rs index e46bcc27a..1b3cfa01a 100644 --- a/beacon_node/eth1/src/deposit_log.rs +++ b/beacon_node/eth1/src/deposit_log.rs @@ -25,19 +25,19 @@ impl Log { let pubkey = bytes .get(PUBKEY_START..PUBKEY_START + PUBKEY_LEN) - .ok_or_else(|| "Insufficient bytes for pubkey".to_string())?; + .ok_or("Insufficient bytes for pubkey")?; let withdrawal_credentials = bytes .get(CREDS_START..CREDS_START + CREDS_LEN) - .ok_or_else(|| "Insufficient bytes for withdrawal credential".to_string())?; + .ok_or("Insufficient bytes for withdrawal credential")?; let amount = bytes .get(AMOUNT_START..AMOUNT_START + AMOUNT_LEN) - .ok_or_else(|| "Insufficient bytes for amount".to_string())?; + .ok_or("Insufficient bytes for amount")?; let signature = bytes .get(SIG_START..SIG_START + SIG_LEN) - .ok_or_else(|| "Insufficient bytes for signature".to_string())?; + .ok_or("Insufficient bytes for signature")?; let index = bytes .get(INDEX_START..INDEX_START + INDEX_LEN) - .ok_or_else(|| "Insufficient bytes for index".to_string())?; + .ok_or("Insufficient bytes for index")?; let deposit_data = DepositData { pubkey: PublicKeyBytes::from_ssz_bytes(pubkey) diff --git a/beacon_node/eth1/src/http.rs b/beacon_node/eth1/src/http.rs index f6ed2e834..88e322244 100644 --- a/beacon_node/eth1/src/http.rs +++ b/beacon_node/eth1/src/http.rs @@ -83,9 +83,9 @@ pub async fn get_network_id(endpoint: &str, timeout: Duration) -> Result Result Result Result, String> { /// Removes the `0x` prefix from some bytes. Returns an error if the prefix is not present. 
fn strip_prefix(hex: &str) -> Result<&str, String> { - if hex.starts_with("0x") { - Ok(&hex[2..]) + if let Some(stripped) = hex.strip_prefix("0x") { + Ok(stripped) } else { Err("Hex string did not start with `0x`".to_string()) } diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index 70e5501ae..1899d44f6 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -808,8 +808,8 @@ impl Service { .chunks(blocks_per_log_query) .take(max_log_requests_per_update) .map(|vec| { - let first = vec.first().cloned().unwrap_or_else(|| 0); - let last = vec.last().map(|n| n + 1).unwrap_or_else(|| 0); + let first = vec.first().cloned().unwrap_or(0); + let last = vec.last().map(|n| n + 1).unwrap_or(0); first..last }) .collect::>>() @@ -894,7 +894,7 @@ impl Service { metrics::set_gauge(&metrics::DEPOSIT_CACHE_LEN, cache.cache.len() as i64); metrics::set_gauge( &metrics::HIGHEST_PROCESSED_DEPOSIT_BLOCK, - cache.last_processed_block.unwrap_or_else(|| 0) as i64, + cache.last_processed_block.unwrap_or(0) as i64, ); } @@ -1035,7 +1035,7 @@ impl Service { .block_cache .read() .latest_block_timestamp() - .unwrap_or_else(|| 0) as i64, + .unwrap_or(0) as i64, ); blocks_imported += 1; diff --git a/beacon_node/eth2_libp2p/src/discovery/enr.rs b/beacon_node/eth2_libp2p/src/discovery/enr.rs index b7da95516..a461cfc18 100644 --- a/beacon_node/eth2_libp2p/src/discovery/enr.rs +++ b/beacon_node/eth2_libp2p/src/discovery/enr.rs @@ -34,16 +34,14 @@ impl Eth2Enr for Enr { fn bitfield(&self) -> Result, &'static str> { let bitfield_bytes = self .get(BITFIELD_ENR_KEY) - .ok_or_else(|| "ENR bitfield non-existent")?; + .ok_or("ENR bitfield non-existent")?; BitVector::::from_ssz_bytes(bitfield_bytes) .map_err(|_| "Could not decode the ENR SSZ bitfield") } fn eth2(&self) -> Result { - let eth2_bytes = self - .get(ETH2_ENR_KEY) - .ok_or_else(|| "ENR has no eth2 field")?; + let eth2_bytes = self.get(ETH2_ENR_KEY).ok_or("ENR has no eth2 field")?; EnrForkId::from_ssz_bytes(eth2_bytes).map_err(|_| "Could not decode EnrForkId") } @@ -79,7 +77,7 @@ pub fn use_or_load_enr( // same node id, different configuration - update the sequence number // Note: local_enr is generated with default(0) attnets value, // so a non default value in persisted enr will also update sequence number. - let new_seq_no = disk_enr.seq().checked_add(1).ok_or_else(|| "ENR sequence number on file is too large. Remove it to generate a new NodeId")?; + let new_seq_no = disk_enr.seq().checked_add(1).ok_or("ENR sequence number on file is too large. 
Remove it to generate a new NodeId")?; local_enr.set_seq(new_seq_no, enr_key).map_err(|e| { format!("Could not update ENR sequence number: {:?}", e) })?; @@ -133,7 +131,7 @@ pub fn create_enr_builder_from_config( } // we always give it our listening tcp port if enable_tcp { - let tcp_port = config.enr_tcp_port.unwrap_or_else(|| config.libp2p_port); + let tcp_port = config.enr_tcp_port.unwrap_or(config.libp2p_port); builder.tcp(tcp_port); } builder diff --git a/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs b/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs index 4d5aa4c1b..bd9a6a5b0 100644 --- a/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs +++ b/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs @@ -83,10 +83,7 @@ impl PeerInfo { /// Returns if the peer is subscribed to a given `SubnetId` pub fn on_subnet(&self, subnet_id: SubnetId) -> bool { if let Some(meta_data) = &self.meta_data { - return meta_data - .attnets - .get(*subnet_id as usize) - .unwrap_or_else(|_| false); + return meta_data.attnets.get(*subnet_id as usize).unwrap_or(false); } false } diff --git a/beacon_node/eth2_libp2p/src/peer_manager/score.rs b/beacon_node/eth2_libp2p/src/peer_manager/score.rs index 71608d8fd..5b6b0ac36 100644 --- a/beacon_node/eth2_libp2p/src/peer_manager/score.rs +++ b/beacon_node/eth2_libp2p/src/peer_manager/score.rs @@ -313,8 +313,7 @@ impl PartialOrd for Score { impl Ord for Score { fn cmp(&self, other: &Score) -> std::cmp::Ordering { - self.partial_cmp(other) - .unwrap_or_else(|| std::cmp::Ordering::Equal) + self.partial_cmp(other).unwrap_or(std::cmp::Ordering::Equal) } } diff --git a/beacon_node/eth2_libp2p/src/service.rs b/beacon_node/eth2_libp2p/src/service.rs index 84cdd961e..0ae21f229 100644 --- a/beacon_node/eth2_libp2p/src/service.rs +++ b/beacon_node/eth2_libp2p/src/service.rs @@ -375,8 +375,8 @@ fn build_transport( // Useful helper functions for debugging. Currently not used in the client. #[allow(dead_code)] fn keypair_from_hex(hex_bytes: &str) -> error::Result { - let hex_bytes = if hex_bytes.starts_with("0x") { - hex_bytes[2..].to_string() + let hex_bytes = if let Some(stripped) = hex_bytes.strip_prefix("0x") { + stripped.to_string() } else { hex_bytes.to_string() }; diff --git a/beacon_node/http_api/src/beacon_proposer_cache.rs b/beacon_node/http_api/src/beacon_proposer_cache.rs index 1dbcef1f5..4347ec951 100644 --- a/beacon_node/http_api/src/beacon_proposer_cache.rs +++ b/beacon_node/http_api/src/beacon_proposer_cache.rs @@ -30,7 +30,7 @@ impl BeaconProposerCache { .fork_choice .read() .get_block(&head_root) - .ok_or_else(|| BeaconChainError::MissingBeaconBlock(head_root))?; + .ok_or(BeaconChainError::MissingBeaconBlock(head_root))?; // If the head epoch is more than `EPOCHS_TO_SKIP` in the future, just build the cache at // the epoch of the head. This prevents doing a massive amount of skip slots when starting @@ -63,7 +63,7 @@ impl BeaconProposerCache { let mut head_state = chain .get_state(&head_block.state_root, Some(head_block.slot))? - .ok_or_else(|| BeaconChainError::MissingBeaconState(head_block.state_root))?; + .ok_or(BeaconChainError::MissingBeaconState(head_block.state_root))?; let decision_block_root = Self::decision_block_root(current_epoch, head_root, &head_state)?; @@ -85,7 +85,7 @@ impl BeaconProposerCache { .and_then(|i| { let pubkey = chain .validator_pubkey(i)? 
- .ok_or_else(|| BeaconChainError::ValidatorPubkeyCacheIncomplete(i))?; + .ok_or(BeaconChainError::ValidatorPubkeyCacheIncomplete(i))?; Ok(ProposerData { pubkey: PublicKeyBytes::from(pubkey), @@ -168,7 +168,7 @@ impl BeaconProposerCache { .fork_choice .read() .get_block(&head_block_root) - .ok_or_else(|| BeaconChainError::MissingBeaconBlock(head_block_root)) + .ok_or(BeaconChainError::MissingBeaconBlock(head_block_root)) .map_err(warp_utils::reject::beacon_chain_error)?; // Rebuild the cache if this call causes a cache-miss. diff --git a/beacon_node/network/src/attestation_service/mod.rs b/beacon_node/network/src/attestation_service/mod.rs index 1591c8339..b450657cb 100644 --- a/beacon_node/network/src/attestation_service/mod.rs +++ b/beacon_node/network/src/attestation_service/mod.rs @@ -315,7 +315,7 @@ impl AttestationService { .beacon_chain .slot_clock .now() - .ok_or_else(|| "Could not get the current slot")?; + .ok_or("Could not get the current slot")?; let discovery_subnets: Vec = exact_subnets .filter_map(|exact_subnet| { @@ -363,7 +363,7 @@ impl AttestationService { .beacon_chain .slot_clock .now() - .ok_or_else(|| "Could not get the current slot")?; + .ok_or("Could not get the current slot")?; // Calculate the duration to the unsubscription event. // There are two main cases. Attempting to subscribe to the current slot and all others. @@ -371,7 +371,7 @@ impl AttestationService { self.beacon_chain .slot_clock .duration_to_next_slot() - .ok_or_else(|| "Unable to determine duration to next slot")? + .ok_or("Unable to determine duration to next slot")? } else { let slot_duration = self.beacon_chain.slot_clock.slot_duration(); @@ -380,7 +380,7 @@ impl AttestationService { self.beacon_chain .slot_clock .duration_to_slot(exact_subnet.slot) - .ok_or_else(|| "Unable to determine duration to subscription slot")? + .ok_or("Unable to determine duration to subscription slot")? + slot_duration }; diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index c9645bbb4..485068c50 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -33,18 +33,14 @@ pub fn get_config( // If necessary, remove any existing database and configuration if client_config.data_dir.exists() && cli_args.is_present("purge-db") { // Remove the chain_db. - fs::remove_dir_all( - client_config - .get_db_path() - .ok_or_else(|| "Failed to get db_path".to_string())?, - ) - .map_err(|err| format!("Failed to remove chain_db: {}", err))?; + fs::remove_dir_all(client_config.get_db_path().ok_or("Failed to get db_path")?) + .map_err(|err| format!("Failed to remove chain_db: {}", err))?; // Remove the freezer db. fs::remove_dir_all( client_config .get_freezer_db_path() - .ok_or_else(|| "Failed to get freezer db path".to_string())?, + .ok_or("Failed to get freezer db path")?, ) .map_err(|err| format!("Failed to remove chain_db: {}", err))?; @@ -319,10 +315,10 @@ pub fn get_config( let mut split = wss_checkpoint.split(':'); let root_str = split .next() - .ok_or_else(|| "Improperly formatted weak subjectivity checkpoint".to_string())?; + .ok_or("Improperly formatted weak subjectivity checkpoint")?; let epoch_str = split .next() - .ok_or_else(|| "Improperly formatted weak subjectivity checkpoint".to_string())?; + .ok_or("Improperly formatted weak subjectivity checkpoint")?; if !root_str.starts_with("0x") { return Err( @@ -555,7 +551,7 @@ pub fn set_network_config( resolved_addrs .next() .map(|a| a.ip()) - .ok_or_else(|| "Resolved dns addr contains no entries".to_string())? 
+ .ok_or("Resolved dns addr contains no entries")? } else { return Err(format!("Failed to parse enr-address: {}", enr_address)); }; diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 5f2f70d3e..a612d413e 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -394,9 +394,9 @@ impl, Cold: ItemStore> HotColdDB // boundary state in the hot DB. let state = self .load_hot_state(&epoch_boundary_state_root, BlockReplay::Accurate)? - .ok_or_else(|| { - HotColdDBError::MissingEpochBoundaryState(epoch_boundary_state_root) - })?; + .ok_or(HotColdDBError::MissingEpochBoundaryState( + epoch_boundary_state_root, + ))?; Ok(Some(state)) } else { // Try the cold DB @@ -553,10 +553,9 @@ impl, Cold: ItemStore> HotColdDB epoch_boundary_state_root, }) = self.load_hot_state_summary(state_root)? { - let boundary_state = get_full_state(&self.hot_db, &epoch_boundary_state_root)? - .ok_or_else(|| { - HotColdDBError::MissingEpochBoundaryState(epoch_boundary_state_root) - })?; + let boundary_state = get_full_state(&self.hot_db, &epoch_boundary_state_root)?.ok_or( + HotColdDBError::MissingEpochBoundaryState(epoch_boundary_state_root), + )?; // Optimization to avoid even *thinking* about replaying blocks if we're already // on an epoch boundary. @@ -682,8 +681,9 @@ impl, Cold: ItemStore> HotColdDB let high_restore_point = if high_restore_point_idx * self.config.slots_per_restore_point >= split.slot.as_u64() { - self.get_state(&split.state_root, Some(split.slot))? - .ok_or_else(|| HotColdDBError::MissingSplitState(split.state_root, split.slot))? + self.get_state(&split.state_root, Some(split.slot))?.ok_or( + HotColdDBError::MissingSplitState(split.state_root, split.slot), + )? } else { self.load_restore_point_by_index(high_restore_point_idx)? }; @@ -1019,7 +1019,7 @@ pub fn migrate_database, Cold: ItemStore>( if slot % store.config.slots_per_restore_point == 0 { let state: BeaconState = get_full_state(&store.hot_db, &state_root)? - .ok_or_else(|| HotColdDBError::MissingStateToFreeze(state_root))?; + .ok_or(HotColdDBError::MissingStateToFreeze(state_root))?; store.store_cold_state(&state_root, &state, &mut cold_db_ops)?; } diff --git a/beacon_node/timer/src/lib.rs b/beacon_node/timer/src/lib.rs index 74c9e5eb0..92e437ae5 100644 --- a/beacon_node/timer/src/lib.rs +++ b/beacon_node/timer/src/lib.rs @@ -21,7 +21,7 @@ pub fn spawn_timer( + beacon_chain .slot_clock .duration_to_next_slot() - .ok_or_else(|| "slot_notifier unable to determine time to next slot")?; + .ok_or("slot_notifier unable to determine time to next slot")?; // Warning: `interval_at` panics if `milliseconds_per_slot` = 0. let mut interval = interval_at(start_instant, Duration::from_millis(milliseconds_per_slot)); diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index e557f4008..212fdd17f 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -84,9 +84,9 @@ impl TryFrom<&ArgMatches<'_>> for BootNodeConfig { let spec = config .yaml_config .as_ref() - .ok_or_else(|| "The testnet directory must contain a spec config".to_string())? + .ok_or("The testnet directory must contain a spec config")? 
.apply_to_chain_spec::(&T::default_spec()) - .ok_or_else(|| "The loaded config is not compatible with the current spec")?; + .ok_or("The loaded config is not compatible with the current spec")?; if config.beacon_state_is_known() { let genesis_state = config.beacon_state::()?; diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index 51552c930..6106b4a5a 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -86,14 +86,12 @@ impl ValidatorDefinition { let voting_keystore_path = voting_keystore_path.as_ref().into(); let keystore = Keystore::from_json_file(&voting_keystore_path).map_err(Error::UnableToOpenKeystore)?; - let voting_public_key = keystore - .public_key() - .ok_or_else(|| Error::InvalidKeystorePubkey)?; + let voting_public_key = keystore.public_key().ok_or(Error::InvalidKeystorePubkey)?; Ok(ValidatorDefinition { enabled: true, voting_public_key, - description: keystore.description().unwrap_or_else(|| "").to_string(), + description: keystore.description().unwrap_or("").to_string(), signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, voting_keystore_password_path: None, @@ -214,7 +212,7 @@ impl ValidatorDefinitions { Some(ValidatorDefinition { enabled: true, voting_public_key, - description: keystore.description().unwrap_or_else(|| "").to_string(), + description: keystore.description().unwrap_or("").to_string(), signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, voting_keystore_password_path, diff --git a/common/clap_utils/src/lib.rs b/common/clap_utils/src/lib.rs index 1eba1efac..e78401b81 100644 --- a/common/clap_utils/src/lib.rs +++ b/common/clap_utils/src/lib.rs @@ -101,8 +101,8 @@ pub fn parse_ssz_optional( matches .value_of(name) .map(|val| { - if val.starts_with("0x") { - let vec = hex::decode(&val[2..]) + if let Some(stripped) = val.strip_prefix("0x") { + let vec = hex::decode(stripped) .map_err(|e| format!("Unable to parse {} as hex: {:?}", name, e))?; T::from_ssz_bytes(&vec) diff --git a/common/deposit_contract/build.rs b/common/deposit_contract/build.rs index 581ec819f..719a5a05f 100644 --- a/common/deposit_contract/build.rs +++ b/common/deposit_contract/build.rs @@ -86,7 +86,7 @@ pub fn download_deposit_contract( let abi = contract .get("abi") - .ok_or_else(|| "Response does not contain key: abi".to_string())? + .ok_or("Response does not contain key: abi")? .to_string(); verify_checksum(abi.as_bytes(), abi_checksum); @@ -97,7 +97,7 @@ pub fn download_deposit_contract( let bytecode = contract .get("bytecode") - .ok_or_else(|| "Response does not contain key: bytecode".to_string())? + .ok_or("Response does not contain key: bytecode")? .to_string(); verify_checksum(bytecode.as_bytes(), bytecode_checksum); diff --git a/common/deposit_contract/src/lib.rs b/common/deposit_contract/src/lib.rs index 8ec048826..e57f22dc0 100644 --- a/common/deposit_contract/src/lib.rs +++ b/common/deposit_contract/src/lib.rs @@ -55,8 +55,7 @@ pub fn decode_eth1_tx_data( ) -> Result<(DepositData, Hash256), DecodeError> { let abi = Contract::load(ABI)?; let function = abi.function("deposit")?; - let mut tokens = - function.decode_input(bytes.get(4..).ok_or_else(|| DecodeError::InadequateBytes)?)?; + let mut tokens = function.decode_input(bytes.get(4..).ok_or(DecodeError::InadequateBytes)?)?; macro_rules! 
decode_token { ($type: ty, $to_fn: ident) => { diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index b08ceabb2..2258e93a2 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -79,7 +79,7 @@ impl ValidatorClientHttpClient { let sig = response .headers() .get("Signature") - .ok_or_else(|| Error::MissingSignatureHeader)? + .ok_or(Error::MissingSignatureHeader)? .to_str() .map_err(|_| Error::InvalidSignatureHeader)? .to_string(); @@ -96,7 +96,7 @@ impl ValidatorClientHttpClient { Some(secp256k1::verify(&message, &sig, &self.server_pubkey)) }) .filter(|is_valid| *is_valid) - .ok_or_else(|| Error::InvalidSignatureHeader)?; + .ok_or(Error::InvalidSignatureHeader)?; Ok(body) } diff --git a/common/eth2_interop_keypairs/src/lib.rs b/common/eth2_interop_keypairs/src/lib.rs index ce11efa8f..eb26f563e 100644 --- a/common/eth2_interop_keypairs/src/lib.rs +++ b/common/eth2_interop_keypairs/src/lib.rs @@ -106,8 +106,8 @@ impl TryInto for YamlKeypair { } fn string_to_bytes(string: &str) -> Result, String> { - let string = if string.starts_with("0x") { - &string[2..] + let string = if let Some(stripped) = string.strip_prefix("0x") { + stripped } else { string }; diff --git a/common/eth2_testnet_config/src/lib.rs b/common/eth2_testnet_config/src/lib.rs index 1bec2c3cd..fc0c1404e 100644 --- a/common/eth2_testnet_config/src/lib.rs +++ b/common/eth2_testnet_config/src/lib.rs @@ -122,7 +122,7 @@ impl Eth2TestnetConfig { let genesis_state_bytes = self .genesis_state_bytes .as_ref() - .ok_or_else(|| "Genesis state is unknown".to_string())?; + .ok_or("Genesis state is unknown")?; BeaconState::from_ssz_bytes(genesis_state_bytes) .map_err(|e| format!("Genesis state SSZ bytes are invalid: {:?}", e)) @@ -156,8 +156,8 @@ impl Eth2TestnetConfig { // // This allows us to play nice with other clients that are expecting // plain-text, not YAML. - let no_doc_header = if yaml.starts_with("---\n") { - &yaml[4..] 
+ let no_doc_header = if let Some(stripped) = yaml.strip_prefix("---\n") { + stripped } else { &yaml }; diff --git a/common/validator_dir/src/builder.rs b/common/validator_dir/src/builder.rs index ad1b01ee4..70cb13ae9 100644 --- a/common/validator_dir/src/builder.rs +++ b/common/validator_dir/src/builder.rs @@ -138,7 +138,7 @@ impl<'a> Builder<'a> { pub fn build(self) -> Result { let (voting_keystore, voting_password) = self .voting_keystore - .ok_or_else(|| Error::UninitializedVotingKeystore)?; + .ok_or(Error::UninitializedVotingKeystore)?; let dir = self .base_validators_dir diff --git a/common/validator_dir/src/validator_dir.rs b/common/validator_dir/src/validator_dir.rs index fb2e06bd4..5706b6630 100644 --- a/common/validator_dir/src/validator_dir.rs +++ b/common/validator_dir/src/validator_dir.rs @@ -167,8 +167,8 @@ impl ValidatorDir { .map_err(Error::UnableToReadDepositData) .and_then(|hex_bytes| { let hex = std::str::from_utf8(&hex_bytes).map_err(|_| Error::DepositDataNotUtf8)?; - if hex.starts_with("0x") { - hex::decode(&hex[2..]).map_err(Error::DepositDataInvalidHex) + if let Some(stripped) = hex.strip_prefix("0x") { + hex::decode(stripped).map_err(Error::DepositDataInvalidHex) } else { Err(Error::DepositDataMissing0xPrefix) } diff --git a/consensus/cached_tree_hash/src/cache.rs b/consensus/cached_tree_hash/src/cache.rs index 9c546e0ff..edb60f306 100644 --- a/consensus/cached_tree_hash/src/cache.rs +++ b/consensus/cached_tree_hash/src/cache.rs @@ -115,7 +115,7 @@ impl TreeHashCache { let left = self.layers[depth] .get(arena, left_idx)? - .ok_or_else(|| Error::MissingLeftIdx(left_idx))?; + .ok_or(Error::MissingLeftIdx(left_idx))?; let right = self.layers[depth] .get(arena, right_idx)? .copied() diff --git a/consensus/cached_tree_hash/src/cache_arena.rs b/consensus/cached_tree_hash/src/cache_arena.rs index 17a23ed0c..44ba89927 100644 --- a/consensus/cached_tree_hash/src/cache_arena.rs +++ b/consensus/cached_tree_hash/src/cache_arena.rs @@ -55,9 +55,7 @@ impl CacheArena { .iter_mut() .skip(alloc_id + 1) .try_for_each(|offset| { - *offset = offset - .checked_add(grow_by) - .ok_or_else(|| Error::OffsetOverflow)?; + *offset = offset.checked_add(grow_by).ok_or(Error::OffsetOverflow)?; Ok(()) }) @@ -75,7 +73,7 @@ impl CacheArena { .try_for_each(|offset| { *offset = offset .checked_sub(shrink_by) - .ok_or_else(|| Error::OffsetUnderflow)?; + .ok_or(Error::OffsetUnderflow)?; Ok(()) }) @@ -99,15 +97,12 @@ impl CacheArena { let offset = *self .offsets .get(alloc_id) - .ok_or_else(|| Error::UnknownAllocId(alloc_id))?; + .ok_or(Error::UnknownAllocId(alloc_id))?; let start = range .start .checked_add(offset) - .ok_or_else(|| Error::RangeOverFlow)?; - let end = range - .end - .checked_add(offset) - .ok_or_else(|| Error::RangeOverFlow)?; + .ok_or(Error::RangeOverFlow)?; + let end = range.end.checked_add(offset).ok_or(Error::RangeOverFlow)?; let prev_len = self.backing.len(); @@ -127,7 +122,7 @@ impl CacheArena { let start = self .offsets .get(alloc_id) - .ok_or_else(|| Error::UnknownAllocId(alloc_id))?; + .ok_or(Error::UnknownAllocId(alloc_id))?; let end = self .offsets .get(alloc_id + 1) @@ -143,7 +138,7 @@ impl CacheArena { let offset = self .offsets .get(alloc_id) - .ok_or_else(|| Error::UnknownAllocId(alloc_id))?; + .ok_or(Error::UnknownAllocId(alloc_id))?; Ok(self.backing.get(i + offset)) } else { Ok(None) @@ -156,7 +151,7 @@ impl CacheArena { let offset = self .offsets .get(alloc_id) - .ok_or_else(|| Error::UnknownAllocId(alloc_id))?; + .ok_or(Error::UnknownAllocId(alloc_id))?; 
Ok(self.backing.get_mut(i + offset)) } else { Ok(None) @@ -168,7 +163,7 @@ impl CacheArena { let start = *self .offsets .get(alloc_id) - .ok_or_else(|| Error::UnknownAllocId(alloc_id))?; + .ok_or(Error::UnknownAllocId(alloc_id))?; let end = self .offsets .get(alloc_id + 1) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index ae4d06a60..6697af806 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -312,7 +312,7 @@ where let block = self .proto_array .get_block(&block_root) - .ok_or_else(|| Error::MissingProtoArrayBlock(block_root))?; + .ok_or(Error::MissingProtoArrayBlock(block_root))?; match block.slot.cmp(&ancestor_slot) { Ordering::Greater => Ok(self @@ -618,7 +618,7 @@ where let block = self .proto_array .get_block(&indexed_attestation.data.beacon_block_root) - .ok_or_else(|| InvalidAttestation::UnknownHeadBlock { + .ok_or(InvalidAttestation::UnknownHeadBlock { beacon_block_root: indexed_attestation.data.beacon_block_root, })?; diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index c89a96628..093e0ae3b 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -77,7 +77,7 @@ impl ProtoArray { let node = self .nodes .get_mut(node_index) - .ok_or_else(|| Error::InvalidNodeIndex(node_index))?; + .ok_or(Error::InvalidNodeIndex(node_index))?; // There is no need to adjust the balances or manage parent of the zero hash since it // is an alias to the genesis block. The weight applied to the genesis block is @@ -89,7 +89,7 @@ impl ProtoArray { let node_delta = deltas .get(node_index) .copied() - .ok_or_else(|| Error::InvalidNodeDelta(node_index))?; + .ok_or(Error::InvalidNodeDelta(node_index))?; // Apply the delta to the node. if node_delta < 0 { @@ -105,19 +105,19 @@ impl ProtoArray { node.weight = node .weight .checked_sub(node_delta.abs() as u64) - .ok_or_else(|| Error::DeltaOverflow(node_index))?; + .ok_or(Error::DeltaOverflow(node_index))?; } else { node.weight = node .weight .checked_add(node_delta as u64) - .ok_or_else(|| Error::DeltaOverflow(node_index))?; + .ok_or(Error::DeltaOverflow(node_index))?; } // If the node has a parent, try to update its best-child and best-descendant. if let Some(parent_index) = node.parent { let parent_delta = deltas .get_mut(parent_index) - .ok_or_else(|| Error::InvalidParentDelta(parent_index))?; + .ok_or(Error::InvalidParentDelta(parent_index))?; // Back-propagate the nodes delta to its parent. *parent_delta += node_delta; @@ -185,16 +185,14 @@ impl ProtoArray { let justified_node = self .nodes .get(justified_index) - .ok_or_else(|| Error::InvalidJustifiedIndex(justified_index))?; + .ok_or(Error::InvalidJustifiedIndex(justified_index))?; - let best_descendant_index = justified_node - .best_descendant - .unwrap_or_else(|| justified_index); + let best_descendant_index = justified_node.best_descendant.unwrap_or(justified_index); let best_node = self .nodes .get(best_descendant_index) - .ok_or_else(|| Error::InvalidBestDescendant(best_descendant_index))?; + .ok_or(Error::InvalidBestDescendant(best_descendant_index))?; // Perform a sanity check that the node is indeed valid to be the head. 
if !self.node_is_viable_for_head(&best_node) { @@ -228,7 +226,7 @@ impl ProtoArray { let finalized_index = *self .indices .get(&finalized_root) - .ok_or_else(|| Error::FinalizedNodeUnknown(finalized_root))?; + .ok_or(Error::FinalizedNodeUnknown(finalized_root))?; if finalized_index < self.prune_threshold { // Pruning at small numbers incurs more cost than benefit. @@ -240,7 +238,7 @@ impl ProtoArray { let root = &self .nodes .get(node_index) - .ok_or_else(|| Error::InvalidNodeIndex(node_index))? + .ok_or(Error::InvalidNodeIndex(node_index))? .root; self.indices.remove(root); } @@ -252,7 +250,7 @@ impl ProtoArray { for (_root, index) in self.indices.iter_mut() { *index = index .checked_sub(finalized_index) - .ok_or_else(|| Error::IndexOverflow("indices"))?; + .ok_or(Error::IndexOverflow("indices"))?; } // Iterate through all the existing nodes and adjust their indices to match the new layout @@ -266,14 +264,14 @@ impl ProtoArray { node.best_child = Some( best_child .checked_sub(finalized_index) - .ok_or_else(|| Error::IndexOverflow("best_child"))?, + .ok_or(Error::IndexOverflow("best_child"))?, ); } if let Some(best_descendant) = node.best_descendant { node.best_descendant = Some( best_descendant .checked_sub(finalized_index) - .ok_or_else(|| Error::IndexOverflow("best_descendant"))?, + .ok_or(Error::IndexOverflow("best_descendant"))?, ); } } @@ -301,12 +299,12 @@ impl ProtoArray { let child = self .nodes .get(child_index) - .ok_or_else(|| Error::InvalidNodeIndex(child_index))?; + .ok_or(Error::InvalidNodeIndex(child_index))?; let parent = self .nodes .get(parent_index) - .ok_or_else(|| Error::InvalidNodeIndex(parent_index))?; + .ok_or(Error::InvalidNodeIndex(parent_index))?; let child_leads_to_viable_head = self.node_leads_to_viable_head(&child)?; @@ -335,7 +333,7 @@ impl ProtoArray { let best_child = self .nodes .get(best_child_index) - .ok_or_else(|| Error::InvalidBestDescendant(best_child_index))?; + .ok_or(Error::InvalidBestDescendant(best_child_index))?; let best_child_leads_to_viable_head = self.node_leads_to_viable_head(&best_child)?; @@ -373,7 +371,7 @@ impl ProtoArray { let parent = self .nodes .get_mut(parent_index) - .ok_or_else(|| Error::InvalidNodeIndex(parent_index))?; + .ok_or(Error::InvalidNodeIndex(parent_index))?; parent.best_child = new_best_child; parent.best_descendant = new_best_descendant; @@ -389,7 +387,7 @@ impl ProtoArray { let best_descendant = self .nodes .get(best_descendant_index) - .ok_or_else(|| Error::InvalidBestDescendant(best_descendant_index))?; + .ok_or(Error::InvalidBestDescendant(best_descendant_index))?; self.node_is_viable_for_head(best_descendant) } else { diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index e4cf5bbc6..252c6db89 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -287,14 +287,14 @@ fn compute_deltas( // If the validator was not included in the _old_ balances (i.e., it did not exist yet) // then say its balance was zero. - let old_balance = old_balances.get(val_index).copied().unwrap_or_else(|| 0); + let old_balance = old_balances.get(val_index).copied().unwrap_or(0); // If the validators vote is not known in the _new_ balances, then use a balance of zero. 
// // It is possible that there is a vote for an unknown validator if we change our justified // state to a new state with a higher epoch that is on a different fork because that fork may have // on-boarded less validators than the prior fork. - let new_balance = new_balances.get(val_index).copied().unwrap_or_else(|| 0); + let new_balance = new_balances.get(val_index).copied().unwrap_or(0); if vote.current_root != vote.next_root || old_balance != new_balance { // We ignore the vote if it is not known in `indices`. We assume that it is outside @@ -302,9 +302,9 @@ fn compute_deltas( if let Some(current_delta_index) = indices.get(&vote.current_root).copied() { let delta = deltas .get(current_delta_index) - .ok_or_else(|| Error::InvalidNodeDelta(current_delta_index))? + .ok_or(Error::InvalidNodeDelta(current_delta_index))? .checked_sub(old_balance as i64) - .ok_or_else(|| Error::DeltaOverflow(current_delta_index))?; + .ok_or(Error::DeltaOverflow(current_delta_index))?; // Array access safe due to check on previous line. deltas[current_delta_index] = delta; @@ -315,9 +315,9 @@ fn compute_deltas( if let Some(next_delta_index) = indices.get(&vote.next_root).copied() { let delta = deltas .get(next_delta_index) - .ok_or_else(|| Error::InvalidNodeDelta(next_delta_index))? + .ok_or(Error::InvalidNodeDelta(next_delta_index))? .checked_add(new_balance as i64) - .ok_or_else(|| Error::DeltaOverflow(next_delta_index))?; + .ok_or(Error::DeltaOverflow(next_delta_index))?; // Array access safe due to check on previous line. deltas[next_delta_index] = delta; diff --git a/consensus/serde_utils/src/hex.rs b/consensus/serde_utils/src/hex.rs index 79dfaa506..7ffa347e5 100644 --- a/consensus/serde_utils/src/hex.rs +++ b/consensus/serde_utils/src/hex.rs @@ -13,8 +13,8 @@ pub fn encode>(data: T) -> String { /// Decode `data` from a 0x-prefixed hex string. pub fn decode(s: &str) -> Result, String> { - if s.starts_with("0x") { - hex::decode(&s[2..]).map_err(|e| format!("invalid hex: {:?}", e)) + if let Some(stripped) = s.strip_prefix("0x") { + hex::decode(stripped).map_err(|e| format!("invalid hex: {:?}", e)) } else { Err("hex must have 0x prefix".to_string()) } @@ -33,8 +33,8 @@ impl<'de> Visitor<'de> for PrefixedHexVisitor { where E: de::Error, { - if value.starts_with("0x") { - Ok(hex::decode(&value[2..]) + if let Some(stripped) = value.strip_prefix("0x") { + Ok(hex::decode(stripped) .map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e)))?) } else { Err(de::Error::custom("missing 0x prefix")) diff --git a/consensus/ssz/src/decode/impls.rs b/consensus/ssz/src/decode/impls.rs index e039e2d16..29c8c1550 100644 --- a/consensus/ssz/src/decode/impls.rs +++ b/consensus/ssz/src/decode/impls.rs @@ -450,7 +450,7 @@ pub fn decode_list_of_variable_length_items( bytes.get(start..offset) }; - let slice = slice_option.ok_or_else(|| DecodeError::OutOfBoundsByte { i: offset })?; + let slice = slice_option.ok_or(DecodeError::OutOfBoundsByte { i: offset })?; values.push(T::from_ssz_bytes(slice)?); } diff --git a/consensus/ssz_types/src/bitfield.rs b/consensus/ssz_types/src/bitfield.rs index 09fa9fc2d..df9523c87 100644 --- a/consensus/ssz_types/src/bitfield.rs +++ b/consensus/ssz_types/src/bitfield.rs @@ -170,7 +170,7 @@ impl Bitfield> { let len = initial_bitfield .highest_set_bit() - .ok_or_else(|| Error::MissingLengthInformation)?; + .ok_or(Error::MissingLengthInformation)?; // The length bit should be in the last byte, or else it means we have too many bytes. 
if len / 8 + 1 != bytes_len { @@ -286,7 +286,7 @@ impl Bitfield { let byte = self .bytes .get_mut(i / 8) - .ok_or_else(|| Error::OutOfBounds { i, len })?; + .ok_or(Error::OutOfBounds { i, len })?; if value { *byte |= 1 << (i % 8) @@ -308,7 +308,7 @@ impl Bitfield { let byte = self .bytes .get(i / 8) - .ok_or_else(|| Error::OutOfBounds { i, len: self.len })?; + .ok_or(Error::OutOfBounds { i, len: self.len })?; Ok(*byte & 1 << (i % 8) > 0) } else { diff --git a/consensus/ssz_types/src/fixed_vector.rs b/consensus/ssz_types/src/fixed_vector.rs index 35efbf996..01173e659 100644 --- a/consensus/ssz_types/src/fixed_vector.rs +++ b/consensus/ssz_types/src/fixed_vector.rs @@ -236,7 +236,7 @@ where let num_items = bytes .len() .checked_div(T::ssz_fixed_len()) - .ok_or_else(|| ssz::DecodeError::ZeroLengthItem)?; + .ok_or(ssz::DecodeError::ZeroLengthItem)?; if num_items != fixed_len { return Err(ssz::DecodeError::BytesInvalid(format!( diff --git a/consensus/ssz_types/src/variable_list.rs b/consensus/ssz_types/src/variable_list.rs index 599e4f5b6..4cbef6743 100644 --- a/consensus/ssz_types/src/variable_list.rs +++ b/consensus/ssz_types/src/variable_list.rs @@ -236,7 +236,7 @@ where let num_items = bytes .len() .checked_div(T::ssz_fixed_len()) - .ok_or_else(|| ssz::DecodeError::ZeroLengthItem)?; + .ok_or(ssz::DecodeError::ZeroLengthItem)?; if num_items > max_len { return Err(ssz::DecodeError::BytesInvalid(format!( diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index 857eca13a..e6b71b24e 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -206,8 +206,7 @@ where .attesting_indices .into_iter() .map(|&validator_idx| { - Ok(get_pubkey(validator_idx as usize) - .ok_or_else(|| Error::ValidatorUnknown(validator_idx))?) + Ok(get_pubkey(validator_idx as usize).ok_or(Error::ValidatorUnknown(validator_idx))?) }) .collect::>()?; @@ -241,8 +240,7 @@ where .attesting_indices .into_iter() .map(|&validator_idx| { - Ok(get_pubkey(validator_idx as usize) - .ok_or_else(|| Error::ValidatorUnknown(validator_idx))?) + Ok(get_pubkey(validator_idx as usize).ok_or(Error::ValidatorUnknown(validator_idx))?) 
}) .collect::>()?; @@ -355,8 +353,7 @@ where Ok(SignatureSet::single_pubkey( signature, - get_pubkey(validator_index as usize) - .ok_or_else(|| Error::ValidatorUnknown(validator_index))?, + get_pubkey(validator_index as usize).ok_or(Error::ValidatorUnknown(validator_index))?, message, )) } @@ -391,8 +388,7 @@ where Ok(SignatureSet::single_pubkey( signature, - get_pubkey(validator_index as usize) - .ok_or_else(|| Error::ValidatorUnknown(validator_index))?, + get_pubkey(validator_index as usize).ok_or(Error::ValidatorUnknown(validator_index))?, message, )) } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index fd2781f1b..f2642111e 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -884,7 +884,7 @@ impl BeaconState { self.eth1_data .deposit_count .checked_sub(self.eth1_deposit_index) - .ok_or_else(|| Error::InvalidDepositState { + .ok_or(Error::InvalidDepositState { deposit_count: self.eth1_data.deposit_count, deposit_index: self.eth1_deposit_index, }) diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index 728c9cf02..0d435f168 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -56,7 +56,7 @@ impl CommitteeCache { &seed[..], false, ) - .ok_or_else(|| Error::UnableToShuffle)?; + .ok_or(Error::UnableToShuffle)?; // The use of `NonZeroUsize` reduces the maximum number of possible validators by one. if state.validators.len() == usize::max_value() { @@ -148,7 +148,7 @@ impl CommitteeCache { pub fn get_all_beacon_committees(&self) -> Result, Error> { let initialized_epoch = self .initialized_epoch - .ok_or_else(|| Error::CommitteeCacheUninitialized(None))?; + .ok_or(Error::CommitteeCacheUninitialized(None))?; initialized_epoch.slot_iter(self.slots_per_epoch).try_fold( Vec::with_capacity(self.slots_per_epoch as usize), diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs index 6abc795a1..ddd85bb93 100644 --- a/consensus/types/src/beacon_state/tree_hash_cache.rs +++ b/consensus/types/src/beacon_state/tree_hash_cache.rs @@ -419,7 +419,7 @@ impl ParallelValidatorTreeHash { let validator = validators .get(val_index) - .ok_or_else(|| Error::TreeHashCacheInconsistent)?; + .ok_or(Error::TreeHashCacheInconsistent)?; validator .recalculate_tree_hash_root(arena, cache) diff --git a/crypto/bls/src/macros.rs b/crypto/bls/src/macros.rs index 136faeb44..a5fce70a9 100644 --- a/crypto/bls/src/macros.rs +++ b/crypto/bls/src/macros.rs @@ -95,8 +95,8 @@ macro_rules! 
impl_from_str { type Err = String; fn from_str(s: &str) -> Result { - if s.starts_with("0x") { - let bytes = hex::decode(&s[2..]).map_err(|e| e.to_string())?; + if let Some(stripped) = s.strip_prefix("0x") { + let bytes = hex::decode(stripped).map_err(|e| e.to_string())?; Self::deserialize(&bytes[..]).map_err(|e| format!("{:?}", e)) } else { Err("must start with 0x".to_string()) diff --git a/crypto/eth2_wallet/src/wallet.rs b/crypto/eth2_wallet/src/wallet.rs index 39ab816e1..694be44d4 100644 --- a/crypto/eth2_wallet/src/wallet.rs +++ b/crypto/eth2_wallet/src/wallet.rs @@ -202,7 +202,7 @@ impl Wallet { .json .nextaccount .checked_add(1) - .ok_or_else(|| Error::PathExhausted)?; + .ok_or(Error::PathExhausted)?; Ok(keystores) } diff --git a/lcli/src/change_genesis_time.rs b/lcli/src/change_genesis_time.rs index 3c9441851..94ee6b2cf 100644 --- a/lcli/src/change_genesis_time.rs +++ b/lcli/src/change_genesis_time.rs @@ -8,13 +8,13 @@ use types::{BeaconState, EthSpec}; pub fn run(matches: &ArgMatches) -> Result<(), String> { let path = matches .value_of("ssz-state") - .ok_or_else(|| "ssz-state not specified")? + .ok_or("ssz-state not specified")? .parse::() .map_err(|e| format!("Unable to parse ssz-state: {}", e))?; let genesis_time = matches .value_of("genesis-time") - .ok_or_else(|| "genesis-time not specified")? + .ok_or("genesis-time not specified")? .parse::() .map_err(|e| format!("Unable to parse genesis-time: {}", e))?; diff --git a/lcli/src/eth1_genesis.rs b/lcli/src/eth1_genesis.rs index b110f63cf..97aefe560 100644 --- a/lcli/src/eth1_genesis.rs +++ b/lcli/src/eth1_genesis.rs @@ -24,7 +24,7 @@ pub fn run(mut env: Environment, matches: &ArgMatches<'_>) -> Res let testnet_dir = matches .value_of("testnet-dir") - .ok_or_else(|| ()) + .ok_or(()) .and_then(|dir| dir.parse::().map_err(|_| ())) .unwrap_or_else(|_| { dirs::home_dir() @@ -37,7 +37,7 @@ pub fn run(mut env: Environment, matches: &ArgMatches<'_>) -> Res let spec = eth2_testnet_config .yaml_config .as_ref() - .ok_or_else(|| "The testnet directory must contain a spec config".to_string())? + .ok_or("The testnet directory must contain a spec config")? .apply_to_chain_spec::(&env.core_context().eth2_config.spec) .ok_or_else(|| { format!( diff --git a/lcli/src/interop_genesis.rs b/lcli/src/interop_genesis.rs index 68414c856..4aed06b3d 100644 --- a/lcli/src/interop_genesis.rs +++ b/lcli/src/interop_genesis.rs @@ -11,7 +11,7 @@ use types::{test_utils::generate_deterministic_keypairs, EthSpec}; pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { let validator_count = matches .value_of("validator-count") - .ok_or_else(|| "validator-count not specified")? + .ok_or("validator-count not specified")? .parse::() .map_err(|e| format!("Unable to parse validator-count: {}", e))?; @@ -28,7 +28,7 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< let testnet_dir = matches .value_of("testnet-dir") - .ok_or_else(|| ()) + .ok_or(()) .and_then(|dir| dir.parse::().map_err(|_| ())) .unwrap_or_else(|_| { dirs::home_dir() @@ -41,7 +41,7 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< let mut spec = eth2_testnet_config .yaml_config .as_ref() - .ok_or_else(|| "The testnet directory must contain a spec config".to_string())? + .ok_or("The testnet directory must contain a spec config")? 
.apply_to_chain_spec::(&env.core_context().eth2_config.spec) .ok_or_else(|| { format!( diff --git a/lcli/src/parse_hex.rs b/lcli/src/parse_hex.rs index 6c120c033..992cbf91d 100644 --- a/lcli/src/parse_hex.rs +++ b/lcli/src/parse_hex.rs @@ -4,12 +4,10 @@ use ssz::Decode; use types::{BeaconBlock, BeaconState, EthSpec}; pub fn run_parse_hex(matches: &ArgMatches) -> Result<(), String> { - let type_str = matches - .value_of("type") - .ok_or_else(|| "No type supplied".to_string())?; + let type_str = matches.value_of("type").ok_or("No type supplied")?; let mut hex: String = matches .value_of("hex_ssz") - .ok_or_else(|| "No hex ssz supplied".to_string())? + .ok_or("No hex ssz supplied")? .to_string(); if hex.starts_with("0x") { diff --git a/lcli/src/parse_ssz.rs b/lcli/src/parse_ssz.rs index a6359c8b0..aba94beb0 100644 --- a/lcli/src/parse_ssz.rs +++ b/lcli/src/parse_ssz.rs @@ -9,7 +9,7 @@ use types::{EthSpec, SignedBeaconBlock}; pub fn run(matches: &ArgMatches) -> Result<(), String> { let type_str = matches .value_of("type") - .ok_or_else(|| "No type supplied".to_string())?; + .ok_or("No type supplied")?; let path = parse_path(matches, "path")?; info!("Type: {:?}", type_str); diff --git a/lcli/src/skip_slots.rs b/lcli/src/skip_slots.rs index d048cdea3..7ca0891dc 100644 --- a/lcli/src/skip_slots.rs +++ b/lcli/src/skip_slots.rs @@ -10,19 +10,19 @@ use types::{BeaconState, EthSpec}; pub fn run(matches: &ArgMatches) -> Result<(), String> { let pre_state_path = matches .value_of("pre-state") - .ok_or_else(|| "No pre-state file supplied".to_string())? + .ok_or("No pre-state file supplied")? .parse::() .map_err(|e| format!("Failed to parse pre-state path: {}", e))?; let slots = matches .value_of("slots") - .ok_or_else(|| "No slots supplied".to_string())? + .ok_or("No slots supplied")? .parse::() .map_err(|e| format!("Failed to parse slots: {}", e))?; let output_path = matches .value_of("output") - .ok_or_else(|| "No output file supplied".to_string())? + .ok_or("No output file supplied")? .parse::() .map_err(|e| format!("Failed to parse output path: {}", e))?; diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index 073cf83d4..e6845cb08 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -9,19 +9,19 @@ use types::{BeaconState, EthSpec, SignedBeaconBlock}; pub fn run_transition_blocks(matches: &ArgMatches) -> Result<(), String> { let pre_state_path = matches .value_of("pre-state") - .ok_or_else(|| "No pre-state file supplied".to_string())? + .ok_or("No pre-state file supplied")? .parse::() .map_err(|e| format!("Failed to parse pre-state path: {}", e))?; let block_path = matches .value_of("block") - .ok_or_else(|| "No block file supplied".to_string())? + .ok_or("No block file supplied")? .parse::() .map_err(|e| format!("Failed to parse block path: {}", e))?; let output_path = matches .value_of("output") - .ok_or_else(|| "No output file supplied".to_string())? + .ok_or("No output file supplied")? .parse::() .map_err(|e| format!("Failed to parse output path: {}", e))?; diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 64f2a916e..f892d2b61 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -161,9 +161,9 @@ impl EnvironmentBuilder { .as_secs(); let file_stem = path .file_stem() - .ok_or_else(|| "Invalid file name".to_string())? + .ok_or("Invalid file name")? 
.to_str() - .ok_or_else(|| "Failed to create str from filename".to_string())?; + .ok_or("Failed to create str from filename")?; let file_ext = path.extension().unwrap_or_else(|| OsStr::new("")); let backup_name = format!("{}_backup_{}", file_stem, timestamp); let backup_path = path.with_file_name(backup_name).with_extension(file_ext); @@ -229,7 +229,7 @@ impl EnvironmentBuilder { self.eth2_config.spec = eth2_testnet_config .yaml_config .as_ref() - .ok_or_else(|| "The testnet directory must contain a spec config".to_string())? + .ok_or("The testnet directory must contain a spec config")? .apply_to_chain_spec::(&self.eth2_config.spec) .ok_or_else(|| { format!( @@ -262,14 +262,12 @@ impl EnvironmentBuilder { Ok(Environment { runtime: self .runtime - .ok_or_else(|| "Cannot build environment without runtime".to_string())?, + .ok_or("Cannot build environment without runtime")?, signal_tx, signal_rx: Some(signal_rx), signal: Some(signal), exit, - log: self - .log - .ok_or_else(|| "Cannot build environment without log".to_string())?, + log: self.log.ok_or("Cannot build environment without log")?, eth_spec_instance: self.eth_spec_instance, eth2_config: self.eth2_config, testnet: self.testnet, diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index d8148f2e2..251e25aea 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -200,7 +200,7 @@ fn run( let debug_level = matches .value_of("debug-level") - .ok_or_else(|| "Expected --debug-level flag".to_string())?; + .ok_or("Expected --debug-level flag")?; let log_format = matches.value_of("log-format"); diff --git a/slasher/src/array.rs b/slasher/src/array.rs index 6a19dbdc3..eff7518b6 100644 --- a/slasher/src/array.rs +++ b/slasher/src/array.rs @@ -47,7 +47,7 @@ impl Chunk { self.data .get(cell_index) .map(|distance| epoch + u64::from(*distance)) - .ok_or_else(|| Error::ChunkIndexOutOfBounds(cell_index)) + .ok_or(Error::ChunkIndexOutOfBounds(cell_index)) } pub fn set_target( @@ -75,7 +75,7 @@ impl Chunk { let cell = self .data .get_mut(cell_index) - .ok_or_else(|| Error::ChunkIndexOutOfBounds(cell_index))?; + .ok_or(Error::ChunkIndexOutOfBounds(cell_index))?; *cell = target_distance; Ok(()) } diff --git a/slasher/src/database.rs b/slasher/src/database.rs index 26eac835f..cc95a90c5 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -342,7 +342,7 @@ impl SlasherDB { let bytes = txn .get(self.indexed_attestation_db, &key) .optional()? - .ok_or_else(|| Error::MissingIndexedAttestation { + .ok_or(Error::MissingIndexedAttestation { root: indexed_attestation_hash, })?; Ok(IndexedAttestation::from_ssz_bytes(bytes)?) @@ -400,7 +400,7 @@ impl SlasherDB { ) -> Result, Error> { let record = self .get_attester_record(txn, validator_index, target_epoch)? - .ok_or_else(|| Error::MissingAttesterRecord { + .ok_or(Error::MissingAttesterRecord { validator_index, target_epoch, })?; @@ -512,7 +512,7 @@ impl SlasherDB { let key_bytes = cursor .get(None, None, lmdb_sys::MDB_GET_CURRENT)? .0 - .ok_or_else(|| Error::MissingProposerKey)?; + .ok_or(Error::MissingProposerKey)?; let (slot, _) = ProposerKey::parse(key_bytes)?; if slot < min_slot { @@ -558,7 +558,7 @@ impl SlasherDB { let key_bytes = cursor .get(None, None, lmdb_sys::MDB_GET_CURRENT)? .0 - .ok_or_else(|| Error::MissingAttesterKey)?; + .ok_or(Error::MissingAttesterKey)?; let (target_epoch, _) = AttesterKey::parse(key_bytes)?; @@ -605,7 +605,7 @@ impl SlasherDB { let key_bytes = cursor .get(None, None, lmdb_sys::MDB_GET_CURRENT)? 
.0 - .ok_or_else(|| Error::MissingAttesterKey)?; + .ok_or(Error::MissingAttesterKey)?; let (target_epoch, _) = IndexedAttestationKey::parse(key_bytes)?; diff --git a/testing/eth1_test_rig/src/ganache.rs b/testing/eth1_test_rig/src/ganache.rs index d044e5131..bd8b4c519 100644 --- a/testing/eth1_test_rig/src/ganache.rs +++ b/testing/eth1_test_rig/src/ganache.rs @@ -35,7 +35,7 @@ impl GanacheInstance { ) -> Result { let stdout = child .stdout - .ok_or_else(|| "Unable to get stdout for ganache child process")?; + .ok_or("Unable to get stdout for ganache child process")?; let start = Instant::now(); let mut reader = BufReader::new(stdout); diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index 4b1b0bec4..20e9c8ba9 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -64,7 +64,7 @@ impl LocalBeaconNode { let listen_addr = self .client .http_api_listen_addr() - .ok_or_else(|| "A remote beacon node must have a http server".to_string())?; + .ok_or("A remote beacon node must have a http server")?; let beacon_node_url: Url = format!("http://{}:{}", listen_addr.ip(), listen_addr.port()) .parse() diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs index e9931c164..794b59e38 100644 --- a/validator_client/src/attestation_service.rs +++ b/validator_client/src/attestation_service.rs @@ -69,19 +69,19 @@ impl AttestationServiceBuilder { inner: Arc::new(Inner { duties_service: self .duties_service - .ok_or_else(|| "Cannot build AttestationService without duties_service")?, + .ok_or("Cannot build AttestationService without duties_service")?, validator_store: self .validator_store - .ok_or_else(|| "Cannot build AttestationService without validator_store")?, + .ok_or("Cannot build AttestationService without validator_store")?, slot_clock: self .slot_clock - .ok_or_else(|| "Cannot build AttestationService without slot_clock")?, + .ok_or("Cannot build AttestationService without slot_clock")?, beacon_node: self .beacon_node - .ok_or_else(|| "Cannot build AttestationService without beacon_node")?, + .ok_or("Cannot build AttestationService without beacon_node")?, context: self .context - .ok_or_else(|| "Cannot build AttestationService without runtime_context")?, + .ok_or("Cannot build AttestationService without runtime_context")?, }), }) } @@ -130,7 +130,7 @@ impl AttestationService { let duration_to_next_slot = self .slot_clock .duration_to_next_slot() - .ok_or_else(|| "Unable to determine duration to next slot".to_string())?; + .ok_or("Unable to determine duration to next slot")?; info!( log, @@ -174,14 +174,11 @@ impl AttestationService { /// For each each required attestation, spawn a new task that downloads, signs and uploads the /// attestation to the beacon node. fn spawn_attestation_tasks(&self, slot_duration: Duration) -> Result<(), String> { - let slot = self - .slot_clock - .now() - .ok_or_else(|| "Failed to read slot clock".to_string())?; + let slot = self.slot_clock.now().ok_or("Failed to read slot clock")?; let duration_to_next_slot = self .slot_clock .duration_to_next_slot() - .ok_or_else(|| "Unable to determine duration to next slot".to_string())?; + .ok_or("Unable to determine duration to next slot")?; // If a validator needs to publish an aggregate attestation, they must do so at 2/3 // through the slot. 
This delay triggers at this time @@ -336,7 +333,7 @@ impl AttestationService { let current_epoch = self .slot_clock .now() - .ok_or_else(|| "Unable to determine current slot from clock".to_string())? + .ok_or("Unable to determine current slot from clock")? .epoch(E::slots_per_epoch()); let attestation_data = self diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 324d34ba6..d34947a13 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -59,16 +59,16 @@ impl BlockServiceBuilder { inner: Arc::new(Inner { validator_store: self .validator_store - .ok_or_else(|| "Cannot build BlockService without validator_store")?, + .ok_or("Cannot build BlockService without validator_store")?, slot_clock: self .slot_clock - .ok_or_else(|| "Cannot build BlockService without slot_clock")?, + .ok_or("Cannot build BlockService without slot_clock")?, beacon_node: self .beacon_node - .ok_or_else(|| "Cannot build BlockService without beacon_node")?, + .ok_or("Cannot build BlockService without beacon_node")?, context: self .context - .ok_or_else(|| "Cannot build BlockService without runtime_context")?, + .ok_or("Cannot build BlockService without runtime_context")?, graffiti: self.graffiti, }), }) @@ -217,12 +217,12 @@ impl BlockService { let current_slot = self .slot_clock .now() - .ok_or_else(|| "Unable to determine current slot from clock".to_string())?; + .ok_or("Unable to determine current slot from clock")?; let randao_reveal = self .validator_store .randao_reveal(&validator_pubkey, slot.epoch(E::slots_per_epoch())) - .ok_or_else(|| "Unable to produce randao reveal".to_string())?; + .ok_or("Unable to produce randao reveal")?; let block = self .beacon_node @@ -234,7 +234,7 @@ impl BlockService { let signed_block = self .validator_store .sign_block(&validator_pubkey, block, current_slot) - .ok_or_else(|| "Unable to sign block".to_string())?; + .ok_or("Unable to sign block")?; self.beacon_node .post_beacon_blocks(&signed_block) diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index 781a6f5fb..52d47d3fe 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -57,7 +57,7 @@ impl DutyAndProof { let selection_proof = validator_store .produce_selection_proof(&self.duty.validator_pubkey, slot) - .ok_or_else(|| "Failed to produce selection proof".to_string())?; + .ok_or("Failed to produce selection proof")?; self.selection_proof = selection_proof .is_aggregator(committee_length, spec) @@ -375,16 +375,16 @@ impl DutiesServiceBuilder { store: Arc::new(DutiesStore::default()), validator_store: self .validator_store - .ok_or_else(|| "Cannot build DutiesService without validator_store")?, + .ok_or("Cannot build DutiesService without validator_store")?, slot_clock: self .slot_clock - .ok_or_else(|| "Cannot build DutiesService without slot_clock")?, + .ok_or("Cannot build DutiesService without slot_clock")?, beacon_node: self .beacon_node - .ok_or_else(|| "Cannot build DutiesService without beacon_node")?, + .ok_or("Cannot build DutiesService without beacon_node")?, context: self .context - .ok_or_else(|| "Cannot build DutiesService without runtime_context")?, + .ok_or("Cannot build DutiesService without runtime_context")?, allow_unsynced_beacon_node: self.allow_unsynced_beacon_node, }), }) @@ -466,7 +466,7 @@ impl DutiesService { let duration_to_next_slot = self .slot_clock .duration_to_next_slot() - .ok_or_else(|| "Unable to determine duration to next 
slot".to_string())?; + .ok_or("Unable to determine duration to next slot")?; let mut interval = { let slot_duration = Duration::from_millis(spec.milliseconds_per_slot); diff --git a/validator_client/src/fork_service.rs b/validator_client/src/fork_service.rs index 0fca6fb64..27149892a 100644 --- a/validator_client/src/fork_service.rs +++ b/validator_client/src/fork_service.rs @@ -54,13 +54,13 @@ impl ForkServiceBuilder { fork: RwLock::new(self.fork), slot_clock: self .slot_clock - .ok_or_else(|| "Cannot build ForkService without slot_clock")?, + .ok_or("Cannot build ForkService without slot_clock")?, beacon_node: self .beacon_node - .ok_or_else(|| "Cannot build ForkService without beacon_node")?, + .ok_or("Cannot build ForkService without beacon_node")?, log: self .log - .ok_or_else(|| "Cannot build ForkService without logger")? + .ok_or("Cannot build ForkService without logger")? .clone(), }), }) @@ -131,7 +131,7 @@ impl ForkService { let duration_to_next_epoch = self .slot_clock .duration_to_next_epoch(E::slots_per_epoch()) - .ok_or_else(|| "Unable to determine duration to next epoch".to_string())?; + .ok_or("Unable to determine duration to next epoch")?; let mut interval = { let slot_duration = Duration::from_millis(spec.milliseconds_per_slot); diff --git a/validator_client/src/http_api/api_secret.rs b/validator_client/src/http_api/api_secret.rs index a3aa5f0b9..7f2a81d19 100644 --- a/validator_client/src/http_api/api_secret.rs +++ b/validator_client/src/http_api/api_secret.rs @@ -98,8 +98,8 @@ impl ApiSecret { .and_then(|bytes| { let hex = String::from_utf8(bytes).map_err(|_| format!("{} is not utf8", SK_FILENAME))?; - if hex.starts_with(PK_PREFIX) { - serde_utils::hex::decode(&hex[PK_PREFIX.len()..]) + if let Some(stripped) = hex.strip_prefix(PK_PREFIX) { + serde_utils::hex::decode(stripped) .map_err(|_| format!("{} should be 0x-prefixed hex", SK_FILENAME)) } else { Err(format!("unable to parse {}", SK_FILENAME)) diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index 47e8c6763..f36b2b117 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -490,8 +490,8 @@ impl InitializedValidators { // Create a lock file for the cache let key_cache_path = KeyCache::cache_file_path(&self.validators_dir); - let cache_lockfile_path = get_lockfile_path(&key_cache_path) - .ok_or_else(|| Error::BadKeyCachePath(key_cache_path))?; + let cache_lockfile_path = + get_lockfile_path(&key_cache_path).ok_or(Error::BadKeyCachePath(key_cache_path))?; let _cache_lockfile = Lockfile::new(cache_lockfile_path)?; let cache = diff --git a/validator_client/src/notifier.rs b/validator_client/src/notifier.rs index c997979b9..944537d17 100644 --- a/validator_client/src/notifier.rs +++ b/validator_client/src/notifier.rs @@ -16,7 +16,7 @@ pub fn spawn_notifier(client: &ProductionValidatorClient) -> Resu let duration_to_next_slot = duties_service .slot_clock .duration_to_next_slot() - .ok_or_else(|| "slot_notifier unable to determine time to next slot")?; + .ok_or("slot_notifier unable to determine time to next slot")?; // Run the notifier half way through each slot. let start_instant = Instant::now() + duration_to_next_slot + (slot_duration / 2);