# Rust 1.54.0 lints (#2483)
## Issue Addressed

N/A

## Proposed Changes

- Removed a bunch of unnecessary references
- Updated `Error::VariantError` to `Error::Variant`
- There were additional enum variant lints that I ignored, because I thought our variant names were fine
- Removed `MonitoredValidator`'s `pubkey` field, because I couldn't find it used anywhere. It looks like we just use the string version of the pubkey (the `id` field) if there is no index

## Additional Info

Co-authored-by: realbigsean <seananderson33@gmail.com>
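The diff below applies the same few mechanical patterns across the codebase. A minimal, self-contained sketch of the two main ones — redundant `&` borrows of values that are already references (the `clippy::needless_borrow`-style lint) and enum variants that repeat the enum's name (`clippy::enum_variant_names`) — using hypothetical names, not actual Lighthouse code:

```rust
/// `enum_variant_names` fires when every variant repeats the enum's name,
/// e.g. `Error::LogParseError`; dropping the redundant suffix gives
/// `Error::LogParse`. (Hypothetical enum, mirroring the renames in this PR.)
#[derive(Debug)]
#[allow(dead_code)]
enum Error {
    LogParse(String),    // was: LogParseError(String)
    DepositTree(String), // was: DepositTreeError(String)
}

/// `needless_borrow` fires on `&x` where `x` is already a reference and the
/// callee only needs `&T`; e.g. writing `from_str_radix(&input, 16)` below
/// would borrow a `&str` a second time for no reason.
fn parse_block_number(input: &str) -> Result<u64, Error> {
    u64::from_str_radix(input, 16) // not `&input`
        .map_err(|e| Error::LogParse(format!("{:?}", e)))
}

fn main() {
    println!("{:?}", parse_block_number("ff")); // Ok(255)
    println!("{:?}", parse_block_number("zz")); // Err(LogParse(...))
}
```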
This commit is contained in: parent 8efd9fc324 · commit 303deb9969
@ -19,7 +19,7 @@ pub fn read_mnemonic_from_cli(
|
||||
.map_err(|e| format!("Unable to read {:?}: {:?}", path, e))
|
||||
.and_then(|bytes| {
|
||||
let bytes_no_newlines: PlainText = strip_off_newlines(bytes).into();
|
||||
let phrase = from_utf8(&bytes_no_newlines.as_ref())
|
||||
let phrase = from_utf8(bytes_no_newlines.as_ref())
|
||||
.map_err(|e| format!("Unable to derive mnemonic: {:?}", e))?;
|
||||
Mnemonic::from_phrase(phrase, Language::English).map_err(|e| {
|
||||
format!(
|
||||
|
@ -51,7 +51,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
.long(BEACON_SERVER_FLAG)
|
||||
.value_name("NETWORK_ADDRESS")
|
||||
.help("Address to a beacon node HTTP API")
|
||||
.default_value(&DEFAULT_BEACON_NODE)
|
||||
.default_value(DEFAULT_BEACON_NODE)
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
|
@ -87,8 +87,8 @@ pub fn cli_run<T: EthSpec>(
|
||||
|
||||
match matches.subcommand() {
|
||||
(IMPORT_CMD, Some(matches)) => {
|
||||
let import_filename: PathBuf = clap_utils::parse_required(&matches, IMPORT_FILE_ARG)?;
|
||||
let minify: bool = clap_utils::parse_required(&matches, MINIFY_FLAG)?;
|
||||
let import_filename: PathBuf = clap_utils::parse_required(matches, IMPORT_FILE_ARG)?;
|
||||
let minify: bool = clap_utils::parse_required(matches, MINIFY_FLAG)?;
|
||||
let import_file = File::open(&import_filename).map_err(|e| {
|
||||
format!(
|
||||
"Unable to open import file at {}: {:?}",
|
||||
@ -199,8 +199,8 @@ pub fn cli_run<T: EthSpec>(
|
||||
Ok(())
|
||||
}
|
||||
(EXPORT_CMD, Some(matches)) => {
|
||||
let export_filename: PathBuf = clap_utils::parse_required(&matches, EXPORT_FILE_ARG)?;
|
||||
let minify: bool = clap_utils::parse_required(&matches, MINIFY_FLAG)?;
|
||||
let export_filename: PathBuf = clap_utils::parse_required(matches, EXPORT_FILE_ARG)?;
|
||||
let minify: bool = clap_utils::parse_required(matches, MINIFY_FLAG)?;
|
||||
|
||||
if !slashing_protection_db_path.exists() {
|
||||
return Err(format!(
|
||||
|
@ -114,7 +114,7 @@ pub fn cli_run(matches: &ArgMatches, wallet_base_dir: PathBuf) -> Result<(), Str
|
||||
Language::English,
|
||||
);
|
||||
|
||||
let wallet = create_wallet_from_mnemonic(matches, &wallet_base_dir.as_path(), &mnemonic)?;
|
||||
let wallet = create_wallet_from_mnemonic(matches, wallet_base_dir.as_path(), &mnemonic)?;
|
||||
|
||||
if let Some(path) = mnemonic_output_path {
|
||||
create_with_600_perms(&path, mnemonic.phrase().as_bytes())
|
||||
@ -168,7 +168,7 @@ pub fn create_wallet_from_mnemonic(
|
||||
if !path.exists() {
|
||||
// To prevent users from accidentally supplying their password to the PASSWORD_FLAG and
|
||||
// create a file with that name, we require that the password has a .pass suffix.
|
||||
if path.extension() != Some(&OsStr::new("pass")) {
|
||||
if path.extension() != Some(OsStr::new("pass")) {
|
||||
return Err(format!(
|
||||
"Only creates a password file if that file ends in .pass: {:?}",
|
||||
path
|
||||
@ -189,7 +189,7 @@ pub fn create_wallet_from_mnemonic(
|
||||
.create_wallet(
|
||||
wallet_name,
|
||||
wallet_type,
|
||||
&mnemonic,
|
||||
mnemonic,
|
||||
wallet_password.as_bytes(),
|
||||
)
|
||||
.map_err(|e| format!("Unable to create wallet: {:?}", e))?;
|
||||
|
@ -71,7 +71,7 @@ pub fn cli_run(matches: &ArgMatches, wallet_base_dir: PathBuf) -> Result<(), Str
|
||||
|
||||
let mnemonic = read_mnemonic_from_cli(mnemonic_path, stdin_inputs)?;
|
||||
|
||||
let wallet = create_wallet_from_mnemonic(matches, &wallet_base_dir.as_path(), &mnemonic)
|
||||
let wallet = create_wallet_from_mnemonic(matches, wallet_base_dir.as_path(), &mnemonic)
|
||||
.map_err(|e| format!("Unable to create wallet: {:?}", e))?;
|
||||
|
||||
println!("Your wallet has been successfully recovered.");
|
||||
|
@ -448,7 +448,7 @@ impl<T: BeaconChainTypes> VerifiedAggregatedAttestation<T> {
|
||||
//
|
||||
// Attestations must be for a known block. If the block is unknown, we simply drop the
|
||||
// attestation and do not delay consideration for later.
|
||||
let head_block = verify_head_block_is_known(chain, &attestation, None)?;
|
||||
let head_block = verify_head_block_is_known(chain, attestation, None)?;
|
||||
|
||||
// Check the attestation target root is consistent with the head root.
|
||||
//
|
||||
@ -457,7 +457,7 @@ impl<T: BeaconChainTypes> VerifiedAggregatedAttestation<T> {
|
||||
//
|
||||
// Whilst this attestation *technically* could be used to add value to a block, it is
|
||||
// invalid in the spirit of the protocol. Here we choose safety over profit.
|
||||
verify_attestation_target_root::<T::EthSpec>(&head_block, &attestation)?;
|
||||
verify_attestation_target_root::<T::EthSpec>(&head_block, attestation)?;
|
||||
|
||||
// Ensure that the attestation has participants.
|
||||
if attestation.aggregation_bits.is_zero() {
|
||||
@ -628,7 +628,7 @@ impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> {
|
||||
// MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance).
|
||||
//
|
||||
// We do not queue future attestations for later processing.
|
||||
verify_propagation_slot_range(chain, &attestation)?;
|
||||
verify_propagation_slot_range(chain, attestation)?;
|
||||
|
||||
// Check to ensure that the attestation is "unaggregated". I.e., it has exactly one
|
||||
// aggregation bit set.
|
||||
@ -642,10 +642,10 @@ impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> {
|
||||
//
|
||||
// Enforce a maximum skip distance for unaggregated attestations.
|
||||
let head_block =
|
||||
verify_head_block_is_known(chain, &attestation, chain.config.import_max_skip_slots)?;
|
||||
verify_head_block_is_known(chain, attestation, chain.config.import_max_skip_slots)?;
|
||||
|
||||
// Check the attestation target root is consistent with the head root.
|
||||
verify_attestation_target_root::<T::EthSpec>(&head_block, &attestation)?;
|
||||
verify_attestation_target_root::<T::EthSpec>(&head_block, attestation)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@ -927,7 +927,7 @@ pub fn verify_attestation_signature<T: BeaconChainTypes>(
|
||||
let signature_set = indexed_attestation_signature_set_from_pubkeys(
|
||||
|validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed),
|
||||
&indexed_attestation.signature,
|
||||
&indexed_attestation,
|
||||
indexed_attestation,
|
||||
&fork,
|
||||
chain.genesis_validators_root,
|
||||
&chain.spec,
|
||||
@ -1031,7 +1031,7 @@ pub fn verify_signed_aggregate_signatures<T: BeaconChainTypes>(
|
||||
let signature_sets = vec![
|
||||
signed_aggregate_selection_proof_signature_set(
|
||||
|validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed),
|
||||
&signed_aggregate,
|
||||
signed_aggregate,
|
||||
&fork,
|
||||
chain.genesis_validators_root,
|
||||
&chain.spec,
|
||||
@ -1039,7 +1039,7 @@ pub fn verify_signed_aggregate_signatures<T: BeaconChainTypes>(
|
||||
.map_err(BeaconChainError::SignatureSetError)?,
|
||||
signed_aggregate_signature_set(
|
||||
|validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed),
|
||||
&signed_aggregate,
|
||||
signed_aggregate,
|
||||
&fork,
|
||||
chain.genesis_validators_root,
|
||||
&chain.spec,
|
||||
@ -1048,7 +1048,7 @@ pub fn verify_signed_aggregate_signatures<T: BeaconChainTypes>(
|
||||
indexed_attestation_signature_set_from_pubkeys(
|
||||
|validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed),
|
||||
&indexed_attestation.signature,
|
||||
&indexed_attestation,
|
||||
indexed_attestation,
|
||||
&fork,
|
||||
chain.genesis_validators_root,
|
||||
&chain.spec,
|
||||
@ -1069,7 +1069,7 @@ fn obtain_indexed_attestation_and_committees_per_slot<T: BeaconChainTypes>(
|
||||
attestation: &Attestation<T::EthSpec>,
|
||||
) -> Result<(IndexedAttestation<T::EthSpec>, CommitteesPerSlot), Error> {
|
||||
map_attestation_committee(chain, attestation, |(committee, committees_per_slot)| {
|
||||
get_indexed_attestation(committee.committee, &attestation)
|
||||
get_indexed_attestation(committee.committee, attestation)
|
||||
.map(|attestation| (attestation, committees_per_slot))
|
||||
.map_err(Error::Invalid)
|
||||
})
|
||||
|
@ -1372,7 +1372,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
attester_cache_key,
|
||||
request_slot,
|
||||
request_index,
|
||||
&self,
|
||||
self,
|
||||
)?
|
||||
};
|
||||
drop(cache_timer);
|
||||
@ -1729,7 +1729,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
self.shuffling_is_compatible(
|
||||
&att.data.beacon_block_root,
|
||||
att.data.target.epoch,
|
||||
&state,
|
||||
state,
|
||||
)
|
||||
})
|
||||
}
|
||||
@ -2182,8 +2182,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
for attestation in signed_block.message().body().attestations() {
|
||||
let committee =
|
||||
state.get_beacon_committee(attestation.data.slot, attestation.data.index)?;
|
||||
let indexed_attestation =
|
||||
get_indexed_attestation(&committee.committee, attestation)
|
||||
let indexed_attestation = get_indexed_attestation(committee.committee, attestation)
|
||||
.map_err(|e| BlockError::BeaconChainError(e.into()))?;
|
||||
slasher.accept_attestation(indexed_attestation);
|
||||
}
|
||||
@ -3267,7 +3266,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
|
||||
metrics::stop_timer(committee_building_timer);
|
||||
|
||||
map_fn(&committee_cache, shuffling_decision_block)
|
||||
map_fn(committee_cache, shuffling_decision_block)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -278,6 +278,7 @@ impl<T: EthSpec> From<DBError> for BlockError<T> {
|
||||
}
|
||||
|
||||
/// Information about invalid blocks which might still be slashable despite being invalid.
|
||||
#[allow(clippy::enum_variant_names)]
|
||||
pub enum BlockSlashInfo<TErr> {
|
||||
/// The block is invalid, but its proposer signature wasn't checked.
|
||||
SignatureNotChecked(SignedBeaconBlockHeader, TErr),
|
||||
@ -837,7 +838,7 @@ impl<T: BeaconChainTypes> IntoFullyVerifiedBlock<T> for SignedBeaconBlock<T::Eth
|
||||
}
|
||||
|
||||
fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
|
||||
&self
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
@ -996,7 +997,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> {
|
||||
for (i, summary) in summaries.iter().enumerate() {
|
||||
let epoch = state.current_epoch() - Epoch::from(summaries.len() - i);
|
||||
if let Err(e) =
|
||||
validator_monitor.process_validator_statuses(epoch, &summary, &chain.spec)
|
||||
validator_monitor.process_validator_statuses(epoch, summary, &chain.spec)
|
||||
{
|
||||
error!(
|
||||
chain.log,
|
||||
@ -1204,7 +1205,7 @@ pub fn check_block_relevancy<T: BeaconChainTypes>(
|
||||
// Do not process a block from a finalized slot.
|
||||
check_block_against_finalized_slot(block, chain)?;
|
||||
|
||||
let block_root = block_root.unwrap_or_else(|| get_block_root(&signed_block));
|
||||
let block_root = block_root.unwrap_or_else(|| get_block_root(signed_block));
|
||||
|
||||
// Check if the block is already known. We know it is post-finalization, so it is
|
||||
// sufficient to check the fork choice.
|
||||
|
@ -666,7 +666,7 @@ fn genesis_block<T: EthSpec>(
|
||||
genesis_state: &mut BeaconState<T>,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<SignedBeaconBlock<T>, String> {
|
||||
let mut genesis_block = BeaconBlock::empty(&spec);
|
||||
let mut genesis_block = BeaconBlock::empty(spec);
|
||||
*genesis_block.state_root_mut() = genesis_state
|
||||
.update_tree_hash_cache()
|
||||
.map_err(|e| format!("Error hashing genesis state: {:?}", e))?;
|
||||
|
@ -762,7 +762,7 @@ mod test {
|
||||
"test should not use dummy backend"
|
||||
);
|
||||
|
||||
let mut state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), &spec);
|
||||
let mut state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), spec);
|
||||
*state.eth1_deposit_index_mut() = 0;
|
||||
state.eth1_data_mut().deposit_count = 0;
|
||||
|
||||
@ -815,7 +815,7 @@ mod test {
|
||||
"cache should store all logs"
|
||||
);
|
||||
|
||||
let mut state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), &spec);
|
||||
let mut state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), spec);
|
||||
*state.eth1_deposit_index_mut() = 0;
|
||||
state.eth1_data_mut().deposit_count = 0;
|
||||
|
||||
@ -877,10 +877,10 @@ mod test {
|
||||
"test should not use dummy backend"
|
||||
);
|
||||
|
||||
let state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), &spec);
|
||||
let state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), spec);
|
||||
|
||||
let a = eth1_chain
|
||||
.eth1_data_for_block_production(&state, &spec)
|
||||
.eth1_data_for_block_production(&state, spec)
|
||||
.expect("should produce default eth1 data vote");
|
||||
assert_eq!(
|
||||
a,
|
||||
@ -902,11 +902,11 @@ mod test {
|
||||
"test should not use dummy backend"
|
||||
);
|
||||
|
||||
let mut state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), &spec);
|
||||
let mut state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), spec);
|
||||
|
||||
*state.slot_mut() = Slot::from(slots_per_eth1_voting_period * 10);
|
||||
let follow_distance_seconds = eth1_follow_distance * spec.seconds_per_eth1_block;
|
||||
let voting_period_start = get_voting_period_start_seconds(&state, &spec);
|
||||
let voting_period_start = get_voting_period_start_seconds(&state, spec);
|
||||
let start_eth1_block = voting_period_start - follow_distance_seconds * 2;
|
||||
let end_eth1_block = voting_period_start - follow_distance_seconds;
|
||||
|
||||
@ -926,7 +926,7 @@ mod test {
|
||||
});
|
||||
|
||||
let vote = eth1_chain
|
||||
.eth1_data_for_block_production(&state, &spec)
|
||||
.eth1_data_for_block_production(&state, spec)
|
||||
.expect("should produce default eth1 data vote");
|
||||
|
||||
assert_eq!(
|
||||
@ -956,7 +956,7 @@ mod test {
|
||||
get_votes_to_consider(
|
||||
blocks.iter(),
|
||||
get_voting_period_start_seconds(&state, spec),
|
||||
&spec,
|
||||
spec,
|
||||
),
|
||||
HashMap::new()
|
||||
);
|
||||
|
@ -574,7 +574,7 @@ mod tests {
|
||||
}
|
||||
|
||||
fn key_from_sync_contribution(a: &SyncCommitteeContribution<E>) -> SyncContributionData {
|
||||
SyncContributionData::from_contribution(&a)
|
||||
SyncContributionData::from_contribution(a)
|
||||
}
|
||||
|
||||
macro_rules! test_suite {
|
||||
|
@ -573,7 +573,7 @@ pub fn verify_signed_aggregate_signatures<T: BeaconChainTypes>(
|
||||
let signature_sets = vec![
|
||||
signed_sync_aggregate_selection_proof_signature_set(
|
||||
|validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed),
|
||||
&signed_aggregate,
|
||||
signed_aggregate,
|
||||
&fork,
|
||||
chain.genesis_validators_root,
|
||||
&chain.spec,
|
||||
@ -581,7 +581,7 @@ pub fn verify_signed_aggregate_signatures<T: BeaconChainTypes>(
|
||||
.map_err(BeaconChainError::SignatureSetError)?,
|
||||
signed_sync_aggregate_signature_set(
|
||||
|validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed),
|
||||
&signed_aggregate,
|
||||
signed_aggregate,
|
||||
&fork,
|
||||
chain.genesis_validators_root,
|
||||
&chain.spec,
|
||||
|
@ -698,8 +698,8 @@ where
|
||||
slot: Slot,
|
||||
) -> HarnessAttestations<E> {
|
||||
let unaggregated_attestations = self.make_unaggregated_attestations(
|
||||
&attesting_validators,
|
||||
&state,
|
||||
attesting_validators,
|
||||
state,
|
||||
state_root,
|
||||
block_hash,
|
||||
slot,
|
||||
@ -785,7 +785,7 @@ where
|
||||
relative_sync_committee: RelativeSyncCommittee,
|
||||
) -> HarnessSyncContributions<E> {
|
||||
let sync_messages =
|
||||
self.make_sync_committee_messages(&state, block_hash, slot, relative_sync_committee);
|
||||
self.make_sync_committee_messages(state, block_hash, slot, relative_sync_committee);
|
||||
|
||||
let sync_contributions: Vec<Option<SignedContributionAndProof<E>>> = sync_messages
|
||||
.iter()
|
||||
@ -825,7 +825,7 @@ where
|
||||
})?;
|
||||
|
||||
let default = SyncCommitteeContribution::from_message(
|
||||
&sync_message,
|
||||
sync_message,
|
||||
subnet_id as u64,
|
||||
*subcommittee_position,
|
||||
)
|
||||
@ -989,7 +989,7 @@ where
|
||||
let mut signed_block_headers = vec![block_header_1, block_header_2]
|
||||
.into_iter()
|
||||
.map(|block_header| {
|
||||
block_header.sign::<E>(&sk, &fork, genesis_validators_root, &self.chain.spec)
|
||||
block_header.sign::<E>(sk, &fork, genesis_validators_root, &self.chain.spec)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
@ -1199,7 +1199,7 @@ where
|
||||
validators: &[usize],
|
||||
) {
|
||||
let attestations =
|
||||
self.make_attestations(validators, &state, state_root, block_hash, block.slot());
|
||||
self.make_attestations(validators, state, state_root, block_hash, block.slot());
|
||||
self.process_attestations(attestations);
|
||||
}
|
||||
|
||||
|
@ -126,8 +126,6 @@ type SummaryMap = HashMap<Epoch, EpochSummary>;
|
||||
struct MonitoredValidator {
|
||||
/// A human-readable identifier for the validator.
|
||||
pub id: String,
|
||||
/// The validator voting pubkey.
|
||||
pub pubkey: PublicKeyBytes,
|
||||
/// The validator index in the state.
|
||||
pub index: Option<u64>,
|
||||
/// A history of the validator over time.
|
||||
@ -140,7 +138,6 @@ impl MonitoredValidator {
|
||||
id: index
|
||||
.map(|i| i.to_string())
|
||||
.unwrap_or_else(|| pubkey.to_string()),
|
||||
pubkey,
|
||||
index,
|
||||
summaries: <_>::default(),
|
||||
}
|
||||
|
@ -12,7 +12,7 @@ pub enum Error {
|
||||
/// Logs have to be added with monotonically-increasing block numbers.
|
||||
NonConsecutive { log_index: u64, expected: usize },
|
||||
/// The eth1 event log data was unable to be parsed.
|
||||
LogParseError(String),
|
||||
LogParse(String),
|
||||
/// There are insufficient deposits in the cache to fulfil the request.
|
||||
InsufficientDeposits {
|
||||
known_deposits: usize,
|
||||
@ -26,9 +26,9 @@ pub enum Error {
|
||||
/// E.g., you cannot request deposit 10 when the deposit count is 9.
|
||||
DepositCountInvalid { deposit_count: u64, range_end: u64 },
|
||||
/// Error with the merkle tree for deposits.
|
||||
DepositTreeError(merkle_proof::MerkleTreeError),
|
||||
DepositTree(merkle_proof::MerkleTreeError),
|
||||
/// An unexpected condition was encountered.
|
||||
InternalError(String),
|
||||
Internal(String),
|
||||
}
|
||||
|
||||
#[derive(Encode, Decode, Clone)]
|
||||
@ -160,7 +160,7 @@ impl DepositCache {
|
||||
self.logs.push(log);
|
||||
self.deposit_tree
|
||||
.push_leaf(deposit)
|
||||
.map_err(Error::DepositTreeError)?;
|
||||
.map_err(Error::DepositTree)?;
|
||||
self.deposit_roots.push(self.deposit_tree.root());
|
||||
Ok(DepositCacheInsertOutcome::Inserted)
|
||||
}
|
||||
@ -219,7 +219,7 @@ impl DepositCache {
|
||||
let leaves = self
|
||||
.leaves
|
||||
.get(0..deposit_count as usize)
|
||||
.ok_or_else(|| Error::InternalError("Unable to get known leaves".into()))?;
|
||||
.ok_or_else(|| Error::Internal("Unable to get known leaves".into()))?;
|
||||
|
||||
// Note: there is likely a more optimal solution than recreating the `DepositDataTree`
|
||||
// each time this function is called.
|
||||
@ -233,7 +233,7 @@ impl DepositCache {
|
||||
let deposits = self
|
||||
.logs
|
||||
.get(start as usize..end as usize)
|
||||
.ok_or_else(|| Error::InternalError("Unable to get known log".into()))?
|
||||
.ok_or_else(|| Error::Internal("Unable to get known log".into()))?
|
||||
.iter()
|
||||
.map(|deposit_log| {
|
||||
let (_leaf, proof) = tree.generate_proof(deposit_log.index as usize);
|
||||
|
@ -378,7 +378,7 @@ pub async fn get_deposit_logs_in_range(
|
||||
.ok_or("Data was not string")?;
|
||||
|
||||
Ok(Log {
|
||||
block_number: hex_to_u64_be(&block_number)?,
|
||||
block_number: hex_to_u64_be(block_number)?,
|
||||
data: hex_to_bytes(data)?,
|
||||
})
|
||||
})
|
||||
@ -446,7 +446,7 @@ pub async fn send_rpc_request(
|
||||
|
||||
/// Accepts an entire HTTP body (as a string) and returns either the `result` field or the `error['message']` field, as a serde `Value`.
|
||||
fn response_result_or_error(response: &str) -> Result<Value, RpcError> {
|
||||
let json = serde_json::from_str::<Value>(&response)
|
||||
let json = serde_json::from_str::<Value>(response)
|
||||
.map_err(|e| RpcError::InvalidJson(e.to_string()))?;
|
||||
|
||||
if let Some(error) = json.get("error").map(|e| e.get("message")).flatten() {
|
||||
|
@ -48,7 +48,7 @@ impl Inner {
|
||||
|
||||
/// Encode the eth1 block and deposit cache as bytes.
|
||||
pub fn as_bytes(&self) -> Vec<u8> {
|
||||
let ssz_eth1_cache = SszEth1Cache::from_inner(&self);
|
||||
let ssz_eth1_cache = SszEth1Cache::from_inner(self);
|
||||
ssz_eth1_cache.as_ssz_bytes()
|
||||
}
|
||||
|
||||
|
@ -705,7 +705,7 @@ impl Service {
|
||||
}
|
||||
}
|
||||
}
|
||||
endpoints.fallback.map_format_error(|s| &s.endpoint, &e)
|
||||
endpoints.fallback.map_format_error(|s| &s.endpoint, e)
|
||||
};
|
||||
|
||||
let process_err = |e: Error| match &e {
|
||||
@ -716,7 +716,7 @@ impl Service {
|
||||
let (remote_head_block, new_block_numbers_deposit, new_block_numbers_block_cache) =
|
||||
endpoints
|
||||
.first_success(|e| async move {
|
||||
get_remote_head_and_new_block_ranges(e, &self, node_far_behind_seconds).await
|
||||
get_remote_head_and_new_block_ranges(e, self, node_far_behind_seconds).await
|
||||
})
|
||||
.await
|
||||
.map_err(|e| {
|
||||
@ -881,7 +881,7 @@ impl Service {
|
||||
Some(range) => range,
|
||||
None => endpoints
|
||||
.first_success(|e| async move {
|
||||
relevant_new_block_numbers_from_endpoint(e, &self, HeadType::Deposit).await
|
||||
relevant_new_block_numbers_from_endpoint(e, self, HeadType::Deposit).await
|
||||
})
|
||||
.await
|
||||
.map_err(Error::FallbackError)?,
|
||||
@ -922,7 +922,7 @@ impl Service {
|
||||
.first_success(|e| async move {
|
||||
get_deposit_logs_in_range(
|
||||
e,
|
||||
&deposit_contract_address_ref,
|
||||
deposit_contract_address_ref,
|
||||
block_range_ref.clone(),
|
||||
Duration::from_millis(GET_DEPOSIT_LOG_TIMEOUT_MILLIS),
|
||||
)
|
||||
@ -1034,7 +1034,7 @@ impl Service {
|
||||
Some(range) => range,
|
||||
None => endpoints
|
||||
.first_success(|e| async move {
|
||||
relevant_new_block_numbers_from_endpoint(e, &self, HeadType::BlockCache)
|
||||
relevant_new_block_numbers_from_endpoint(e, self, HeadType::BlockCache)
|
||||
.await
|
||||
})
|
||||
.await
|
||||
|
@ -67,7 +67,7 @@ pub fn use_or_load_enr(
|
||||
Ok(disk_enr) => {
|
||||
// if the same node id, then we may need to update our sequence number
|
||||
if local_enr.node_id() == disk_enr.node_id() {
|
||||
if compare_enr(&local_enr, &disk_enr) {
|
||||
if compare_enr(local_enr, &disk_enr) {
|
||||
debug!(log, "ENR loaded from disk"; "file" => ?enr_f);
|
||||
// the stored ENR has the same configuration, use it
|
||||
*local_enr = disk_enr;
|
||||
@ -92,7 +92,7 @@ pub fn use_or_load_enr(
|
||||
}
|
||||
}
|
||||
|
||||
save_enr_to_disk(&config.network_dir, &local_enr, log);
|
||||
save_enr_to_disk(&config.network_dir, local_enr, log);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@ -193,7 +193,7 @@ pub fn load_enr_from_disk(dir: &Path) -> Result<Enr, String> {
|
||||
pub fn save_enr_to_disk(dir: &Path, enr: &Enr, log: &slog::Logger) {
|
||||
let _ = std::fs::create_dir_all(dir);
|
||||
match File::create(dir.join(Path::new(ENR_FILENAME)))
|
||||
.and_then(|mut f| f.write_all(&enr.to_base64().as_bytes()))
|
||||
.and_then(|mut f| f.write_all(enr.to_base64().as_bytes()))
|
||||
{
|
||||
Ok(_) => {
|
||||
debug!(log, "ENR written to disk");
|
||||
|
@ -254,7 +254,7 @@ pub fn peer_id_to_node_id(peer_id: &PeerId) -> Result<discv5::enr::NodeId, Strin
|
||||
let uncompressed_key_bytes = &pk.encode_uncompressed()[1..];
|
||||
let mut output = [0_u8; 32];
|
||||
let mut hasher = Keccak::v256();
|
||||
hasher.update(&uncompressed_key_bytes);
|
||||
hasher.update(uncompressed_key_bytes);
|
||||
hasher.finalize(&mut output);
|
||||
Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length"))
|
||||
}
|
||||
|
@ -198,7 +198,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
|
||||
let listen_socket = SocketAddr::new(config.listen_address, config.discovery_port);
|
||||
|
||||
// convert the keypair into an ENR key
|
||||
let enr_key: CombinedKey = CombinedKey::from_libp2p(&local_key)?;
|
||||
let enr_key: CombinedKey = CombinedKey::from_libp2p(local_key)?;
|
||||
|
||||
let mut discv5 = Discv5::new(local_enr, enr_key, config.discv5_config.clone())
|
||||
.map_err(|e| format!("Discv5 service failed. Error: {:?}", e))?;
|
||||
|
@ -778,7 +778,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
) -> bool {
|
||||
{
|
||||
let mut peerdb = self.network_globals.peers.write();
|
||||
if peerdb.is_banned(&peer_id) {
|
||||
if peerdb.is_banned(peer_id) {
|
||||
// don't connect if the peer is banned
|
||||
slog::crit!(self.log, "Connection has been allowed to a banned peer"; "peer_id" => %peer_id);
|
||||
}
|
||||
@ -952,7 +952,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
/// previous bans from discovery.
|
||||
fn unban_peer(&mut self, peer_id: &PeerId) -> Result<(), &'static str> {
|
||||
let mut peer_db = self.network_globals.peers.write();
|
||||
peer_db.unban(&peer_id)?;
|
||||
peer_db.unban(peer_id)?;
|
||||
|
||||
let seen_ip_addresses = peer_db
|
||||
.peer_info(peer_id)
|
||||
|
@ -319,7 +319,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
|
||||
let mut by_status = self
|
||||
.peers
|
||||
.iter()
|
||||
.filter(|(_, info)| is_status(&info))
|
||||
.filter(|(_, info)| is_status(info))
|
||||
.collect::<Vec<_>>();
|
||||
by_status.sort_by_key(|(_, info)| info.score());
|
||||
by_status.into_iter().rev().collect()
|
||||
@ -332,7 +332,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
|
||||
{
|
||||
self.peers
|
||||
.iter()
|
||||
.filter(|(_, info)| is_status(&info))
|
||||
.filter(|(_, info)| is_status(info))
|
||||
.max_by_key(|(_, info)| info.score())
|
||||
.map(|(id, _)| id)
|
||||
}
|
||||
@ -1066,7 +1066,7 @@ mod tests {
|
||||
let mut socker_addr = Multiaddr::from(ip2);
|
||||
socker_addr.push(Protocol::Tcp(8080));
|
||||
for p in &peers {
|
||||
pdb.connect_ingoing(&p, socker_addr.clone(), None);
|
||||
pdb.connect_ingoing(p, socker_addr.clone(), None);
|
||||
pdb.disconnect_and_ban(p);
|
||||
pdb.inject_disconnect(p);
|
||||
pdb.disconnect_and_ban(p);
|
||||
@ -1078,7 +1078,7 @@ mod tests {
|
||||
|
||||
// unban all peers
|
||||
for p in &peers {
|
||||
reset_score(&mut pdb, &p);
|
||||
reset_score(&mut pdb, p);
|
||||
pdb.unban(p).unwrap();
|
||||
}
|
||||
|
||||
|
@ -578,6 +578,6 @@ fn load_or_build_metadata<E: EthSpec>(
|
||||
};
|
||||
|
||||
debug!(log, "Metadata sequence number"; "seq_num" => meta_data.seq_number);
|
||||
save_metadata_to_disk(network_dir, meta_data.clone(), &log);
|
||||
save_metadata_to_disk(network_dir, meta_data.clone(), log);
|
||||
meta_data
|
||||
}
|
||||
|
@ -142,7 +142,7 @@ pub async fn build_full_mesh(
|
||||
}
|
||||
let multiaddrs: Vec<Multiaddr> = nodes
|
||||
.iter()
|
||||
.map(|x| get_enr(&x).multiaddr()[1].clone())
|
||||
.map(|x| get_enr(x).multiaddr()[1].clone())
|
||||
.collect();
|
||||
|
||||
for (i, node) in nodes.iter_mut().enumerate().take(n) {
|
||||
@ -216,7 +216,7 @@ pub async fn build_linear(rt: Weak<Runtime>, log: slog::Logger, n: usize) -> Vec
|
||||
|
||||
let multiaddrs: Vec<Multiaddr> = nodes
|
||||
.iter()
|
||||
.map(|x| get_enr(&x).multiaddr()[1].clone())
|
||||
.map(|x| get_enr(x).multiaddr()[1].clone())
|
||||
.collect();
|
||||
for i in 0..n - 1 {
|
||||
match libp2p::Swarm::dial_addr(&mut nodes[i].swarm, multiaddrs[i + 1].clone()) {
|
||||
|
@ -316,7 +316,7 @@ impl Eth1GenesisService {
|
||||
//
|
||||
// Note: this state is fully valid, some fields have been bypassed to make verification
|
||||
// faster.
|
||||
let state = self.cheap_state_at_eth1_block::<E>(block, &spec)?;
|
||||
let state = self.cheap_state_at_eth1_block::<E>(block, spec)?;
|
||||
let active_validator_count = state
|
||||
.get_active_validator_indices(E::genesis_epoch(), spec)
|
||||
.map_err(|e| format!("Genesis validators error: {:?}", e))?
|
||||
@ -328,7 +328,7 @@ impl Eth1GenesisService {
|
||||
|
||||
if is_valid_genesis_state(&state, spec) {
|
||||
let genesis_state = self
|
||||
.genesis_from_eth1_block(block.clone(), &spec)
|
||||
.genesis_from_eth1_block(block.clone(), spec)
|
||||
.map_err(|e| format!("Failed to generate valid genesis state : {}", e))?;
|
||||
|
||||
return Ok(Some(genesis_state));
|
||||
@ -372,12 +372,12 @@ impl Eth1GenesisService {
|
||||
let genesis_state = initialize_beacon_state_from_eth1(
|
||||
eth1_block.hash,
|
||||
eth1_block.timestamp,
|
||||
genesis_deposits(deposit_logs, &spec)?,
|
||||
&spec,
|
||||
genesis_deposits(deposit_logs, spec)?,
|
||||
spec,
|
||||
)
|
||||
.map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?;
|
||||
|
||||
if is_valid_genesis_state(&genesis_state, &spec) {
|
||||
if is_valid_genesis_state(&genesis_state, spec) {
|
||||
Ok(genesis_state)
|
||||
} else {
|
||||
Err("Generated state was not valid.".to_string())
|
||||
@ -406,7 +406,7 @@ impl Eth1GenesisService {
|
||||
deposit_root: Hash256::zero(),
|
||||
deposit_count: 0,
|
||||
},
|
||||
&spec,
|
||||
spec,
|
||||
);
|
||||
|
||||
self.deposit_logs_at_block(eth1_block.number)
|
||||
|
@ -63,7 +63,7 @@ fn cached_attestation_duties<T: BeaconChainTypes>(
|
||||
.map_err(warp_utils::reject::beacon_chain_error)?;
|
||||
|
||||
let (duties, dependent_root) = chain
|
||||
.validator_attestation_duties(&request_indices, request_epoch, head.block_root)
|
||||
.validator_attestation_duties(request_indices, request_epoch, head.block_root)
|
||||
.map_err(warp_utils::reject::beacon_chain_error)?;
|
||||
|
||||
convert_to_api_response(duties, request_indices, dependent_root, chain)
|
||||
@ -104,7 +104,7 @@ fn compute_historic_attester_duties<T: BeaconChainTypes>(
|
||||
)?;
|
||||
state
|
||||
} else {
|
||||
StateId::slot(request_epoch.start_slot(T::EthSpec::slots_per_epoch())).state(&chain)?
|
||||
StateId::slot(request_epoch.start_slot(T::EthSpec::slots_per_epoch())).state(chain)?
|
||||
};
|
||||
|
||||
// Sanity-check the state lookup.
|
||||
|
@ -1515,11 +1515,9 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
peer_id: peer_id.to_string(),
|
||||
enr: peer_info.enr.as_ref().map(|enr| enr.to_base64()),
|
||||
last_seen_p2p_address: address,
|
||||
direction: api_types::PeerDirection::from_connection_direction(
|
||||
&dir,
|
||||
),
|
||||
direction: api_types::PeerDirection::from_connection_direction(dir),
|
||||
state: api_types::PeerState::from_peer_connection_status(
|
||||
&peer_info.connection_status(),
|
||||
peer_info.connection_status(),
|
||||
),
|
||||
}));
|
||||
}
|
||||
@ -1563,9 +1561,9 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
// the eth2 API spec implies only peers we have been connected to at some point should be included.
|
||||
if let Some(dir) = peer_info.connection_direction.as_ref() {
|
||||
let direction =
|
||||
api_types::PeerDirection::from_connection_direction(&dir);
|
||||
api_types::PeerDirection::from_connection_direction(dir);
|
||||
let state = api_types::PeerState::from_peer_connection_status(
|
||||
&peer_info.connection_status(),
|
||||
peer_info.connection_status(),
|
||||
);
|
||||
|
||||
let state_matches = query.state.as_ref().map_or(true, |states| {
|
||||
@ -1616,7 +1614,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.peers()
|
||||
.for_each(|(_, peer_info)| {
|
||||
let state = api_types::PeerState::from_peer_connection_status(
|
||||
&peer_info.connection_status(),
|
||||
peer_info.connection_status(),
|
||||
);
|
||||
match state {
|
||||
api_types::PeerState::Connected => connected += 1,
|
||||
|
@ -185,7 +185,7 @@ fn compute_historic_proposer_duties<T: BeaconChainTypes>(
|
||||
ensure_state_is_in_epoch(&mut state, state_root, epoch, &chain.spec)?;
|
||||
state
|
||||
} else {
|
||||
StateId::slot(epoch.start_slot(T::EthSpec::slots_per_epoch())).state(&chain)?
|
||||
StateId::slot(epoch.start_slot(T::EthSpec::slots_per_epoch())).state(chain)?
|
||||
};
|
||||
|
||||
// Ensure the state lookup was correct.
|
||||
|
@ -410,7 +410,7 @@ pub fn expose_publish_metrics<T: EthSpec>(messages: &[PubsubMessage<T>]) {
|
||||
PubsubMessage::Attestation(subnet_id) => {
|
||||
inc_counter_vec(
|
||||
&ATTESTATIONS_PUBLISHED_PER_SUBNET_PER_SLOT,
|
||||
&[&subnet_id.0.as_ref()],
|
||||
&[subnet_id.0.as_ref()],
|
||||
);
|
||||
inc_counter(&GOSSIP_UNAGGREGATED_ATTESTATIONS_TX)
|
||||
}
|
||||
@ -577,7 +577,7 @@ pub fn update_gossip_metrics<T: EthSpec>(
|
||||
|
||||
// mesh peers
|
||||
for topic_hash in gossipsub.topics() {
|
||||
let peers = gossipsub.mesh_peers(&topic_hash).count();
|
||||
let peers = gossipsub.mesh_peers(topic_hash).count();
|
||||
if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) {
|
||||
match topic.kind() {
|
||||
GossipKind::Attestation(subnet_id) => {
|
||||
@ -633,7 +633,7 @@ pub fn update_gossip_metrics<T: EthSpec>(
|
||||
if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) {
|
||||
match topic.kind() {
|
||||
GossipKind::BeaconBlock => {
|
||||
for peer in gossipsub.mesh_peers(&topic_hash) {
|
||||
for peer in gossipsub.mesh_peers(topic_hash) {
|
||||
if let Some(client) = peer_to_client.get(peer) {
|
||||
if let Some(v) =
|
||||
get_int_gauge(&BEACON_BLOCK_MESH_PEERS_PER_CLIENT, &[client])
|
||||
@ -644,7 +644,7 @@ pub fn update_gossip_metrics<T: EthSpec>(
|
||||
}
|
||||
}
|
||||
GossipKind::BeaconAggregateAndProof => {
|
||||
for peer in gossipsub.mesh_peers(&topic_hash) {
|
||||
for peer in gossipsub.mesh_peers(topic_hash) {
|
||||
if let Some(client) = peer_to_client.get(peer) {
|
||||
if let Some(v) = get_int_gauge(
|
||||
&BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT,
|
||||
|
@ -195,7 +195,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
|
||||
|
||||
// attestation service
|
||||
let attestation_service =
|
||||
AttestationService::new(beacon_chain.clone(), &config, &network_log);
|
||||
AttestationService::new(beacon_chain.clone(), config, &network_log);
|
||||
|
||||
// create a timer for updating network metrics
|
||||
let metrics_update = tokio::time::interval(Duration::from_secs(METRIC_UPDATE_INTERVAL));
|
||||
@ -251,7 +251,7 @@ fn spawn_service<T: BeaconChainTypes>(
|
||||
.map(|gauge| gauge.reset());
|
||||
}
|
||||
metrics::update_gossip_metrics::<T::EthSpec>(
|
||||
&service.libp2p.swarm.behaviour_mut().gs(),
|
||||
service.libp2p.swarm.behaviour_mut().gs(),
|
||||
&service.network_globals,
|
||||
);
|
||||
// update sync metrics
|
||||
|
@ -126,7 +126,7 @@ impl<T: EthSpec> BatchInfo<T> {
|
||||
BatchState::Downloading(peer_id, _, _)
|
||||
| BatchState::AwaitingProcessing(peer_id, _)
|
||||
| BatchState::Processing(Attempt { peer_id, .. })
|
||||
| BatchState::AwaitingValidation(Attempt { peer_id, .. }) => Some(&peer_id),
|
||||
| BatchState::AwaitingValidation(Attempt { peer_id, .. }) => Some(peer_id),
|
||||
BatchState::Poisoned => unreachable!("Poisoned batch"),
|
||||
}
|
||||
}
|
||||
|
@ -383,7 +383,7 @@ impl<T: EthSpec> OperationPool<T> {
|
||||
|
||||
let relevant_attester_slashings = reader.iter().flat_map(|(slashing, fork)| {
|
||||
if *fork == state.fork().previous_version || *fork == state.fork().current_version {
|
||||
AttesterSlashingMaxCover::new(&slashing, &to_be_slashed, state)
|
||||
AttesterSlashingMaxCover::new(slashing, &to_be_slashed, state)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
|
@ -121,7 +121,7 @@ mod test {
|
||||
}
|
||||
|
||||
fn covering_set(&self) -> &Self {
|
||||
&self
|
||||
self
|
||||
}
|
||||
|
||||
fn update_covering_set(&mut self, _: &Self, other: &Self) {
|
||||
|
@ -264,7 +264,7 @@ pub fn get_config<E: EthSpec>(
|
||||
/*
|
||||
* Load the eth2 network dir to obtain some additional config values.
|
||||
*/
|
||||
let eth2_network_config = get_eth2_network_config(&cli_args)?;
|
||||
let eth2_network_config = get_eth2_network_config(cli_args)?;
|
||||
|
||||
client_config.eth1.deposit_contract_address = format!("{:?}", spec.deposit_contract_address);
|
||||
client_config.eth1.deposit_contract_deploy_block =
|
||||
|
@ -305,7 +305,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
pub fn put_state(&self, state_root: &Hash256, state: &BeaconState<E>) -> Result<(), Error> {
|
||||
let mut ops: Vec<KeyValueStoreOp> = Vec::new();
|
||||
if state.slot() < self.get_split_slot() {
|
||||
self.store_cold_state(state_root, &state, &mut ops)?;
|
||||
self.store_cold_state(state_root, state, &mut ops)?;
|
||||
self.cold_db.do_atomically(ops)
|
||||
} else {
|
||||
self.store_hot_state(state_root, state, &mut ops)?;
|
||||
@ -563,7 +563,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
"slot" => state.slot().as_u64(),
|
||||
"state_root" => format!("{:?}", state_root)
|
||||
);
|
||||
store_full_state(state_root, &state, ops)?;
|
||||
store_full_state(state_root, state, ops)?;
|
||||
}
|
||||
|
||||
// Store a summary of the state.
|
||||
@ -861,7 +861,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
|
||||
per_block_processing(
|
||||
&mut state,
|
||||
&block,
|
||||
block,
|
||||
None,
|
||||
BlockSignatureStrategy::NoVerification,
|
||||
&self.spec,
|
||||
|
@ -271,7 +271,7 @@ mod tests {
|
||||
fn simplediskdb() {
|
||||
let dir = tempdir().unwrap();
|
||||
let path = dir.path();
|
||||
let store = LevelDB::open(&path).unwrap();
|
||||
let store = LevelDB::open(path).unwrap();
|
||||
|
||||
test_impl(store);
|
||||
}
|
||||
|
@ -29,7 +29,7 @@ impl<T: EthSpec> TryFrom<&ArgMatches<'_>> for BootNodeConfig<T> {
|
||||
let data_dir = get_data_dir(matches);
|
||||
|
||||
// Try and grab network config from input CLI params
|
||||
let eth2_network_config = get_eth2_network_config(&matches)?;
|
||||
let eth2_network_config = get_eth2_network_config(matches)?;
|
||||
|
||||
// Try and obtain bootnodes
|
||||
|
||||
|
@ -73,14 +73,14 @@ pub fn write_file_via_temporary(
|
||||
// If the file already exists, preserve its permissions by copying it.
|
||||
// Otherwise, create a new file with restricted permissions.
|
||||
if file_path.exists() {
|
||||
fs::copy(&file_path, &temp_path).map_err(FsError::UnableToCopyFile)?;
|
||||
fs::write(&temp_path, &bytes).map_err(FsError::UnableToWriteFile)?;
|
||||
fs::copy(file_path, temp_path).map_err(FsError::UnableToCopyFile)?;
|
||||
fs::write(temp_path, bytes).map_err(FsError::UnableToWriteFile)?;
|
||||
} else {
|
||||
create_with_600_perms(&temp_path, &bytes)?;
|
||||
create_with_600_perms(temp_path, bytes)?;
|
||||
}
|
||||
|
||||
// With the temporary file created, perform an atomic rename.
|
||||
fs::rename(&temp_path, &file_path).map_err(FsError::UnableToRenameFile)?;
|
||||
fs::rename(temp_path, file_path).map_err(FsError::UnableToRenameFile)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
@ -405,7 +405,7 @@ mod tests {
|
||||
voting_keystore_path: ""
|
||||
voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7"
|
||||
"#;
|
||||
let def: ValidatorDefinition = serde_yaml::from_str(&no_graffiti).unwrap();
|
||||
let def: ValidatorDefinition = serde_yaml::from_str(no_graffiti).unwrap();
|
||||
assert!(def.graffiti.is_none());
|
||||
|
||||
let invalid_graffiti = r#"---
|
||||
@ -417,7 +417,7 @@ mod tests {
|
||||
voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7"
|
||||
"#;
|
||||
|
||||
let def: Result<ValidatorDefinition, _> = serde_yaml::from_str(&invalid_graffiti);
|
||||
let def: Result<ValidatorDefinition, _> = serde_yaml::from_str(invalid_graffiti);
|
||||
assert!(def.is_err());
|
||||
|
||||
let valid_graffiti = r#"---
|
||||
@ -429,7 +429,7 @@ mod tests {
|
||||
voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7"
|
||||
"#;
|
||||
|
||||
let def: ValidatorDefinition = serde_yaml::from_str(&valid_graffiti).unwrap();
|
||||
let def: ValidatorDefinition = serde_yaml::from_str(valid_graffiti).unwrap();
|
||||
assert_eq!(
|
||||
def.graffiti,
|
||||
Some(GraffitiString::from_str("mrfwashere").unwrap())
|
||||
|
@ -35,7 +35,7 @@ pub fn parse_pubkey(secret: &str) -> Result<PublicKey, Error> {
|
||||
&secret[SECRET_PREFIX.len()..]
|
||||
};
|
||||
|
||||
serde_utils::hex::decode(&secret)
|
||||
serde_utils::hex::decode(secret)
|
||||
.map_err(|e| Error::InvalidSecret(format!("invalid hex: {:?}", e)))
|
||||
.and_then(|bytes| {
|
||||
if bytes.len() != PK_LEN {
|
||||
|
@ -16,8 +16,8 @@ pub enum Error {
|
||||
UnableToRemoveWallet(io::Error),
|
||||
UnableToCreateWallet(io::Error),
|
||||
UnableToReadWallet(io::Error),
|
||||
JsonWriteError(WalletError),
|
||||
JsonReadError(WalletError),
|
||||
JsonWrite(WalletError),
|
||||
JsonRead(WalletError),
|
||||
}
|
||||
|
||||
/// Read a wallet with the given `uuid` from the `wallet_dir`.
|
||||
@ -32,7 +32,7 @@ pub fn read<P: AsRef<Path>>(wallet_dir: P, uuid: &Uuid) -> Result<Wallet, Error>
|
||||
.create(false)
|
||||
.open(json_path)
|
||||
.map_err(Error::UnableToReadWallet)
|
||||
.and_then(|f| Wallet::from_json_reader(f).map_err(Error::JsonReadError))
|
||||
.and_then(|f| Wallet::from_json_reader(f).map_err(Error::JsonRead))
|
||||
}
|
||||
}
|
||||
|
||||
@ -84,7 +84,7 @@ pub fn create<P: AsRef<Path>>(wallet_dir: P, wallet: &Wallet) -> Result<(), Erro
|
||||
.create_new(true)
|
||||
.open(json_path)
|
||||
.map_err(Error::UnableToCreateWallet)
|
||||
.and_then(|f| wallet.to_json_writer(f).map_err(Error::JsonWriteError))
|
||||
.and_then(|f| wallet.to_json_writer(f).map_err(Error::JsonWrite))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -140,9 +140,7 @@ impl<'a> slog_term::RecordDecorator for AlignedRecordDecorator<'a> {
|
||||
write!(
|
||||
self,
|
||||
"{}",
|
||||
std::iter::repeat(' ')
|
||||
.take(self.message_width - self.message_count)
|
||||
.collect::<String>()
|
||||
" ".repeat(self.message_width - self.message_count)
|
||||
)?;
|
||||
self.message_active = false;
|
||||
self.message_count = 0;
|
||||
|
@ -153,7 +153,7 @@ pub fn gather_metrics(metrics_map: &HashMap<String, JsonMetric>) -> Option<serde
|
||||
for mf in metric_families.iter() {
|
||||
let metric_name = mf.get_name();
|
||||
if metrics_map.contains_key(metric_name) {
|
||||
let value = get_value(&mf).unwrap_or_default();
|
||||
let value = get_value(mf).unwrap_or_default();
|
||||
let metric = metrics_map.get(metric_name)?;
|
||||
let value = metric.get_typed_value(value);
|
||||
let _ = res.insert(metric.json_output_key.to_string(), value);
|
||||
|
@ -131,8 +131,8 @@ impl MonitoringHttpClient {
|
||||
let freezer_db_path = self.db_path.as_ref().ok_or_else(|| {
|
||||
Error::BeaconMetricsFailed("Beacon metrics require freezer db path".to_string())
|
||||
})?;
|
||||
let metrics = gather_beacon_metrics(&db_path, &freezer_db_path)
|
||||
.map_err(Error::BeaconMetricsFailed)?;
|
||||
let metrics =
|
||||
gather_beacon_metrics(db_path, freezer_db_path).map_err(Error::BeaconMetricsFailed)?;
|
||||
Ok(MonitoringMetrics {
|
||||
metadata: Metadata::new(ProcessType::BeaconNode),
|
||||
process_metrics: Process::Beacon(metrics),
|
||||
|
@ -180,7 +180,7 @@ impl<'a> Builder<'a> {
|
||||
signature: Signature::empty().into(),
|
||||
};
|
||||
|
||||
deposit_data.signature = deposit_data.create_signature(&voting_keypair.sk, &spec);
|
||||
deposit_data.signature = deposit_data.create_signature(&voting_keypair.sk, spec);
|
||||
|
||||
let deposit_data =
|
||||
encode_eth1_tx_data(&deposit_data).map_err(Error::UnableToEncodeDeposit)?;
|
||||
|
@ -60,7 +60,7 @@ impl<N: Unsigned> CachedTreeHash<TreeHashCache> for FixedVector<Hash256, N> {
|
||||
arena: &mut CacheArena,
|
||||
cache: &mut TreeHashCache,
|
||||
) -> Result<Hash256, Error> {
|
||||
cache.recalculate_merkle_root(arena, hash256_iter(&self))
|
||||
cache.recalculate_merkle_root(arena, hash256_iter(self))
|
||||
}
|
||||
}
|
||||
|
||||
@ -79,7 +79,7 @@ impl<N: Unsigned> CachedTreeHash<TreeHashCache> for FixedVector<u64, N> {
|
||||
arena: &mut CacheArena,
|
||||
cache: &mut TreeHashCache,
|
||||
) -> Result<Hash256, Error> {
|
||||
cache.recalculate_merkle_root(arena, u64_iter(&self))
|
||||
cache.recalculate_merkle_root(arena, u64_iter(self))
|
||||
}
|
||||
}
|
||||
|
||||
@ -98,7 +98,7 @@ impl<N: Unsigned> CachedTreeHash<TreeHashCache> for VariableList<Hash256, N> {
|
||||
cache: &mut TreeHashCache,
|
||||
) -> Result<Hash256, Error> {
|
||||
Ok(mix_in_length(
|
||||
&cache.recalculate_merkle_root(arena, hash256_iter(&self))?,
|
||||
&cache.recalculate_merkle_root(arena, hash256_iter(self))?,
|
||||
self.len(),
|
||||
))
|
||||
}
|
||||
@ -120,7 +120,7 @@ impl<N: Unsigned> CachedTreeHash<TreeHashCache> for VariableList<u64, N> {
|
||||
cache: &mut TreeHashCache,
|
||||
) -> Result<Hash256, Error> {
|
||||
Ok(mix_in_length(
|
||||
&cache.recalculate_merkle_root(arena, u64_iter(&self))?,
|
||||
&cache.recalculate_merkle_root(arena, u64_iter(self))?,
|
||||
self.len(),
|
||||
))
|
||||
}
|
||||
|
@ -270,11 +270,11 @@ mod tests {
|
||||
return TestResult::discard();
|
||||
}
|
||||
|
||||
let leaves: Vec<_> = int_leaves.into_iter().map(H256::from_low_u64_be).collect();
|
||||
let leaves_iter = int_leaves.into_iter().map(H256::from_low_u64_be);
|
||||
|
||||
let mut merkle_tree = MerkleTree::create(&[], depth);
|
||||
|
||||
let proofs_ok = leaves.into_iter().enumerate().all(|(i, leaf)| {
|
||||
let proofs_ok = leaves_iter.enumerate().all(|(i, leaf)| {
|
||||
assert_eq!(merkle_tree.push_leaf(leaf, depth), Ok(()));
|
||||
let (stored_leaf, branch) = merkle_tree.generate_proof(i, depth);
|
||||
stored_leaf == leaf && verify_merkle_proof(leaf, &branch, depth, i, merkle_tree.hash())
|
||||
|
@ -210,7 +210,7 @@ impl ProtoArray {
|
||||
.ok_or(Error::InvalidBestDescendant(best_descendant_index))?;
|
||||
|
||||
// Perform a sanity check that the node is indeed valid to be the head.
|
||||
if !self.node_is_viable_for_head(&best_node) {
|
||||
if !self.node_is_viable_for_head(best_node) {
|
||||
return Err(Error::InvalidBestNode {
|
||||
start_root: *justified_root,
|
||||
justified_epoch: self.justified_epoch,
|
||||
@ -321,7 +321,7 @@ impl ProtoArray {
|
||||
.get(parent_index)
|
||||
.ok_or(Error::InvalidNodeIndex(parent_index))?;
|
||||
|
||||
let child_leads_to_viable_head = self.node_leads_to_viable_head(&child)?;
|
||||
let child_leads_to_viable_head = self.node_leads_to_viable_head(child)?;
|
||||
|
||||
// These three variables are aliases to the three options that we may set the
|
||||
// `parent.best_child` and `parent.best_descendant` to.
|
||||
@ -334,8 +334,9 @@ impl ProtoArray {
|
||||
);
|
||||
let no_change = (parent.best_child, parent.best_descendant);
|
||||
|
||||
let (new_best_child, new_best_descendant) =
|
||||
if let Some(best_child_index) = parent.best_child {
|
||||
let (new_best_child, new_best_descendant) = if let Some(best_child_index) =
|
||||
parent.best_child
|
||||
{
|
||||
if best_child_index == child_index && !child_leads_to_viable_head {
|
||||
// If the child is already the best-child of the parent but it's not viable for
|
||||
// the head, remove it.
|
||||
@ -350,8 +351,7 @@ impl ProtoArray {
|
||||
.get(best_child_index)
|
||||
.ok_or(Error::InvalidBestDescendant(best_child_index))?;
|
||||
|
||||
let best_child_leads_to_viable_head =
|
||||
self.node_leads_to_viable_head(&best_child)?;
|
||||
let best_child_leads_to_viable_head = self.node_leads_to_viable_head(best_child)?;
|
||||
|
||||
if child_leads_to_viable_head && !best_child_leads_to_viable_head {
|
||||
// The child leads to a viable head, but the current best-child doesn't.
|
||||
|
@ -148,8 +148,8 @@ impl ProtoArrayForkChoice {
|
||||
let deltas = compute_deltas(
|
||||
&self.proto_array.indices,
|
||||
&mut self.votes,
|
||||
&old_balances,
|
||||
&new_balances,
|
||||
old_balances,
|
||||
new_balances,
|
||||
)
|
||||
.map_err(|e| format!("find_head compute_deltas failed: {:?}", e))?;
|
||||
|
||||
|
@ -9,7 +9,7 @@ mod round_trip {
|
||||
for item in items {
|
||||
let encoded = &item.as_ssz_bytes();
|
||||
assert_eq!(item.ssz_bytes_len(), encoded.len());
|
||||
assert_eq!(T::from_ssz_bytes(&encoded), Ok(item));
|
||||
assert_eq!(T::from_ssz_bytes(encoded), Ok(item));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -17,7 +17,7 @@ fn get_serializable_named_field_idents(struct_data: &syn::DataStruct) -> Vec<&sy
|
||||
.fields
|
||||
.iter()
|
||||
.filter_map(|f| {
|
||||
if should_skip_serializing(&f) {
|
||||
if should_skip_serializing(f) {
|
||||
None
|
||||
} else {
|
||||
Some(match &f.ident {
|
||||
@ -36,7 +36,7 @@ fn get_serializable_field_types(struct_data: &syn::DataStruct) -> Vec<&syn::Type
|
||||
.fields
|
||||
.iter()
|
||||
.filter_map(|f| {
|
||||
if should_skip_serializing(&f) {
|
||||
if should_skip_serializing(f) {
|
||||
None
|
||||
} else {
|
||||
Some(&f.ty)
|
||||
|
@ -364,7 +364,7 @@ mod test {
|
||||
fn ssz_round_trip<T: Encode + Decode + std::fmt::Debug + PartialEq>(item: T) {
|
||||
let encoded = &item.as_ssz_bytes();
|
||||
assert_eq!(item.ssz_bytes_len(), encoded.len());
|
||||
assert_eq!(T::from_ssz_bytes(&encoded), Ok(item));
|
||||
assert_eq!(T::from_ssz_bytes(encoded), Ok(item));
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@ -345,7 +345,7 @@ mod test {
|
||||
fn round_trip<T: Encode + Decode + std::fmt::Debug + PartialEq>(item: T) {
|
||||
let encoded = &item.as_ssz_bytes();
|
||||
assert_eq!(item.ssz_bytes_len(), encoded.len());
|
||||
assert_eq!(T::from_ssz_bytes(&encoded), Ok(item));
|
||||
assert_eq!(T::from_ssz_bytes(encoded), Ok(item));
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@ -34,7 +34,7 @@ pub fn initialize_beacon_state_from_eth1<T: EthSpec>(
|
||||
.push_leaf(deposit.data.tree_hash_root())
|
||||
.map_err(BlockProcessingError::MerkleTreeError)?;
|
||||
state.eth1_data_mut().deposit_root = deposit_tree.root();
|
||||
process_deposit(&mut state, &deposit, spec, true)?;
|
||||
process_deposit(&mut state, deposit, spec, true)?;
|
||||
}
|
||||
|
||||
process_activations(&mut state, spec)?;
|
||||
|
@ -244,10 +244,10 @@ where
|
||||
.iter()
|
||||
.try_for_each(|attester_slashing| {
|
||||
let (set_1, set_2) = attester_slashing_signature_sets(
|
||||
&self.state,
|
||||
self.state,
|
||||
self.get_pubkey.clone(),
|
||||
attester_slashing,
|
||||
&self.spec,
|
||||
self.spec,
|
||||
)?;
|
||||
|
||||
self.sets.push(set_1);
|
||||
@ -280,11 +280,11 @@ where
|
||||
get_indexed_attestation(committee.committee, attestation)?;
|
||||
|
||||
self.sets.push(indexed_attestation_signature_set(
|
||||
&self.state,
|
||||
self.state,
|
||||
self.get_pubkey.clone(),
|
||||
&attestation.signature,
|
||||
&indexed_attestation,
|
||||
&self.spec,
|
||||
self.spec,
|
||||
)?);
|
||||
|
||||
vec.push(indexed_attestation);
|
||||
@ -307,7 +307,7 @@ where
|
||||
.iter()
|
||||
.try_for_each(|exit| {
|
||||
let exit =
|
||||
exit_signature_set(&self.state, self.get_pubkey.clone(), exit, &self.spec)?;
|
||||
exit_signature_set(self.state, self.get_pubkey.clone(), exit, self.spec)?;
|
||||
|
||||
self.sets.push(exit);
|
||||
|
||||
@ -323,8 +323,8 @@ where
|
||||
sync_aggregate,
|
||||
block.slot(),
|
||||
block.parent_root(),
|
||||
&self.state,
|
||||
&self.spec,
|
||||
self.state,
|
||||
self.spec,
|
||||
)? {
|
||||
self.sets.push(signature_set);
|
||||
}
|
||||
|
@ -44,7 +44,7 @@ pub fn is_valid_indexed_attestation<T: EthSpec>(
|
||||
state,
|
||||
|i| get_pubkey_from_state(state, i),
|
||||
&indexed_attestation.signature,
|
||||
&indexed_attestation,
|
||||
indexed_attestation,
|
||||
spec
|
||||
)?
|
||||
.verify(),
|
||||
|
@ -177,7 +177,7 @@ pub fn process_proposer_slashings<T: EthSpec>(
|
||||
.iter()
|
||||
.enumerate()
|
||||
.try_for_each(|(i, proposer_slashing)| {
|
||||
verify_proposer_slashing(proposer_slashing, &state, verify_signatures, spec)
|
||||
verify_proposer_slashing(proposer_slashing, state, verify_signatures, spec)
|
||||
.map_err(|e| e.into_with_index(i))?;
|
||||
|
||||
slash_validator(
|
||||
@ -202,11 +202,11 @@ pub fn process_attester_slashings<T: EthSpec>(
|
||||
spec: &ChainSpec,
|
||||
) -> Result<(), BlockProcessingError> {
|
||||
for (i, attester_slashing) in attester_slashings.iter().enumerate() {
|
||||
verify_attester_slashing(&state, &attester_slashing, verify_signatures, spec)
|
||||
verify_attester_slashing(state, attester_slashing, verify_signatures, spec)
|
||||
.map_err(|e| e.into_with_index(i))?;
|
||||
|
||||
let slashable_indices =
|
||||
get_slashable_indices(&state, &attester_slashing).map_err(|e| e.into_with_index(i))?;
|
||||
get_slashable_indices(state, attester_slashing).map_err(|e| e.into_with_index(i))?;
|
||||
|
||||
for i in slashable_indices {
|
||||
slash_validator(state, i as usize, None, spec)?;
|
||||
@ -254,7 +254,7 @@ pub fn process_exits<T: EthSpec>(
|
||||
// Verify and apply each exit in series. We iterate in series because higher-index exits may
|
||||
// become invalid due to the application of lower-index ones.
|
||||
for (i, exit) in voluntary_exits.iter().enumerate() {
|
||||
verify_exit(&state, exit, verify_signatures, spec).map_err(|e| e.into_with_index(i))?;
|
||||
verify_exit(state, exit, verify_signatures, spec).map_err(|e| e.into_with_index(i))?;
|
||||
|
||||
initiate_validator_exit(state, exit.message.validator_index as usize, spec)?;
|
||||
}
|
||||
|
@ -257,7 +257,7 @@ where
|
||||
let domain = spec.get_domain(
|
||||
indexed_attestation.data.target.epoch,
|
||||
Domain::BeaconAttester,
|
||||
&fork,
|
||||
fork,
|
||||
genesis_validators_root,
|
||||
);
|
||||
|
||||
@ -494,7 +494,7 @@ where
|
||||
pubkeys.push(get_pubkey(pubkey).ok_or_else(|| Error::ValidatorPubkeyUnknown(*pubkey))?);
|
||||
}
|
||||
|
||||
let domain = spec.get_domain(epoch, Domain::SyncCommittee, &fork, genesis_validators_root);
|
||||
let domain = spec.get_domain(epoch, Domain::SyncCommittee, fork, genesis_validators_root);
|
||||
|
||||
let message = beacon_block_root.signing_root(domain);
|
||||
|
||||
@ -513,7 +513,7 @@ pub fn sync_committee_message_set_from_pubkeys<'a, T>(
|
||||
where
|
||||
T: EthSpec,
|
||||
{
|
||||
let domain = spec.get_domain(epoch, Domain::SyncCommittee, &fork, genesis_validators_root);
|
||||
let domain = spec.get_domain(epoch, Domain::SyncCommittee, fork, genesis_validators_root);
|
||||
|
||||
let message = beacon_block_root.signing_root(domain);
|
||||
|
||||
|
@ -33,9 +33,9 @@ pub fn verify_attester_slashing<T: EthSpec>(
|
||||
Invalid::NotSlashable
|
||||
);
|
||||
|
||||
is_valid_indexed_attestation(state, &attestation_1, verify_signatures, spec)
|
||||
is_valid_indexed_attestation(state, attestation_1, verify_signatures, spec)
|
||||
.map_err(|e| error(Invalid::IndexedAttestation1Invalid(e)))?;
|
||||
is_valid_indexed_attestation(state, &attestation_2, verify_signatures, spec)
|
||||
is_valid_indexed_attestation(state, attestation_2, verify_signatures, spec)
|
||||
.map_err(|e| error(Invalid::IndexedAttestation2Invalid(e)))?;
|
||||
|
||||
Ok(())
|
||||
|
@@ -15,7 +15,7 @@ fn error(reason: DepositInvalid) -> BlockOperationError<DepositInvalid> {
///
/// Spec v0.12.1
pub fn verify_deposit_signature(deposit_data: &DepositData, spec: &ChainSpec) -> Result<()> {
-    let (public_key, signature, msg) = deposit_pubkey_signature_message(&deposit_data, spec)
+    let (public_key, signature, msg) = deposit_pubkey_signature_message(deposit_data, spec)
        .ok_or_else(|| error(DepositInvalid::BadBlsBytes))?;

    verify!(

@@ -28,7 +28,7 @@ pub fn process_epoch<T: EthSpec>(
    //
    // E.g., attestation in the previous epoch, attested to the head, etc.
    let mut validator_statuses = ValidatorStatuses::new(state, spec)?;
-    validator_statuses.process_attestations(&state)?;
+    validator_statuses.process_attestations(state)?;

    // Justification and finalization.
    process_justification_and_finalization(state, &validator_statuses.total_balances, spec)?;

@@ -60,7 +60,7 @@ pub fn process_rewards_and_penalties<T: EthSpec>(
        return Err(Error::ValidatorStatusesInconsistent);
    }

-    let deltas = get_attestation_deltas(state, &validator_statuses, spec)?;
+    let deltas = get_attestation_deltas(state, validator_statuses, spec)?;

    // Apply the deltas, erroring on overflow above but not on overflow below (saturating at 0
    // instead).

@@ -26,7 +26,7 @@ pub fn translate_participation<E: EthSpec>(
        // Apply flags to all attesting validators.
        let committee = state.get_beacon_committee(data.slot, data.index)?;
        let attesting_indices =
-            get_attesting_indices::<E>(&committee.committee, &attestation.aggregation_bits)?;
+            get_attesting_indices::<E>(committee.committee, &attestation.aggregation_bits)?;
        let epoch_participation = state.previous_epoch_participation_mut()?;

        for index in attesting_indices {

@@ -221,7 +221,7 @@ mod test {
    use crate::ZERO_HASHES_MAX_INDEX;

    pub fn reference_root(bytes: &[u8]) -> Hash256 {
-        crate::merkleize_standard(&bytes)
+        crate::merkleize_standard(bytes)
    }

    macro_rules! common_tests {

@@ -322,7 +322,7 @@ mod test {
        assert_eq!(
            reference_root(&reference_input),
-            merkleize_padded(&input, min_nodes),
+            merkleize_padded(input, min_nodes),
            "input.len(): {:?}",
            input.len()
        );
@@ -23,14 +23,14 @@ fn get_hashable_fields_and_their_caches(
        .fields
        .iter()
        .filter_map(|f| {
-            if should_skip_hashing(&f) {
+            if should_skip_hashing(f) {
                None
            } else {
                let ident = f
                    .ident
                    .as_ref()
                    .expect("tree_hash_derive only supports named struct fields");
-                let opt_cache_field = get_cache_field_for(&f);
+                let opt_cache_field = get_cache_field_for(f);
                Some((ident, f.ty.clone(), opt_cache_field))
            }
        })

@@ -94,7 +94,7 @@ fn tree_hash_derive_struct(item: &DeriveInput, struct_data: &DataStruct) -> Toke
    let name = &item.ident;
    let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl();

-    let idents = get_hashable_fields(&struct_data);
+    let idents = get_hashable_fields(struct_data);
    let num_leaves = idents.len();

    let output = quote! {

@@ -489,7 +489,7 @@ impl<T: EthSpec> BeaconState<T> {
    ) -> Result<&[usize], Error> {
        let cache = self.committee_cache(relative_epoch)?;

-        Ok(&cache.active_validator_indices())
+        Ok(cache.active_validator_indices())
    }

    /// Returns the active validator indices for the given epoch.

@@ -770,7 +770,7 @@ impl<T: EthSpec> BeaconState<T> {
            .pubkeys
            .iter()
            .map(|pubkey| {
-                self.get_validator_index(&pubkey)?
+                self.get_validator_index(pubkey)?
                    .ok_or(Error::PubkeyCacheInconsistent)
            })
            .collect()

@@ -1326,7 +1326,7 @@ impl<T: EthSpec> BeaconState<T> {
        epoch: Epoch,
        spec: &ChainSpec,
    ) -> Result<CommitteeCache, Error> {
-        CommitteeCache::initialized(&self, epoch, spec)
+        CommitteeCache::initialized(self, epoch, spec)
    }

    /// Advances the cache for this state into the next epoch.

@@ -1438,7 +1438,7 @@ impl<T: EthSpec> BeaconState<T> {
        if let Some(mut cache) = cache {
            // Note: we return early if the tree hash fails, leaving `self.tree_hash_cache` as
            // None. There's no need to keep a cache that fails.
-            let root = cache.recalculate_tree_hash_root(&self)?;
+            let root = cache.recalculate_tree_hash_root(self)?;
            self.tree_hash_cache_mut().restore(cache);
            Ok(root)
        } else {

@@ -67,13 +67,13 @@ fn initializes_with_the_right_epoch() {
    let cache = CommitteeCache::default();
    assert!(!cache.is_initialized_at(state.current_epoch()));

-    let cache = CommitteeCache::initialized(&state, state.current_epoch(), &spec).unwrap();
+    let cache = CommitteeCache::initialized(&state, state.current_epoch(), spec).unwrap();
    assert!(cache.is_initialized_at(state.current_epoch()));

-    let cache = CommitteeCache::initialized(&state, state.previous_epoch(), &spec).unwrap();
+    let cache = CommitteeCache::initialized(&state, state.previous_epoch(), spec).unwrap();
    assert!(cache.is_initialized_at(state.previous_epoch()));

-    let cache = CommitteeCache::initialized(&state, state.next_epoch().unwrap(), &spec).unwrap();
+    let cache = CommitteeCache::initialized(&state, state.next_epoch().unwrap(), spec).unwrap();
    assert!(cache.is_initialized_at(state.next_epoch().unwrap()));
}
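The `Ok(&cache.active_validator_indices())` change above is the same lint in return position: the getter already hands back a `&[usize]`, so the extra `&` borrows a reference. A sketch with a stand-in cache type (all names here are illustrative):

    // Stand-in for the committee cache; only the reference depth matters.
    struct Cache {
        indices: Vec<usize>,
    }

    impl Cache {
        // Already returns a slice reference, like `active_validator_indices`.
        fn active(&self) -> &[usize] {
            &self.indices
        }
    }

    fn lookup(cache: &Cache) -> Result<&[usize], ()> {
        // Before: `Ok(&cache.active())` borrowed the returned reference again.
        Ok(cache.active())
    }

    fn main() {
        let cache = Cache { indices: vec![0, 2] };
        assert_eq!(lookup(&cache).unwrap(), &[0usize, 2][..]);
    }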
@@ -59,7 +59,7 @@ fn test_beacon_proposer_index<T: EthSpec>() {
    // Get the i'th candidate proposer for the given state and slot
    let ith_candidate = |state: &BeaconState<T>, slot: Slot, i: usize, spec: &ChainSpec| {
        let epoch = slot.epoch(T::slots_per_epoch());
-        let seed = state.get_beacon_proposer_seed(slot, &spec).unwrap();
+        let seed = state.get_beacon_proposer_seed(slot, spec).unwrap();
        let active_validators = state.get_active_validator_indices(epoch, spec).unwrap();
        active_validators[compute_shuffled_index(
            i,

@@ -338,7 +338,7 @@ mod committees {
                new_head_state,
                cache_epoch,
                validator_count as usize,
-                &spec,
+                spec,
            );
        }

@@ -247,7 +247,7 @@ impl<T: EthSpec> BeaconTreeHashCacheInner<T> {
        hasher.write(state.eth1_data().tree_hash_root().as_bytes())?;
        hasher.write(
            self.eth1_data_votes
-                .recalculate_tree_hash_root(&state)?
+                .recalculate_tree_hash_root(state)?
                .as_bytes(),
        )?;
        hasher.write(state.eth1_deposit_index().tree_hash_root().as_bytes())?;

@@ -84,7 +84,7 @@ impl Into<Graffiti> for GraffitiString {
        graffiti
            .get_mut(..graffiti_len)
            .expect("graffiti_len <= GRAFFITI_BYTES_LEN")
-            .copy_from_slice(&graffiti_bytes);
+            .copy_from_slice(graffiti_bytes);
        graffiti.into()
    }
}

@@ -31,7 +31,7 @@ impl<'a, N: Unsigned> CachedTreeHash<TreeHashCache> for ParticipationList<'a, N>
        cache: &mut TreeHashCache,
    ) -> Result<Hash256, Error> {
        Ok(mix_in_length(
-            &cache.recalculate_merkle_root(arena, leaf_iter(&self.inner))?,
+            &cache.recalculate_merkle_root(arena, leaf_iter(self.inner))?,
            self.inner.len(),
        ))
    }
@@ -24,7 +24,7 @@ pub struct SubnetId(#[serde(with = "serde_utils::quoted_u64")] u64);

pub fn subnet_id_to_string(i: u64) -> &'static str {
    if i < MAX_SUBNET_ID as u64 {
-        &SUBNET_ID_TO_STRING
+        SUBNET_ID_TO_STRING
            .get(i as usize)
            .expect("index below MAX_SUBNET_ID")
    } else {

@@ -21,7 +21,7 @@ pub struct SyncSubnetId(#[serde(with = "serde_utils::quoted_u64")] u64);

pub fn sync_subnet_id_to_string(i: u64) -> &'static str {
    if i < SYNC_COMMITTEE_SUBNET_COUNT {
-        &SYNC_SUBNET_ID_TO_STRING
+        SYNC_SUBNET_ID_TO_STRING
            .get(i as usize)
            .expect("index below SYNC_COMMITTEE_SUBNET_COUNT")
    } else {

@@ -77,7 +77,7 @@ fn process_pubkey_bytes_field(

fn process_slice_field(new_tree_hash: &[u8], leaf: &mut Hash256, force_update: bool) -> bool {
    if force_update || leaf.as_bytes() != new_tree_hash {
-        leaf.assign_from_slice(&new_tree_hash);
+        leaf.assign_from_slice(new_tree_hash);
        true
    } else {
        false

@@ -221,7 +221,7 @@ where
{
    fn from(sig: &GenericSignature<Pub, Sig>) -> Self {
        let mut agg = Self::infinity();
-        agg.add_assign(&sig);
+        agg.add_assign(sig);
        agg
    }
}
@@ -132,7 +132,7 @@ impl TPublicKey for blst_core::PublicKey {
                expected: PUBLIC_KEY_BYTES_LEN,
            });
        }
-        Self::key_validate(&bytes).map_err(Into::into)
+        Self::key_validate(bytes).map_err(Into::into)
    }
}

@@ -278,6 +278,6 @@ impl TSecretKey<blst_core::Signature, blst_core::PublicKey> for blst_core::Secre
    }

    fn deserialize(bytes: &[u8]) -> Result<Self, Error> {
-        Self::from_bytes(&bytes).map_err(Into::into)
+        Self::from_bytes(bytes).map_err(Into::into)
    }
}

@@ -146,7 +146,7 @@ impl TAggregateSignature<PublicKey, AggregatePublicKey, Signature> for Aggregate
    fn deserialize(bytes: &[u8]) -> Result<Self, Error> {
        let mut key = [0; SIGNATURE_BYTES_LEN];

-        key[..].copy_from_slice(&bytes);
+        key[..].copy_from_slice(bytes);

        Ok(Self(key))
    }

@@ -82,7 +82,7 @@ impl TPublicKey for milagro::PublicKey {
    }

    fn deserialize(bytes: &[u8]) -> Result<Self, Error> {
-        Self::from_bytes(&bytes).map_err(Into::into)
+        Self::from_bytes(bytes).map_err(Into::into)
    }
}

@@ -189,6 +189,6 @@ impl TSecretKey<milagro::Signature, milagro::PublicKey> for milagro::SecretKey {
    }

    fn deserialize(bytes: &[u8]) -> Result<Self, Error> {
-        Self::from_bytes(&bytes).map_err(Into::into)
+        Self::from_bytes(bytes).map_err(Into::into)
    }
}

@@ -127,7 +127,7 @@ fn mod_r(bytes: &[u8]) -> ZeroizeHash {
    debug_assert!(x_slice.len() <= HASH_SIZE);

    let mut output = ZeroizeHash::zero();
-    output.as_mut_bytes()[HASH_SIZE - x_slice.len()..].copy_from_slice(&x_slice);
+    output.as_mut_bytes()[HASH_SIZE - x_slice.len()..].copy_from_slice(x_slice);
    output
}
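The `deserialize(bytes: &[u8])` fixes above all drop a borrow of a parameter that is already `&[u8]`. A tiny sketch that makes the double reference visible with `std::any::type_name` (the functions here are illustrative, not part of the `bls` crate):

    fn name_of<T>(_: &T) -> &'static str {
        std::any::type_name::<T>()
    }

    fn deserialize(bytes: &[u8]) {
        // The parameter itself is already a slice reference...
        assert_eq!(name_of(&bytes), "&[u8]");
        // ...so borrowing it again yields a reference to a reference.
        let extra = &bytes;
        assert_eq!(name_of(&extra), "&&[u8]");
    }

    fn main() {
        deserialize(&[1, 2, 3]);
    }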
@@ -377,7 +377,7 @@ pub fn encrypt(

    password.retain(|c| !is_control_character(c));

-    let derived_key = derive_key(&password.as_ref(), &kdf)?;
+    let derived_key = derive_key(password.as_ref(), kdf)?;

    // Encrypt secret.
    let mut cipher_text = plain_text.to_vec();

@@ -389,7 +389,7 @@ pub fn encrypt(
            // AES Encrypt
            let key = GenericArray::from_slice(&derived_key.as_bytes()[0..16]);
            let nonce = GenericArray::from_slice(params.iv.as_bytes());
-            let mut cipher = AesCtr::new(&key, &nonce);
+            let mut cipher = AesCtr::new(key, nonce);
            cipher.apply_keystream(&mut cipher_text);
        }
    };

@@ -435,7 +435,7 @@ pub fn decrypt(password: &[u8], crypto: &Crypto) -> Result<PlainText, Error> {
            // AES Decrypt
            let key = GenericArray::from_slice(&derived_key.as_bytes()[0..16]);
            let nonce = GenericArray::from_slice(params.iv.as_bytes());
-            let mut cipher = AesCtr::new(&key, &nonce);
+            let mut cipher = AesCtr::new(key, nonce);
            cipher.apply_keystream(plain_text.as_mut_bytes());
        }
    };
@@ -41,7 +41,7 @@ fn scrypt_reference() {
    }
    "#;

-    assert!(Keystore::from_json_str(&vector).is_ok());
+    assert!(Keystore::from_json_str(vector).is_ok());
}

#[test]

@@ -79,7 +79,7 @@ fn pbkdf2_reference() {
    }
    "#;

-    assert!(Keystore::from_json_str(&vector).is_ok());
+    assert!(Keystore::from_json_str(vector).is_ok());
}

#[test]

@@ -119,7 +119,7 @@ fn additional_top_level_key() {
    }
    "#;

-    match Keystore::from_json_str(&vector) {
+    match Keystore::from_json_str(vector) {
        Err(Error::InvalidJson(_)) => {}
        _ => panic!("expected invalid json error"),
    }

@@ -162,7 +162,7 @@ fn additional_cipher_key() {
    }
    "#;

-    match Keystore::from_json_str(&vector) {
+    match Keystore::from_json_str(vector) {
        Err(Error::InvalidJson(_)) => {}
        _ => panic!("expected invalid json error"),
    }

@@ -205,7 +205,7 @@ fn additional_checksum_key() {
    }
    "#;

-    match Keystore::from_json_str(&vector) {
+    match Keystore::from_json_str(vector) {
        Err(Error::InvalidJson(_)) => {}
        _ => panic!("expected invalid json error"),
    }

@@ -248,7 +248,7 @@ fn additional_kdf_key() {
    }
    "#;

-    match Keystore::from_json_str(&vector) {
+    match Keystore::from_json_str(vector) {
        Err(Error::InvalidJson(_)) => {}
        _ => panic!("expected invalid json error"),
    }

@@ -291,7 +291,7 @@ fn additional_crypto_key() {
    }
    "#;

-    match Keystore::from_json_str(&vector) {
+    match Keystore::from_json_str(vector) {
        Err(Error::InvalidJson(_)) => {}
        _ => panic!("expected invalid json error"),
    }

@@ -333,7 +333,7 @@ fn bad_version() {
    }
    "#;

-    match Keystore::from_json_str(&vector) {
+    match Keystore::from_json_str(vector) {
        Err(Error::InvalidJson(_)) => {}
        _ => panic!("expected invalid json error"),
    }

@@ -377,7 +377,7 @@ fn json_bad_checksum() {
    "#;

    assert_eq!(
-        Keystore::from_json_str(&vector)
+        Keystore::from_json_str(vector)
            .unwrap()
            .decrypt_keypair("testpassword".as_bytes())
            .err()

@@ -422,7 +422,7 @@ fn kdf_function() {
    }
    "#;

-    match Keystore::from_json_str(&vector) {
+    match Keystore::from_json_str(vector) {
        Err(Error::InvalidJson(_)) => {}
        _ => panic!("expected invalid json error"),
    }

@@ -463,7 +463,7 @@ fn missing_scrypt_param() {
    }
    "#;

-    match Keystore::from_json_str(&vector) {
+    match Keystore::from_json_str(vector) {
        Err(Error::InvalidJson(_)) => {}
        _ => panic!("expected invalid json error"),
    }

@@ -506,7 +506,7 @@ fn additional_scrypt_param() {
    }
    "#;

-    match Keystore::from_json_str(&vector) {
+    match Keystore::from_json_str(vector) {
        Err(Error::InvalidJson(_)) => {}
        _ => panic!("expected invalid json error"),
    }

@@ -548,7 +548,7 @@ fn checksum_function() {
    }
    "#;

-    match Keystore::from_json_str(&vector) {
+    match Keystore::from_json_str(vector) {
        Err(Error::InvalidJson(_)) => {}
        _ => panic!("expected invalid json error"),
    }

@@ -592,7 +592,7 @@ fn checksum_params() {
    }
    "#;

-    match Keystore::from_json_str(&vector) {
+    match Keystore::from_json_str(vector) {
        Err(Error::InvalidJson(_)) => {}
        _ => panic!("expected invalid json error"),
    }

@@ -634,7 +634,7 @@ fn kdf_message() {
    }
    "#;

-    match Keystore::from_json_str(&vector) {
+    match Keystore::from_json_str(vector) {
        Err(Error::InvalidJson(_)) => {}
        _ => panic!("expected invalid json error"),
    }

@@ -676,7 +676,7 @@ fn cipher_function() {
    }
    "#;

-    match Keystore::from_json_str(&vector) {
+    match Keystore::from_json_str(vector) {
        Err(Error::InvalidJson(_)) => {}
        _ => panic!("expected invalid json error"),
    }

@@ -719,7 +719,7 @@ fn additional_cipher_param() {
    }
    "#;

-    match Keystore::from_json_str(&vector) {
+    match Keystore::from_json_str(vector) {
        Err(Error::InvalidJson(_)) => {}
        _ => panic!("expected invalid json error"),
    }

@@ -759,7 +759,7 @@ fn missing_cipher_param() {
    }
    "#;

-    match Keystore::from_json_str(&vector) {
+    match Keystore::from_json_str(vector) {
        Err(Error::InvalidJson(_)) => {}
        _ => panic!("expected invalid json error"),
    }

@@ -800,7 +800,7 @@ fn missing_pubkey() {
    }
    "#;

-    match Keystore::from_json_str(&vector) {
+    match Keystore::from_json_str(vector) {
        Err(Error::InvalidJson(_)) => {}
        _ => panic!("expected invalid json error"),
    }

@@ -841,7 +841,7 @@ fn missing_path() {
    }
    "#;

-    assert!(Keystore::from_json_str(&vector).is_ok());
+    assert!(Keystore::from_json_str(vector).is_ok());
}

#[test]

@@ -879,7 +879,7 @@ fn missing_version() {
    }
    "#;

-    match Keystore::from_json_str(&vector) {
+    match Keystore::from_json_str(vector) {
        Err(Error::InvalidJson(_)) => {}
        _ => panic!("expected invalid json error"),
    }

@@ -920,7 +920,7 @@ fn pbkdf2_bad_hmac() {
    }
    "#;

-    match Keystore::from_json_str(&vector) {
+    match Keystore::from_json_str(vector) {
        Err(Error::InvalidJson(_)) => {}
        _ => panic!("expected invalid json error"),
    }

@@ -962,7 +962,7 @@ fn pbkdf2_additional_parameter() {
    }
    "#;

-    match Keystore::from_json_str(&vector) {
+    match Keystore::from_json_str(vector) {
        Err(Error::InvalidJson(_)) => {}
        _ => panic!("expected invalid json error"),
    }

@@ -1002,7 +1002,7 @@ fn pbkdf2_missing_parameter() {
    }
    "#;

-    match Keystore::from_json_str(&vector) {
+    match Keystore::from_json_str(vector) {
        Err(Error::InvalidJson(_)) => {}
        _ => panic!("expected invalid json error"),
    }

@@ -1045,5 +1045,5 @@ fn name_field() {
    }
    "#;

-    assert!(Keystore::from_json_str(&vector).is_ok());
+    assert!(Keystore::from_json_str(vector).is_ok());
}
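Every keystore test above follows one shape: `vector` is a raw string literal, which is already a `&'static str`, and `Keystore::from_json_str` takes `&str`, so `&vector` was a needless `&&str`. A stand-in sketch (`check` is hypothetical, not the eth2_keystore API):

    // Hypothetical stand-in for `Keystore::from_json_str(&str)`.
    fn check(json: &str) -> bool {
        json.trim_start().starts_with('{')
    }

    fn main() {
        let vector = r#"{ "version": 4 }"#; // `vector: &'static str`
        // Before: `check(&vector)` passed a needless `&&str`.
        assert!(check(vector));
    }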
@@ -5,7 +5,7 @@ use eth2_keystore::{Error, Keystore};
const PASSWORD: &str = "testpassword";

fn decrypt_error(vector: &str) -> Error {
-    Keystore::from_json_str(&vector)
+    Keystore::from_json_str(vector)
        .unwrap()
        .decrypt_keypair(PASSWORD.as_bytes())
        .err()
@@ -138,7 +138,7 @@ impl Wallet {
        name: String,
        nextaccount: u32,
    ) -> Result<Self, Error> {
-        let (cipher_text, checksum) = encrypt(&seed, &password, &kdf, &cipher)?;
+        let (cipher_text, checksum) = encrypt(seed, password, &kdf, &cipher)?;

        Ok(Self {
            json: JsonWallet {

@@ -192,7 +192,7 @@ impl Wallet {
        // incrementing `nextaccount`.
        let derive = |key_type: KeyType, password: &[u8]| -> Result<Keystore, Error> {
            let (secret, path) =
-                recover_validator_secret(&self, wallet_password, self.json.nextaccount, key_type)?;
+                recover_validator_secret(self, wallet_password, self.json.nextaccount, key_type)?;

            let keypair = keypair_from_secret(secret.as_bytes())?;

@@ -48,7 +48,7 @@ fn eip2386_test_vector_scrypt() {
    }
    "#;

-    let wallet = decode_and_check_seed(&vector);
+    let wallet = decode_and_check_seed(vector);
    assert_eq!(
        *wallet.uuid(),
        Uuid::parse_str("b74559b8-ed56-4841-b25c-dba1b7c9d9d5").unwrap(),
@@ -1,7 +1,7 @@
use eth2_wallet::{Error, KeystoreError, Wallet};

fn assert_bad_json(json: &str) {
-    match Wallet::from_json_str(&json) {
+    match Wallet::from_json_str(json) {
        Err(Error::KeystoreError(KeystoreError::InvalidJson(_))) => {}
        _ => panic!("expected invalid json error"),
    }

@@ -48,7 +48,7 @@ fn additional_top_level_param() {
    }
    "#;

-    assert_bad_json(&vector);
+    assert_bad_json(vector);
}

#[test]

@@ -86,7 +86,7 @@ fn missing_top_level_param() {
    }
    "#;

-    assert_bad_json(&vector);
+    assert_bad_json(vector);
}

#[test]

@@ -125,7 +125,7 @@ fn bad_version() {
    }
    "#;

-    assert_bad_json(&vector);
+    assert_bad_json(vector);
}

#[test]

@@ -164,7 +164,7 @@ fn bad_uuid() {
    }
    "#;

-    assert_bad_json(&vector);
+    assert_bad_json(vector);
}

#[test]

@@ -203,7 +203,7 @@ fn bad_type() {
    }
    "#;

-    assert_bad_json(&vector);
+    assert_bad_json(vector);
}

#[test]

@@ -242,5 +242,5 @@ fn more_that_u32_nextaccount() {
    }
    "#;

-    assert_bad_json(&vector);
+    assert_bad_json(vector);
}
@@ -47,7 +47,7 @@ pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> {
    let mut enr_file = File::create(output_dir.join(ENR_FILENAME))
        .map_err(|e| format!("Unable to create {}: {:?}", ENR_FILENAME, e))?;
    enr_file
-        .write_all(&enr.to_base64().as_bytes())
+        .write_all(enr.to_base64().as_bytes())
        .map_err(|e| format!("Unable to write ENR to {}: {:?}", ENR_FILENAME, e))?;

    let secret_bytes = match local_keypair {

@@ -30,7 +30,7 @@ pub fn run_parse_ssz<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> {
}

fn decode_and_print<T: Decode + Serialize>(bytes: &[u8]) -> Result<(), String> {
-    let item = T::from_ssz_bytes(&bytes).map_err(|e| format!("SSZ decode failed: {:?}", e))?;
+    let item = T::from_ssz_bytes(bytes).map_err(|e| format!("SSZ decode failed: {:?}", e))?;

    println!(
        "{}",

@@ -364,7 +364,7 @@ fn run<E: EthSpec>(
    let context = environment.core_context();
    let log = context.log().clone();
    let executor = context.executor.clone();
-    let config = validator_client::Config::from_cli(&matches, context.log())
+    let config = validator_client::Config::from_cli(matches, context.log())
        .map_err(|e| format!("Unable to initialize validator config: {}", e))?;
    let shutdown_flag = matches.is_present("immediate-shutdown");
    if let Some(dump_path) = clap_utils::parse_optional::<PathBuf>(matches, "dump-config")?

@@ -73,7 +73,7 @@ mod object {
    #[test]
    fn v_u8_zeroized() {
        // Create from `hex_string_to_bytes`, and record the pointer to its buffer.
-        let mut decoded_bytes = hex_string_to_bytes(&SECRET_KEY_1.to_string()).unwrap();
+        let mut decoded_bytes = hex_string_to_bytes(SECRET_KEY_1).unwrap();
        let old_pointer = decoded_bytes.as_ptr() as usize;

        // Do something with the borrowed vector, and zeroize.

@@ -185,17 +185,17 @@ mod functions {
        );

        assert_eq!(
-            hex_string_to_bytes(&SECRET_KEY_1).unwrap(),
+            hex_string_to_bytes(SECRET_KEY_1).unwrap(),
            SECRET_KEY_1_BYTES
        );

        assert_eq!(
-            hex_string_to_bytes(&PUBLIC_KEY_1).unwrap(),
+            hex_string_to_bytes(PUBLIC_KEY_1).unwrap(),
            PUBLIC_KEY_1_BYTES.to_vec()
        );

        assert_eq!(
-            hex_string_to_bytes(&SIGNING_ROOT).unwrap(),
+            hex_string_to_bytes(SIGNING_ROOT).unwrap(),
            SIGNING_ROOT_BYTES.to_vec()
        );
@@ -214,7 +214,7 @@ impl<T: BeaconChainTypes> SlasherService<T> {
            // Publish to the network if broadcast is enabled.
            if slasher.config().broadcast {
                if let Err(e) =
-                    Self::publish_attester_slashing(&beacon_chain, &network_sender, slashing)
+                    Self::publish_attester_slashing(beacon_chain, network_sender, slashing)
                {
                    debug!(
                        log,

@@ -267,7 +267,7 @@ impl<T: BeaconChainTypes> SlasherService<T> {

            if slasher.config().broadcast {
                if let Err(e) =
-                    Self::publish_proposer_slashing(&beacon_chain, &network_sender, slashing)
+                    Self::publish_proposer_slashing(beacon_chain, network_sender, slashing)
                {
                    debug!(
                        log,

@@ -244,7 +244,7 @@ impl<E: EthSpec> Slasher<E> {
            let slashing_status = self.db.check_and_update_attester_record(
                txn,
                validator_index,
-                &attestation,
+                attestation,
                attester_record,
            )?;
@@ -41,7 +41,7 @@ pub fn compare_beacon_state_results_without_caches<T: EthSpec, E: Debug>(
        expected.drop_all_caches().unwrap();
    }

-    compare_result_detailed(&result, &expected)
+    compare_result_detailed(result, expected)
}

/// Same as `compare_result`, however utilizes the `CompareFields` trait to give a list of

@@ -129,8 +129,8 @@ impl<E: EthSpec> EpochTransition<E> for Slashings {
    fn run(state: &mut BeaconState<E>, spec: &ChainSpec) -> Result<(), EpochProcessingError> {
        match state {
            BeaconState::Base(_) => {
-                let mut validator_statuses = base::ValidatorStatuses::new(&state, spec)?;
-                validator_statuses.process_attestations(&state)?;
+                let mut validator_statuses = base::ValidatorStatuses::new(state, spec)?;
+                validator_statuses.process_attestations(state)?;
                process_slashings(
                    state,
                    validator_statuses.total_balances.current_epoch(),

@@ -53,7 +53,7 @@ type Accessor = fn(&AttestationDelta) -> &Deltas;

fn load_optional_deltas_file(path: &Path) -> Result<Option<Deltas>, Error> {
    let deltas = if path.is_file() {
-        Some(ssz_decode_file(&path)?)
+        Some(ssz_decode_file(path)?)
    } else {
        None
    };

@@ -11,7 +11,7 @@ pub fn assert_tests_pass(handler_name: &str, path: &Path, results: &[CaseResult]
        &failed,
        &skipped_bls,
        &skipped_known_failures,
-        &results,
+        results,
    );
    if !failed.is_empty() {
        panic!("Tests failed (see above)");
@@ -93,7 +93,7 @@ impl SlashingDatabase {

    /// Open an existing `SlashingDatabase` from disk.
    pub fn open(path: &Path) -> Result<Self, NotSafe> {
-        let conn_pool = Self::open_conn_pool(&path)?;
+        let conn_pool = Self::open_conn_pool(path)?;
        Ok(Self { conn_pool })
    }

@@ -159,7 +159,7 @@ impl SlashingDatabase {
    ) -> Result<(), NotSafe> {
        let mut stmt = txn.prepare("INSERT INTO validators (public_key) VALUES (?1)")?;
        for pubkey in public_keys {
-            if self.get_validator_id_opt(&txn, pubkey)?.is_none() {
+            if self.get_validator_id_opt(txn, pubkey)?.is_none() {
                stmt.execute(&[pubkey.as_hex_string()])?;
            }
        }

@@ -481,10 +481,10 @@ impl SlashingDatabase {
        signing_root: SigningRoot,
        txn: &Transaction,
    ) -> Result<Safe, NotSafe> {
-        let safe = self.check_block_proposal(&txn, validator_pubkey, slot, signing_root)?;
+        let safe = self.check_block_proposal(txn, validator_pubkey, slot, signing_root)?;

        if safe != Safe::SameData {
-            self.insert_block_proposal(&txn, validator_pubkey, slot, signing_root)?;
+            self.insert_block_proposal(txn, validator_pubkey, slot, signing_root)?;
        }
        Ok(safe)
    }

@@ -541,7 +541,7 @@ impl SlashingDatabase {
        txn: &Transaction,
    ) -> Result<Safe, NotSafe> {
        let safe = self.check_attestation(
-            &txn,
+            txn,
            validator_pubkey,
            att_source_epoch,
            att_target_epoch,

@@ -550,7 +550,7 @@ impl SlashingDatabase {

        if safe != Safe::SameData {
            self.insert_attestation(
-                &txn,
+                txn,
                validator_pubkey,
                att_source_epoch,
                att_target_epoch,

@@ -695,7 +695,7 @@ impl SlashingDatabase {
            .query_and_then(params![], |row| {
                let validator_pubkey: String = row.get(0)?;
                let slot = row.get(1)?;
-                let signing_root = Some(hash256_from_row(2, &row)?);
+                let signing_root = Some(hash256_from_row(2, row)?);
                let signed_block = InterchangeBlock { slot, signing_root };
                data.entry(validator_pubkey)
                    .or_insert_with(|| (vec![], vec![]))

@@ -715,7 +715,7 @@ impl SlashingDatabase {
                let validator_pubkey: String = row.get(0)?;
                let source_epoch = row.get(1)?;
                let target_epoch = row.get(2)?;
-                let signing_root = Some(hash256_from_row(3, &row)?);
+                let signing_root = Some(hash256_from_row(3, row)?);
                let signed_attestation = InterchangeAttestation {
                    source_epoch,
                    target_epoch,
@@ -183,7 +183,7 @@ impl Config {
            // Copy the provided bytes over.
            //
            // Panic-free because `graffiti_bytes.len()` <= `GRAFFITI_BYTES_LEN`.
-            graffiti[..graffiti_bytes.len()].copy_from_slice(&graffiti_bytes);
+            graffiti[..graffiti_bytes.len()].copy_from_slice(graffiti_bytes);

            config.graffiti = Some(graffiti.into());
        }

@@ -378,7 +378,7 @@ async fn poll_beacon_attesters<T: SlotClock + 'static, E: EthSpec>(

    // Download the duties and update the duties for the current epoch.
    if let Err(e) = poll_beacon_attesters_for_epoch(
-        &duties_service,
+        duties_service,
        current_epoch,
        &local_indices,
        &local_pubkeys,

@@ -402,7 +402,7 @@ async fn poll_beacon_attesters<T: SlotClock + 'static, E: EthSpec>(

    // Download the duties and update the duties for the next epoch.
    if let Err(e) =
-        poll_beacon_attesters_for_epoch(&duties_service, next_epoch, &local_indices, &local_pubkeys)
+        poll_beacon_attesters_for_epoch(duties_service, next_epoch, &local_indices, &local_pubkeys)
            .await
    {
        error!(

@@ -431,7 +431,7 @@ async fn poll_beacon_attesters<T: SlotClock + 'static, E: EthSpec>(
        .attesters
        .read()
        .iter()
-        .filter_map(|(_, map)| map.get(&epoch))
+        .filter_map(|(_, map)| map.get(epoch))
        // The BN logs a warning if we try and subscribe to current or near-by slots. Give it a
        // buffer.
        .filter(|(_, duty_and_proof)| {

@@ -636,7 +636,7 @@ async fn poll_beacon_proposers<T: SlotClock + 'static, E: EthSpec>(
            current_slot,
            &initial_block_proposers,
            block_service_tx,
-            &log,
+            log,
        )
        .await;

@@ -723,7 +723,7 @@ async fn poll_beacon_proposers<T: SlotClock + 'static, E: EthSpec>(
                current_slot,
                &additional_block_producers,
                block_service_tx,
-                &log,
+                log,
            )
            .await;
            debug!(
@@ -9,6 +9,7 @@ use bls::PublicKeyBytes;
use types::{graffiti::GraffitiString, Graffiti};

#[derive(Debug)]
+#[allow(clippy::enum_variant_names)]
pub enum Error {
    InvalidFile(std::io::Error),
    InvalidLine(String),

@@ -91,7 +92,7 @@ fn read_line(line: &str) -> Result<(Option<PublicKeyBytes>, Graffiti), Error> {
        if key == "default" {
            Ok((None, graffiti))
        } else {
-            let pk = PublicKeyBytes::from_str(&key).map_err(Error::InvalidPublicKey)?;
+            let pk = PublicKeyBytes::from_str(key).map_err(Error::InvalidPublicKey)?;
            Ok((Some(pk), graffiti))
        }
    } else {
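The one non-borrow change in this diff is the new `#[allow(clippy::enum_variant_names)]` above: that lint fires when every variant of an enum shares a common prefix or suffix, as the `Invalid*` variants here do, and the attribute silences it without renaming the variants. A minimal sketch of what the lint objects to:

    // Without the allow, clippy warns that every variant starts with "Invalid".
    #[allow(clippy::enum_variant_names)]
    #[derive(Debug)]
    enum Error {
        InvalidFile(String),
        InvalidLine(String),
        InvalidPublicKey(String),
    }

    fn main() {
        let e = Error::InvalidLine("bad graffiti line".to_string());
        println!("{:?}", e);
    }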
Some files were not shown because too many files have changed in this diff.