From 50ef0d7fbf28001021bf65362a0b9dfa54fafcda Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 20 Apr 2020 12:34:37 +1000 Subject: [PATCH 1/4] Check attestation shuffling when producing blocks (#900) Closes #845 --- beacon_node/beacon_chain/src/beacon_chain.rs | 88 ++++- beacon_node/beacon_chain/src/test_utils.rs | 4 +- .../tests/attestation_production.rs | 4 +- beacon_node/beacon_chain/tests/store_tests.rs | 305 +++++++++++++++++- eth2/operation_pool/src/lib.rs | 15 +- .../src/proto_array.rs | 37 +++ 6 files changed, 429 insertions(+), 24 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 2d1ee5c8a..8f4ef7c64 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -25,6 +25,7 @@ use state_processing::{ }; use std::borrow::Cow; use std::cmp::Ordering; +use std::collections::HashMap; use std::fs; use std::io::prelude::*; use std::sync::Arc; @@ -1112,6 +1113,76 @@ impl BeaconChain { } } + /// Check that the shuffling at `block_root` is equal to one of the shufflings of `state`. + /// + /// The `target_epoch` argument determines which shuffling to check compatibility with, it + /// should be equal to the current or previous epoch of `state`, or else `false` will be + /// returned. + /// + /// The compatibility check is designed to be fast: we check that the block that + /// determined the RANDAO mix for the `target_epoch` matches the ancestor of the block + /// identified by `block_root` (at that slot). + pub fn shuffling_is_compatible( + &self, + block_root: &Hash256, + target_epoch: Epoch, + state: &BeaconState, + ) -> bool { + let slots_per_epoch = T::EthSpec::slots_per_epoch(); + let shuffling_lookahead = 1 + self.spec.min_seed_lookahead.as_u64(); + + // Shuffling can't have changed if we're in the first few epochs + if state.current_epoch() < shuffling_lookahead { + return true; + } + + // Otherwise the shuffling is determined by the block at the end of the target epoch + // minus the shuffling lookahead (usually 2). We call this the "pivot". + let pivot_slot = + if target_epoch == state.previous_epoch() || target_epoch == state.current_epoch() { + (target_epoch - shuffling_lookahead).end_slot(slots_per_epoch) + } else { + return false; + }; + + let state_pivot_block_root = match state.get_block_root(pivot_slot) { + Ok(root) => *root, + Err(e) => { + warn!( + &self.log, + "Missing pivot block root for attestation"; + "slot" => pivot_slot, + "error" => format!("{:?}", e), + ); + return false; + } + }; + + // Use fork choice's view of the block DAG to quickly evaluate whether the attestation's + // pivot block is the same as the current state's pivot block. If it is, then the + // attestation's shuffling is the same as the current state's. + // To account for skipped slots, find the first block at *or before* the pivot slot. + let fork_choice_lock = self.fork_choice.core_proto_array(); + let pivot_block_root = fork_choice_lock + .iter_block_roots(block_root) + .find(|(_, slot)| *slot <= pivot_slot) + .map(|(block_root, _)| block_root); + drop(fork_choice_lock); + + match pivot_block_root { + Some(root) => root == state_pivot_block_root, + None => { + debug!( + &self.log, + "Discarding attestation because of missing ancestor"; + "pivot_slot" => pivot_slot.as_u64(), + "block_root" => format!("{:?}", block_root), + ); + false + } + } + } + /// Accept some exit and queue it for inclusion in an appropriate block. 
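To make the pivot arithmetic in `shuffling_is_compatible` concrete, here is a minimal sketch (not part of the patch) assuming `MinimalEthSpec`-like parameters, where `slots_per_epoch = 8` and `min_seed_lookahead = 1`, hence `shuffling_lookahead = 2`:

    // Hypothetical helper mirroring the pivot computation above; `Epoch` and
    // `Slot` are the types from `eth2/types`.
    fn pivot_slot_for(target_epoch: Epoch, slots_per_epoch: u64, shuffling_lookahead: u64) -> Slot {
        // The shuffling for `target_epoch` was fixed by the RANDAO mix at the
        // end of epoch `target_epoch - shuffling_lookahead`.
        (target_epoch - shuffling_lookahead).end_slot(slots_per_epoch)
    }

    // e.g. pivot_slot_for(Epoch::new(5), 8, 2) == Slot::new(31): two chains
    // share the epoch-5 shuffling iff they agree on the block at (or before)
    // slot 31, the last slot of epoch 3.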
pub fn process_voluntary_exit( &self, @@ -1638,6 +1709,21 @@ impl BeaconChain { .deposits_for_block_inclusion(&state, ð1_data, &self.spec)? .into(); + // Map from attestation head block root to shuffling compatibility. + // Used to memoize the `attestation_shuffling_is_compatible` function. + let mut shuffling_filter_cache = HashMap::new(); + let attestation_filter = |att: &&Attestation| -> bool { + *shuffling_filter_cache + .entry((att.data.beacon_block_root, att.data.target.epoch)) + .or_insert_with(|| { + self.shuffling_is_compatible( + &att.data.beacon_block_root, + att.data.target.epoch, + &state, + ) + }) + }; + let mut block = SignedBeaconBlock { message: BeaconBlock { slot: state.slot, @@ -1652,7 +1738,7 @@ impl BeaconChain { attester_slashings: attester_slashings.into(), attestations: self .op_pool - .get_attestations(&state, &self.spec) + .get_attestations(&state, attestation_filter, &self.spec) .map_err(BlockProductionError::OpPoolError)? .into(), deposits, diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index d9d15b7b9..ad957c874 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -353,7 +353,9 @@ where .process_attestation(attestation) .expect("should not error during attestation processing") { - AttestationProcessingOutcome::Processed => (), + // PastEpoch can occur if we fork over several epochs + AttestationProcessingOutcome::Processed + | AttestationProcessingOutcome::PastEpoch { .. } => (), other => panic!("did not successfully process attestation: {:?}", other), } }); diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index 3b84a78ff..5a305e7cc 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -40,8 +40,8 @@ fn produces_attestations() { let state = &harness.chain.head().expect("should get head").beacon_state; assert_eq!(state.slot, num_blocks_produced, "head should have updated"); - assert!( - state.finalized_checkpoint.epoch > 0, + assert_ne!( + state.finalized_checkpoint.epoch, 0, "head should have updated" ); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index ea36a0125..c4dd0b58c 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -20,11 +20,12 @@ use types::test_utils::{SeedableRng, XorShiftRng}; use types::*; // Should ideally be divisible by 3. -pub const VALIDATOR_COUNT: usize = 24; +pub const LOW_VALIDATOR_COUNT: usize = 24; +pub const HIGH_VALIDATOR_COUNT: usize = 64; lazy_static! { /// A cached set of keys. 
- static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); + static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(HIGH_VALIDATOR_COUNT); } type E = MinimalEthSpec; @@ -57,7 +58,7 @@ fn full_participation_no_skips() { let num_blocks_produced = E::slots_per_epoch() * 5; let db_path = tempdir().unwrap(); let store = get_store(&db_path); - let harness = get_harness(store.clone(), VALIDATOR_COUNT); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); harness.extend_chain( num_blocks_produced as usize, @@ -77,7 +78,7 @@ fn randomised_skips() { let mut num_blocks_produced = 0; let db_path = tempdir().unwrap(); let store = get_store(&db_path); - let harness = get_harness(store.clone(), VALIDATOR_COUNT); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); let rng = &mut XorShiftRng::from_seed([42; 16]); let mut head_slot = 0; @@ -113,14 +114,16 @@ fn randomised_skips() { fn long_skip() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); - let harness = get_harness(store.clone(), VALIDATOR_COUNT); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); // Number of blocks to create in the first run, intentionally not falling on an epoch // boundary in order to check that the DB hot -> cold migration is capable of reaching // back across the skip distance, and correctly migrating those extra non-finalized states. let initial_blocks = E::slots_per_epoch() * 5 + E::slots_per_epoch() / 2; let skip_slots = E::slots_per_historical_root() as u64 * 8; - let final_blocks = E::slots_per_epoch() * 4; + // Create the minimum ~2.5 epochs of extra blocks required to re-finalize the chain. + // Having this set lower ensures that we start justifying and finalizing quickly after a skip. + let final_blocks = 2 * E::slots_per_epoch() + E::slots_per_epoch() / 2; harness.extend_chain( initial_blocks as usize, @@ -223,7 +226,7 @@ fn split_slot_restore() { let split_slot = { let store = get_store(&db_path); - let harness = get_harness(store.clone(), VALIDATOR_COUNT); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); let num_blocks = 4 * E::slots_per_epoch(); @@ -251,10 +254,10 @@ fn epoch_boundary_state_attestation_processing() { let num_blocks_produced = E::slots_per_epoch() * 5; let db_path = tempdir().unwrap(); let store = get_store(&db_path); - let harness = get_harness(store.clone(), VALIDATOR_COUNT); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); let late_validators = vec![0, 1]; - let timely_validators = (2..VALIDATOR_COUNT).collect::>(); + let timely_validators = (2..LOW_VALIDATOR_COUNT).collect::>(); let mut late_attestations = vec![]; @@ -333,7 +336,7 @@ fn epoch_boundary_state_attestation_processing() { fn delete_blocks_and_states() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); - let harness = get_harness(store.clone(), VALIDATOR_COUNT); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); let unforked_blocks = 4 * E::slots_per_epoch(); @@ -345,13 +348,11 @@ fn delete_blocks_and_states() { ); // Create a fork post-finalization. 
- let two_thirds = (VALIDATOR_COUNT / 3) * 2; + let two_thirds = (LOW_VALIDATOR_COUNT / 3) * 2; let honest_validators: Vec = (0..two_thirds).collect(); - let faulty_validators: Vec = (two_thirds..VALIDATOR_COUNT).collect(); + let faulty_validators: Vec = (two_thirds..LOW_VALIDATOR_COUNT).collect(); - // NOTE: should remove this -1 and/or write a similar test once #845 is resolved - // https://github.com/sigp/lighthouse/issues/845 - let fork_blocks = 2 * E::slots_per_epoch() - 1; + let fork_blocks = 2 * E::slots_per_epoch(); let (honest_head, faulty_head) = harness.generate_two_forks_by_skipping_a_block( &honest_validators, @@ -425,6 +426,280 @@ fn delete_blocks_and_states() { check_chain_dump(&harness, unforked_blocks + fork_blocks + 1); } +// Check that we never produce invalid blocks when there is deep forking that changes the shuffling. +// See https://github.com/sigp/lighthouse/issues/845 +fn multi_epoch_fork_valid_blocks_test( + initial_blocks: usize, + num_fork1_blocks: usize, + num_fork2_blocks: usize, + num_fork1_validators: usize, +) -> (TempDir, TestHarness, Hash256, Hash256) { + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + // Create the initial portion of the chain + if initial_blocks > 0 { + harness.extend_chain( + initial_blocks, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + } + + assert!(num_fork1_validators <= LOW_VALIDATOR_COUNT); + let fork1_validators: Vec = (0..num_fork1_validators).collect(); + let fork2_validators: Vec = (num_fork1_validators..LOW_VALIDATOR_COUNT).collect(); + + let (head1, head2) = harness.generate_two_forks_by_skipping_a_block( + &fork1_validators, + &fork2_validators, + num_fork1_blocks, + num_fork2_blocks, + ); + + (db_path, harness, head1, head2) +} + +// This is the minimal test of block production with different shufflings. +#[test] +fn block_production_different_shuffling_early() { + let slots_per_epoch = E::slots_per_epoch() as usize; + multi_epoch_fork_valid_blocks_test( + slots_per_epoch - 2, + slots_per_epoch + 3, + slots_per_epoch + 3, + LOW_VALIDATOR_COUNT / 2, + ); +} + +#[test] +fn block_production_different_shuffling_long() { + let slots_per_epoch = E::slots_per_epoch() as usize; + multi_epoch_fork_valid_blocks_test( + 2 * slots_per_epoch - 2, + 3 * slots_per_epoch, + 3 * slots_per_epoch, + LOW_VALIDATOR_COUNT / 2, + ); +} + +// Check that the op pool safely includes multiple attestations per block when necessary. +// This checks the correctness of the shuffling compatibility memoization. 
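That memoization is the `shuffling_filter_cache` HashMap in `beacon_chain.rs` above: the expensive `shuffling_is_compatible` call runs once per distinct `(block_root, target_epoch)` key rather than once per attestation. A minimal sketch of the pattern, with a hypothetical `expensive_check` standing in for the real predicate, ahead of the test below that exercises it:

    use std::collections::HashMap;

    // Each distinct key is checked once; repeat lookups hit the cache.
    // `Hash256` and `Epoch` are the eth2 types (both `Copy + Eq + Hash`).
    fn filter_memoized<F>(keys: &[(Hash256, Epoch)], mut expensive_check: F) -> Vec<bool>
    where
        F: FnMut(&Hash256, Epoch) -> bool,
    {
        let mut cache: HashMap<(Hash256, Epoch), bool> = HashMap::new();
        keys.iter()
            .map(|(root, epoch)| {
                *cache
                    .entry((*root, *epoch))
                    .or_insert_with(|| expensive_check(root, *epoch))
            })
            .collect()
    }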
+#[test] +fn multiple_attestations_per_block() { + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store, HIGH_VALIDATOR_COUNT); + let chain = &harness.chain; + + harness.extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 3, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + + let head = chain.head().unwrap(); + let committees_per_slot = head + .beacon_state + .get_committee_count_at_slot(head.beacon_state.slot) + .unwrap(); + assert!(committees_per_slot > 1); + + for snapshot in chain.chain_dump().unwrap() { + assert_eq!( + snapshot.beacon_block.message.body.attestations.len() as u64, + if snapshot.beacon_block.slot() <= 1 { + 0 + } else { + committees_per_slot + } + ); + } +} + +#[test] +fn shuffling_compatible_linear_chain() { + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + // Skip the block at the end of the first epoch. + let head_block_root = harness.extend_chain( + 4 * E::slots_per_epoch() as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + + check_shuffling_compatible( + &harness, + &get_state_for_block(&harness, head_block_root), + head_block_root, + true, + true, + None, + None, + ); +} + +#[test] +fn shuffling_compatible_missing_pivot_block() { + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + // Skip the block at the end of the first epoch. + harness.extend_chain( + E::slots_per_epoch() as usize - 2, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + harness.advance_slot(); + harness.advance_slot(); + let head_block_root = harness.extend_chain( + 2 * E::slots_per_epoch() as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + + check_shuffling_compatible( + &harness, + &get_state_for_block(&harness, head_block_root), + head_block_root, + true, + true, + Some(E::slots_per_epoch() - 2), + Some(E::slots_per_epoch() - 2), + ); +} + +#[test] +fn shuffling_compatible_simple_fork() { + let slots_per_epoch = E::slots_per_epoch() as usize; + let (db_path, harness, head1, head2) = multi_epoch_fork_valid_blocks_test( + 2 * slots_per_epoch, + 3 * slots_per_epoch, + 3 * slots_per_epoch, + LOW_VALIDATOR_COUNT / 2, + ); + + let head1_state = get_state_for_block(&harness, head1); + let head2_state = get_state_for_block(&harness, head2); + + check_shuffling_compatible(&harness, &head1_state, head1, true, true, None, None); + check_shuffling_compatible(&harness, &head1_state, head2, false, false, None, None); + check_shuffling_compatible(&harness, &head2_state, head1, false, false, None, None); + check_shuffling_compatible(&harness, &head2_state, head2, true, true, None, None); + + drop(db_path); +} + +#[test] +fn shuffling_compatible_short_fork() { + let slots_per_epoch = E::slots_per_epoch() as usize; + let (db_path, harness, head1, head2) = multi_epoch_fork_valid_blocks_test( + 2 * slots_per_epoch - 2, + slots_per_epoch + 2, + slots_per_epoch + 2, + LOW_VALIDATOR_COUNT / 2, + ); + + let head1_state = get_state_for_block(&harness, head1); + let head2_state = get_state_for_block(&harness, head2); + + check_shuffling_compatible(&harness, &head1_state, head1, true, true, None, None); + check_shuffling_compatible(&harness, &head1_state, head2, false, true, None, None); + // NOTE: don't check this case, as block 14 from the first chain 
appears valid on the second + // chain due to it matching the second chain's block 15. + // check_shuffling_compatible(&harness, &head2_state, head1, false, true, None, None); + check_shuffling_compatible( + &harness, + &head2_state, + head2, + true, + true, + // Required because of the skipped slot. + Some(2 * E::slots_per_epoch() - 2), + None, + ); + + drop(db_path); +} + +fn get_state_for_block(harness: &TestHarness, block_root: Hash256) -> BeaconState { + let head_block = harness.chain.get_block(&block_root).unwrap().unwrap(); + harness + .chain + .get_state(&head_block.state_root(), Some(head_block.slot())) + .unwrap() + .unwrap() +} + +/// Check the invariants that apply to `shuffling_is_compatible`. +fn check_shuffling_compatible( + harness: &TestHarness, + head_state: &BeaconState, + head_block_root: Hash256, + current_epoch_valid: bool, + previous_epoch_valid: bool, + current_epoch_cutoff_slot: Option, + previous_epoch_cutoff_slot: Option, +) { + let shuffling_lookahead = harness.chain.spec.min_seed_lookahead.as_u64() + 1; + let current_pivot_slot = + (head_state.current_epoch() - shuffling_lookahead).end_slot(E::slots_per_epoch()); + let previous_pivot_slot = + (head_state.previous_epoch() - shuffling_lookahead).end_slot(E::slots_per_epoch()); + + for (block_root, slot) in harness + .chain + .rev_iter_block_roots_from(head_block_root) + .unwrap() + { + // Shuffling is compatible targeting the current epoch, + // iff slot is greater than or equal to the current epoch pivot block + assert_eq!( + harness.chain.shuffling_is_compatible( + &block_root, + head_state.current_epoch(), + &head_state + ), + current_epoch_valid + && slot >= current_epoch_cutoff_slot.unwrap_or(current_pivot_slot.as_u64()) + ); + // Similarly for the previous epoch + assert_eq!( + harness.chain.shuffling_is_compatible( + &block_root, + head_state.previous_epoch(), + &head_state + ), + previous_epoch_valid + && slot >= previous_epoch_cutoff_slot.unwrap_or(previous_pivot_slot.as_u64()) + ); + // Targeting the next epoch should always return false + assert_eq!( + harness.chain.shuffling_is_compatible( + &block_root, + head_state.current_epoch() + 1, + &head_state + ), + false + ); + // Targeting two epochs before the current epoch should also always return false + if head_state.current_epoch() >= 2 { + assert_eq!( + harness.chain.shuffling_is_compatible( + &block_root, + head_state.current_epoch() - 2, + &head_state + ), + false + ); + } + } +} + /// Check that the head state's slot matches `expected_slot`. fn check_slot(harness: &TestHarness, expected_slot: u64) { let state = &harness.chain.head().expect("should get head").beacon_state; diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index 7718c227a..ac2c1ed31 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -98,10 +98,14 @@ impl OperationPool { /// Get a list of attestations for inclusion in a block. /// - /// NOTE: Assumes that all attestations in the operation_pool are valid. + /// The `validity_filter` is a closure that provides extra filtering of the attestations + /// before an approximately optimal bundle is constructed. We use it to provide access + /// to the fork choice data from the `BeaconChain` struct that doesn't logically belong + /// in the operation pool. 
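Two representative call sites for the signature that follows (a sketch; `op_pool`, `chain`, `state` and `spec` are assumed to be in scope):

    // Accept everything, as the updated op pool tests do:
    let all_atts = op_pool.get_attestations(&state, |_| true, &spec)?;

    // Only attestations whose shuffling matches the proposal state, as the
    // block producer does:
    let compatible_atts = op_pool.get_attestations(
        &state,
        |att| {
            chain.shuffling_is_compatible(
                &att.data.beacon_block_root,
                att.data.target.epoch,
                &state,
            )
        },
        &spec,
    )?;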
pub fn get_attestations( &self, state: &BeaconState, + validity_filter: impl FnMut(&&Attestation) -> bool, spec: &ChainSpec, ) -> Result>, OpPoolError> { // Attestations for the current fork, which may be from the current or previous epoch. @@ -143,6 +147,7 @@ impl OperationPool { ) .is_ok() }) + .filter(validity_filter) .flat_map(|att| AttMaxCover::new(att, state, total_active_balance, spec)); Ok(maximum_cover( @@ -584,7 +589,7 @@ mod release_tests { state.slot -= 1; assert_eq!( op_pool - .get_attestations(state, spec) + .get_attestations(state, |_| true, spec) .expect("should have attestations") .len(), 0 @@ -594,7 +599,7 @@ mod release_tests { state.slot += spec.min_attestation_inclusion_delay; let block_attestations = op_pool - .get_attestations(state, spec) + .get_attestations(state, |_| true, spec) .expect("Should have block attestations"); assert_eq!(block_attestations.len(), committees.len()); @@ -764,7 +769,7 @@ mod release_tests { state.slot += spec.min_attestation_inclusion_delay; let best_attestations = op_pool - .get_attestations(state, spec) + .get_attestations(state, |_| true, spec) .expect("should have best attestations"); assert_eq!(best_attestations.len(), max_attestations); @@ -839,7 +844,7 @@ mod release_tests { state.slot += spec.min_attestation_inclusion_delay; let best_attestations = op_pool - .get_attestations(state, spec) + .get_attestations(state, |_| true, spec) .expect("should have valid best attestations"); assert_eq!(best_attestations.len(), max_attestations); diff --git a/eth2/proto_array_fork_choice/src/proto_array.rs b/eth2/proto_array_fork_choice/src/proto_array.rs index ece8648bf..8516f8029 100644 --- a/eth2/proto_array_fork_choice/src/proto_array.rs +++ b/eth2/proto_array_fork_choice/src/proto_array.rs @@ -407,4 +407,41 @@ impl ProtoArray { && (node.finalized_epoch == self.finalized_epoch || self.finalized_epoch == Epoch::new(0)) } + + /// Return a reverse iterator over the nodes which comprise the chain ending at `block_root`. + pub fn iter_nodes<'a>(&'a self, block_root: &Hash256) -> Iter<'a> { + let next_node_index = self.indices.get(block_root).copied(); + Iter { + next_node_index, + proto_array: self, + } + } + + /// Return a reverse iterator over the block roots of the chain ending at `block_root`. + /// + /// Note that unlike many other iterators, this one WILL NOT yield anything at skipped slots. + pub fn iter_block_roots<'a>( + &'a self, + block_root: &Hash256, + ) -> impl Iterator + 'a { + self.iter_nodes(block_root) + .map(|node| (node.root, node.slot)) + } +} + +/// Reverse iterator over one path through a `ProtoArray`. 
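Before the iterator itself, a sketch of its typical use: the ancestor lookup from `shuffling_is_compatible`, where the skipped-slot behaviour means `find` returns the first block at or before the pivot (`proto_array`, `head_root` and `pivot_slot` assumed in scope):

    let pivot_block_root = proto_array
        .iter_block_roots(&head_root)
        .find(|(_, slot)| *slot <= pivot_slot)
        .map(|(block_root, _)| block_root);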
+pub struct Iter<'a> { + next_node_index: Option, + proto_array: &'a ProtoArray, +} + +impl<'a> Iterator for Iter<'a> { + type Item = &'a ProtoNode; + + fn next(&mut self) -> Option { + let next_node_index = self.next_node_index?; + let node = self.proto_array.nodes.get(next_node_index)?; + self.next_node_index = node.parent; + Some(node) + } } From 32074f0d095d901e77ce039ac02b3687f9cce76c Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 20 Apr 2020 12:35:11 +1000 Subject: [PATCH 2/4] Use checked arithmetic in types and state proc (#1009) --- .github/workflows/test-suite.yml | 7 + Cargo.lock | 8 + Cargo.toml | 1 + Makefile | 5 + beacon_node/beacon_chain/Cargo.toml | 1 + beacon_node/beacon_chain/src/eth1_chain.rs | 10 +- .../genesis/src/eth1_genesis_service.rs | 3 +- eth2/state_processing/Cargo.toml | 1 + .../src/common/deposit_data_tree.rs | 3 +- .../src/common/get_base_reward.rs | 11 +- .../src/common/slash_validator.rs | 12 +- eth2/state_processing/src/genesis.rs | 16 +- eth2/state_processing/src/lib.rs | 2 + .../src/per_block_processing.rs | 29 ++-- .../block_signature_verifier.rs | 2 + .../src/per_block_processing/errors.rs | 20 ++- .../per_block_processing/verify_deposit.rs | 3 +- .../src/per_epoch_processing.rs | 33 +++- .../src/per_epoch_processing/apply_rewards.rs | 98 ++++++----- .../src/per_epoch_processing/errors.rs | 7 + .../per_epoch_processing/process_slashings.rs | 14 +- .../validator_statuses.rs | 29 +++- eth2/types/Cargo.toml | 1 + eth2/types/src/beacon_state.rs | 60 ++++--- .../types/src/beacon_state/committee_cache.rs | 6 +- eth2/types/src/beacon_state/exit_cache.rs | 6 +- eth2/types/src/beacon_state/pubkey_cache.rs | 1 + .../types/src/beacon_state/tree_hash_cache.rs | 4 +- eth2/types/src/chain_spec.rs | 15 +- eth2/types/src/eth_spec.rs | 14 +- eth2/types/src/lib.rs | 2 + eth2/types/src/slot_epoch.rs | 9 +- eth2/types/src/slot_epoch_macros.rs | 25 +-- .../builders/testing_beacon_state_builder.rs | 1 - .../builders/testing_deposit_builder.rs | 2 +- .../testing_proposer_slashing_builder.rs | 2 +- eth2/types/src/test_utils/mod.rs | 2 + eth2/types/src/tree_hash_impls.rs | 8 +- eth2/types/src/utils/serde_utils.rs | 1 - eth2/utils/bls/src/aggregate_public_key.rs | 4 +- eth2/utils/cached_tree_hash/src/cache.rs | 4 +- .../utils/cached_tree_hash/src/cache_arena.rs | 1 + eth2/utils/merkle_proof/Cargo.toml | 1 + eth2/utils/merkle_proof/src/lib.rs | 9 + eth2/utils/safe_arith/Cargo.toml | 9 + eth2/utils/safe_arith/src/lib.rs | 161 ++++++++++++++++++ eth2/utils/serde_hex/src/lib.rs | 1 - eth2/utils/ssz_derive/src/lib.rs | 2 + eth2/utils/tree_hash/src/merkle_hasher.rs | 28 ++- 49 files changed, 525 insertions(+), 169 deletions(-) create mode 100644 eth2/utils/safe_arith/Cargo.toml create mode 100644 eth2/utils/safe_arith/src/lib.rs diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index bdbc9bb05..de889717f 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -65,3 +65,10 @@ jobs: - uses: actions/checkout@v1 - name: Typecheck benchmark code without running it run: make check-benches + clippy: + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Lint code for quality and style with Clippy + run: make lint diff --git a/Cargo.lock b/Cargo.lock index c3034cefc..8eab992b6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -226,6 +226,7 @@ dependencies = [ "proto_array_fork_choice 0.1.0", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 1.3.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "safe_arith 0.1.0", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2498,6 +2499,7 @@ dependencies = [ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "quickcheck 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)", "quickcheck_macros 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "safe_arith 0.1.0", ] [[package]] @@ -3524,6 +3526,10 @@ name = "ryu" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "safe_arith" +version = "0.1.0" + [[package]] name = "safemem" version = "0.3.3" @@ -3941,6 +3947,7 @@ dependencies = [ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "merkle_proof 0.1.0", "rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "safe_arith 0.1.0", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "serde_yaml 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4543,6 +4550,7 @@ dependencies = [ "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", "rand_xorshift 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "safe_arith 0.1.0", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/Cargo.toml b/Cargo.toml index 0713dfebb..9d25d3dfc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,6 +16,7 @@ members = [ "eth2/utils/lighthouse_metrics", "eth2/utils/merkle_proof", "eth2/utils/int_to_bytes", + "eth2/utils/safe_arith", "eth2/utils/serde_hex", "eth2/utils/slot_clock", "eth2/utils/ssz", diff --git a/Makefile b/Makefile index ad071a5e3..7462ee56e 100644 --- a/Makefile +++ b/Makefile @@ -41,6 +41,11 @@ test: test-release # Runs the entire test suite, downloading test vectors if required. test-full: cargo-fmt test-release test-debug test-ef +# Lints the code for bad style and potentially unsafe arithmetic using Clippy. +# Clippy lints are opt-in per-crate for now, which is why we allow all by default. +lint: + cargo clippy --all -- -A clippy::all + # Runs the makefile in the `ef_tests` repo. # # May download and extract an archive of test vectors from the ethereum diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 61fee2a89..c50c1db56 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -42,6 +42,7 @@ rand = "0.7.2" proto_array_fork_choice = { path = "../../eth2/proto_array_fork_choice" } lru = "0.4.3" tempfile = "3.1.0" +safe_arith = { path = "../../eth2/utils/safe_arith" } [dev-dependencies] lazy_static = "1.4.0" diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 148e15630..b3ad966a8 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -45,6 +45,14 @@ pub enum Error { /// /// The eth1 caches are stale, or a junk value was voted into the chain. 
UnknownPreviousEth1BlockHash, + /// An arithmetic error occurred. + ArithError(safe_arith::ArithError), +} + +impl From for Error { + fn from(e: safe_arith::ArithError) -> Self { + Self::ArithError(e) + } } #[derive(Encode, Decode, Clone)] @@ -369,7 +377,7 @@ impl> Eth1ChainBackend for CachingEth1Backend Result, Error> { let deposit_index = state.eth1_deposit_index; - let deposit_count = if let Some(new_eth1_data) = get_new_eth1_data(state, eth1_data_vote) { + let deposit_count = if let Some(new_eth1_data) = get_new_eth1_data(state, eth1_data_vote)? { new_eth1_data.deposit_count } else { state.eth1_data.deposit_count diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs index 85f7de8d4..14df35fa1 100644 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -383,7 +383,8 @@ impl Eth1GenesisService { .map_err(|e| format!("Error whilst processing deposit: {:?}", e)) })?; - process_activations(&mut local_state, spec); + process_activations(&mut local_state, spec) + .map_err(|e| format!("Error whilst processing activations: {:?}", e))?; let is_valid = is_valid_genesis_state(&local_state, spec); trace!( diff --git a/eth2/state_processing/Cargo.toml b/eth2/state_processing/Cargo.toml index c49130bbb..ccd198436 100644 --- a/eth2/state_processing/Cargo.toml +++ b/eth2/state_processing/Cargo.toml @@ -27,6 +27,7 @@ eth2_ssz = "0.1.2" eth2_ssz_types = { path = "../utils/ssz_types" } merkle_proof = { path = "../utils/merkle_proof" } log = "0.4.8" +safe_arith = { path = "../utils/safe_arith" } tree_hash = "0.1.0" tree_hash_derive = "0.2" types = { path = "../types" } diff --git a/eth2/state_processing/src/common/deposit_data_tree.rs b/eth2/state_processing/src/common/deposit_data_tree.rs index e2d92d56d..319c437ee 100644 --- a/eth2/state_processing/src/common/deposit_data_tree.rs +++ b/eth2/state_processing/src/common/deposit_data_tree.rs @@ -1,6 +1,7 @@ use eth2_hashing::hash; use int_to_bytes::int_to_bytes32; use merkle_proof::{MerkleTree, MerkleTreeError}; +use safe_arith::SafeArith; use types::Hash256; /// Emulates the eth1 deposit contract merkle tree. @@ -46,7 +47,7 @@ impl DepositDataTree { /// Add a deposit to the merkle tree. pub fn push_leaf(&mut self, leaf: Hash256) -> Result<(), MerkleTreeError> { self.tree.push_leaf(leaf, self.depth)?; - self.mix_in_length += 1; + self.mix_in_length.increment()?; Ok(()) } } diff --git a/eth2/state_processing/src/common/get_base_reward.rs b/eth2/state_processing/src/common/get_base_reward.rs index 1fcb5b64e..ba7e696d6 100644 --- a/eth2/state_processing/src/common/get_base_reward.rs +++ b/eth2/state_processing/src/common/get_base_reward.rs @@ -1,4 +1,5 @@ use integer_sqrt::IntegerSquareRoot; +use safe_arith::SafeArith; use types::*; /// Returns the base reward for some validator. @@ -14,10 +15,10 @@ pub fn get_base_reward( if total_active_balance == 0 { Ok(0) } else { - Ok( - state.get_effective_balance(index, spec)? * spec.base_reward_factor - / total_active_balance.integer_sqrt() - / spec.base_rewards_per_epoch, - ) + Ok(state + .get_effective_balance(index, spec)? + .safe_mul(spec.base_reward_factor)? + .safe_div(total_active_balance.integer_sqrt())? + .safe_div(spec.base_rewards_per_epoch)?) 
} } diff --git a/eth2/state_processing/src/common/slash_validator.rs b/eth2/state_processing/src/common/slash_validator.rs index eb5621293..769289506 100644 --- a/eth2/state_processing/src/common/slash_validator.rs +++ b/eth2/state_processing/src/common/slash_validator.rs @@ -1,4 +1,5 @@ use crate::common::initiate_validator_exit; +use safe_arith::SafeArith; use std::cmp; use types::{BeaconStateError as Error, *}; @@ -27,18 +28,21 @@ pub fn slash_validator( let validator_effective_balance = state.get_effective_balance(slashed_index, spec)?; state.set_slashings( epoch, - state.get_slashings(epoch)? + validator_effective_balance, + state + .get_slashings(epoch)? + .safe_add(validator_effective_balance)?, )?; safe_sub_assign!( state.balances[slashed_index], - validator_effective_balance / spec.min_slashing_penalty_quotient + validator_effective_balance.safe_div(spec.min_slashing_penalty_quotient)? ); // Apply proposer and whistleblower rewards let proposer_index = state.get_beacon_proposer_index(state.slot, spec)?; let whistleblower_index = opt_whistleblower_index.unwrap_or(proposer_index); - let whistleblower_reward = validator_effective_balance / spec.whistleblower_reward_quotient; - let proposer_reward = whistleblower_reward / spec.proposer_reward_quotient; + let whistleblower_reward = + validator_effective_balance.safe_div(spec.whistleblower_reward_quotient)?; + let proposer_reward = whistleblower_reward.safe_div(spec.proposer_reward_quotient)?; safe_add_assign!(state.balances[proposer_index], proposer_reward); safe_add_assign!( diff --git a/eth2/state_processing/src/genesis.rs b/eth2/state_processing/src/genesis.rs index 71c66cba8..9ae9fabdc 100644 --- a/eth2/state_processing/src/genesis.rs +++ b/eth2/state_processing/src/genesis.rs @@ -1,5 +1,6 @@ use super::per_block_processing::{errors::BlockProcessingError, process_deposit}; use crate::common::DepositDataTree; +use safe_arith::SafeArith; use tree_hash::TreeHash; use types::DEPOSIT_TREE_DEPTH; use types::*; @@ -14,8 +15,9 @@ pub fn initialize_beacon_state_from_eth1( deposits: Vec, spec: &ChainSpec, ) -> Result, BlockProcessingError> { - let genesis_time = - eth1_timestamp - eth1_timestamp % spec.min_genesis_delay + 2 * spec.min_genesis_delay; + let genesis_time = eth1_timestamp + .safe_sub(eth1_timestamp.safe_rem(spec.min_genesis_delay)?)? + .safe_add(2.safe_mul(spec.min_genesis_delay)?)?; let eth1_data = Eth1Data { // Temporary deposit root deposit_root: Hash256::zero(), @@ -37,7 +39,7 @@ pub fn initialize_beacon_state_from_eth1( process_deposit(&mut state, &deposit, spec, true)?; } - process_activations(&mut state, spec); + process_activations(&mut state, spec)?; // Now that we have our validators, initialize the caches (including the committees) state.build_all_caches(spec)?; @@ -60,11 +62,14 @@ pub fn is_valid_genesis_state(state: &BeaconState, spec: &ChainSp /// Activate genesis validators, if their balance is acceptable. 
/// /// Spec v0.11.1 -pub fn process_activations(state: &mut BeaconState, spec: &ChainSpec) { +pub fn process_activations( + state: &mut BeaconState, + spec: &ChainSpec, +) -> Result<(), Error> { for (index, validator) in state.validators.iter_mut().enumerate() { let balance = state.balances[index]; validator.effective_balance = std::cmp::min( - balance - balance % spec.effective_balance_increment, + balance.safe_sub(balance.safe_rem(spec.effective_balance_increment)?)?, spec.max_effective_balance, ); if validator.effective_balance == spec.max_effective_balance { @@ -72,4 +77,5 @@ pub fn process_activations(state: &mut BeaconState, spec: &ChainS validator.activation_epoch = T::genesis_epoch(); } } + Ok(()) } diff --git a/eth2/state_processing/src/lib.rs b/eth2/state_processing/src/lib.rs index 63c5e2550..86dc2294f 100644 --- a/eth2/state_processing/src/lib.rs +++ b/eth2/state_processing/src/lib.rs @@ -1,3 +1,5 @@ +#![deny(clippy::integer_arithmetic)] + #[macro_use] mod macros; diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index 2f3a12da5..8634bb0e1 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -1,6 +1,7 @@ use crate::common::{initiate_validator_exit, slash_validator}; use errors::{BlockOperationError, BlockProcessingError, HeaderInvalid, IntoWithIndex}; use rayon::prelude::*; +use safe_arith::{ArithError, SafeArith}; use signature_sets::{block_proposal_signature_set, get_pubkey_from_state, randao_signature_set}; use std::convert::TryInto; use tree_hash::TreeHash; @@ -239,7 +240,7 @@ pub fn process_eth1_data( state: &mut BeaconState, eth1_data: &Eth1Data, ) -> Result<(), Error> { - if let Some(new_eth1_data) = get_new_eth1_data(state, eth1_data) { + if let Some(new_eth1_data) = get_new_eth1_data(state, eth1_data)? { state.eth1_data = new_eth1_data; } @@ -248,14 +249,14 @@ pub fn process_eth1_data( Ok(()) } -/// Returns `Some(eth1_data)` if adding the given `eth1_data` to `state.eth1_data_votes` would +/// Returns `Ok(Some(eth1_data))` if adding the given `eth1_data` to `state.eth1_data_votes` would /// result in a change to `state.eth1_data`. /// /// Spec v0.11.1 pub fn get_new_eth1_data( state: &BeaconState, eth1_data: &Eth1Data, -) -> Option { +) -> Result, ArithError> { let num_votes = state .eth1_data_votes .iter() @@ -263,10 +264,10 @@ pub fn get_new_eth1_data( .count(); // The +1 is to account for the `eth1_data` supplied to the function. - if 2 * (num_votes + 1) > T::SlotsPerEth1VotingPeriod::to_usize() { - Some(eth1_data.clone()) + if num_votes.safe_add(1)?.safe_mul(2)? > T::SlotsPerEth1VotingPeriod::to_usize() { + Ok(Some(eth1_data.clone())) } else { - None + Ok(None) } } @@ -318,7 +319,8 @@ pub fn process_attester_slashings( ) -> Result<(), BlockProcessingError> { // Verify the `IndexedAttestation`s in parallel (these are the resource-consuming objects, not // the `AttesterSlashing`s themselves). 
- let mut indexed_attestations: Vec<&_> = Vec::with_capacity(attester_slashings.len() * 2); + let mut indexed_attestations: Vec<&_> = + Vec::with_capacity(attester_slashings.len().safe_mul(2)?); for attester_slashing in attester_slashings { indexed_attestations.push(&attester_slashing.attestation_1); indexed_attestations.push(&attester_slashing.attestation_2); @@ -432,8 +434,13 @@ pub fn process_deposits( .par_iter() .enumerate() .try_for_each(|(i, deposit)| { - verify_deposit_merkle_proof(state, deposit, state.eth1_deposit_index + i as u64, spec) - .map_err(|e| e.into_with_index(i)) + verify_deposit_merkle_proof( + state, + deposit, + state.eth1_deposit_index.safe_add(i as u64)?, + spec, + ) + .map_err(|e| e.into_with_index(i)) })?; // Update the state in series. @@ -459,7 +466,7 @@ pub fn process_deposit( .map_err(|e| e.into_with_index(deposit_index))?; } - state.eth1_deposit_index += 1; + state.eth1_deposit_index.increment()?; // Ensure the state's pubkey cache is fully up-to-date, it will be used to check to see if the // depositing validator already exists in the registry. @@ -495,7 +502,7 @@ pub fn process_deposit( exit_epoch: spec.far_future_epoch, withdrawable_epoch: spec.far_future_epoch, effective_balance: std::cmp::min( - amount - amount % spec.effective_balance_increment, + amount.safe_sub(amount.safe_rem(spec.effective_balance_increment)?)?, spec.max_effective_balance, ), slashed: false, diff --git a/eth2/state_processing/src/per_block_processing/block_signature_verifier.rs b/eth2/state_processing/src/per_block_processing/block_signature_verifier.rs index 81cbab38a..30b057f53 100644 --- a/eth2/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/eth2/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -1,3 +1,5 @@ +#![allow(clippy::integer_arithmetic)] + use super::signature_sets::{Error as SignatureSetError, Result as SignatureSetResult, *}; use crate::common::get_indexed_attestation; diff --git a/eth2/state_processing/src/per_block_processing/errors.rs b/eth2/state_processing/src/per_block_processing/errors.rs index 757705c99..83e2a6ac2 100644 --- a/eth2/state_processing/src/per_block_processing/errors.rs +++ b/eth2/state_processing/src/per_block_processing/errors.rs @@ -1,9 +1,10 @@ use super::signature_sets::Error as SignatureSetError; use merkle_proof::MerkleTreeError; +use safe_arith::ArithError; use types::*; /// The error returned from the `per_block_processing` function. Indicates that a block is either -/// invalid, or we were unable to determine it's validity (we encountered an unexpected error). +/// invalid, or we were unable to determine its validity (we encountered an unexpected error). /// /// Any of the `...Error` variants indicate that at some point during block (and block operation) /// verification, there was an error. 
There is no indication as to _where_ that error happened @@ -48,6 +49,7 @@ pub enum BlockProcessingError { SignatureSetError(SignatureSetError), SszTypesError(ssz_types::Error), MerkleTreeError(MerkleTreeError), + ArithError(ArithError), } impl From for BlockProcessingError { @@ -68,6 +70,12 @@ impl From for BlockProcessingError { } } +impl From for BlockProcessingError { + fn from(e: ArithError) -> Self { + BlockProcessingError::ArithError(e) + } +} + impl From> for BlockProcessingError { fn from(e: BlockOperationError) -> BlockProcessingError { match e { @@ -75,6 +83,7 @@ impl From> for BlockProcessingError { BlockOperationError::BeaconStateError(e) => BlockProcessingError::BeaconStateError(e), BlockOperationError::SignatureSetError(e) => BlockProcessingError::SignatureSetError(e), BlockOperationError::SszTypesError(e) => BlockProcessingError::SszTypesError(e), + BlockOperationError::ArithError(e) => BlockProcessingError::ArithError(e), } } } @@ -101,6 +110,7 @@ macro_rules! impl_into_block_processing_error_with_index { BlockOperationError::BeaconStateError(e) => BlockProcessingError::BeaconStateError(e), BlockOperationError::SignatureSetError(e) => BlockProcessingError::SignatureSetError(e), BlockOperationError::SszTypesError(e) => BlockProcessingError::SszTypesError(e), + BlockOperationError::ArithError(e) => BlockProcessingError::ArithError(e), } } } @@ -130,6 +140,7 @@ pub enum BlockOperationError { BeaconStateError(BeaconStateError), SignatureSetError(SignatureSetError), SszTypesError(ssz_types::Error), + ArithError(ArithError), } impl BlockOperationError { @@ -155,6 +166,12 @@ impl From for BlockOperationError { } } +impl From for BlockOperationError { + fn from(e: ArithError) -> Self { + BlockOperationError::ArithError(e) + } +} + #[derive(Debug, PartialEq, Clone)] pub enum HeaderInvalid { ProposalSignatureInvalid, @@ -265,6 +282,7 @@ impl From> BlockOperationError::BeaconStateError(e) => BlockOperationError::BeaconStateError(e), BlockOperationError::SignatureSetError(e) => BlockOperationError::SignatureSetError(e), BlockOperationError::SszTypesError(e) => BlockOperationError::SszTypesError(e), + BlockOperationError::ArithError(e) => BlockOperationError::ArithError(e), } } } diff --git a/eth2/state_processing/src/per_block_processing/verify_deposit.rs b/eth2/state_processing/src/per_block_processing/verify_deposit.rs index cd92ee895..33478399b 100644 --- a/eth2/state_processing/src/per_block_processing/verify_deposit.rs +++ b/eth2/state_processing/src/per_block_processing/verify_deposit.rs @@ -3,6 +3,7 @@ use crate::per_block_processing::signature_sets::{ deposit_pubkey_signature_message, deposit_signature_set, }; use merkle_proof::verify_merkle_proof; +use safe_arith::SafeArith; use tree_hash::TreeHash; use types::*; @@ -59,7 +60,7 @@ pub fn verify_deposit_merkle_proof( verify_merkle_proof( leaf, &deposit.proof[..], - spec.deposit_contract_tree_depth as usize + 1, + spec.deposit_contract_tree_depth.safe_add(1)? 
as usize, deposit_index as usize, state.eth1_data.deposit_root, ), diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index cc995a8ac..99ac78a77 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -1,4 +1,5 @@ use errors::EpochProcessingError as Error; +use safe_arith::SafeArith; use tree_hash::TreeHash; use types::*; @@ -90,7 +91,11 @@ pub fn process_justification_and_finalization( state.previous_justified_checkpoint = state.current_justified_checkpoint.clone(); state.justification_bits.shift_up(1)?; - if total_balances.previous_epoch_target_attesters() * 3 >= total_balances.current_epoch() * 2 { + if total_balances + .previous_epoch_target_attesters() + .safe_mul(3)? + >= total_balances.current_epoch().safe_mul(2)? + { state.current_justified_checkpoint = Checkpoint { epoch: previous_epoch, root: *state.get_block_root_at_epoch(previous_epoch)?, @@ -98,7 +103,11 @@ pub fn process_justification_and_finalization( state.justification_bits.set(1, true)?; } // If the current epoch gets justified, fill the last bit. - if total_balances.current_epoch_target_attesters() * 3 >= total_balances.current_epoch() * 2 { + if total_balances + .current_epoch_target_attesters() + .safe_mul(3)? + >= total_balances.current_epoch().safe_mul(2)? + { state.current_justified_checkpoint = Checkpoint { epoch: current_epoch, root: *state.get_block_root_at_epoch(current_epoch)?, @@ -152,17 +161,19 @@ pub fn process_final_updates( } // Update effective balances with hysteresis (lag). - let hysteresis_increment = spec.effective_balance_increment / spec.hysteresis_quotient; - let downward_threshold = hysteresis_increment * spec.hysteresis_downward_multiplier; - let upward_threshold = hysteresis_increment * spec.hysteresis_upward_multiplier; + let hysteresis_increment = spec + .effective_balance_increment + .safe_div(spec.hysteresis_quotient)?; + let downward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_downward_multiplier)?; + let upward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_upward_multiplier)?; for (index, validator) in state.validators.iter_mut().enumerate() { let balance = state.balances[index]; - if balance + downward_threshold < validator.effective_balance - || validator.effective_balance + upward_threshold < balance + if balance.safe_add(downward_threshold)? < validator.effective_balance + || validator.effective_balance.safe_add(upward_threshold)? < balance { validator.effective_balance = std::cmp::min( - balance - balance % spec.effective_balance_increment, + balance.safe_sub(balance.safe_rem(spec.effective_balance_increment)?)?, spec.max_effective_balance, ); } @@ -175,7 +186,11 @@ pub fn process_final_updates( state.set_randao_mix(next_epoch, *state.get_randao_mix(current_epoch)?)?; // Set historical root accumulator - if next_epoch.as_u64() % (T::SlotsPerHistoricalRoot::to_u64() / T::slots_per_epoch()) == 0 { + if next_epoch + .as_u64() + .safe_rem(T::SlotsPerHistoricalRoot::to_u64().safe_div(T::slots_per_epoch())?)? 
+ == 0 + { let historical_batch = state.historical_batch(); state .historical_roots diff --git a/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs b/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs index 1f79680fa..4fad65377 100644 --- a/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs +++ b/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs @@ -1,6 +1,7 @@ use super::super::common::get_base_reward; use super::validator_statuses::{TotalBalances, ValidatorStatus, ValidatorStatuses}; use super::Error; +use safe_arith::SafeArith; use types::*; @@ -13,21 +14,21 @@ pub struct Delta { impl Delta { /// Reward the validator with the `reward`. - pub fn reward(&mut self, reward: u64) { - self.rewards += reward; + pub fn reward(&mut self, reward: u64) -> Result<(), Error> { + self.rewards = self.rewards.safe_add(reward)?; + Ok(()) } /// Penalize the validator with the `penalty`. - pub fn penalize(&mut self, penalty: u64) { - self.penalties += penalty; + pub fn penalize(&mut self, penalty: u64) -> Result<(), Error> { + self.penalties = self.penalties.safe_add(penalty)?; + Ok(()) } -} -impl std::ops::AddAssign for Delta { - /// Use wrapping addition as that is how it's defined in the spec. - fn add_assign(&mut self, other: Delta) { - self.rewards += other.rewards; - self.penalties += other.penalties; + /// Combine two deltas. + fn combine(&mut self, other: Delta) -> Result<(), Error> { + self.reward(other.rewards)?; + self.penalize(other.penalties) } } @@ -56,9 +57,10 @@ pub fn process_rewards_and_penalties( get_proposer_deltas(&mut deltas, state, validator_statuses, spec)?; - // Apply the deltas, over-flowing but not under-flowing (saturating at 0 instead). + // Apply the deltas, erroring on overflow above but not on overflow below (saturating at 0 + // instead). for (i, delta) in deltas.iter().enumerate() { - state.balances[i] += delta.rewards; + state.balances[i] = state.balances[i].safe_add(delta.rewards)?; state.balances[i] = state.balances[i].saturating_sub(delta.penalties); } @@ -91,7 +93,8 @@ fn get_proposer_deltas( return Err(Error::ValidatorStatusesInconsistent); } - deltas[inclusion.proposer_index].reward(base_reward / spec.proposer_reward_quotient); + deltas[inclusion.proposer_index] + .reward(base_reward.safe_div(spec.proposer_reward_quotient)?)?; } } @@ -123,9 +126,9 @@ fn get_attestation_deltas( base_reward, finality_delay, spec, - ); + )?; - deltas[index] += delta; + deltas[index].combine(delta)?; } Ok(()) @@ -140,7 +143,7 @@ fn get_attestation_delta( base_reward: u64, finality_delay: u64, spec: &ChainSpec, -) -> Delta { +) -> Result { let mut delta = Delta::default(); // Is this validator eligible to be rewarded or penalized? 
@@ -149,7 +152,7 @@ fn get_attestation_delta( || (validator.is_slashed && !validator.is_withdrawable_in_current_epoch); if !is_eligible { - return delta; + return Ok(delta); } // Handle integer overflow by dividing these quantities by EFFECTIVE_BALANCE_INCREMENT @@ -157,59 +160,78 @@ fn get_attestation_delta( // - increment = EFFECTIVE_BALANCE_INCREMENT // - reward_numerator = get_base_reward(state, index) * (attesting_balance // increment) // - rewards[index] = reward_numerator // (total_balance // increment) - let total_balance_ebi = total_balances.current_epoch() / spec.effective_balance_increment; - let total_attesting_balance_ebi = - total_balances.previous_epoch_attesters() / spec.effective_balance_increment; - let matching_target_balance_ebi = - total_balances.previous_epoch_target_attesters() / spec.effective_balance_increment; - let matching_head_balance_ebi = - total_balances.previous_epoch_head_attesters() / spec.effective_balance_increment; + let total_balance_ebi = total_balances + .current_epoch() + .safe_div(spec.effective_balance_increment)?; + let total_attesting_balance_ebi = total_balances + .previous_epoch_attesters() + .safe_div(spec.effective_balance_increment)?; + let matching_target_balance_ebi = total_balances + .previous_epoch_target_attesters() + .safe_div(spec.effective_balance_increment)?; + let matching_head_balance_ebi = total_balances + .previous_epoch_head_attesters() + .safe_div(spec.effective_balance_increment)?; // Expected FFG source. // Spec: // - validator index in `get_unslashed_attesting_indices(state, matching_source_attestations)` if validator.is_previous_epoch_attester && !validator.is_slashed { - delta.reward(base_reward * total_attesting_balance_ebi / total_balance_ebi); + delta.reward( + base_reward + .safe_mul(total_attesting_balance_ebi)? + .safe_div(total_balance_ebi)?, + )?; // Inclusion speed bonus - let proposer_reward = base_reward / spec.proposer_reward_quotient; - let max_attester_reward = base_reward - proposer_reward; + let proposer_reward = base_reward.safe_div(spec.proposer_reward_quotient)?; + let max_attester_reward = base_reward.safe_sub(proposer_reward)?; let inclusion = validator .inclusion_info .expect("It is a logic error for an attester not to have an inclusion delay."); - delta.reward(max_attester_reward / inclusion.delay); + delta.reward(max_attester_reward.safe_div(inclusion.delay)?)?; } else { - delta.penalize(base_reward); + delta.penalize(base_reward)?; } // Expected FFG target. // Spec: // - validator index in `get_unslashed_attesting_indices(state, matching_target_attestations)` if validator.is_previous_epoch_target_attester && !validator.is_slashed { - delta.reward(base_reward * matching_target_balance_ebi / total_balance_ebi); + delta.reward( + base_reward + .safe_mul(matching_target_balance_ebi)? + .safe_div(total_balance_ebi)?, + )?; } else { - delta.penalize(base_reward); + delta.penalize(base_reward)?; } // Expected head. // Spec: // - validator index in `get_unslashed_attesting_indices(state, matching_head_attestations)` if validator.is_previous_epoch_head_attester && !validator.is_slashed { - delta.reward(base_reward * matching_head_balance_ebi / total_balance_ebi); + delta.reward( + base_reward + .safe_mul(matching_head_balance_ebi)? 
+ .safe_div(total_balance_ebi)?, + )?; } else { - delta.penalize(base_reward); + delta.penalize(base_reward)?; } // Inactivity penalty if finality_delay > spec.min_epochs_to_inactivity_penalty { // All eligible validators are penalized - delta.penalize(spec.base_rewards_per_epoch * base_reward); + delta.penalize(spec.base_rewards_per_epoch.safe_mul(base_reward)?)?; // Additionally, all validators whose FFG target didn't match are penalized extra if !validator.is_previous_epoch_target_attester { delta.penalize( - validator.current_epoch_effective_balance * finality_delay - / spec.inactivity_penalty_quotient, - ); + validator + .current_epoch_effective_balance + .safe_mul(finality_delay)? + .safe_div(spec.inactivity_penalty_quotient)?, + )?; } } @@ -218,5 +240,5 @@ fn get_attestation_delta( // This function only computes the delta for a single validator, so it cannot also return a // delta for a validator. - delta + Ok(delta) } diff --git a/eth2/state_processing/src/per_epoch_processing/errors.rs b/eth2/state_processing/src/per_epoch_processing/errors.rs index 98e012e90..245935c1d 100644 --- a/eth2/state_processing/src/per_epoch_processing/errors.rs +++ b/eth2/state_processing/src/per_epoch_processing/errors.rs @@ -18,6 +18,7 @@ pub enum EpochProcessingError { BeaconStateError(BeaconStateError), InclusionError(InclusionError), SszTypesError(ssz_types::Error), + ArithError(safe_arith::ArithError), } impl From for EpochProcessingError { @@ -38,6 +39,12 @@ impl From for EpochProcessingError { } } +impl From for EpochProcessingError { + fn from(e: safe_arith::ArithError) -> EpochProcessingError { + EpochProcessingError::ArithError(e) + } +} + #[derive(Debug, PartialEq)] pub enum InclusionError { /// The validator did not participate in an attestation in this period. diff --git a/eth2/state_processing/src/per_epoch_processing/process_slashings.rs b/eth2/state_processing/src/per_epoch_processing/process_slashings.rs index 4bf24468f..c291d3f09 100644 --- a/eth2/state_processing/src/per_epoch_processing/process_slashings.rs +++ b/eth2/state_processing/src/per_epoch_processing/process_slashings.rs @@ -1,3 +1,4 @@ +use safe_arith::SafeArith; use types::{BeaconStateError as Error, *}; /// Process slashings. @@ -13,12 +14,17 @@ pub fn process_slashings( for (index, validator) in state.validators.iter().enumerate() { if validator.slashed - && epoch + T::EpochsPerSlashingsVector::to_u64() / 2 == validator.withdrawable_epoch + && epoch + T::EpochsPerSlashingsVector::to_u64().safe_div(2)? + == validator.withdrawable_epoch { let increment = spec.effective_balance_increment; - let penalty_numerator = validator.effective_balance / increment - * std::cmp::min(sum_slashings * 3, total_balance); - let penalty = penalty_numerator / total_balance * increment; + let penalty_numerator = validator + .effective_balance + .safe_div(increment)? + .safe_mul(std::cmp::min(sum_slashings.safe_mul(3)?, total_balance))?; + let penalty = penalty_numerator + .safe_div(total_balance)? 
+ .safe_mul(increment)?; safe_sub_assign!(state.balances[index], penalty); } diff --git a/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs index 8877ca815..269e366ad 100644 --- a/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs +++ b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs @@ -1,4 +1,5 @@ use crate::common::get_attesting_indices; +use safe_arith::SafeArith; use types::*; /// Sets the boolean `var` on `self` to be true if it is true on `other`. Otherwise leaves `self` @@ -198,12 +199,16 @@ impl ValidatorStatuses { if validator.is_active_at(state.current_epoch()) { status.is_active_in_current_epoch = true; - total_balances.current_epoch += effective_balance; + total_balances + .current_epoch + .safe_add_assign(effective_balance)?; } if validator.is_active_at(state.previous_epoch()) { status.is_active_in_previous_epoch = true; - total_balances.previous_epoch += effective_balance; + total_balances + .previous_epoch + .safe_add_assign(effective_balance)?; } statuses.push(status); @@ -275,19 +280,29 @@ impl ValidatorStatuses { let validator_balance = state.get_effective_balance(index, spec)?; if v.is_current_epoch_attester { - self.total_balances.current_epoch_attesters += validator_balance; + self.total_balances + .current_epoch_attesters + .safe_add_assign(validator_balance)?; } if v.is_current_epoch_target_attester { - self.total_balances.current_epoch_target_attesters += validator_balance; + self.total_balances + .current_epoch_target_attesters + .safe_add_assign(validator_balance)?; } if v.is_previous_epoch_attester { - self.total_balances.previous_epoch_attesters += validator_balance; + self.total_balances + .previous_epoch_attesters + .safe_add_assign(validator_balance)?; } if v.is_previous_epoch_target_attester { - self.total_balances.previous_epoch_target_attesters += validator_balance; + self.total_balances + .previous_epoch_target_attesters + .safe_add_assign(validator_balance)?; } if v.is_previous_epoch_head_attester { - self.total_balances.previous_epoch_head_attesters += validator_balance; + self.total_balances + .previous_epoch_head_attesters + .safe_add_assign(validator_balance)?; } } } diff --git a/eth2/types/Cargo.toml b/eth2/types/Cargo.toml index e52ad8032..c442a8600 100644 --- a/eth2/types/Cargo.toml +++ b/eth2/types/Cargo.toml @@ -23,6 +23,7 @@ log = "0.4.8" merkle_proof = { path = "../utils/merkle_proof" } rayon = "1.2.0" rand = "0.7.2" +safe_arith = { path = "../utils/safe_arith" } serde = "1.0.102" serde_derive = "1.0.102" slog = "2.5.2" diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 58195c01d..7d2d6d445 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -7,6 +7,7 @@ use compare_fields_derive::CompareFields; use eth2_hashing::hash; use int_to_bytes::{int_to_bytes4, int_to_bytes8}; use pubkey_cache::PubkeyCache; +use safe_arith::{ArithError, SafeArith}; use serde_derive::{Deserialize, Serialize}; use ssz::ssz_encode; use ssz_derive::{Decode, Encode}; @@ -75,6 +76,10 @@ pub enum Error { deposit_count: u64, deposit_index: u64, }, + /// An arithmetic operation occurred which would have overflowed or divided by 0. + /// + /// This represents a serious bug in either the spec or Lighthouse! + ArithError(ArithError), } /// Control whether an epoch-indexed field can be indexed at the next epoch or not. 
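The source of the new `safe_arith` crate (`eth2/utils/safe_arith/src/lib.rs`) is not included in this excerpt. A minimal sketch of the interface the call sites above rely on; the real crate plausibly covers the other integer types and further operations as well:

    #[derive(Debug, PartialEq, Clone, Copy)]
    pub enum ArithError {
        Overflow,
        DivisionByZero,
    }

    pub trait SafeArith: Sized + Copy {
        fn safe_add(self, other: Self) -> Result<Self, ArithError>;
        fn safe_sub(self, other: Self) -> Result<Self, ArithError>;
        fn safe_mul(self, other: Self) -> Result<Self, ArithError>;
        fn safe_div(self, other: Self) -> Result<Self, ArithError>;
        fn safe_rem(self, other: Self) -> Result<Self, ArithError>;

        fn safe_add_assign(&mut self, other: Self) -> Result<(), ArithError> {
            *self = self.safe_add(other)?;
            Ok(())
        }

        // Add one, erroring on overflow (used for counters such as
        // `eth1_deposit_index`).
        fn increment(&mut self) -> Result<(), ArithError>;
    }

    impl SafeArith for u64 {
        fn safe_add(self, other: Self) -> Result<Self, ArithError> {
            self.checked_add(other).ok_or(ArithError::Overflow)
        }
        fn safe_sub(self, other: Self) -> Result<Self, ArithError> {
            self.checked_sub(other).ok_or(ArithError::Overflow)
        }
        fn safe_mul(self, other: Self) -> Result<Self, ArithError> {
            self.checked_mul(other).ok_or(ArithError::Overflow)
        }
        fn safe_div(self, other: Self) -> Result<Self, ArithError> {
            self.checked_div(other).ok_or(ArithError::DivisionByZero)
        }
        fn safe_rem(self, other: Self) -> Result<Self, ArithError> {
            self.checked_rem(other).ok_or(ArithError::DivisionByZero)
        }
        fn increment(&mut self) -> Result<(), ArithError> {
            self.safe_add_assign(1)
        }
    }

With `#![deny(clippy::integer_arithmetic)]` at a crate root (as added to `state_processing` earlier in this patch), the new `make lint` Clippy pass turns any unchecked `+`, `-`, `*`, `/` or `%` in that crate into a hard error, which is what forces the `safe_*` style throughout.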
@@ -411,7 +416,7 @@ impl BeaconState { let mut i = 0; loop { let candidate_index = indices[compute_shuffled_index( - i % indices.len(), + i.safe_rem(indices.len())?, indices.len(), seed, spec.shuffle_round_count, @@ -419,17 +424,19 @@ impl BeaconState { .ok_or(Error::UnableToShuffle)?]; let random_byte = { let mut preimage = seed.to_vec(); - preimage.append(&mut int_to_bytes8((i / 32) as u64)); + preimage.append(&mut int_to_bytes8(i.safe_div(32)? as u64)); let hash = hash(&preimage); - hash[i % 32] + hash[i.safe_rem(32)?] }; let effective_balance = self.validators[candidate_index].effective_balance; - if effective_balance * MAX_RANDOM_BYTE - >= spec.max_effective_balance * u64::from(random_byte) + if effective_balance.safe_mul(MAX_RANDOM_BYTE)? + >= spec + .max_effective_balance + .safe_mul(u64::from(random_byte))? { return Ok(candidate_index); } - i += 1; + i.increment()?; } } @@ -476,8 +483,8 @@ impl BeaconState { /// /// Spec v0.11.1 fn get_latest_block_roots_index(&self, slot: Slot) -> Result { - if (slot < self.slot) && (self.slot <= slot + self.block_roots.len() as u64) { - Ok(slot.as_usize() % self.block_roots.len()) + if slot < self.slot && self.slot <= slot + self.block_roots.len() as u64 { + Ok(slot.as_usize().safe_rem(self.block_roots.len())?) } else { Err(BeaconStateError::SlotOutOfBounds) } @@ -529,7 +536,7 @@ impl BeaconState { let len = T::EpochsPerHistoricalVector::to_u64(); if current_epoch < epoch + len && epoch <= allow_next_epoch.upper_bound_of(current_epoch) { - Ok(epoch.as_usize() % len as usize) + Ok(epoch.as_usize().safe_rem(len as usize)?) } else { Err(Error::EpochOutOfBounds) } @@ -543,7 +550,9 @@ impl BeaconState { /// /// Spec v0.11.1 pub fn update_randao_mix(&mut self, epoch: Epoch, signature: &Signature) -> Result<(), Error> { - let i = epoch.as_usize() % T::EpochsPerHistoricalVector::to_usize(); + let i = epoch + .as_usize() + .safe_rem(T::EpochsPerHistoricalVector::to_usize())?; let signature_hash = Hash256::from_slice(&hash(&ssz_encode(signature))); @@ -573,8 +582,8 @@ impl BeaconState { /// /// Spec v0.11.1 fn get_latest_state_roots_index(&self, slot: Slot) -> Result { - if (slot < self.slot) && (self.slot <= slot + Slot::from(self.state_roots.len())) { - Ok(slot.as_usize() % self.state_roots.len()) + if slot < self.slot && self.slot <= slot + self.state_roots.len() as u64 { + Ok(slot.as_usize().safe_rem(self.state_roots.len())?) } else { Err(BeaconStateError::SlotOutOfBounds) } @@ -628,7 +637,9 @@ impl BeaconState { if current_epoch < epoch + T::EpochsPerSlashingsVector::to_u64() && epoch <= allow_next_epoch.upper_bound_of(current_epoch) { - Ok(epoch.as_usize() % T::EpochsPerSlashingsVector::to_usize()) + Ok(epoch + .as_usize() + .safe_rem(T::EpochsPerSlashingsVector::to_usize())?) } else { Err(Error::EpochOutOfBounds) } @@ -687,20 +698,20 @@ impl BeaconState { // == 0`. let mix = { let i = epoch + T::EpochsPerHistoricalVector::to_u64() - spec.min_seed_lookahead - 1; - self.randao_mixes[i.as_usize() % self.randao_mixes.len()] + self.randao_mixes[i.as_usize().safe_rem(self.randao_mixes.len())?] 
}; let domain_bytes = int_to_bytes4(spec.get_domain_constant(domain_type)); let epoch_bytes = int_to_bytes8(epoch.as_u64()); const NUM_DOMAIN_BYTES: usize = 4; const NUM_EPOCH_BYTES: usize = 8; + const MIX_OFFSET: usize = NUM_DOMAIN_BYTES + NUM_EPOCH_BYTES; const NUM_MIX_BYTES: usize = 32; let mut preimage = [0; NUM_DOMAIN_BYTES + NUM_EPOCH_BYTES + NUM_MIX_BYTES]; preimage[0..NUM_DOMAIN_BYTES].copy_from_slice(&domain_bytes); - preimage[NUM_DOMAIN_BYTES..NUM_DOMAIN_BYTES + NUM_EPOCH_BYTES] - .copy_from_slice(&epoch_bytes); - preimage[NUM_DOMAIN_BYTES + NUM_EPOCH_BYTES..].copy_from_slice(mix.as_bytes()); + preimage[NUM_DOMAIN_BYTES..MIX_OFFSET].copy_from_slice(&epoch_bytes); + preimage[MIX_OFFSET..].copy_from_slice(mix.as_bytes()); Ok(Hash256::from_slice(&hash(&preimage))) } @@ -734,9 +745,10 @@ impl BeaconState { pub fn get_churn_limit(&self, spec: &ChainSpec) -> Result { Ok(std::cmp::max( spec.min_per_epoch_churn_limit, - self.committee_cache(RelativeEpoch::Current)? - .active_validator_count() as u64 - / spec.churn_limit_quotient, + (self + .committee_cache(RelativeEpoch::Current)? + .active_validator_count() as u64) + .safe_div(spec.churn_limit_quotient)?, )) } @@ -766,7 +778,7 @@ impl BeaconState { ) -> Result { validator_indices.iter().try_fold(0_u64, |acc, i| { self.get_effective_balance(*i, spec) - .and_then(|bal| Ok(bal + acc)) + .and_then(|bal| Ok(acc.safe_add(bal)?)) }) } @@ -1072,3 +1084,9 @@ impl From for Error { Error::TreeHashError(e) } } + +impl From for Error { + fn from(e: ArithError) -> Error { + Error::ArithError(e) + } +} diff --git a/eth2/types/src/beacon_state/committee_cache.rs b/eth2/types/src/beacon_state/committee_cache.rs index 5783bdbc4..d5d89d311 100644 --- a/eth2/types/src/beacon_state/committee_cache.rs +++ b/eth2/types/src/beacon_state/committee_cache.rs @@ -1,3 +1,5 @@ +#![allow(clippy::integer_arithmetic)] + use super::BeaconState; use crate::*; use core::num::NonZeroUsize; @@ -43,7 +45,7 @@ impl CommitteeCache { } let committees_per_slot = - T::get_committee_count_per_slot(active_validator_indices.len(), spec) as u64; + T::get_committee_count_per_slot(active_validator_indices.len(), spec)? as u64; let seed = state.get_seed(epoch, Domain::BeaconAttester, spec)?; @@ -56,7 +58,7 @@ impl CommitteeCache { .ok_or_else(|| Error::UnableToShuffle)?; // The use of `NonZeroUsize` reduces the maximum number of possible validators by one. - if state.validators.len() > usize::max_value() - 1 { + if state.validators.len() == usize::max_value() { return Err(Error::TooManyValidators); } diff --git a/eth2/types/src/beacon_state/exit_cache.rs b/eth2/types/src/beacon_state/exit_cache.rs index 4908194fa..aff05baaf 100644 --- a/eth2/types/src/beacon_state/exit_cache.rs +++ b/eth2/types/src/beacon_state/exit_cache.rs @@ -1,4 +1,5 @@ use super::{BeaconStateError, ChainSpec, Epoch, Validator}; +use safe_arith::SafeArith; use serde_derive::{Deserialize, Serialize}; use std::collections::HashMap; @@ -50,7 +51,10 @@ impl ExitCache { /// Must only be called once per exiting validator. 
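// Editor's aside (sketch; illustrative numbers, not from the patch): the
// `get_churn_limit` hunk above now surfaces a zero `churn_limit_quotient`
// as `ArithError::DivisionByZero` instead of panicking. With mainnet-like
// parameters (min limit 4, quotient 65_536, 300_000 active validators) the
// result is max(4, 300_000 / 65_536) = max(4, 4) = 4. The calculation,
// reduced to a standalone function:
fn churn_limit(active: u64, min_limit: u64, quotient: u64) -> Result<u64, safe_arith::ArithError> {
    use safe_arith::SafeArith;
    Ok(std::cmp::max(min_limit, active.safe_div(quotient)?))
}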
pub fn record_validator_exit(&mut self, exit_epoch: Epoch) -> Result<(), BeaconStateError> { self.check_initialized()?; - *self.exits_per_epoch.entry(exit_epoch).or_insert(0) += 1; + self.exits_per_epoch + .entry(exit_epoch) + .or_insert(0) + .increment()?; Ok(()) } diff --git a/eth2/types/src/beacon_state/pubkey_cache.rs b/eth2/types/src/beacon_state/pubkey_cache.rs index 0063758a2..a4a38ebc4 100644 --- a/eth2/types/src/beacon_state/pubkey_cache.rs +++ b/eth2/types/src/beacon_state/pubkey_cache.rs @@ -23,6 +23,7 @@ impl PubkeyCache { /// /// The added index must equal the number of validators already added to the map. This ensures /// that an index is never skipped. + #[allow(clippy::integer_arithmetic)] pub fn insert(&mut self, pubkey: PublicKeyBytes, index: ValidatorIndex) -> bool { if index == self.len { self.map.insert(pubkey, index); diff --git a/eth2/types/src/beacon_state/tree_hash_cache.rs b/eth2/types/src/beacon_state/tree_hash_cache.rs index 75b14e8ee..d77987acd 100644 --- a/eth2/types/src/beacon_state/tree_hash_cache.rs +++ b/eth2/types/src/beacon_state/tree_hash_cache.rs @@ -1,3 +1,5 @@ +#![allow(clippy::integer_arithmetic)] + use super::Error; use crate::{BeaconState, EthSpec, Hash256, Unsigned, Validator}; use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, TreeHashCache}; @@ -195,7 +197,7 @@ impl ValidatorsListTreeHashCache { /// This function makes assumptions that the `validators` list will only change in accordance /// with valid per-block/per-slot state transitions. fn recalculate_tree_hash_root(&mut self, validators: &[Validator]) -> Result { - let mut list_arena = std::mem::replace(&mut self.list_arena, CacheArena::default()); + let mut list_arena = std::mem::take(&mut self.list_arena); let leaves = self .values diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index f82d2b352..cd7e603ef 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -238,10 +238,10 @@ impl ChainSpec { /* * Gwei values */ - min_deposit_amount: u64::pow(2, 0) * u64::pow(10, 9), - max_effective_balance: u64::pow(2, 5) * u64::pow(10, 9), - ejection_balance: u64::pow(2, 4) * u64::pow(10, 9), - effective_balance_increment: u64::pow(2, 0) * u64::pow(10, 9), + min_deposit_amount: u64::pow(2, 0).saturating_mul(u64::pow(10, 9)), + max_effective_balance: u64::pow(2, 5).saturating_mul(u64::pow(10, 9)), + ejection_balance: u64::pow(2, 4).saturating_mul(u64::pow(10, 9)), + effective_balance_increment: u64::pow(2, 0).saturating_mul(u64::pow(10, 9)), /* * Initial Values @@ -522,6 +522,7 @@ impl Default for YamlConfig { /// Spec v0.11.1 impl YamlConfig { + #[allow(clippy::integer_arithmetic)] pub fn from_spec(spec: &ChainSpec) -> Self { Self { // ChainSpec @@ -557,7 +558,7 @@ impl YamlConfig { proposer_reward_quotient: spec.proposer_reward_quotient, inactivity_penalty_quotient: spec.inactivity_penalty_quotient, min_slashing_penalty_quotient: spec.min_slashing_penalty_quotient, - genesis_fork_version: spec.genesis_fork_version.clone(), + genesis_fork_version: spec.genesis_fork_version, safe_slots_to_update_justified: spec.safe_slots_to_update_justified, domain_beacon_proposer: spec.domain_beacon_proposer, domain_beacon_attester: spec.domain_beacon_attester, @@ -642,7 +643,7 @@ impl YamlConfig { effective_balance_increment: self.effective_balance_increment, genesis_slot: Slot::from(self.genesis_slot), bls_withdrawal_prefix_byte: self.bls_withdrawal_prefix, - milliseconds_per_slot: self.seconds_per_slot * 1000, + milliseconds_per_slot: 
self.seconds_per_slot.saturating_mul(1000), min_attestation_inclusion_delay: self.min_attestation_inclusion_delay, min_seed_lookahead: Epoch::from(self.min_seed_lookahead), max_seed_lookahead: Epoch::from(self.max_seed_lookahead), @@ -662,7 +663,7 @@ impl YamlConfig { domain_deposit: self.domain_deposit, domain_voluntary_exit: self.domain_voluntary_exit, boot_nodes: chain_spec.boot_nodes.clone(), - genesis_fork_version: self.genesis_fork_version.clone(), + genesis_fork_version: self.genesis_fork_version, eth1_follow_distance: self.eth1_follow_distance, ..*chain_spec }) diff --git a/eth2/types/src/eth_spec.rs b/eth2/types/src/eth_spec.rs index b19d836c4..a4883551a 100644 --- a/eth2/types/src/eth_spec.rs +++ b/eth2/types/src/eth_spec.rs @@ -1,4 +1,5 @@ use crate::*; +use safe_arith::SafeArith; use serde_derive::{Deserialize, Serialize}; use ssz_types::typenum::{ Unsigned, U0, U1, U1024, U1099511627776, U128, U16, U16777216, U2, U2048, U32, U4, U4096, U64, @@ -63,16 +64,21 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq { /// the `active_validator_count` during the slot's epoch. /// /// Spec v0.11.1 - fn get_committee_count_per_slot(active_validator_count: usize, spec: &ChainSpec) -> usize { + fn get_committee_count_per_slot( + active_validator_count: usize, + spec: &ChainSpec, + ) -> Result { let slots_per_epoch = Self::SlotsPerEpoch::to_usize(); - std::cmp::max( + Ok(std::cmp::max( 1, std::cmp::min( spec.max_committees_per_slot, - active_validator_count / slots_per_epoch / spec.target_committee_size, + active_validator_count + .safe_div(slots_per_epoch)? + .safe_div(spec.target_committee_size)?, ), - ) + )) } /// Returns the minimum number of validators required for this spec. diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index 9ced60090..3391f3132 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -2,6 +2,8 @@ // Required for big type-level numbers #![recursion_limit = "128"] +// Clippy lint set up +#![deny(clippy::integer_arithmetic)] #[macro_use] pub mod test_utils; diff --git a/eth2/types/src/slot_epoch.rs b/eth2/types/src/slot_epoch.rs index 35dce8e38..bf43e2b09 100644 --- a/eth2/types/src/slot_epoch.rs +++ b/eth2/types/src/slot_epoch.rs @@ -14,7 +14,6 @@ use crate::test_utils::TestRandom; use crate::SignedRoot; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use slog; use ssz::{ssz_encode, Decode, DecodeError, Encode}; use std::cmp::{Ord, Ordering}; use std::fmt; @@ -38,7 +37,7 @@ impl Slot { } pub fn epoch(self, slots_per_epoch: u64) -> Epoch { - Epoch::from(self.0 / slots_per_epoch) + Epoch::from(self.0) / Epoch::from(slots_per_epoch) } pub fn max_value() -> Slot { @@ -77,8 +76,8 @@ impl Epoch { let start = self.start_slot(slots_per_epoch); let end = self.end_slot(slots_per_epoch); - if (slot >= start) && (slot <= end) { - Some(slot.as_usize() - start.as_usize()) + if slot >= start && slot <= end { + slot.as_usize().checked_sub(start.as_usize()) } else { None } @@ -110,7 +109,7 @@ impl<'a> Iterator for SlotIter<'a> { } else { let start_slot = self.epoch.start_slot(self.slots_per_epoch); let previous = self.current_iteration; - self.current_iteration += 1; + self.current_iteration = self.current_iteration.checked_add(1)?; Some(start_slot + previous) } } diff --git a/eth2/types/src/slot_epoch_macros.rs b/eth2/types/src/slot_epoch_macros.rs index 49a1b8e0d..0050fb5d6 100644 --- a/eth2/types/src/slot_epoch_macros.rs +++ b/eth2/types/src/slot_epoch_macros.rs @@ -107,20 +107,21 @@ macro_rules! 
impl_math_between { fn div(self, rhs: $other) -> $main { let rhs: u64 = rhs.into(); - if rhs == 0 { - panic!("Cannot divide by zero-valued Slot/Epoch") - } - $main::from(self.0 / rhs) + $main::from( + self.0 + .checked_div(rhs) + .expect("Cannot divide by zero-valued Slot/Epoch"), + ) } } impl DivAssign<$other> for $main { fn div_assign(&mut self, rhs: $other) { let rhs: u64 = rhs.into(); - if rhs == 0 { - panic!("Cannot divide by zero-valued Slot/Epoch") - } - self.0 = self.0 / rhs + self.0 = self + .0 + .checked_div(rhs) + .expect("Cannot divide by zero-valued Slot/Epoch"); } } @@ -129,7 +130,11 @@ macro_rules! impl_math_between { fn rem(self, modulus: $other) -> $main { let modulus: u64 = modulus.into(); - $main::from(self.0 % modulus) + $main::from( + self.0 + .checked_rem(modulus) + .expect("Cannot divide by zero-valued Slot/Epoch"), + ) } } }; @@ -234,7 +239,7 @@ macro_rules! impl_ssz { } fn tree_hash_packing_factor() -> usize { - 32 / 8 + 32usize.wrapping_div(8) } fn tree_hash_root(&self) -> tree_hash::Hash256 { diff --git a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs index 333f221ad..c4580a48f 100644 --- a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs @@ -2,7 +2,6 @@ use super::super::{generate_deterministic_keypairs, KeypairsFile}; use crate::test_utils::{AttestationTestTask, TestingPendingAttestationBuilder}; use crate::*; use bls::get_withdrawal_credentials; -use dirs; use log::debug; use rayon::prelude::*; use std::path::{Path, PathBuf}; diff --git a/eth2/types/src/test_utils/builders/testing_deposit_builder.rs b/eth2/types/src/test_utils/builders/testing_deposit_builder.rs index aad5f2098..69da25997 100644 --- a/eth2/types/src/test_utils/builders/testing_deposit_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_deposit_builder.rs @@ -36,7 +36,7 @@ impl TestingDepositBuilder { let mut secret_key = keypair.sk.clone(); match test_task { - DepositTestTask::BadPubKey => pubkeybytes = PublicKeyBytes::from(new_key.pk.clone()), + DepositTestTask::BadPubKey => pubkeybytes = PublicKeyBytes::from(new_key.pk), DepositTestTask::InvalidPubKey => { // Creating invalid public key bytes let mut public_key_bytes: Vec = vec![0; 48]; diff --git a/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs b/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs index a969843fa..88e84a399 100644 --- a/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs @@ -50,7 +50,7 @@ impl TestingProposerSlashingBuilder { message: BeaconBlockHeader { parent_root: hash_2, slot: slot_2, - ..signed_header_1.message.clone() + ..signed_header_1.message }, signature: Signature::empty_signature(), }; diff --git a/eth2/types/src/test_utils/mod.rs b/eth2/types/src/test_utils/mod.rs index 0e5a6d41d..4156c513d 100644 --- a/eth2/types/src/test_utils/mod.rs +++ b/eth2/types/src/test_utils/mod.rs @@ -1,3 +1,5 @@ +#![allow(clippy::integer_arithmetic)] + #[macro_use] mod macros; mod builders; diff --git a/eth2/types/src/tree_hash_impls.rs b/eth2/types/src/tree_hash_impls.rs index 0b08a550b..787d62d76 100644 --- a/eth2/types/src/tree_hash_impls.rs +++ b/eth2/types/src/tree_hash_impls.rs @@ -32,12 +32,10 @@ impl CachedTreeHash for Validator { // Fields pubkey and withdrawal_credentials are constant if (i == 0 || 
i == 1) && cache.initialized { None + } else if process_field_by_index(self, i, leaf, !cache.initialized) { + Some(i) } else { - if process_field_by_index(self, i, leaf, !cache.initialized) { - Some(i) - } else { - None - } + None } }) .collect(); diff --git a/eth2/types/src/utils/serde_utils.rs b/eth2/types/src/utils/serde_utils.rs index 8d8e7dff0..d2d3e5655 100644 --- a/eth2/types/src/utils/serde_utils.rs +++ b/eth2/types/src/utils/serde_utils.rs @@ -1,4 +1,3 @@ -use hex; use serde::de::Error; use serde::{Deserialize, Deserializer, Serializer}; diff --git a/eth2/utils/bls/src/aggregate_public_key.rs b/eth2/utils/bls/src/aggregate_public_key.rs index 4f4040d12..4085b6f25 100644 --- a/eth2/utils/bls/src/aggregate_public_key.rs +++ b/eth2/utils/bls/src/aggregate_public_key.rs @@ -19,9 +19,7 @@ impl AggregatePublicKey { pub fn from_bytes(bytes: &[u8]) -> Result { let pubkey = RawAggregatePublicKey::from_bytes(&bytes).map_err(|_| { - DecodeError::BytesInvalid( - format!("Invalid AggregatePublicKey bytes: {:?}", bytes).to_string(), - ) + DecodeError::BytesInvalid(format!("Invalid AggregatePublicKey bytes: {:?}", bytes)) })?; Ok(AggregatePublicKey(pubkey)) diff --git a/eth2/utils/cached_tree_hash/src/cache.rs b/eth2/utils/cached_tree_hash/src/cache.rs index 782cedcbf..9c546e0ff 100644 --- a/eth2/utils/cached_tree_hash/src/cache.rs +++ b/eth2/utils/cached_tree_hash/src/cache.rs @@ -164,8 +164,8 @@ impl TreeHashCache { fn lift_dirty(dirty_indices: &[usize]) -> SmallVec8 { let mut new_dirty = SmallVec8::with_capacity(dirty_indices.len()); - for i in 0..dirty_indices.len() { - new_dirty.push(dirty_indices[i] / 2) + for index in dirty_indices { + new_dirty.push(index / 2) } new_dirty.dedup(); diff --git a/eth2/utils/cached_tree_hash/src/cache_arena.rs b/eth2/utils/cached_tree_hash/src/cache_arena.rs index 5923b386b..b6ade1474 100644 --- a/eth2/utils/cached_tree_hash/src/cache_arena.rs +++ b/eth2/utils/cached_tree_hash/src/cache_arena.rs @@ -89,6 +89,7 @@ impl CacheArena { /// To reiterate, the given `range` should be relative to the given `alloc_id`, not /// `self.backing`. E.g., if the allocation has an offset of `20` and the range is `0..1`, then /// the splice will translate to `self.backing[20..21]`. 
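// Editor's aside (standalone sketch, not part of the patch): the
// `impl_math_between` changes above don't alter Slot/Epoch semantics.
// Division by a zero-valued Slot/Epoch still panics, but now through an
// explicit `checked_div(..).expect(..)` that satisfies
// `clippy::integer_arithmetic`. The same pattern on a toy newtype:
fn _editor_div_demo() {
    struct Ticks(u64);

    impl std::ops::Div<u64> for Ticks {
        type Output = Ticks;
        fn div(self, rhs: u64) -> Ticks {
            // Explicit panic message instead of an implicit divide-by-zero.
            Ticks(self.0.checked_div(rhs).expect("Cannot divide by zero-valued Ticks"))
        }
    }

    let t = Ticks(10) / 5;
    assert_eq!(t.0, 2);
    // `Ticks(10) / 0` would panic with the message above.
}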
+ #[allow(clippy::comparison_chain)] fn splice_forgetful>( &mut self, alloc_id: usize, diff --git a/eth2/utils/merkle_proof/Cargo.toml b/eth2/utils/merkle_proof/Cargo.toml index a342b5bea..355bf1bac 100644 --- a/eth2/utils/merkle_proof/Cargo.toml +++ b/eth2/utils/merkle_proof/Cargo.toml @@ -8,6 +8,7 @@ edition = "2018" ethereum-types = "0.8.0" eth2_hashing = "0.1.0" lazy_static = "1.4.0" +safe_arith = { path = "../safe_arith" } [dev-dependencies] quickcheck = "0.9.0" diff --git a/eth2/utils/merkle_proof/src/lib.rs b/eth2/utils/merkle_proof/src/lib.rs index 64f744be8..f80ee2a2d 100644 --- a/eth2/utils/merkle_proof/src/lib.rs +++ b/eth2/utils/merkle_proof/src/lib.rs @@ -1,6 +1,7 @@ use eth2_hashing::{hash, hash32_concat, ZERO_HASHES}; use ethereum_types::H256; use lazy_static::lazy_static; +use safe_arith::ArithError; const MAX_TREE_DEPTH: usize = 32; const EMPTY_SLICE: &[H256] = &[]; @@ -38,6 +39,8 @@ pub enum MerkleTreeError { Invalid, // Incorrect Depth provided DepthTooSmall, + // Overflow occurred + ArithError, } impl MerkleTree { @@ -232,6 +235,12 @@ fn merkle_root_from_branch(leaf: H256, branch: &[H256], depth: usize, index: usi H256::from_slice(&merkle_root) } +impl From for MerkleTreeError { + fn from(_: ArithError) -> Self { + MerkleTreeError::ArithError + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/eth2/utils/safe_arith/Cargo.toml b/eth2/utils/safe_arith/Cargo.toml new file mode 100644 index 000000000..7784a0392 --- /dev/null +++ b/eth2/utils/safe_arith/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "safe_arith" +version = "0.1.0" +authors = ["Michael Sproul "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] diff --git a/eth2/utils/safe_arith/src/lib.rs b/eth2/utils/safe_arith/src/lib.rs new file mode 100644 index 000000000..90387b223 --- /dev/null +++ b/eth2/utils/safe_arith/src/lib.rs @@ -0,0 +1,161 @@ +//! Library for safe arithmetic on integers, avoiding overflow and division by zero. + +/// Error representing the failure of an arithmetic operation. +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum ArithError { + Overflow, + DivisionByZero, +} + +type Result = std::result::Result; + +macro_rules! assign_method { + ($name:ident, $op:ident, $doc_op:expr) => { + assign_method!($name, $op, Self, $doc_op); + }; + ($name:ident, $op:ident, $rhs_ty:ty, $doc_op:expr) => { + #[doc = "Safe variant of `"] + #[doc = $doc_op] + #[doc = "`."] + fn $name(&mut self, other: $rhs_ty) -> Result<()> { + *self = self.$op(other)?; + Ok(()) + } + }; +} + +/// Trait providing safe arithmetic operations for built-in types. +pub trait SafeArith: Sized + Copy { + const ZERO: Self; + const ONE: Self; + + /// Safe variant of `+` that guards against overflow. + fn safe_add(&self, other: Self) -> Result; + + /// Safe variant of `-` that guards against overflow. + fn safe_sub(&self, other: Self) -> Result; + + /// Safe variant of `*` that guards against overflow. + fn safe_mul(&self, other: Self) -> Result; + + /// Safe variant of `/` that guards against division by 0. + fn safe_div(&self, other: Self) -> Result; + + /// Safe variant of `%` that guards against division by 0. + fn safe_rem(&self, other: Self) -> Result; + + /// Safe variant of `<<` that guards against overflow. + fn safe_shl(&self, other: u32) -> Result; + + /// Safe variant of `>>` that guards against overflow. 
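    ///
    /// (Editor's sketch: a possible doc-example for the shift guards; it is
    /// not part of the original patch, though it only uses items defined in
    /// this file.)
    ///
    /// ```
    /// use safe_arith::{ArithError, SafeArith};
    ///
    /// // Shifting by the full bit width is an error, not a wrap or UB:
    /// assert_eq!(1u32.safe_shl(31), Ok(1 << 31));
    /// assert_eq!(1u32.safe_shl(32), Err(ArithError::Overflow));
    /// assert_eq!(1u32.safe_shr(32), Err(ArithError::Overflow));
    /// ```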
+ fn safe_shr(&self, other: u32) -> Result; + + assign_method!(safe_add_assign, safe_add, "+="); + assign_method!(safe_sub_assign, safe_sub, "-="); + assign_method!(safe_mul_assign, safe_mul, "*="); + assign_method!(safe_div_assign, safe_div, "/="); + assign_method!(safe_rem_assign, safe_rem, "%="); + assign_method!(safe_shl_assign, safe_shl, u32, "<<="); + assign_method!(safe_shr_assign, safe_shr, u32, ">>="); + + /// Mutate `self` by adding 1, erroring on overflow. + fn increment(&mut self) -> Result<()> { + self.safe_add_assign(Self::ONE) + } +} + +macro_rules! impl_safe_arith { + ($typ:ty) => { + impl SafeArith for $typ { + const ZERO: Self = 0; + const ONE: Self = 1; + + fn safe_add(&self, other: Self) -> Result { + self.checked_add(other).ok_or(ArithError::Overflow) + } + + fn safe_sub(&self, other: Self) -> Result { + self.checked_sub(other).ok_or(ArithError::Overflow) + } + + fn safe_mul(&self, other: Self) -> Result { + self.checked_mul(other).ok_or(ArithError::Overflow) + } + + fn safe_div(&self, other: Self) -> Result { + self.checked_div(other).ok_or(ArithError::DivisionByZero) + } + + fn safe_rem(&self, other: Self) -> Result { + self.checked_rem(other).ok_or(ArithError::DivisionByZero) + } + + fn safe_shl(&self, other: u32) -> Result { + self.checked_shl(other).ok_or(ArithError::Overflow) + } + + fn safe_shr(&self, other: u32) -> Result { + self.checked_shr(other).ok_or(ArithError::Overflow) + } + } + }; +} + +impl_safe_arith!(u8); +impl_safe_arith!(u16); +impl_safe_arith!(u32); +impl_safe_arith!(u64); +impl_safe_arith!(usize); +impl_safe_arith!(i8); +impl_safe_arith!(i16); +impl_safe_arith!(i32); +impl_safe_arith!(i64); +impl_safe_arith!(isize); + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn basic() { + let x = 10u32; + let y = 11; + assert_eq!(x.safe_add(y), Ok(x + y)); + assert_eq!(y.safe_sub(x), Ok(y - x)); + assert_eq!(x.safe_mul(y), Ok(x * y)); + assert_eq!(x.safe_div(y), Ok(x / y)); + assert_eq!(x.safe_rem(y), Ok(x % y)); + + assert_eq!(x.safe_shl(1), Ok(x << 1)); + assert_eq!(x.safe_shr(1), Ok(x >> 1)); + } + + #[test] + fn mutate() { + let mut x = 0u8; + x.increment().unwrap(); + x.increment().unwrap(); + assert_eq!(x, 2); + x.safe_sub_assign(1).unwrap(); + assert_eq!(x, 1); + x.safe_shl_assign(1).unwrap(); + assert_eq!(x, 2); + x.safe_mul_assign(3).unwrap(); + assert_eq!(x, 6); + x.safe_div_assign(4).unwrap(); + assert_eq!(x, 1); + x.safe_shr_assign(1).unwrap(); + assert_eq!(x, 0); + } + + #[test] + fn errors() { + assert!(u32::max_value().safe_add(1).is_err()); + assert!(u32::min_value().safe_sub(1).is_err()); + assert!(u32::max_value().safe_mul(2).is_err()); + assert!(u32::max_value().safe_div(0).is_err()); + assert!(u32::max_value().safe_rem(0).is_err()); + assert!(u32::max_value().safe_shl(32).is_err()); + assert!(u32::max_value().safe_shr(32).is_err()); + } +} diff --git a/eth2/utils/serde_hex/src/lib.rs b/eth2/utils/serde_hex/src/lib.rs index dd76601ab..7b254cf88 100644 --- a/eth2/utils/serde_hex/src/lib.rs +++ b/eth2/utils/serde_hex/src/lib.rs @@ -1,4 +1,3 @@ -use hex; use hex::ToHex; use serde::de::{self, Visitor}; use std::fmt; diff --git a/eth2/utils/ssz_derive/src/lib.rs b/eth2/utils/ssz_derive/src/lib.rs index 8b341f38a..04ef8b982 100644 --- a/eth2/utils/ssz_derive/src/lib.rs +++ b/eth2/utils/ssz_derive/src/lib.rs @@ -86,6 +86,7 @@ pub fn ssz_encode_derive(input: TokenStream) -> TokenStream { let field_types_f = field_types_a.clone(); let output = quote! 
{ + #[allow(clippy::integer_arithmetic)] impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { fn is_ssz_fixed_len() -> bool { #( @@ -221,6 +222,7 @@ pub fn ssz_decode_derive(input: TokenStream) -> TokenStream { } let output = quote! { + #[allow(clippy::integer_arithmetic)] impl #impl_generics ssz::Decode for #name #ty_generics #where_clause { fn is_ssz_fixed_len() -> bool { #( diff --git a/eth2/utils/tree_hash/src/merkle_hasher.rs b/eth2/utils/tree_hash/src/merkle_hasher.rs index 9c921c075..02c349eb8 100644 --- a/eth2/utils/tree_hash/src/merkle_hasher.rs +++ b/eth2/utils/tree_hash/src/merkle_hasher.rs @@ -274,22 +274,20 @@ impl MerkleHasher { loop { if let Some(root) = self.root { break Ok(root); + } else if let Some(node) = self.half_nodes.last() { + let right_child = node.id * 2 + 1; + self.process_right_node(right_child, self.zero_hash(right_child)); + } else if self.next_leaf == 1 { + // The next_leaf can only be 1 if the tree has a depth of one. If have been no + // leaves supplied, assume a root of zero. + break Ok(Hash256::zero()); } else { - if let Some(node) = self.half_nodes.last() { - let right_child = node.id * 2 + 1; - self.process_right_node(right_child, self.zero_hash(right_child)); - } else if self.next_leaf == 1 { - // The next_leaf can only be 1 if the tree has a depth of one. If have been no - // leaves supplied, assume a root of zero. - break Ok(Hash256::zero()); - } else { - // The only scenario where there are (a) no half nodes and (b) a tree of depth - // two or more is where no leaves have been supplied at all. - // - // Once we supply this first zero-hash leaf then all future operations will be - // triggered via the `process_right_node` branch. - self.process_left_node(self.next_leaf, self.zero_hash(self.next_leaf)) - } + // The only scenario where there are (a) no half nodes and (b) a tree of depth + // two or more is where no leaves have been supplied at all. + // + // Once we supply this first zero-hash leaf then all future operations will be + // triggered via the `process_right_node` branch. + self.process_left_node(self.next_leaf, self.zero_hash(self.next_leaf)) } } } From b374ead24b052e57b492e8680bdd19d6cf251c71 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 20 Apr 2020 15:35:47 +1000 Subject: [PATCH 3/4] Protect against OOB offset in variable list SSZ decoding (#974) * Add "pretty-ssz" tool to lcli * Protect against OOB SSZ offset * Add more work on decoding * Fix benches * Add more decode fixes * Rename fixed_ptr * Add, fix tests * Add extra test * Increase SSZ decode error granularity * Ripples new error types across ssz crate * Add comment to `sanitize_offset` * Introduce max_len to SSZ list decoding * Restrict FixedVector, check for zero-len items * Double check for empty list * Address Michael's comment --- eth2/utils/ssz/src/decode.rs | 91 +++++++++++++---- eth2/utils/ssz/src/decode/impls.rs | 113 ++++++++++++---------- eth2/utils/ssz/tests/tests.rs | 6 +- eth2/utils/ssz_types/src/fixed_vector.rs | 21 +++- eth2/utils/ssz_types/src/variable_list.rs | 35 +++++-- lcli/src/helpers.rs | 8 ++ lcli/src/main.rs | 31 +++++- lcli/src/parse_ssz.rs | 40 ++++++++ 8 files changed, 263 insertions(+), 82 deletions(-) create mode 100644 lcli/src/parse_ssz.rs diff --git a/eth2/utils/ssz/src/decode.rs b/eth2/utils/ssz/src/decode.rs index 8a2fc5351..ec87935b4 100644 --- a/eth2/utils/ssz/src/decode.rs +++ b/eth2/utils/ssz/src/decode.rs @@ -21,10 +21,72 @@ pub enum DecodeError { /// length items (i.e., `length[0] < BYTES_PER_LENGTH_OFFSET`). 
/// - When decoding variable-length items, the `n`'th offset was less than the `n-1`'th offset. OutOfBoundsByte { i: usize }, + /// An offset points “backwards” into the fixed-bytes portion of the message, essentially + /// double-decoding bytes that will also be decoded as fixed-length. + /// + /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#1-Offset-into-fixed-portion + OffsetIntoFixedPortion(usize), + /// The first offset does not point to the byte that follows the fixed byte portion, + /// essentially skipping a variable-length byte. + /// + /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#2-Skip-first-variable-byte + OffsetSkipsVariableBytes(usize), + /// An offset points to bytes prior to the previous offset. Depending on how you look at it, + /// this either double-decodes bytes or makes the first offset a negative-length. + /// + /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#3-Offsets-are-decreasing + OffsetsAreDecreasing(usize), + /// An offset references byte indices that do not exist in the source bytes. + /// + /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#4-Offsets-are-out-of-bounds + OffsetOutOfBounds(usize), + /// A variable-length list does not have a fixed portion that is cleanly divisible by + /// `BYTES_PER_LENGTH_OFFSET`. + InvalidListFixedBytesLen(usize), + /// Some item has a `ssz_fixed_len` of zero. This is illegal. + ZeroLengthItem, /// The given bytes were invalid for some application-level reason. BytesInvalid(String), } +/// Performs checks on the `offset` based upon the other parameters provided. +/// +/// ## Detail +/// +/// - `offset`: the offset bytes (e.g., result of `read_offset(..)`). +/// - `previous_offset`: unless this is the first offset in the SSZ object, the value of the +/// previously-read offset. Used to ensure offsets are not decreasing. +/// - `num_bytes`: the total number of bytes in the SSZ object. Used to ensure the offset is not +/// out of bounds. +/// - `num_fixed_bytes`: the number of fixed-bytes in the struct, if it is known. Used to ensure +/// that the first offset doesn't skip any variable bytes. +/// +/// ## References +/// +/// The checks here are derived from this document: +/// +/// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view +pub fn sanitize_offset( + offset: usize, + previous_offset: Option, + num_bytes: usize, + num_fixed_bytes: Option, +) -> Result { + if num_fixed_bytes.map_or(false, |fixed_bytes| offset < fixed_bytes) { + Err(DecodeError::OffsetIntoFixedPortion(offset)) + } else if previous_offset.is_none() + && num_fixed_bytes.map_or(false, |fixed_bytes| offset != fixed_bytes) + { + Err(DecodeError::OffsetSkipsVariableBytes(offset)) + } else if offset > num_bytes { + Err(DecodeError::OffsetOutOfBounds(offset)) + } else if previous_offset.map_or(false, |prev| prev > offset) { + Err(DecodeError::OffsetsAreDecreasing(offset)) + } else { + Ok(offset) + } +} + /// Provides SSZ decoding (de-serialization) via the `from_ssz_bytes(&bytes)` method. 
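
A worked example may help here (editor's sketch; the byte lengths are invented for illustration). Take a message whose fixed portion is two 4-byte offsets, i.e. `num_fixed_bytes = Some(8)`, in 12 bytes total:

    assert_eq!(sanitize_offset(8, None, 12, Some(8)), Ok(8));
    assert_eq!(
        sanitize_offset(7, None, 12, Some(8)),
        Err(DecodeError::OffsetIntoFixedPortion(7))
    );
    assert_eq!(
        sanitize_offset(9, None, 12, Some(8)),
        Err(DecodeError::OffsetSkipsVariableBytes(9))
    );
    // The struct decoder below passes `None` for `num_fixed_bytes` and checks
    // the first offset in `finalize` instead, so for later offsets only the
    // bounds and monotonicity checks can fire:
    assert_eq!(
        sanitize_offset(13, Some(8), 12, None),
        Err(DecodeError::OffsetOutOfBounds(13))
    );
    assert_eq!(
        sanitize_offset(7, Some(8), 12, None),
        Err(DecodeError::OffsetsAreDecreasing(7))
    );
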
/// /// See `examples/` for manual implementations or the crate root for implementations using @@ -97,21 +159,14 @@ impl<'a> SszDecoderBuilder<'a> { self.items.push(slice); } else { - let offset = read_offset(&self.bytes[self.items_index..])?; - - let previous_offset = self - .offsets - .last() - .map(|o| o.offset) - .unwrap_or_else(|| BYTES_PER_LENGTH_OFFSET); - - if (previous_offset > offset) || (offset > self.bytes.len()) { - return Err(DecodeError::OutOfBoundsByte { i: offset }); - } - self.offsets.push(Offset { position: self.items.len(), - offset, + offset: sanitize_offset( + read_offset(&self.bytes[self.items_index..])?, + self.offsets.last().map(|o| o.offset), + self.bytes.len(), + None, + )?, }); // Push an empty slice into items; it will be replaced later. @@ -124,13 +179,13 @@ impl<'a> SszDecoderBuilder<'a> { } fn finalize(&mut self) -> Result<(), DecodeError> { - if !self.offsets.is_empty() { + if let Some(first_offset) = self.offsets.first().map(|o| o.offset) { // Check to ensure the first offset points to the byte immediately following the // fixed-length bytes. - if self.offsets[0].offset != self.items_index { - return Err(DecodeError::OutOfBoundsByte { - i: self.offsets[0].offset, - }); + if first_offset < self.items_index { + return Err(DecodeError::OffsetIntoFixedPortion(first_offset)); + } else if first_offset > self.items_index { + return Err(DecodeError::OffsetSkipsVariableBytes(first_offset)); } // Iterate through each pair of offsets, grabbing the slice between each of the offsets. diff --git a/eth2/utils/ssz/src/decode/impls.rs b/eth2/utils/ssz/src/decode/impls.rs index a33fcac18..e039e2d16 100644 --- a/eth2/utils/ssz/src/decode/impls.rs +++ b/eth2/utils/ssz/src/decode/impls.rs @@ -366,7 +366,7 @@ impl_decodable_for_u8_array!(4); impl_decodable_for_u8_array!(32); macro_rules! impl_for_vec { - ($type: ty) => { + ($type: ty, $max_len: expr) => { impl Decode for $type { fn is_ssz_fixed_len() -> bool { false @@ -381,22 +381,22 @@ macro_rules! impl_for_vec { .map(|chunk| T::from_ssz_bytes(chunk)) .collect() } else { - decode_list_of_variable_length_items(bytes).map(|vec| vec.into()) + decode_list_of_variable_length_items(bytes, $max_len).map(|vec| vec.into()) } } } }; } -impl_for_vec!(Vec); -impl_for_vec!(SmallVec<[T; 1]>); -impl_for_vec!(SmallVec<[T; 2]>); -impl_for_vec!(SmallVec<[T; 3]>); -impl_for_vec!(SmallVec<[T; 4]>); -impl_for_vec!(SmallVec<[T; 5]>); -impl_for_vec!(SmallVec<[T; 6]>); -impl_for_vec!(SmallVec<[T; 7]>); -impl_for_vec!(SmallVec<[T; 8]>); +impl_for_vec!(Vec, None); +impl_for_vec!(SmallVec<[T; 1]>, Some(1)); +impl_for_vec!(SmallVec<[T; 2]>, Some(2)); +impl_for_vec!(SmallVec<[T; 3]>, Some(3)); +impl_for_vec!(SmallVec<[T; 4]>, Some(4)); +impl_for_vec!(SmallVec<[T; 5]>, Some(5)); +impl_for_vec!(SmallVec<[T; 6]>, Some(6)); +impl_for_vec!(SmallVec<[T; 7]>, Some(7)); +impl_for_vec!(SmallVec<[T; 8]>, Some(8)); /// Decodes `bytes` as if it were a list of variable-length items. /// @@ -405,43 +405,52 @@ impl_for_vec!(SmallVec<[T; 8]>); /// differing types. pub fn decode_list_of_variable_length_items( bytes: &[u8], + max_len: Option, ) -> Result, DecodeError> { - let mut next_variable_byte = read_offset(bytes)?; - - // The value of the first offset must not point back into the same bytes that defined - // it. 
- if next_variable_byte < BYTES_PER_LENGTH_OFFSET { - return Err(DecodeError::OutOfBoundsByte { - i: next_variable_byte, - }); + if bytes.is_empty() { + return Ok(vec![]); } - let num_items = next_variable_byte / BYTES_PER_LENGTH_OFFSET; + let first_offset = read_offset(bytes)?; + sanitize_offset(first_offset, None, bytes.len(), Some(first_offset))?; - // The fixed-length section must be a clean multiple of `BYTES_PER_LENGTH_OFFSET`. - if next_variable_byte != num_items * BYTES_PER_LENGTH_OFFSET { - return Err(DecodeError::InvalidByteLength { - len: next_variable_byte, - expected: num_items * BYTES_PER_LENGTH_OFFSET, - }); + if first_offset % BYTES_PER_LENGTH_OFFSET != 0 || first_offset < BYTES_PER_LENGTH_OFFSET { + return Err(DecodeError::InvalidListFixedBytesLen(first_offset)); } - let mut values = Vec::with_capacity(num_items); + let num_items = first_offset / BYTES_PER_LENGTH_OFFSET; + + if max_len.map_or(false, |max| num_items > max) { + return Err(DecodeError::BytesInvalid(format!( + "Variable length list of {} items exceeds maximum of {:?}", + num_items, max_len + ))); + } + + // Only initialize the vec with a capacity if a maximum length is provided. + // + // We assume that if a max length is provided then the application is able to handle an + // allocation of this size. + let mut values = if max_len.is_some() { + Vec::with_capacity(num_items) + } else { + vec![] + }; + + let mut offset = first_offset; for i in 1..=num_items { let slice_option = if i == num_items { - bytes.get(next_variable_byte..) + bytes.get(offset..) } else { - let offset = read_offset(&bytes[(i * BYTES_PER_LENGTH_OFFSET)..])?; + let start = offset; - let start = next_variable_byte; - next_variable_byte = offset; + let next_offset = read_offset(&bytes[(i * BYTES_PER_LENGTH_OFFSET)..])?; + offset = sanitize_offset(next_offset, Some(offset), bytes.len(), Some(first_offset))?; - bytes.get(start..next_variable_byte) + bytes.get(start..offset) }; - let slice = slice_option.ok_or_else(|| DecodeError::OutOfBoundsByte { - i: next_variable_byte, - })?; + let slice = slice_option.ok_or_else(|| DecodeError::OutOfBoundsByte { i: offset })?; values.push(T::from_ssz_bytes(slice)?); } @@ -519,26 +528,34 @@ mod tests { ); } + #[test] + fn empty_list() { + let vec: Vec> = vec![]; + let bytes = vec.as_ssz_bytes(); + assert!(bytes.is_empty()); + assert_eq!(Vec::from_ssz_bytes(&bytes), Ok(vec),); + } + #[test] fn first_length_points_backwards() { assert_eq!( >>::from_ssz_bytes(&[0, 0, 0, 0]), - Err(DecodeError::OutOfBoundsByte { i: 0 }) + Err(DecodeError::InvalidListFixedBytesLen(0)) ); assert_eq!( >>::from_ssz_bytes(&[1, 0, 0, 0]), - Err(DecodeError::OutOfBoundsByte { i: 1 }) + Err(DecodeError::InvalidListFixedBytesLen(1)) ); assert_eq!( >>::from_ssz_bytes(&[2, 0, 0, 0]), - Err(DecodeError::OutOfBoundsByte { i: 2 }) + Err(DecodeError::InvalidListFixedBytesLen(2)) ); assert_eq!( >>::from_ssz_bytes(&[3, 0, 0, 0]), - Err(DecodeError::OutOfBoundsByte { i: 3 }) + Err(DecodeError::InvalidListFixedBytesLen(3)) ); } @@ -546,7 +563,7 @@ mod tests { fn lengths_are_decreasing() { assert_eq!( >>::from_ssz_bytes(&[12, 0, 0, 0, 14, 0, 0, 0, 12, 0, 0, 0, 1, 0, 1, 0]), - Err(DecodeError::OutOfBoundsByte { i: 12 }) + Err(DecodeError::OffsetsAreDecreasing(12)) ); } @@ -554,10 +571,7 @@ mod tests { fn awkward_fixed_length_portion() { assert_eq!( >>::from_ssz_bytes(&[10, 0, 0, 0, 10, 0, 0, 0, 0, 0]), - Err(DecodeError::InvalidByteLength { - len: 10, - expected: 8 - }) + Err(DecodeError::InvalidListFixedBytesLen(10)) ); } @@ -565,14 +579,15 @@ mod 
tests { fn length_out_of_bounds() { assert_eq!( >>::from_ssz_bytes(&[5, 0, 0, 0]), - Err(DecodeError::InvalidByteLength { - len: 5, - expected: 4 - }) + Err(DecodeError::OffsetOutOfBounds(5)) ); assert_eq!( >>::from_ssz_bytes(&[8, 0, 0, 0, 9, 0, 0, 0]), - Err(DecodeError::OutOfBoundsByte { i: 9 }) + Err(DecodeError::OffsetOutOfBounds(9)) + ); + assert_eq!( + >>::from_ssz_bytes(&[8, 0, 0, 0, 16, 0, 0, 0]), + Err(DecodeError::OffsetOutOfBounds(16)) ); } diff --git a/eth2/utils/ssz/tests/tests.rs b/eth2/utils/ssz/tests/tests.rs index f82e5d6e3..2eada5c51 100644 --- a/eth2/utils/ssz/tests/tests.rs +++ b/eth2/utils/ssz/tests/tests.rs @@ -152,7 +152,7 @@ mod round_trip { assert_eq!( VariableLen::from_ssz_bytes(&bytes), - Err(DecodeError::OutOfBoundsByte { i: 9 }) + Err(DecodeError::OffsetIntoFixedPortion(9)) ); } @@ -182,7 +182,7 @@ mod round_trip { assert_eq!( VariableLen::from_ssz_bytes(&bytes), - Err(DecodeError::OutOfBoundsByte { i: 11 }) + Err(DecodeError::OffsetSkipsVariableBytes(11)) ); } @@ -284,7 +284,7 @@ mod round_trip { assert_eq!( ThreeVariableLen::from_ssz_bytes(&bytes), - Err(DecodeError::OutOfBoundsByte { i: 14 }) + Err(DecodeError::OffsetsAreDecreasing(14)) ); } diff --git a/eth2/utils/ssz_types/src/fixed_vector.rs b/eth2/utils/ssz_types/src/fixed_vector.rs index 91b6912f8..dffd2ad75 100644 --- a/eth2/utils/ssz_types/src/fixed_vector.rs +++ b/eth2/utils/ssz_types/src/fixed_vector.rs @@ -224,29 +224,44 @@ where } fn from_ssz_bytes(bytes: &[u8]) -> Result { + let fixed_len = N::to_usize(); + if bytes.is_empty() { Err(ssz::DecodeError::InvalidByteLength { len: 0, expected: 1, }) } else if T::is_ssz_fixed_len() { + let num_items = bytes + .len() + .checked_div(T::ssz_fixed_len()) + .ok_or_else(|| ssz::DecodeError::ZeroLengthItem)?; + + if num_items != fixed_len { + return Err(ssz::DecodeError::BytesInvalid(format!( + "FixedVector of {} items has {} items", + num_items, fixed_len + ))); + } + bytes .chunks(T::ssz_fixed_len()) .map(|chunk| T::from_ssz_bytes(chunk)) .collect::, _>>() .and_then(|vec| { - if vec.len() == N::to_usize() { + if vec.len() == fixed_len { Ok(vec.into()) } else { Err(ssz::DecodeError::BytesInvalid(format!( - "wrong number of vec elements, got: {}, expected: {}", + "Wrong number of FixedVector elements, got: {}, expected: {}", vec.len(), N::to_usize() ))) } }) } else { - ssz::decode_list_of_variable_length_items(bytes).and_then(|vec| Ok(vec.into())) + ssz::decode_list_of_variable_length_items(bytes, Some(fixed_len)) + .and_then(|vec| Ok(vec.into())) } } } diff --git a/eth2/utils/ssz_types/src/variable_list.rs b/eth2/utils/ssz_types/src/variable_list.rs index 65f72f236..c5cb18577 100644 --- a/eth2/utils/ssz_types/src/variable_list.rs +++ b/eth2/utils/ssz_types/src/variable_list.rs @@ -218,22 +218,41 @@ where } } -impl ssz::Decode for VariableList +impl ssz::Decode for VariableList where T: ssz::Decode, + N: Unsigned, { fn is_ssz_fixed_len() -> bool { - >::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - >::ssz_fixed_len() + false } fn from_ssz_bytes(bytes: &[u8]) -> Result { - let vec = >::from_ssz_bytes(bytes)?; + let max_len = N::to_usize(); - Self::new(vec).map_err(|e| ssz::DecodeError::BytesInvalid(format!("VariableList {:?}", e))) + if bytes.is_empty() { + Ok(vec![].into()) + } else if T::is_ssz_fixed_len() { + let num_items = bytes + .len() + .checked_div(T::ssz_fixed_len()) + .ok_or_else(|| ssz::DecodeError::ZeroLengthItem)?; + + if num_items > max_len { + return Err(ssz::DecodeError::BytesInvalid(format!( + "VariableList of {} items exceeds 
maximum of {}",
+                    num_items, max_len
+                )));
+            }
+
+            bytes
+                .chunks(T::ssz_fixed_len())
+                .map(|chunk| T::from_ssz_bytes(chunk))
+                .collect::<Result<Vec<T>, _>>()
+                .map(Into::into)
+        } else {
+            ssz::decode_list_of_variable_length_items(bytes, Some(max_len)).map(|vec| vec.into())
+        }
     }
 }
diff --git a/lcli/src/helpers.rs b/lcli/src/helpers.rs
index 6f7014f3a..441059cd1 100644
--- a/lcli/src/helpers.rs
+++ b/lcli/src/helpers.rs
@@ -29,6 +29,14 @@ pub fn parse_path_with_default_in_home_dir(
     })
 }
 
+pub fn parse_path(matches: &ArgMatches, name: &'static str) -> Result<PathBuf, String> {
+    matches
+        .value_of(name)
+        .ok_or_else(|| format!("{} not specified", name))?
+        .parse::<PathBuf>()
+        .map_err(|e| format!("Unable to parse {}: {}", name, e))
+}
+
 pub fn parse_u64(matches: &ArgMatches, name: &'static str) -> Result<u64, String> {
     matches
         .value_of(name)
diff --git a/lcli/src/main.rs b/lcli/src/main.rs
index 9fb994207..4da64b826 100644
--- a/lcli/src/main.rs
+++ b/lcli/src/main.rs
@@ -9,6 +9,7 @@ mod helpers;
 mod interop_genesis;
 mod new_testnet;
 mod parse_hex;
+mod parse_ssz;
 mod refund_deposit_contract;
 mod transition_blocks;
 
@@ -36,7 +37,6 @@ fn main() {
                 .long("spec")
                 .value_name("STRING")
                 .takes_value(true)
-                .required(true)
                 .possible_values(&["minimal", "mainnet"])
                 .default_value("mainnet")
         )
@@ -94,6 +94,27 @@ fn main() {
                         .help("Path to output a SSZ file."),
                 ),
         )
+        .subcommand(
+            SubCommand::with_name("pretty-ssz")
+                .about("Parses a file of raw (not hex-encoded) SSZ bytes")
+                .arg(
+                    Arg::with_name("type")
+                        .index(1)
+                        .value_name("TYPE")
+                        .takes_value(true)
+                        .required(true)
+                        .possible_values(&["SignedBeaconBlock"])
+                        .help("The schema of the supplied SSZ."),
+                )
+                .arg(
+                    Arg::with_name("path")
+                        .index(2)
+                        .value_name("SSZ_FILE")
+                        .takes_value(true)
+                        .required(true)
+                        .help("A file containing SSZ bytes"),
+                ),
+        )
         .subcommand(
             SubCommand::with_name("pretty-hex")
                 .about("Parses SSZ encoded as ASCII 0x-prefixed hex")
@@ -452,6 +473,14 @@ fn run<T: EthSpec>(env_builder: EnvironmentBuilder<T>, matches: &ArgMatches) {
         }
         ("transition-blocks", Some(matches)) => run_transition_blocks::<T>(matches)
             .unwrap_or_else(|e| error!("Failed to transition blocks: {}", e)),
+        ("pretty-ssz", Some(sub_matches)) => {
+            let result = match matches.value_of("spec").expect("spec is required by clap") {
+                "minimal" => parse_ssz::run::<MinimalEthSpec>(sub_matches),
+                "mainnet" => parse_ssz::run::<MainnetEthSpec>(sub_matches),
+                _ => unreachable!("guarded by clap possible_values"),
+            };
+            result.unwrap_or_else(|e| error!("Failed to run pretty-ssz command: {}", e))
+        }
         ("pretty-hex", Some(matches)) => run_parse_hex::<T>(matches)
             .unwrap_or_else(|e| error!("Failed to pretty print hex: {}", e)),
         ("deploy-deposit-contract", Some(matches)) => {
diff --git a/lcli/src/parse_ssz.rs b/lcli/src/parse_ssz.rs
new file mode 100644
index 000000000..a6359c8b0
--- /dev/null
+++ b/lcli/src/parse_ssz.rs
@@ -0,0 +1,40 @@
+use crate::helpers::parse_path;
+use clap::ArgMatches;
+use serde::Serialize;
+use ssz::Decode;
+use std::fs::File;
+use std::io::Read;
+use types::{EthSpec, SignedBeaconBlock};
+
+pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> {
+    let type_str = matches
+        .value_of("type")
+        .ok_or_else(|| "No type supplied".to_string())?;
+    let path = parse_path(matches, "path")?;
+
+    info!("Type: {:?}", type_str);
+
+    let mut bytes = vec![];
+    let mut file = File::open(&path).map_err(|e| format!("Unable to open {:?}: {}", path, e))?;
+    file.read_to_end(&mut bytes)
+        .map_err(|e| format!("Unable to read {:?}: {}", path, e))?;
+
+    match type_str {
+        "SignedBeaconBlock" => decode_and_print::<SignedBeaconBlock<T>>(&bytes)?,
+        other => return
Err(format!("Unknown type: {}", other)), + }; + + Ok(()) +} + +fn decode_and_print(bytes: &[u8]) -> Result<(), String> { + let item = T::from_ssz_bytes(&bytes).map_err(|e| format!("Ssz decode failed: {:?}", e))?; + + println!( + "{}", + serde_yaml::to_string(&item) + .map_err(|e| format!("Unable to write object to YAML: {:?}", e))? + ); + + Ok(()) +} From 9c3f76a33b41cbcfd30a710753ff0284f743f701 Mon Sep 17 00:00:00 2001 From: Adam Szkoda Date: Mon, 20 Apr 2020 11:59:56 +0200 Subject: [PATCH 4/4] Prune abandoned forks (#916) * Address compiler warning * Prune abandoned fork choice forks * New approach to pruning * Wrap some block hashes in a newtype pattern For increased type safety. * Add Graphviz chain dump emitter for debugging * Fix broken test case * Make prunes_abandoned_forks use real DiskStore * Mark finalized blocks in the GraphViz output * Refine debug stringification of Slot and Epoch Before this commit: print!("{:?}", Slot(123)) == "Slot(\n123\n)". After this commit: print!("{:?", Slot(123)) == "Slot(123)". * Simplify build_block() * Rewrite test case using more composable test primitives * Working rewritten test case * Tighten fork prunning test checks * Add another pruning test case * Bugfix: Finalized blocks weren't always properly detected * Pruning: Add pruning_does_not_touch_blocks_prior_to_finalization test case * Tighten pruning tests: check if heads are tracked properly * Add a failing test case for a buggy scenario * Change name of function to a more accurate one * Fix failing test case * Test case: Were skipped slots' states pruned? * Style fix: Simplify dereferencing * Tighten pruning tests: check if abandoned states are deleted * Towards atomicity of db ops * Correct typo * Prune also skipped slots' states * New logic for handling skipped states * Make skipped slots test pass * Post conflict resolution fixes * Formatting fixes * Tests passing * Block hashes in Graphviz node labels * Removed unused changes * Fix bug with states having < SlotsPerHistoricalRoot roots * Consolidate State/BlockRootsIterator for pruning * Address review feedback * Fix a bug in pruning tests * Detach prune_abandoned_forks() from its object * Move migrate.rs from store to beacon_chain * Move forks pruning onto a background thread * Bugfix: Heads weren't pruned when prune set contained only the head * Rename: freeze_to_state() -> process_finalization() * Eliminate redundant function parameter Co-authored-by: Michael Sproul --- beacon_node/beacon_chain/src/beacon_chain.rs | 122 +++- beacon_node/beacon_chain/src/builder.rs | 17 +- beacon_node/beacon_chain/src/head_tracker.rs | 13 +- beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/beacon_chain/src/migrate.rs | 349 +++++++++++ beacon_node/beacon_chain/src/test_utils.rs | 144 ++++- beacon_node/beacon_chain/tests/store_tests.rs | 566 +++++++++++++++++- beacon_node/client/src/builder.rs | 27 +- beacon_node/eth1/src/service.rs | 2 +- beacon_node/src/lib.rs | 2 +- beacon_node/store/src/hot_cold_store.rs | 2 +- beacon_node/store/src/iter.rs | 192 +++--- beacon_node/store/src/lib.rs | 6 +- beacon_node/store/src/migrate.rs | 153 ----- eth2/types/src/beacon_state.rs | 38 ++ eth2/types/src/lib.rs | 2 +- eth2/types/src/signed_beacon_block.rs | 29 + eth2/types/src/slot_epoch.rs | 4 +- eth2/types/src/slot_epoch_macros.rs | 11 + 19 files changed, 1398 insertions(+), 282 deletions(-) create mode 100644 beacon_node/beacon_chain/src/migrate.rs delete mode 100644 beacon_node/store/src/migrate.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs 
b/beacon_node/beacon_chain/src/beacon_chain.rs index 8f4ef7c64..c04e9b1e3 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4,6 +4,7 @@ use crate::events::{EventHandler, EventKind}; use crate::fork_choice::{Error as ForkChoiceError, ForkChoice}; use crate::head_tracker::HeadTracker; use crate::metrics; +use crate::migrate::Migrate; use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::shuffling_cache::ShufflingCache; use crate::snapshot_cache::SnapshotCache; @@ -26,14 +27,16 @@ use state_processing::{ use std::borrow::Cow; use std::cmp::Ordering; use std::collections::HashMap; +use std::collections::HashSet; use std::fs; use std::io::prelude::*; use std::sync::Arc; use std::time::{Duration, Instant}; use store::iter::{ - BlockRootsIterator, ReverseBlockRootIterator, ReverseStateRootIterator, StateRootsIterator, + BlockRootsIterator, ParentRootBlockIterator, ReverseBlockRootIterator, + ReverseStateRootIterator, StateRootsIterator, }; -use store::{Error as DBError, Migrate, StateBatch, Store}; +use store::{Error as DBError, StateBatch, Store}; use tree_hash::TreeHash; use types::*; @@ -170,7 +173,7 @@ pub struct HeadInfo { pub trait BeaconChainTypes: Send + Sync + 'static { type Store: store::Store; - type StoreMigrator: store::Migrate; + type StoreMigrator: Migrate; type SlotClock: slot_clock::SlotClock; type Eth1Chain: Eth1ChainBackend; type EthSpec: types::EthSpec; @@ -202,7 +205,7 @@ pub struct BeaconChain { /// A handler for events generated by the beacon chain. pub event_handler: T::EventHandler, /// Used to track the heads of the beacon chain. - pub(crate) head_tracker: HeadTracker, + pub(crate) head_tracker: Arc, /// A cache dedicated to block processing. pub(crate) block_processing_cache: TimeoutRwLock>, /// Caches the shuffling for a given epoch and state root. @@ -498,6 +501,10 @@ impl BeaconChain { self.head_tracker.heads() } + pub fn knows_head(&self, block_hash: &SignedBeaconBlockHash) -> bool { + self.head_tracker.contains_head((*block_hash).into()) + } + /// Returns the `BeaconState` at the given slot. 
/// /// Returns `None` when the state is not found in the database or there is an error skipping @@ -1796,6 +1803,7 @@ impl BeaconChain { let beacon_block_root = self.fork_choice.find_head(&self)?; let current_head = self.head_info()?; + let old_finalized_root = current_head.finalized_checkpoint.root; if beacon_block_root == current_head.block_root { return Ok(()); @@ -1923,7 +1931,11 @@ impl BeaconChain { }); if new_finalized_epoch != old_finalized_epoch { - self.after_finalization(old_finalized_epoch, finalized_root)?; + self.after_finalization( + old_finalized_epoch, + finalized_root, + old_finalized_root.into(), + )?; } let _ = self.event_handler.register(EventKind::BeaconHeadChanged { @@ -1942,6 +1954,7 @@ impl BeaconChain { &self, old_finalized_epoch: Epoch, finalized_block_root: Hash256, + old_finalized_root: SignedBeaconBlockHash, ) -> Result<(), Error> { let finalized_block = self .store @@ -1981,10 +1994,13 @@ impl BeaconChain { // TODO: configurable max finality distance let max_finality_distance = 0; - self.store_migrator.freeze_to_state( + self.store_migrator.process_finalization( finalized_block.state_root, finalized_state, max_finality_distance, + Arc::clone(&self.head_tracker), + old_finalized_root, + finalized_block_root.into(), ); let _ = self.event_handler.register(EventKind::BeaconFinalization { @@ -2052,6 +2068,100 @@ impl BeaconChain { Ok(dump) } + + pub fn dump_as_dot(&self, output: &mut W) { + let canonical_head_hash = self + .canonical_head + .try_read_for(HEAD_LOCK_TIMEOUT) + .ok_or_else(|| Error::CanonicalHeadLockTimeout) + .unwrap() + .beacon_block_root; + let mut visited: HashSet = HashSet::new(); + let mut finalized_blocks: HashSet = HashSet::new(); + + let genesis_block_hash = Hash256::zero(); + write!(output, "digraph beacon {{\n").unwrap(); + write!(output, "\t_{:?}[label=\"genesis\"];\n", genesis_block_hash).unwrap(); + + // Canonical head needs to be processed first as otherwise finalized blocks aren't detected + // properly. 
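// Editor's illustration (hypothetical output; hashes abbreviated, real node
// ids use the full 0x-prefixed root): for a small two-head chain the DOT
// emitted by `dump_as_dot` below looks roughly like
//
//     digraph beacon {
//         _0x0000..[label="genesis"];
//         _0x7a1f..[label="0x7a1f.. (64)" shape=box3d];   // canonical head
//         _0x9c2e..[label="0x9c2e.. (63)" shape=box];     // fork head
//         _0x51b0..[label="0x51b0.. (32)" shape=Msquare]; // finalized
//         _0x7a1f.. -> _0x51b0..;
//         ...
//     }
//
// which renders for debugging with, e.g., `dot -Tsvg chain.dot -o chain.svg`.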
+ let heads = { + let mut heads = self.heads(); + let canonical_head_index = heads + .iter() + .position(|(block_hash, _)| *block_hash == canonical_head_hash) + .unwrap(); + let (canonical_head_hash, canonical_head_slot) = + heads.swap_remove(canonical_head_index); + heads.insert(0, (canonical_head_hash, canonical_head_slot)); + heads + }; + + for (head_hash, _head_slot) in heads { + for (block_hash, signed_beacon_block) in + ParentRootBlockIterator::new(&*self.store, head_hash) + { + if visited.contains(&block_hash) { + break; + } + visited.insert(block_hash); + + if signed_beacon_block.slot() % T::EthSpec::slots_per_epoch() == 0 { + let block = self.get_block(&block_hash).unwrap().unwrap(); + let state = self + .get_state(&block.state_root(), Some(block.slot())) + .unwrap() + .unwrap(); + finalized_blocks.insert(state.finalized_checkpoint.root); + } + + if block_hash == canonical_head_hash { + write!( + output, + "\t_{:?}[label=\"{} ({})\" shape=box3d];\n", + block_hash, + block_hash, + signed_beacon_block.slot() + ) + .unwrap(); + } else if finalized_blocks.contains(&block_hash) { + write!( + output, + "\t_{:?}[label=\"{} ({})\" shape=Msquare];\n", + block_hash, + block_hash, + signed_beacon_block.slot() + ) + .unwrap(); + } else { + write!( + output, + "\t_{:?}[label=\"{} ({})\" shape=box];\n", + block_hash, + block_hash, + signed_beacon_block.slot() + ) + .unwrap(); + } + write!( + output, + "\t_{:?} -> _{:?};\n", + block_hash, + signed_beacon_block.parent_root() + ) + .unwrap(); + } + } + + write!(output, "}}\n").unwrap(); + } + + // Used for debugging + #[allow(dead_code)] + pub fn dump_dot_file(&self, file_name: &str) { + let mut file = std::fs::File::create(file_name).unwrap(); + self.dump_as_dot(&mut file); + } } impl Drop for BeaconChain { diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 1339702a4..01bded409 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -5,6 +5,7 @@ use crate::eth1_chain::{CachingEth1Backend, SszEth1}; use crate::events::NullEventHandler; use crate::fork_choice::SszForkChoice; use crate::head_tracker::HeadTracker; +use crate::migrate::Migrate; use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::shuffling_cache::ShufflingCache; use crate::snapshot_cache::{SnapshotCache, DEFAULT_SNAPSHOT_CACHE_SIZE}; @@ -47,7 +48,7 @@ impl for Witness where TStore: Store + 'static, - TStoreMigrator: store::Migrate + 'static, + TStoreMigrator: Migrate + 'static, TSlotClock: SlotClock + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, @@ -96,7 +97,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: store::Migrate + 'static, + TStoreMigrator: Migrate + 'static, TSlotClock: SlotClock + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, @@ -431,7 +432,7 @@ where event_handler: self .event_handler .ok_or_else(|| "Cannot build without an event handler".to_string())?, - head_tracker: self.head_tracker.unwrap_or_default(), + head_tracker: Arc::new(self.head_tracker.unwrap_or_default()), block_processing_cache: TimeoutRwLock::new(SnapshotCache::new( DEFAULT_SNAPSHOT_CACHE_SIZE, canonical_head, @@ -463,7 +464,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: store::Migrate + 'static, + TStoreMigrator: Migrate + 'static, TSlotClock: SlotClock + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, @@ -533,7 +534,7 @@ impl > where TStore: Store + 'static, - 
TStoreMigrator: store::Migrate + 'static, + TStoreMigrator: Migrate + 'static, TSlotClock: SlotClock + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, @@ -571,7 +572,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: store::Migrate + 'static, + TStoreMigrator: Migrate + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, @@ -610,7 +611,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: store::Migrate + 'static, + TStoreMigrator: Migrate + 'static, TSlotClock: SlotClock + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, @@ -642,12 +643,12 @@ fn genesis_block( #[cfg(test)] mod test { use super::*; + use crate::migrate::{MemoryStore, NullMigrator}; use eth2_hashing::hash; use genesis::{generate_deterministic_keypairs, interop_genesis_state}; use sloggers::{null::NullLoggerBuilder, Build}; use ssz::Encode; use std::time::Duration; - use store::{migrate::NullMigrator, MemoryStore}; use tempfile::tempdir; use types::{EthSpec, MinimalEthSpec, Slot}; diff --git a/beacon_node/beacon_chain/src/head_tracker.rs b/beacon_node/beacon_chain/src/head_tracker.rs index 7bae0ce62..7f4e64122 100644 --- a/beacon_node/beacon_chain/src/head_tracker.rs +++ b/beacon_node/beacon_chain/src/head_tracker.rs @@ -25,11 +25,22 @@ impl HeadTracker { /// the upstream user. pub fn register_block(&self, block_root: Hash256, block: &BeaconBlock) { let mut map = self.0.write(); - map.remove(&block.parent_root); map.insert(block_root, block.slot); } + /// Removes abandoned head. + pub fn remove_head(&self, block_root: Hash256) { + let mut map = self.0.write(); + debug_assert!(map.contains_key(&block_root)); + map.remove(&block_root); + } + + /// Returns true iff `block_root` is a recognized head. + pub fn contains_head(&self, block_root: Hash256) -> bool { + self.0.read().contains_key(&block_root) + } + /// Returns the list of heads in the chain. pub fn heads(&self) -> Vec<(Hash256, Slot)> { self.0 diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 1b4297faa..97d419290 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -11,6 +11,7 @@ pub mod events; mod fork_choice; mod head_tracker; mod metrics; +pub mod migrate; mod persisted_beacon_chain; mod shuffling_cache; mod snapshot_cache; diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs new file mode 100644 index 000000000..5107a4b03 --- /dev/null +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -0,0 +1,349 @@ +use crate::errors::BeaconChainError; +use crate::head_tracker::HeadTracker; +use parking_lot::Mutex; +use slog::{debug, warn, Logger}; +use std::collections::{HashMap, HashSet}; +use std::iter::FromIterator; +use std::mem; +use std::sync::mpsc; +use std::sync::Arc; +use std::thread; +use store::iter::{ParentRootBlockIterator, RootsIterator}; +use store::{hot_cold_store::HotColdDBError, Error, SimpleDiskStore, Store}; +pub use store::{DiskStore, MemoryStore}; +use types::*; +use types::{BeaconState, EthSpec, Hash256, Slot}; + +/// Trait for migration processes that update the database upon finalization. 
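+///
+/// `process_finalization` has a no-op default, so stores that need no migration
+/// (e.g. `NullMigrator`) only implement `new`; the real migrators
+/// (`BlockingMigrator`, `BackgroundMigrator`) override it to freeze finalized
+/// states and prune abandoned forks.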
+pub trait Migrate<S: Store<E>, E: EthSpec>: Send + Sync + 'static {
+    fn new(db: Arc<S>, log: Logger) -> Self;
+
+    fn process_finalization(
+        &self,
+        _state_root: Hash256,
+        _new_finalized_state: BeaconState<E>,
+        _max_finality_distance: u64,
+        _head_tracker: Arc<HeadTracker>,
+        _old_finalized_block_hash: SignedBeaconBlockHash,
+        _new_finalized_block_hash: SignedBeaconBlockHash,
+    ) {
+    }
+
+    /// Traverses live heads and prunes blocks and states of chains that we know can't be built
+    /// upon because finalization would prohibit it. This is an optimisation intended to save disk
+    /// space.
+    ///
+    /// Assumptions:
+    /// * It is called after every finalization.
+    fn prune_abandoned_forks(
+        store: Arc<S>,
+        head_tracker: Arc<HeadTracker>,
+        old_finalized_block_hash: SignedBeaconBlockHash,
+        new_finalized_block_hash: SignedBeaconBlockHash,
+        new_finalized_slot: Slot,
+    ) -> Result<(), BeaconChainError> {
+        let old_finalized_slot = store
+            .get_block(&old_finalized_block_hash.into())?
+            .ok_or_else(|| BeaconChainError::MissingBeaconBlock(old_finalized_block_hash.into()))?
+            .slot();
+
+        // Collect hashes from new_finalized_block back to old_finalized_block (inclusive)
+        let mut found_block = false; // hack for `take_until`
+        let newly_finalized_blocks: HashMap<SignedBeaconBlockHash, Slot> = HashMap::from_iter(
+            ParentRootBlockIterator::new(&*store, new_finalized_block_hash.into())
+                .take_while(|(block_hash, _)| {
+                    if found_block {
+                        false
+                    } else {
+                        found_block |= *block_hash == old_finalized_block_hash.into();
+                        true
+                    }
+                })
+                .map(|(block_hash, block)| (block_hash.into(), block.slot())),
+        );
+
+        // We don't know which blocks are shared among abandoned chains, so we buffer and delete
+        // everything in one fell swoop.
+        let mut abandoned_blocks: HashSet<SignedBeaconBlockHash> = HashSet::new();
+        let mut abandoned_states: HashSet<(Slot, BeaconStateHash)> = HashSet::new();
+        let mut abandoned_heads: HashSet<Hash256> = HashSet::new();
+
+        for (head_hash, head_slot) in head_tracker.heads() {
+            let mut potentially_abandoned_head: Option<Hash256> = Some(head_hash);
+            let mut potentially_abandoned_blocks: Vec<(
+                Slot,
+                Option<SignedBeaconBlockHash>,
+                Option<BeaconStateHash>,
+            )> = Vec::new();
+
+            let head_state_hash = store
+                .get_block(&head_hash)?
+                .ok_or_else(|| BeaconStateError::MissingBeaconBlock(head_hash.into()))?
+                .state_root();
+
+            let iterator = std::iter::once((head_hash, head_state_hash, head_slot))
+                .chain(RootsIterator::from_block(Arc::clone(&store), head_hash)?);
+            for (block_hash, state_hash, slot) in iterator {
+                if slot < old_finalized_slot {
+                    // We must assume here any candidate chains include old_finalized_block_hash,
+                    // i.e. there aren't any forks starting at a block that is a strict ancestor of
+                    // old_finalized_block_hash.
+                    break;
+                }
+                match newly_finalized_blocks.get(&block_hash.into()).copied() {
+                    // Block is not finalized, mark it and its state for deletion
+                    None => {
+                        potentially_abandoned_blocks.push((
+                            slot,
+                            Some(block_hash.into()),
+                            Some(state_hash.into()),
+                        ));
+                    }
+                    Some(finalized_slot) => {
+                        // Block root is finalized, and we have reached the slot it was finalized
+                        // at: we've hit a shared part of the chain.
+                        if finalized_slot == slot {
+                            // The first finalized block of a candidate chain lies after (in slot
+                            // order) the newly finalized block. It's not a candidate for pruning.
+                            if finalized_slot == new_finalized_slot {
+                                potentially_abandoned_blocks.clear();
+                                potentially_abandoned_head.take();
+                            }
+
+                            break;
+                        }
+                        // Block root is finalized, but we're at a skip slot: delete the state only.
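+                        // (A skip-slot state is one stored for a slot that has no
+                        // block of its own; the finalized block itself must
+                        // survive, so only the orphaned state is queued here.)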
+                        else {
+                            potentially_abandoned_blocks.push((
+                                slot,
+                                None,
+                                Some(state_hash.into()),
+                            ));
+                        }
+                    }
+                }
+            }
+
+            abandoned_heads.extend(potentially_abandoned_head.into_iter());
+            if !potentially_abandoned_blocks.is_empty() {
+                abandoned_blocks.extend(
+                    potentially_abandoned_blocks
+                        .iter()
+                        .filter_map(|(_, maybe_block_hash, _)| *maybe_block_hash),
+                );
+                abandoned_states.extend(potentially_abandoned_blocks.iter().filter_map(
+                    |(slot, _, maybe_state_hash)| match maybe_state_hash {
+                        None => None,
+                        Some(state_hash) => Some((*slot, *state_hash)),
+                    },
+                ));
+            }
+        }
+
+        // XXX Should be performed atomically, see
+        // https://github.com/sigp/lighthouse/issues/692
+        for block_hash in abandoned_blocks.into_iter() {
+            store.delete_block(&block_hash.into())?;
+        }
+        for (slot, state_hash) in abandoned_states.into_iter() {
+            store.delete_state(&state_hash.into(), slot)?;
+        }
+        for head_hash in abandoned_heads.into_iter() {
+            head_tracker.remove_head(head_hash);
+        }
+
+        Ok(())
+    }
+}
+
+/// Migrator that does nothing, for stores that don't need migration.
+pub struct NullMigrator;
+
+impl<E: EthSpec> Migrate<SimpleDiskStore<E>, E> for NullMigrator {
+    fn new(_: Arc<SimpleDiskStore<E>>, _: Logger) -> Self {
+        NullMigrator
+    }
+}
+
+impl<E: EthSpec> Migrate<MemoryStore<E>, E> for NullMigrator {
+    fn new(_: Arc<MemoryStore<E>>, _: Logger) -> Self {
+        NullMigrator
+    }
+}
+
+/// Migrator that immediately calls the store's migration function, blocking the current execution.
+///
+/// Mostly useful for tests.
+pub struct BlockingMigrator<S> {
+    db: Arc<S>,
+}
+
+impl<E: EthSpec, S: Store<E>> Migrate<S, E> for BlockingMigrator<S> {
+    fn new(db: Arc<S>, _: Logger) -> Self {
+        BlockingMigrator { db }
+    }
+
+    fn process_finalization(
+        &self,
+        state_root: Hash256,
+        new_finalized_state: BeaconState<E>,
+        _max_finality_distance: u64,
+        head_tracker: Arc<HeadTracker>,
+        old_finalized_block_hash: SignedBeaconBlockHash,
+        new_finalized_block_hash: SignedBeaconBlockHash,
+    ) {
+        if let Err(e) = S::process_finalization(self.db.clone(), state_root, &new_finalized_state) {
+            // This migrator is only used for testing, so we just log to stderr without a logger.
+            eprintln!("Migration error: {:?}", e);
+        }
+
+        if let Err(e) = Self::prune_abandoned_forks(
+            self.db.clone(),
+            head_tracker,
+            old_finalized_block_hash,
+            new_finalized_block_hash,
+            new_finalized_state.slot,
+        ) {
+            eprintln!("Pruning error: {:?}", e);
+        }
+    }
+}
+
+type MpscSender<E> = mpsc::Sender<(
+    Hash256,
+    BeaconState<E>,
+    Arc<HeadTracker>,
+    SignedBeaconBlockHash,
+    SignedBeaconBlockHash,
+    Slot,
+)>;
+
+/// Migrator that runs a background thread to migrate state from the hot to the cold database.
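+///
+/// Requests are sent over an `mpsc` channel to a worker thread; if the worker has
+/// died, the next `process_finalization` call respawns it and retries the send
+/// once.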
+pub struct BackgroundMigrator<E: EthSpec> {
+    db: Arc<DiskStore<E>>,
+    tx_thread: Mutex<(MpscSender<E>, thread::JoinHandle<()>)>,
+    log: Logger,
+}
+
+impl<E: EthSpec> Migrate<DiskStore<E>, E> for BackgroundMigrator<E> {
+    fn new(db: Arc<DiskStore<E>>, log: Logger) -> Self {
+        let tx_thread = Mutex::new(Self::spawn_thread(db.clone(), log.clone()));
+        Self { db, tx_thread, log }
+    }
+
+    /// Perform the freezing operation on the database.
+    fn process_finalization(
+        &self,
+        finalized_state_root: Hash256,
+        new_finalized_state: BeaconState<E>,
+        max_finality_distance: u64,
+        head_tracker: Arc<HeadTracker>,
+        old_finalized_block_hash: SignedBeaconBlockHash,
+        new_finalized_block_hash: SignedBeaconBlockHash,
+    ) {
+        if !self.needs_migration(new_finalized_state.slot, max_finality_distance) {
+            return;
+        }
+
+        let (ref mut tx, ref mut thread) = *self.tx_thread.lock();
+
+        let new_finalized_slot = new_finalized_state.slot;
+        if let Err(tx_err) = tx.send((
+            finalized_state_root,
+            new_finalized_state,
+            head_tracker,
+            old_finalized_block_hash,
+            new_finalized_block_hash,
+            new_finalized_slot,
+        )) {
+            let (new_tx, new_thread) = Self::spawn_thread(self.db.clone(), self.log.clone());
+
+            drop(mem::replace(tx, new_tx));
+            let old_thread = mem::replace(thread, new_thread);
+
+            // Join the old thread, which will probably have panicked, or may have
+            // halted normally just now as a result of us dropping the old `mpsc::Sender`.
+            if let Err(thread_err) = old_thread.join() {
+                warn!(
+                    self.log,
+                    "Migration thread died, so it was restarted";
+                    "reason" => format!("{:?}", thread_err)
+                );
+            }
+
+            // Retry at most once; we could recurse, but that would risk overflowing the stack.
+            let _ = tx.send(tx_err.0);
+        }
+    }
+}
+
+impl<E: EthSpec> BackgroundMigrator<E> {
+    /// Return true if a migration needs to be performed, given a new `finalized_slot`.
+    fn needs_migration(&self, finalized_slot: Slot, max_finality_distance: u64) -> bool {
+        let finality_distance = finalized_slot - self.db.get_split_slot();
+        finality_distance > max_finality_distance
+    }
+
+    /// Spawn a new child thread to run the migration process.
+    ///
+    /// Return a channel handle for sending new finalized states to the thread.
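+    ///
+    /// Each queued message is the same six-element tuple named by the `MpscSender`
+    /// alias above: `(state_root, state, head_tracker, old_finalized_block_hash,
+    /// new_finalized_block_hash, new_finalized_slot)`.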
+ fn spawn_thread( + db: Arc>, + log: Logger, + ) -> ( + mpsc::Sender<( + Hash256, + BeaconState, + Arc, + SignedBeaconBlockHash, + SignedBeaconBlockHash, + Slot, + )>, + thread::JoinHandle<()>, + ) { + let (tx, rx) = mpsc::channel(); + let thread = thread::spawn(move || { + while let Ok(( + state_root, + state, + head_tracker, + old_finalized_block_hash, + new_finalized_block_hash, + new_finalized_slot, + )) = rx.recv() + { + match DiskStore::process_finalization(db.clone(), state_root, &state) { + Ok(()) => {} + Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => { + debug!( + log, + "Database migration postponed, unaligned finalized block"; + "slot" => slot.as_u64() + ); + } + Err(e) => { + warn!( + log, + "Database migration failed"; + "error" => format!("{:?}", e) + ); + } + }; + + match Self::prune_abandoned_forks( + db.clone(), + head_tracker, + old_finalized_block_hash, + new_finalized_block_hash, + new_finalized_slot, + ) { + Ok(()) => {} + Err(e) => warn!(log, "Block pruning failed: {:?}", e), + } + } + }); + + (tx, thread) + } +} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index ad957c874..bc4271aec 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1,6 +1,7 @@ pub use crate::beacon_chain::{ BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY, }; +use crate::migrate::{BlockingMigrator, Migrate, NullMigrator}; pub use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::{ builder::{BeaconChainBuilder, Witness}, @@ -15,16 +16,16 @@ use sloggers::{null::NullLoggerBuilder, Build}; use slot_clock::TestingSlotClock; use state_processing::per_slot_processing; use std::borrow::Cow; +use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; -use store::{ - migrate::{BlockingMigrator, NullMigrator}, - DiskStore, MemoryStore, Migrate, Store, -}; +use store::{DiskStore, MemoryStore, Store}; use tempfile::{tempdir, TempDir}; +use tree_hash::TreeHash; use types::{ - AggregateSignature, Attestation, BeaconState, ChainSpec, Domain, EthSpec, Hash256, Keypair, - SecretKey, Signature, SignedBeaconBlock, SignedRoot, Slot, + AggregateSignature, Attestation, BeaconState, BeaconStateHash, ChainSpec, Domain, EthSpec, + Hash256, Keypair, SecretKey, Signature, SignedBeaconBlock, SignedBeaconBlockHash, SignedRoot, + Slot, }; pub use types::test_utils::generate_deterministic_keypairs; @@ -136,7 +137,10 @@ impl BeaconChainHarness> { .logger(log.clone()) .custom_spec(spec.clone()) .store(store.clone()) - .store_migrator( as Migrate<_, E>>::new(store)) + .store_migrator( as Migrate<_, E>>::new( + store, + log.clone(), + )) .data_dir(data_dir.path().to_path_buf()) .genesis_state( interop_genesis_state::(&keypairs, HARNESS_GENESIS_TIME, &spec) @@ -176,7 +180,10 @@ impl BeaconChainHarness> { .logger(log.clone()) .custom_spec(spec) .store(store.clone()) - .store_migrator( as Migrate<_, E>>::new(store)) + .store_migrator( as Migrate<_, E>>::new( + store, + log.clone(), + )) .data_dir(data_dir.path().to_path_buf()) .resume_from_db() .expect("should resume beacon chain from db") @@ -278,6 +285,127 @@ where head_block_root.expect("did not produce any blocks") } + /// Returns current canonical head slot + pub fn get_chain_slot(&self) -> Slot { + self.chain.slot().unwrap() + } + + /// Returns current canonical head state + pub fn get_head_state(&self) -> BeaconState { + self.chain.head().unwrap().beacon_state + } + + /// Adds a single 
block (synchronously) onto either the canonical chain (block_strategy ==
+    /// OnCanonicalHead) or a fork (block_strategy == ForkCanonicalChainAt).
+    pub fn add_block(
+        &self,
+        state: &BeaconState<E>,
+        block_strategy: BlockStrategy,
+        slot: Slot,
+        validators: &[usize],
+    ) -> (SignedBeaconBlockHash, BeaconState<E>) {
+        while self.chain.slot().expect("should have a slot") < slot {
+            self.advance_slot();
+        }
+
+        let (block, new_state) = self.build_block(state.clone(), slot, block_strategy);
+
+        let outcome = self
+            .chain
+            .process_block(block)
+            .expect("should not error during block processing");
+
+        self.chain.fork_choice().expect("should find head");
+
+        if let BlockProcessingOutcome::Processed { block_root } = outcome {
+            let attestation_strategy = AttestationStrategy::SomeValidators(validators.to_vec());
+            self.add_free_attestations(&attestation_strategy, &new_state, block_root, slot);
+            (block_root.into(), new_state)
+        } else {
+            panic!("block should be successfully processed: {:?}", outcome);
+        }
+    }
+
+    /// `add_block()` repeated `num_blocks` times.
+    pub fn add_blocks(
+        &self,
+        mut state: BeaconState<E>,
+        mut slot: Slot,
+        num_blocks: usize,
+        attesting_validators: &[usize],
+        block_strategy: BlockStrategy,
+    ) -> (
+        HashMap<Slot, SignedBeaconBlockHash>,
+        HashMap<Slot, BeaconStateHash>,
+        Slot,
+        SignedBeaconBlockHash,
+        BeaconState<E>,
+    ) {
+        let mut blocks: HashMap<Slot, SignedBeaconBlockHash> = HashMap::with_capacity(num_blocks);
+        let mut states: HashMap<Slot, BeaconStateHash> = HashMap::with_capacity(num_blocks);
+        for _ in 0..num_blocks {
+            let (new_root_hash, new_state) =
+                self.add_block(&state, block_strategy, slot, attesting_validators);
+            blocks.insert(slot, new_root_hash);
+            states.insert(slot, new_state.tree_hash_root().into());
+            state = new_state;
+            slot += 1;
+        }
+        let head_hash = blocks[&(slot - 1)];
+        (blocks, states, slot, head_hash, state)
+    }
+
+    /// A wrapper around `add_blocks()` to avoid passing enums explicitly.
+    pub fn add_canonical_chain_blocks(
+        &self,
+        state: BeaconState<E>,
+        slot: Slot,
+        num_blocks: usize,
+        attesting_validators: &[usize],
+    ) -> (
+        HashMap<Slot, SignedBeaconBlockHash>,
+        HashMap<Slot, BeaconStateHash>,
+        Slot,
+        SignedBeaconBlockHash,
+        BeaconState<E>,
+    ) {
+        let block_strategy = BlockStrategy::OnCanonicalHead;
+        self.add_blocks(
+            state,
+            slot,
+            num_blocks,
+            attesting_validators,
+            block_strategy,
+        )
+    }
+
+    /// A wrapper around `add_blocks()` to avoid passing enums explicitly.
+    pub fn add_stray_blocks(
+        &self,
+        state: BeaconState<E>,
+        slot: Slot,
+        num_blocks: usize,
+        attesting_validators: &[usize],
+    ) -> (
+        HashMap<Slot, SignedBeaconBlockHash>,
+        HashMap<Slot, BeaconStateHash>,
+        Slot,
+        SignedBeaconBlockHash,
+        BeaconState<E>,
+    ) {
+        let block_strategy = BlockStrategy::ForkCanonicalChainAt {
+            previous_slot: slot,
+            first_slot: slot + 2,
+        };
+        self.add_blocks(
+            state,
+            slot + 2,
+            num_blocks,
+            attesting_validators,
+            block_strategy,
+        )
+    }
+
     /// Returns a newly created block, signed by the proposer for the given slot.
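+    ///
+    /// Used by `add_block` above as:
+    ///
+    ///     let (block, new_state) = self.build_block(state.clone(), slot, block_strategy);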
fn build_block( &self, diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index c4dd0b58c..f7d3abe46 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -6,9 +6,12 @@ extern crate lazy_static; use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, }; -use beacon_chain::AttestationProcessingOutcome; +use beacon_chain::BeaconSnapshot; +use beacon_chain::{AttestationProcessingOutcome, StateSkipConfig}; use rand::Rng; use sloggers::{null::NullLoggerBuilder, Build}; +use std::collections::HashMap; +use std::collections::HashSet; use std::sync::Arc; use store::{ iter::{BlockRootsIterator, StateRootsIterator}, @@ -700,6 +703,551 @@ fn check_shuffling_compatible( } } +// Ensure blocks from abandoned forks are pruned from the Hot DB +#[test] +fn prunes_abandoned_fork_between_two_finalized_checkpoints() { + const VALIDATOR_COUNT: usize = 24; + const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2; + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT); + const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY; + let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); + let faulty_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); + let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize; + + let slot = harness.get_chain_slot(); + let state = harness.get_head_state(); + let (canonical_blocks_pre_finalization, _, slot, _, state) = + harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators); + let (stray_blocks, stray_states, _, stray_head, _) = harness.add_stray_blocks( + harness.get_head_state(), + slot, + slots_per_epoch - 1, + &faulty_validators, + ); + + // Precondition: Ensure all stray_blocks blocks are still known + for &block_hash in stray_blocks.values() { + let block = harness.chain.get_block(&block_hash.into()).unwrap(); + assert!( + block.is_some(), + "stray block {} should be still present", + block_hash + ); + } + + for (&slot, &state_hash) in &stray_states { + let state = harness + .chain + .get_state(&state_hash.into(), Some(slot)) + .unwrap(); + assert!( + state.is_some(), + "stray state {} at slot {} should be still present", + state_hash, + slot + ); + } + + // Precondition: Only genesis is finalized + let chain_dump = harness.chain.chain_dump().unwrap(); + assert_eq!( + get_finalized_epoch_boundary_blocks(&chain_dump), + vec![Hash256::zero().into()].into_iter().collect(), + ); + + assert!(harness.chain.knows_head(&stray_head)); + + // Trigger finalization + let (canonical_blocks_post_finalization, _, _, _, _) = + harness.add_canonical_chain_blocks(state, slot, slots_per_epoch * 5, &honest_validators); + + // Postcondition: New blocks got finalized + let chain_dump = harness.chain.chain_dump().unwrap(); + let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump); + assert_eq!( + finalized_blocks, + vec![ + Hash256::zero().into(), + canonical_blocks_pre_finalization[&Slot::new(slots_per_epoch as u64)], + canonical_blocks_post_finalization[&Slot::new((slots_per_epoch * 2) as u64)], + ] + .into_iter() + .collect() + ); + + // Postcondition: Ensure all stray_blocks blocks have been pruned + for &block_hash in stray_blocks.values() { + let block = harness.chain.get_block(&block_hash.into()).unwrap(); + assert!( + block.is_none(), + 
"abandoned block {} should have been pruned", + block_hash + ); + } + + for (&slot, &state_hash) in &stray_states { + let state = harness.chain.get_state(&state_hash.into(), None).unwrap(); + assert!( + state.is_none(), + "stray state {} at slot {} should have been deleted", + state_hash, + slot + ); + } + + assert!(!harness.chain.knows_head(&stray_head)); +} + +#[test] +fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { + const VALIDATOR_COUNT: usize = 24; + const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2; + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT); + const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY; + let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); + let faulty_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); + let all_validators: Vec = (0..VALIDATOR_COUNT).collect(); + let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize; + + // Fill up 0th epoch + let slot = harness.get_chain_slot(); + let state = harness.get_head_state(); + let (canonical_blocks_zeroth_epoch, _, slot, _, state) = + harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators); + + // Fill up 1st epoch + let (_, _, canonical_slot, shared_head, canonical_state) = + harness.add_canonical_chain_blocks(state, slot, 1, &all_validators); + let (stray_blocks, stray_states, _, stray_head, _) = harness.add_stray_blocks( + canonical_state.clone(), + canonical_slot, + 1, + &faulty_validators, + ); + + // Preconditions + for &block_hash in stray_blocks.values() { + let block = harness.chain.get_block(&block_hash.into()).unwrap(); + assert!( + block.is_some(), + "stray block {} should be still present", + block_hash + ); + } + + for (&slot, &state_hash) in &stray_states { + let state = harness.chain.get_state(&state_hash.into(), None).unwrap(); + assert!( + state.is_some(), + "stray state {} at slot {} should be still present", + state_hash, + slot + ); + } + + let chain_dump = harness.chain.chain_dump().unwrap(); + assert_eq!( + get_finalized_epoch_boundary_blocks(&chain_dump), + vec![Hash256::zero().into()].into_iter().collect(), + ); + + assert!(get_blocks(&chain_dump).contains(&shared_head)); + + // Trigger finalization + let (canonical_blocks, _, _, _, _) = harness.add_canonical_chain_blocks( + canonical_state, + canonical_slot, + slots_per_epoch * 5, + &honest_validators, + ); + + // Postconditions + let chain_dump = harness.chain.chain_dump().unwrap(); + let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump); + assert_eq!( + finalized_blocks, + vec![ + Hash256::zero().into(), + canonical_blocks_zeroth_epoch[&Slot::new(slots_per_epoch as u64)], + canonical_blocks[&Slot::new((slots_per_epoch * 2) as u64)], + ] + .into_iter() + .collect() + ); + + for &block_hash in stray_blocks.values() { + assert!( + harness + .chain + .get_block(&block_hash.into()) + .unwrap() + .is_none(), + "stray block {} should have been pruned", + block_hash, + ); + } + + for (&slot, &state_hash) in &stray_states { + let state = harness.chain.get_state(&state_hash.into(), None).unwrap(); + assert!( + state.is_none(), + "stray state {} at slot {} should have been deleted", + state_hash, + slot + ); + } + + assert!(!harness.chain.knows_head(&stray_head)); + assert!(get_blocks(&chain_dump).contains(&shared_head)); +} + +#[test] +fn pruning_does_not_touch_blocks_prior_to_finalization() { + const VALIDATOR_COUNT: 
usize = 24; + const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2; + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT); + const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY; + let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); + let faulty_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); + let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize; + + // Fill up 0th epoch with canonical chain blocks + let slot = harness.get_chain_slot(); + let state = harness.get_head_state(); + let (canonical_blocks_zeroth_epoch, _, slot, _, state) = + harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators); + + // Fill up 1st epoch. Contains a fork. + let (stray_blocks, stray_states, _, stray_head, _) = + harness.add_stray_blocks(state.clone(), slot, slots_per_epoch - 1, &faulty_validators); + + // Preconditions + for &block_hash in stray_blocks.values() { + let block = harness.chain.get_block(&block_hash.into()).unwrap(); + assert!( + block.is_some(), + "stray block {} should be still present", + block_hash + ); + } + + for (&slot, &state_hash) in &stray_states { + let state = harness.chain.get_state(&state_hash.into(), None).unwrap(); + assert!( + state.is_some(), + "stray state {} at slot {} should be still present", + state_hash, + slot + ); + } + + let chain_dump = harness.chain.chain_dump().unwrap(); + assert_eq!( + get_finalized_epoch_boundary_blocks(&chain_dump), + vec![Hash256::zero().into()].into_iter().collect(), + ); + + // Trigger finalization + let (_, _, _, _, _) = + harness.add_canonical_chain_blocks(state, slot, slots_per_epoch * 4, &honest_validators); + + // Postconditions + let chain_dump = harness.chain.chain_dump().unwrap(); + let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump); + assert_eq!( + finalized_blocks, + vec![ + Hash256::zero().into(), + canonical_blocks_zeroth_epoch[&Slot::new(slots_per_epoch as u64)], + ] + .into_iter() + .collect() + ); + + for &block_hash in stray_blocks.values() { + let block = harness.chain.get_block(&block_hash.into()).unwrap(); + assert!( + block.is_some(), + "stray block {} should be still present", + block_hash + ); + } + + for (&slot, &state_hash) in &stray_states { + let state = harness.chain.get_state(&state_hash.into(), None).unwrap(); + assert!( + state.is_some(), + "stray state {} at slot {} should be still present", + state_hash, + slot + ); + } + + assert!(harness.chain.knows_head(&stray_head)); +} + +#[test] +fn prunes_fork_running_past_finalized_checkpoint() { + const VALIDATOR_COUNT: usize = 24; + const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2; + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT); + const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY; + let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); + let faulty_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); + let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize; + + // Fill up 0th epoch with canonical chain blocks + let slot = harness.get_chain_slot(); + let state = harness.get_head_state(); + let (canonical_blocks_zeroth_epoch, _, slot, _, state) = + harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators); + + // Fill up 1st epoch. Contains a fork. 
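+    // The shape built below is roughly (illustrative; `c` canonical, `s` stray):
+    //
+    //   epoch 0        epoch 1        epoch 2+
+    //   c .. c ----+-- c .. c ------- c ..      <- finalizes, kept
+    //              \
+    //               s .. s ---------- s ..      <- abandoned, pruned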
+    let (stray_blocks_first_epoch, stray_states_first_epoch, stray_slot, _, stray_state) =
+        harness.add_stray_blocks(state.clone(), slot, slots_per_epoch, &faulty_validators);
+
+    let (canonical_blocks_first_epoch, _, canonical_slot, _, canonical_state) =
+        harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators);
+
+    // Fill up 2nd epoch. Extends both the canonical chain and the fork.
+    let (stray_blocks_second_epoch, stray_states_second_epoch, _, stray_head, _) = harness
+        .add_stray_blocks(
+            stray_state,
+            stray_slot,
+            slots_per_epoch - 1,
+            &faulty_validators,
+        );
+
+    // Precondition: Ensure all stray blocks are still known
+    let stray_blocks: HashMap<Slot, SignedBeaconBlockHash> = stray_blocks_first_epoch
+        .into_iter()
+        .chain(stray_blocks_second_epoch.into_iter())
+        .collect();
+
+    let stray_states: HashMap<Slot, BeaconStateHash> = stray_states_first_epoch
+        .into_iter()
+        .chain(stray_states_second_epoch.into_iter())
+        .collect();
+
+    for &block_hash in stray_blocks.values() {
+        let block = harness.chain.get_block(&block_hash.into()).unwrap();
+        assert!(
+            block.is_some(),
+            "stray block {} should be still present",
+            block_hash
+        );
+    }
+
+    for (&slot, &state_hash) in &stray_states {
+        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
+        assert!(
+            state.is_some(),
+            "stray state {} at slot {} should be still present",
+            state_hash,
+            slot
+        );
+    }
+
+    // Precondition: Only genesis is finalized
+    let chain_dump = harness.chain.chain_dump().unwrap();
+    assert_eq!(
+        get_finalized_epoch_boundary_blocks(&chain_dump),
+        vec![Hash256::zero().into()].into_iter().collect(),
+    );
+
+    assert!(harness.chain.knows_head(&stray_head));
+
+    // Trigger finalization
+    let (canonical_blocks_second_epoch, _, _, _, _) = harness.add_canonical_chain_blocks(
+        canonical_state,
+        canonical_slot,
+        slots_per_epoch * 4,
+        &honest_validators,
+    );
+
+    // Postconditions
+    let canonical_blocks: HashMap<Slot, SignedBeaconBlockHash> = canonical_blocks_zeroth_epoch
+        .into_iter()
+        .chain(canonical_blocks_first_epoch.into_iter())
+        .chain(canonical_blocks_second_epoch.into_iter())
+        .collect();
+
+    // Postcondition: New blocks got finalized
+    let chain_dump = harness.chain.chain_dump().unwrap();
+    let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump);
+    assert_eq!(
+        finalized_blocks,
+        vec![
+            Hash256::zero().into(),
+            canonical_blocks[&Slot::new(slots_per_epoch as u64)],
+            canonical_blocks[&Slot::new((slots_per_epoch * 2) as u64)],
+        ]
+        .into_iter()
+        .collect()
+    );
+
+    // Postcondition: Ensure all stray blocks have been pruned
+    for &block_hash in stray_blocks.values() {
+        let block = harness.chain.get_block(&block_hash.into()).unwrap();
+        assert!(
+            block.is_none(),
+            "abandoned block {} should have been pruned",
+            block_hash
+        );
+    }
+
+    for (&slot, &state_hash) in &stray_states {
+        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
+        assert!(
+            state.is_none(),
+            "stray state {} at slot {} should have been deleted",
+            state_hash,
+            slot
+        );
+    }
+
+    assert!(!harness.chain.knows_head(&stray_head));
+}
+
+// This is to check that states created outside of normal block processing are pruned correctly.
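+//
+// The stray fork below starts two slots past the canonical tip, so producing its
+// first block advances the state across skipped slots and stores states that have
+// no block of their own; those are the states that must later be pruned.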
+#[test]
+fn prunes_skipped_slots_states() {
+    const VALIDATOR_COUNT: usize = 24;
+    const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2;
+    let db_path = tempdir().unwrap();
+    let store = get_store(&db_path);
+    let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT);
+    const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY;
+    let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
+    let faulty_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
+    let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize;
+
+    // Arrange skipped slots so as to cross the epoch boundary. That way, we exercise the code
+    // responsible for storing state outside of normal block processing.
+
+    let canonical_slot = harness.get_chain_slot();
+    let canonical_state = harness.get_head_state();
+    let (canonical_blocks_zeroth_epoch, _, canonical_slot, _, canonical_state) = harness
+        .add_canonical_chain_blocks(
+            canonical_state,
+            canonical_slot,
+            slots_per_epoch - 1,
+            &honest_validators,
+        );
+
+    let (stray_blocks, stray_states, stray_slot, _, _) = harness.add_stray_blocks(
+        canonical_state.clone(),
+        canonical_slot,
+        slots_per_epoch,
+        &faulty_validators,
+    );
+
+    // Preconditions
+    for &block_hash in stray_blocks.values() {
+        let block = harness.chain.get_block(&block_hash.into()).unwrap();
+        assert!(
+            block.is_some(),
+            "stray block {} should be still present",
+            block_hash
+        );
+    }
+
+    for (&slot, &state_hash) in &stray_states {
+        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
+        assert!(
+            state.is_some(),
+            "stray state {} at slot {} should be still present",
+            state_hash,
+            slot
+        );
+    }
+
+    let chain_dump = harness.chain.chain_dump().unwrap();
+    assert_eq!(
+        get_finalized_epoch_boundary_blocks(&chain_dump),
+        vec![Hash256::zero().into()].into_iter().collect(),
+    );
+
+    // Make sure slots were skipped
+    let stray_state = harness
+        .chain
+        .state_at_slot(stray_slot, StateSkipConfig::WithoutStateRoots)
+        .unwrap();
+    let block_root = stray_state.get_block_root(canonical_slot - 1);
+    assert_eq!(stray_state.get_block_root(canonical_slot), block_root);
+    assert_eq!(stray_state.get_block_root(canonical_slot + 1), block_root);
+
+    let skipped_slots = vec![canonical_slot, canonical_slot + 1];
+    for &slot in &skipped_slots {
+        assert_eq!(stray_state.get_block_root(slot), block_root);
+        let state_hash = stray_state.get_state_root(slot).unwrap();
+        assert!(
+            harness
+                .chain
+                .get_state(&state_hash, Some(slot))
+                .unwrap()
+                .is_some(),
+            "skipped slots state should be still present"
+        );
+    }
+
+    // Trigger finalization
+    let (canonical_blocks_post_finalization, _, _, _, _) = harness.add_canonical_chain_blocks(
+        canonical_state,
+        canonical_slot,
+        slots_per_epoch * 5,
+        &honest_validators,
+    );
+
+    // Postconditions
+    let chain_dump = harness.chain.chain_dump().unwrap();
+    let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump);
+    let canonical_blocks: HashMap<Slot, SignedBeaconBlockHash> = canonical_blocks_zeroth_epoch
+        .into_iter()
+        .chain(canonical_blocks_post_finalization.into_iter())
+        .collect();
+    assert_eq!(
+        finalized_blocks,
+        vec![
+            Hash256::zero().into(),
+            canonical_blocks[&Slot::new(slots_per_epoch as u64)],
+        ]
+        .into_iter()
+        .collect()
+    );
+
+    for (&slot, &state_hash) in &stray_states {
+        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
+        assert!(
+            state.is_none(),
+            "stray state {} at slot {} should have been deleted",
+            state_hash,
+            slot
+        );
+    }
+
+    for &slot in &skipped_slots {
+ assert_eq!(stray_state.get_block_root(slot), block_root); + let state_hash = stray_state.get_state_root(slot).unwrap(); + assert!( + harness + .chain + .get_state(&state_hash, Some(slot)) + .unwrap() + .is_none(), + "skipped slot states should have been pruned" + ); + } +} + /// Check that the head state's slot matches `expected_slot`. fn check_slot(harness: &TestHarness, expected_slot: u64) { let state = &harness.chain.head().expect("should get head").beacon_state; @@ -823,3 +1371,19 @@ fn check_iterators(harness: &TestHarness) { Some(Slot::new(0)) ); } + +fn get_finalized_epoch_boundary_blocks( + dump: &[BeaconSnapshot], +) -> HashSet { + dump.iter() + .cloned() + .map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint.root.into()) + .collect() +} + +fn get_blocks(dump: &[BeaconSnapshot]) -> HashSet { + dump.iter() + .cloned() + .map(|checkpoint| checkpoint.beacon_block_root.into()) + .collect() +} diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index b1c50dca2..eac55d309 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -4,11 +4,9 @@ use crate::Client; use beacon_chain::{ builder::{BeaconChainBuilder, Witness}, eth1_chain::{CachingEth1Backend, Eth1Chain}, + migrate::{BackgroundMigrator, Migrate, NullMigrator}, slot_clock::{SlotClock, SystemTimeSlotClock}, - store::{ - migrate::{BackgroundMigrator, Migrate, NullMigrator}, - DiskStore, MemoryStore, SimpleDiskStore, Store, StoreConfig, - }, + store::{DiskStore, MemoryStore, SimpleDiskStore, Store, StoreConfig}, BeaconChain, BeaconChainTypes, Eth1ChainBackend, EventHandler, }; use environment::RuntimeContext; @@ -68,7 +66,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: store::Migrate, + TStoreMigrator: Migrate, TSlotClock: SlotClock + Clone + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, @@ -365,7 +363,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: store::Migrate, + TStoreMigrator: Migrate, TSlotClock: SlotClock + Clone + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, @@ -411,7 +409,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: store::Migrate, + TStoreMigrator: Migrate, TSlotClock: SlotClock + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, @@ -459,7 +457,7 @@ impl > where TSlotClock: SlotClock + 'static, - TStoreMigrator: store::Migrate, TEthSpec> + 'static, + TStoreMigrator: Migrate, TEthSpec> + 'static, TEth1Backend: Eth1ChainBackend> + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, @@ -501,7 +499,7 @@ impl > where TSlotClock: SlotClock + 'static, - TStoreMigrator: store::Migrate, TEthSpec> + 'static, + TStoreMigrator: Migrate, TEthSpec> + 'static, TEth1Backend: Eth1ChainBackend> + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, @@ -561,10 +559,15 @@ where TEventHandler: EventHandler + 'static, { pub fn background_migrator(mut self) -> Result { + let context = self + .runtime_context + .as_ref() + .ok_or_else(|| "disk_store requires a log".to_string())? 
+ .service_context("freezer_db".into()); let store = self.store.clone().ok_or_else(|| { "background_migrator requires the store to be initialized".to_string() })?; - self.store_migrator = Some(BackgroundMigrator::new(store)); + self.store_migrator = Some(BackgroundMigrator::new(store, context.log.clone())); Ok(self) } } @@ -582,7 +585,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: store::Migrate, + TStoreMigrator: Migrate, TSlotClock: SlotClock + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, @@ -688,7 +691,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: store::Migrate, + TStoreMigrator: Migrate, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index 292f777fe..be47d4627 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -408,7 +408,7 @@ impl Service { .map(|vec| { let first = vec.first().cloned().unwrap_or_else(|| 0); let last = vec.last().map(|n| n + 1).unwrap_or_else(|| 0); - (first..last) + first..last }) .collect::>>() }) diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index d281b8d2b..2b4200142 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -10,6 +10,7 @@ pub use client::{Client, ClientBuilder, ClientConfig, ClientGenesis}; pub use config::{get_data_dir, get_eth2_testnet_config, get_testnet_dir}; pub use eth2_config::Eth2Config; +use beacon_chain::migrate::{BackgroundMigrator, DiskStore}; use beacon_chain::{ builder::Witness, eth1_chain::CachingEth1Backend, events::WebSocketSender, slot_clock::SystemTimeSlotClock, @@ -20,7 +21,6 @@ use environment::RuntimeContext; use futures::{Future, IntoFuture}; use slog::{info, warn}; use std::ops::{Deref, DerefMut}; -use store::{migrate::BackgroundMigrator, DiskStore}; use types::EthSpec; /// A type-alias to the tighten the definition of a production-intended `Client`. diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 362b866ba..194461868 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -204,7 +204,7 @@ impl Store for HotColdDB { } /// Advance the split point of the store, moving new finalized states to the freezer. 
- fn freeze_to_state( + fn process_finalization( store: Arc, frozen_head_root: Hash256, frozen_head: &BeaconState, diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 43bdd164d..772f0ae30 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -1,4 +1,4 @@ -use crate::Store; +use crate::{Error, Store}; use std::borrow::Cow; use std::marker::PhantomData; use std::sync::Arc; @@ -43,12 +43,95 @@ impl<'a, U: Store, E: EthSpec> AncestorIter { + inner: RootsIterator<'a, T, U>, +} + +impl<'a, T: EthSpec, U> Clone for StateRootsIterator<'a, T, U> { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + } + } +} + +impl<'a, T: EthSpec, U: Store> StateRootsIterator<'a, T, U> { + pub fn new(store: Arc, beacon_state: &'a BeaconState) -> Self { + Self { + inner: RootsIterator::new(store, beacon_state), + } + } + + pub fn owned(store: Arc, beacon_state: BeaconState) -> Self { + Self { + inner: RootsIterator::owned(store, beacon_state), + } + } +} + +impl<'a, T: EthSpec, U: Store> Iterator for StateRootsIterator<'a, T, U> { + type Item = (Hash256, Slot); + + fn next(&mut self) -> Option { + self.inner + .next() + .map(|(_, state_root, slot)| (state_root, slot)) + } +} + +/// Iterates backwards through block roots. If any specified slot is unable to be retrieved, the +/// iterator returns `None` indefinitely. +/// +/// Uses the `block_roots` field of `BeaconState` as the source of block roots and will +/// perform a lookup on the `Store` for a prior `BeaconState` if `block_roots` has been +/// exhausted. +/// +/// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. +pub struct BlockRootsIterator<'a, T: EthSpec, U> { + inner: RootsIterator<'a, T, U>, +} + +impl<'a, T: EthSpec, U> Clone for BlockRootsIterator<'a, T, U> { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + } + } +} + +impl<'a, T: EthSpec, U: Store> BlockRootsIterator<'a, T, U> { + /// Create a new iterator over all block roots in the given `beacon_state` and prior states. + pub fn new(store: Arc, beacon_state: &'a BeaconState) -> Self { + Self { + inner: RootsIterator::new(store, beacon_state), + } + } + + /// Create a new iterator over all block roots in the given `beacon_state` and prior states. + pub fn owned(store: Arc, beacon_state: BeaconState) -> Self { + Self { + inner: RootsIterator::owned(store, beacon_state), + } + } +} + +impl<'a, T: EthSpec, U: Store> Iterator for BlockRootsIterator<'a, T, U> { + type Item = (Hash256, Slot); + + fn next(&mut self) -> Option { + self.inner + .next() + .map(|(block_root, _, slot)| (block_root, slot)) + } +} + +/// Iterator over state and block roots that backtracks using the vectors from a `BeaconState`. 
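+///
+/// Yields `(block_root, state_root, slot)` triples; `StateRootsIterator` and
+/// `BlockRootsIterator` above are thin wrappers that each project out one of the
+/// two roots.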
+pub struct RootsIterator<'a, T: EthSpec, U> { store: Arc, beacon_state: Cow<'a, BeaconState>, slot: Slot, } -impl<'a, T: EthSpec, U> Clone for StateRootsIterator<'a, T, U> { +impl<'a, T: EthSpec, U> Clone for RootsIterator<'a, T, U> { fn clone(&self) -> Self { Self { store: self.store.clone(), @@ -58,7 +141,7 @@ impl<'a, T: EthSpec, U> Clone for StateRootsIterator<'a, T, U> { } } -impl<'a, T: EthSpec, U: Store> StateRootsIterator<'a, T, U> { +impl<'a, T: EthSpec, U: Store> RootsIterator<'a, T, U> { pub fn new(store: Arc, beacon_state: &'a BeaconState) -> Self { Self { store, @@ -74,10 +157,21 @@ impl<'a, T: EthSpec, U: Store> StateRootsIterator<'a, T, U> { beacon_state: Cow::Owned(beacon_state), } } + + pub fn from_block(store: Arc, block_hash: Hash256) -> Result { + let block = store + .get_block(&block_hash)? + .ok_or_else(|| BeaconStateError::MissingBeaconBlock(block_hash.into()))?; + let state = store + .get_state(&block.state_root(), Some(block.slot()))? + .ok_or_else(|| BeaconStateError::MissingBeaconState(block.state_root().into()))?; + Ok(Self::owned(store, state)) + } } -impl<'a, T: EthSpec, U: Store> Iterator for StateRootsIterator<'a, T, U> { - type Item = (Hash256, Slot); +impl<'a, T: EthSpec, U: Store> Iterator for RootsIterator<'a, T, U> { + /// (block_root, state_root, slot) + type Item = (Hash256, Hash256, Slot); fn next(&mut self) -> Option { if self.slot == 0 || self.slot > self.beacon_state.slot { @@ -86,18 +180,22 @@ impl<'a, T: EthSpec, U: Store> Iterator for StateRootsIterator<'a, T, U> { self.slot -= 1; - match self.beacon_state.get_state_root(self.slot) { - Ok(root) => Some((*root, self.slot)), - Err(BeaconStateError::SlotOutOfBounds) => { + match ( + self.beacon_state.get_block_root(self.slot), + self.beacon_state.get_state_root(self.slot), + ) { + (Ok(block_root), Ok(state_root)) => Some((*block_root, *state_root, self.slot)), + (Err(BeaconStateError::SlotOutOfBounds), Err(BeaconStateError::SlotOutOfBounds)) => { // Read a `BeaconState` from the store that has access to prior historical roots. let beacon_state = next_historical_root_backtrack_state(&*self.store, &self.beacon_state)?; self.beacon_state = Cow::Owned(beacon_state); - let root = self.beacon_state.get_state_root(self.slot).ok()?; + let block_root = *self.beacon_state.get_block_root(self.slot).ok()?; + let state_root = *self.beacon_state.get_state_root(self.slot).ok()?; - Some((*root, self.slot)) + Some((block_root, state_root, self.slot)) } _ => None, } @@ -165,79 +263,7 @@ impl<'a, T: EthSpec, U: Store> Iterator for BlockIterator<'a, T, U> { fn next(&mut self) -> Option { let (root, _slot) = self.roots.next()?; - self.roots.store.get_block(&root).ok()? - } -} - -/// Iterates backwards through block roots. If any specified slot is unable to be retrieved, the -/// iterator returns `None` indefinitely. -/// -/// Uses the `block_roots` field of `BeaconState` to as the source of block roots and will -/// perform a lookup on the `Store` for a prior `BeaconState` if `block_roots` has been -/// exhausted. -/// -/// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. 
-pub struct BlockRootsIterator<'a, T: EthSpec, U> { - store: Arc, - beacon_state: Cow<'a, BeaconState>, - slot: Slot, -} - -impl<'a, T: EthSpec, U> Clone for BlockRootsIterator<'a, T, U> { - fn clone(&self) -> Self { - Self { - store: self.store.clone(), - beacon_state: self.beacon_state.clone(), - slot: self.slot, - } - } -} - -impl<'a, T: EthSpec, U: Store> BlockRootsIterator<'a, T, U> { - /// Create a new iterator over all block roots in the given `beacon_state` and prior states. - pub fn new(store: Arc, beacon_state: &'a BeaconState) -> Self { - Self { - store, - slot: beacon_state.slot, - beacon_state: Cow::Borrowed(beacon_state), - } - } - - /// Create a new iterator over all block roots in the given `beacon_state` and prior states. - pub fn owned(store: Arc, beacon_state: BeaconState) -> Self { - Self { - store, - slot: beacon_state.slot, - beacon_state: Cow::Owned(beacon_state), - } - } -} - -impl<'a, T: EthSpec, U: Store> Iterator for BlockRootsIterator<'a, T, U> { - type Item = (Hash256, Slot); - - fn next(&mut self) -> Option { - if self.slot == 0 || self.slot > self.beacon_state.slot { - return None; - } - - self.slot -= 1; - - match self.beacon_state.get_block_root(self.slot) { - Ok(root) => Some((*root, self.slot)), - Err(BeaconStateError::SlotOutOfBounds) => { - // Read a `BeaconState` from the store that has access to prior historical roots. - let beacon_state = - next_historical_root_backtrack_state(&*self.store, &self.beacon_state)?; - - self.beacon_state = Cow::Owned(beacon_state); - - let root = self.beacon_state.get_block_root(self.slot).ok()?; - - Some((*root, self.slot)) - } - _ => None, - } + self.roots.inner.store.get_block(&root).ok()? } } diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index a8220b08c..31c948eb0 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -15,7 +15,7 @@ pub mod chunked_vector; pub mod config; mod errors; mod forwards_iter; -mod hot_cold_store; +pub mod hot_cold_store; mod impls; mod leveldb_store; mod memory_store; @@ -24,7 +24,6 @@ mod partial_beacon_state; mod state_batch; pub mod iter; -pub mod migrate; use std::sync::Arc; @@ -32,7 +31,6 @@ pub use self::config::StoreConfig; pub use self::hot_cold_store::{HotColdDB as DiskStore, HotStateSummary}; pub use self::leveldb_store::LevelDB as SimpleDiskStore; pub use self::memory_store::MemoryStore; -pub use self::migrate::Migrate; pub use self::partial_beacon_state::PartialBeaconState; pub use errors::Error; pub use impls::beacon_state::StorageContainer as BeaconStateStorageContainer; @@ -132,7 +130,7 @@ pub trait Store: Sync + Send + Sized + 'static { } /// (Optionally) Move all data before the frozen slot to the freezer database. - fn freeze_to_state( + fn process_finalization( _store: Arc, _frozen_head_root: Hash256, _frozen_head: &BeaconState, diff --git a/beacon_node/store/src/migrate.rs b/beacon_node/store/src/migrate.rs deleted file mode 100644 index 5fd617a22..000000000 --- a/beacon_node/store/src/migrate.rs +++ /dev/null @@ -1,153 +0,0 @@ -use crate::{ - hot_cold_store::HotColdDBError, DiskStore, Error, MemoryStore, SimpleDiskStore, Store, -}; -use parking_lot::Mutex; -use slog::{debug, warn}; -use std::mem; -use std::sync::mpsc; -use std::sync::Arc; -use std::thread; -use types::{BeaconState, EthSpec, Hash256, Slot}; - -/// Trait for migration processes that update the database upon finalization. 
-pub trait Migrate: Send + Sync + 'static { - fn new(db: Arc) -> Self; - - fn freeze_to_state( - &self, - _state_root: Hash256, - _state: BeaconState, - _max_finality_distance: u64, - ) { - } -} - -/// Migrator that does nothing, for stores that don't need migration. -pub struct NullMigrator; - -impl Migrate, E> for NullMigrator { - fn new(_: Arc>) -> Self { - NullMigrator - } -} - -impl Migrate, E> for NullMigrator { - fn new(_: Arc>) -> Self { - NullMigrator - } -} - -/// Migrator that immediately calls the store's migration function, blocking the current execution. -/// -/// Mostly useful for tests. -pub struct BlockingMigrator(Arc); - -impl> Migrate for BlockingMigrator { - fn new(db: Arc) -> Self { - BlockingMigrator(db) - } - - fn freeze_to_state( - &self, - state_root: Hash256, - state: BeaconState, - _max_finality_distance: u64, - ) { - if let Err(e) = S::freeze_to_state(self.0.clone(), state_root, &state) { - // This migrator is only used for testing, so we just log to stderr without a logger. - eprintln!("Migration error: {:?}", e); - } - } -} - -type MpscSender = mpsc::Sender<(Hash256, BeaconState)>; - -/// Migrator that runs a background thread to migrate state from the hot to the cold database. -pub struct BackgroundMigrator { - db: Arc>, - tx_thread: Mutex<(MpscSender, thread::JoinHandle<()>)>, -} - -impl Migrate, E> for BackgroundMigrator { - fn new(db: Arc>) -> Self { - let tx_thread = Mutex::new(Self::spawn_thread(db.clone())); - Self { db, tx_thread } - } - - /// Perform the freezing operation on the database, - fn freeze_to_state( - &self, - finalized_state_root: Hash256, - finalized_state: BeaconState, - max_finality_distance: u64, - ) { - if !self.needs_migration(finalized_state.slot, max_finality_distance) { - return; - } - - let (ref mut tx, ref mut thread) = *self.tx_thread.lock(); - - if let Err(tx_err) = tx.send((finalized_state_root, finalized_state)) { - let (new_tx, new_thread) = Self::spawn_thread(self.db.clone()); - - drop(mem::replace(tx, new_tx)); - let old_thread = mem::replace(thread, new_thread); - - // Join the old thread, which will probably have panicked, or may have - // halted normally just now as a result of us dropping the old `mpsc::Sender`. - if let Err(thread_err) = old_thread.join() { - warn!( - self.db.log, - "Migration thread died, so it was restarted"; - "reason" => format!("{:?}", thread_err) - ); - } - - // Retry at most once, we could recurse but that would risk overflowing the stack. - let _ = tx.send(tx_err.0); - } - } -} - -impl BackgroundMigrator { - /// Return true if a migration needs to be performed, given a new `finalized_slot`. - fn needs_migration(&self, finalized_slot: Slot, max_finality_distance: u64) -> bool { - let finality_distance = finalized_slot - self.db.get_split_slot(); - finality_distance > max_finality_distance - } - - /// Spawn a new child thread to run the migration process. - /// - /// Return a channel handle for sending new finalized states to the thread. 
- fn spawn_thread( - db: Arc>, - ) -> ( - mpsc::Sender<(Hash256, BeaconState)>, - thread::JoinHandle<()>, - ) { - let (tx, rx) = mpsc::channel(); - let thread = thread::spawn(move || { - while let Ok((state_root, state)) = rx.recv() { - match DiskStore::freeze_to_state(db.clone(), state_root, &state) { - Ok(()) => {} - Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => { - debug!( - db.log, - "Database migration postponed, unaligned finalized block"; - "slot" => slot.as_u64() - ); - } - Err(e) => { - warn!( - db.log, - "Database migration failed"; - "error" => format!("{:?}", e) - ); - } - } - } - }); - - (tx, thread) - } -} diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 7d2d6d445..1f8e9145d 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -12,6 +12,7 @@ use serde_derive::{Deserialize, Serialize}; use ssz::ssz_encode; use ssz_derive::{Decode, Encode}; use ssz_types::{typenum::Unsigned, BitVector, FixedVector}; +use std::fmt; use swap_or_not_shuffle::compute_shuffled_index; use test_random_derive::TestRandom; use tree_hash::TreeHash; @@ -80,6 +81,8 @@ pub enum Error { /// /// This represents a serious bug in either the spec or Lighthouse! ArithError(ArithError), + MissingBeaconBlock(SignedBeaconBlockHash), + MissingBeaconState(BeaconStateHash), } /// Control whether an epoch-indexed field can be indexed at the next epoch or not. @@ -98,6 +101,33 @@ impl AllowNextEpoch { } } +#[derive(PartialEq, Eq, Hash, Clone, Copy)] +pub struct BeaconStateHash(Hash256); + +impl fmt::Debug for BeaconStateHash { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "BeaconStateHash({:?})", self.0) + } +} + +impl fmt::Display for BeaconStateHash { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl From for BeaconStateHash { + fn from(hash: Hash256) -> BeaconStateHash { + BeaconStateHash(hash) + } +} + +impl From for Hash256 { + fn from(beacon_state_hash: BeaconStateHash) -> Hash256 { + beacon_state_hash.0 + } +} + /// The state of the `BeaconChain` at some slot. /// /// Spec v0.11.1 @@ -614,6 +644,14 @@ impl BeaconState { Ok(&self.block_roots[i]) } + pub fn get_block_state_roots( + &self, + slot: Slot, + ) -> Result<(SignedBeaconBlockHash, BeaconStateHash), Error> { + let i = self.get_latest_block_roots_index(slot)?; + Ok((self.block_roots[i].into(), self.state_roots[i].into())) + } + /// Sets the latest state root for slot. 
/// /// Spec v0.11.1 diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index 3391f3132..91d5d6580 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -69,7 +69,7 @@ pub use crate::indexed_attestation::IndexedAttestation; pub use crate::pending_attestation::PendingAttestation; pub use crate::proposer_slashing::ProposerSlashing; pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; -pub use crate::signed_beacon_block::SignedBeaconBlock; +pub use crate::signed_beacon_block::{SignedBeaconBlock, SignedBeaconBlockHash}; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; pub use crate::signed_voluntary_exit::SignedVoluntaryExit; pub use crate::signing_root::{SignedRoot, SigningRoot}; diff --git a/eth2/types/src/signed_beacon_block.rs b/eth2/types/src/signed_beacon_block.rs index 02f376fc2..81abc0dfd 100644 --- a/eth2/types/src/signed_beacon_block.rs +++ b/eth2/types/src/signed_beacon_block.rs @@ -1,11 +1,40 @@ use crate::{test_utils::TestRandom, BeaconBlock, EthSpec, Hash256, Slot}; use bls::Signature; +use std::fmt; + use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; +#[derive(PartialEq, Eq, Hash, Clone, Copy)] +pub struct SignedBeaconBlockHash(Hash256); + +impl fmt::Debug for SignedBeaconBlockHash { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "SignedBeaconBlockHash({:?})", self.0) + } +} + +impl fmt::Display for SignedBeaconBlockHash { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl From for SignedBeaconBlockHash { + fn from(hash: Hash256) -> SignedBeaconBlockHash { + SignedBeaconBlockHash(hash) + } +} + +impl From for Hash256 { + fn from(signed_beacon_block_hash: SignedBeaconBlockHash) -> Hash256 { + signed_beacon_block_hash.0 + } +} + /// A `BeaconBlock` and a signature from its proposer. /// /// Spec v0.11.1 diff --git a/eth2/types/src/slot_epoch.rs b/eth2/types/src/slot_epoch.rs index bf43e2b09..290dfdc65 100644 --- a/eth2/types/src/slot_epoch.rs +++ b/eth2/types/src/slot_epoch.rs @@ -21,11 +21,11 @@ use std::hash::{Hash, Hasher}; use std::iter::Iterator; use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssign}; -#[derive(Eq, Debug, Clone, Copy, Default, Serialize, Deserialize)] +#[derive(Eq, Clone, Copy, Default, Serialize, Deserialize)] #[serde(transparent)] pub struct Slot(u64); -#[derive(Eq, Debug, Clone, Copy, Default, Serialize, Deserialize)] +#[derive(Eq, Clone, Copy, Default, Serialize, Deserialize)] pub struct Epoch(u64); impl_common!(Slot); diff --git a/eth2/types/src/slot_epoch_macros.rs b/eth2/types/src/slot_epoch_macros.rs index 0050fb5d6..15263f654 100644 --- a/eth2/types/src/slot_epoch_macros.rs +++ b/eth2/types/src/slot_epoch_macros.rs @@ -195,6 +195,16 @@ macro_rules! impl_display { }; } +macro_rules! impl_debug { + ($type: ident) => { + impl fmt::Debug for $type { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}({:?})", stringify!($type), self.0) + } + } + }; +} + macro_rules! impl_ssz { ($type: ident) => { impl Encode for $type { @@ -275,6 +285,7 @@ macro_rules! impl_common { impl_math_between!($type, u64); impl_math!($type); impl_display!($type); + impl_debug!($type); impl_ssz!($type); impl_hash!($type); };