Fork choice bug fixes (#449)
* Change reduced tree for adding weightless node
* Add more comments for reduced tree fork choice
* Small refactor on reduced tree for readability
* Move forking logic into the test_harness itself
* Add new `AncestorIter` trait to store
* Add unfinished tests to fork choice
* Make `beacon_chain.genesis_block_root` public
* Add failing lmd_ghost fork choice tests
* Extend fork_choice tests, create failing test
* Implement Debug for generic ReducedTree
* Add lazy_static to fork choice tests
* Add verify_integrity fn to reduced tree
* Fix bugs in reduced tree
* Ensure all reduced tree tests verify integrity
* Slightly alter reduced tree test params
* Add (failing) reduced tree test
* Fix bug in fork choice: iter ancestors was not working well with skip slots
* Put maximum depth for common ancestor search: ensures that we don't search back past the finalized root
* Add basic finalization tests for reduced tree
* Change fork choice to use beacon_block_root: previously it was using target_root, which was wrong
* Make ancestor iter return option
* Disable fork choice test when !debug_assertions
* Fix type, removed code fragment
* Tidy some borrow-checker evading
* Lower reduced tree random test iterations
Parent: 6de9e5bd6f
Commit: 7458022fcf
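For orientation before the diff: a hedged, self-contained sketch of the central attestation-processing change (vote on `beacon_block_root`, with the slot derived from `target_epoch`), condensed from the fork_choice.rs hunk below. `Hash256`, `AttestationData`, `SLOTS_PER_EPOCH` and `lmd_vote` here are simplified stand-ins for the real Lighthouse types, not part of the diff.

```rust
// Sketch only: simplified stand-ins for the real `types` crate.
#[derive(Clone, Copy, PartialEq, Eq, Default, Debug)]
struct Hash256([u8; 32]);

type Slot = u64;
type Epoch = u64;

/// Stand-in for the fields of `AttestationData` used by fork choice.
struct AttestationData {
    beacon_block_root: Hash256,
    target_epoch: Epoch,
}

const SLOTS_PER_EPOCH: Slot = 8; // illustrative value only

/// Returns the (block, slot) that LMD-GHOST should record for this attestation,
/// or `None` if the vote should be ignored.
fn lmd_vote(data: &AttestationData) -> Option<(Hash256, Slot)> {
    // Before this commit the vote was keyed on `data.target_root`, which interacts
    // badly with skip slots; it is now keyed on the attested block itself.
    if data.beacon_block_root == Hash256::default() {
        // Attestations to the zero hash (genesis) are ignored.
        return None;
    }
    // The slot is derived from the target epoch rather than read from the state.
    let block_slot = data.target_epoch * SLOTS_PER_EPOCH;
    Some((data.beacon_block_root, block_slot))
}

fn main() {
    let data = AttestationData {
        beacon_block_root: Hash256([1; 32]),
        target_epoch: 3,
    };
    assert_eq!(lmd_vote(&data), Some((Hash256([1; 32]), 24)));
}
```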
@@ -77,7 +77,7 @@ pub struct BeaconChain<T: BeaconChainTypes> {
/// to `per_slot_processing`.
state: RwLock<BeaconState<T::EthSpec>>,
/// The root of the genesis block.
genesis_block_root: Hash256,
pub genesis_block_root: Hash256,
/// A state-machine that is updated with information from the network and chooses a canonical
/// head block.
pub fork_choice: ForkChoice<T>,
@@ -125,14 +125,14 @@ impl<T: BeaconChainTypes> ForkChoice<T> {
state: &BeaconState<T::EthSpec>,
attestation: &Attestation,
) -> Result<()> {
// Note: `get_attesting_indices_unsorted` requires that the beacon state caches be built.
let validator_indices = get_attesting_indices_unsorted(
state,
&attestation.data,
&attestation.aggregation_bitfield,
)?;
let block_slot = state.get_attestation_slot(&attestation.data)?;

let block_hash = attestation.data.target_root;
let block_hash = attestation.data.beacon_block_root;

// Ignore any attestations to the zero hash.
//
@@ -148,11 +148,6 @@ impl<T: BeaconChainTypes> ForkChoice<T> {
// fine because votes to the genesis block are not useful; all validators implicitly attest
// to genesis just by being present in the chain.
if block_hash != Hash256::zero() {
let block_slot = attestation
.data
.target_epoch
.start_slot(T::EthSpec::slots_per_epoch());

for validator_index in validator_indices {
self.backend
.process_attestation(validator_index, block_hash, block_slot)?;
@@ -64,6 +64,8 @@ where

/// A testing harness which can instantiate a `BeaconChain` and populate it with blocks and
/// attestations.
///
/// Used for testing.
pub struct BeaconChainHarness<L, E>
where
L: LmdGhost<MemoryStore, E>,
@@ -337,6 +339,50 @@ where
});
}

/// Creates two forks:
///
/// - The "honest" fork: created by the `honest_validators` who have built `honest_fork_blocks`
/// on the head
/// - The "faulty" fork: created by the `faulty_validators` who skipped a slot and
/// then built `faulty_fork_blocks`.
///
/// Returns `(honest_head, faulty_head)`, the roots of the blocks at the top of each chain.
pub fn generate_two_forks_by_skipping_a_block(
&self,
honest_validators: &[usize],
faulty_validators: &[usize],
honest_fork_blocks: usize,
faulty_fork_blocks: usize,
) -> (Hash256, Hash256) {
let initial_head_slot = self.chain.head().beacon_block.slot;

// Move to the next slot so we may produce some more blocks on the head.
self.advance_slot();

// Extend the chain with blocks where only honest validators agree.
let honest_head = self.extend_chain(
honest_fork_blocks,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::SomeValidators(honest_validators.to_vec()),
);

// Go back to the last block where all agreed, and build blocks upon it where only faulty nodes
// agree.
let faulty_head = self.extend_chain(
faulty_fork_blocks,
BlockStrategy::ForkCanonicalChainAt {
previous_slot: Slot::from(initial_head_slot),
// `initial_head_slot + 2` means one slot is skipped.
first_slot: Slot::from(initial_head_slot + 2),
},
AttestationStrategy::SomeValidators(faulty_validators.to_vec()),
);

assert!(honest_head != faulty_head, "forks should be distinct");

(honest_head, faulty_head)
}

/// Returns the secret key for the given validator index.
fn get_sk(&self, validator_index: usize) -> &SecretKey {
&self.keypairs[validator_index].sk
@@ -25,7 +25,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness<TestForkChoice, Min
}

#[test]
fn fork() {
fn chooses_fork() {
let harness = get_harness(VALIDATOR_COUNT);

let two_thirds = (VALIDATOR_COUNT / 3) * 2;
@@ -45,25 +45,11 @@ fn fork() {
AttestationStrategy::AllValidators,
);

// Move to the next slot so we may produce some more blocks on the head.
harness.advance_slot();

// Extend the chain with blocks where only honest validators agree.
let honest_head = harness.extend_chain(
let (honest_head, faulty_head) = harness.generate_two_forks_by_skipping_a_block(
&honest_validators,
&faulty_validators,
honest_fork_blocks,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::SomeValidators(honest_validators.clone()),
);

// Go back to the last block where all agreed, and build blocks upon it where only faulty nodes
// agree.
let faulty_head = harness.extend_chain(
faulty_fork_blocks,
BlockStrategy::ForkCanonicalChainAt {
previous_slot: Slot::from(initial_blocks),
first_slot: Slot::from(initial_blocks + 2),
},
AttestationStrategy::SomeValidators(faulty_validators.clone()),
);

assert!(honest_head != faulty_head, "forks should be distinct");
@@ -3,6 +3,22 @@ use std::borrow::Cow;
use std::sync::Arc;
use types::{BeaconBlock, BeaconState, BeaconStateError, EthSpec, Hash256, Slot};

/// Implemented for types that have ancestors (e.g., blocks, states) that may be iterated over.
pub trait AncestorIter<U: Store, I: Iterator> {
/// Returns an iterator over the roots of the ancestors of `self`.
fn try_iter_ancestor_roots(&self, store: Arc<U>) -> Option<I>;
}

impl<'a, U: Store, E: EthSpec> AncestorIter<U, BestBlockRootsIterator<'a, E, U>> for BeaconBlock {
/// Iterates across all the prior block roots of `self`, starting at the most recent and ending
/// at genesis.
fn try_iter_ancestor_roots(&self, store: Arc<U>) -> Option<BestBlockRootsIterator<'a, E, U>> {
let state = store.get::<BeaconState<E>>(&self.state_root).ok()??;

Some(BestBlockRootsIterator::owned(store, state, self.slot))
}
}

#[derive(Clone)]
pub struct StateRootsIterator<'a, T: EthSpec, U> {
store: Arc<U>,
@@ -17,3 +17,5 @@ bls = { path = "../utils/bls" }
slot_clock = { path = "../utils/slot_clock" }
beacon_chain = { path = "../../beacon_node/beacon_chain" }
env_logger = "0.6.0"
lazy_static = "1.3.0"
rand = "0.7"
@@ -6,9 +6,10 @@
use super::{LmdGhost, Result as SuperResult};
use parking_lot::RwLock;
use std::collections::HashMap;
use std::fmt;
use std::marker::PhantomData;
use std::sync::Arc;
use store::{iter::BestBlockRootsIterator, Error as StoreError, Store};
use store::{iter::BlockRootsIterator, Error as StoreError, Store};
use types::{BeaconBlock, BeaconState, EthSpec, Hash256, Slot};

type Result<T> = std::result::Result<T, Error>;
@@ -35,6 +36,23 @@ pub struct ThreadSafeReducedTree<T, E> {
core: RwLock<ReducedTree<T, E>>,
}

impl<T, E> fmt::Debug for ThreadSafeReducedTree<T, E> {
/// `Debug` just defers to the implementation of `self.core`.
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.core.fmt(f)
}
}

impl<T, E> ThreadSafeReducedTree<T, E>
where
T: Store,
E: EthSpec,
{
pub fn verify_integrity(&self) -> std::result::Result<(), String> {
self.core.read().verify_integrity()
}
}

impl<T, E> LmdGhost<T, E> for ThreadSafeReducedTree<T, E>
where
T: Store,
@@ -100,6 +118,12 @@ struct ReducedTree<T, E> {
_phantom: PhantomData<E>,
}

impl<T, E> fmt::Debug for ReducedTree<T, E> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.nodes.fmt(f)
}
}

impl<T, E> ReducedTree<T, E>
where
T: Store,
@@ -126,6 +150,10 @@ where
}
}

/// Set the root node (the node without any parents) to the given `new_slot` and `new_root`.
///
/// The given `new_root` must be in the block tree (but not necessarily in the reduced tree).
/// Any nodes which are not a descendant of `new_root` will be removed from the store.
pub fn update_root(&mut self, new_slot: Slot, new_root: Hash256) -> Result<()> {
if !self.nodes.contains_key(&new_root) {
let node = Node {
@@ -276,55 +304,54 @@ where
Ok(weight)
}

/// Removes the vote from `validator_index` from the reduced tree.
///
/// If the validator had a vote in the tree, the removal of that vote may cause a node to
/// become redundant and removed from the reduced tree.
fn remove_latest_message(&mut self, validator_index: usize) -> Result<()> {
if self.latest_votes.get(validator_index).is_some() {
// Unwrap is safe as prior `if` statements ensures the result is `Some`.
let vote = self.latest_votes.get(validator_index).unwrap();
if let Some(vote) = self.latest_votes.get(validator_index).clone() {
self.get_mut_node(vote.hash)?.remove_voter(validator_index);
let node = self.get_node(vote.hash)?.clone();

let should_delete = {
self.get_mut_node(vote.hash)?.remove_voter(validator_index);
let node = self.get_node(vote.hash)?.clone();
if let Some(parent_hash) = node.parent_hash {
if node.has_votes() || node.children.len() > 1 {
// A node with votes or more than one child is never removed.
} else if node.children.len() == 1 {
// A node which has only one child may be removed.
//
// Load the child of the node and set it's parent to be the parent of this
// node (viz., graft the node's child to the node's parent)
let child = self.get_mut_node(node.children[0])?;
child.parent_hash = node.parent_hash;

if let Some(parent_hash) = node.parent_hash {
if node.has_votes() || node.children.len() > 1 {
// A node with votes or more than one child is never removed.
false
} else if node.children.len() == 1 {
// A node which has only one child may be removed.
//
// Load the child of the node and set it's parent to be the parent of this
// node (viz., graft the node's child to the node's parent)
let child = self.get_mut_node(node.children[0])?;
child.parent_hash = node.parent_hash;

// Graft the parent of this node to it's child.
if let Some(parent_hash) = node.parent_hash {
let parent = self.get_mut_node(parent_hash)?;
parent.replace_child(node.block_hash, node.children[0])?;
}

true
} else if node.children.is_empty() {
// A node which has no children may be deleted and potentially it's parent
// too.
self.maybe_delete_node(parent_hash)?;

true
} else {
// It is impossible for a node to have a number of children that is not 0, 1 or
// greater than one.
//
// This code is strictly unnecessary, however we keep it for readability.
unreachable!();
// Graft the parent of this node to it's child.
if let Some(parent_hash) = node.parent_hash {
let parent = self.get_mut_node(parent_hash)?;
parent.replace_child(node.block_hash, node.children[0])?;
}
} else {
// A node without a parent is the genesis/finalized node and should never be removed.
false
}
};

if should_delete {
self.nodes.remove(&vote.hash);
self.nodes.remove(&vote.hash);
} else if node.children.is_empty() {
// Remove the to-be-deleted node from it's parent.
if let Some(parent_hash) = node.parent_hash {
self.get_mut_node(parent_hash)?
.remove_child(node.block_hash)?;
}

self.nodes.remove(&vote.hash);

// A node which has no children may be deleted and potentially it's parent
// too.
self.maybe_delete_node(parent_hash)?;
} else {
// It is impossible for a node to have a number of children that is not 0, 1 or
// greater than one.
//
// This code is strictly unnecessary, however we keep it for readability.
unreachable!();
}
} else {
// A node without a parent is the genesis/finalized node and should never be removed.
}

self.latest_votes.insert(validator_index, Some(vote));
@@ -333,23 +360,27 @@ where
Ok(())
}

/// Deletes a node if it is unnecessary.
///
/// Any node is unnecessary if all of the following are true:
///
/// - it is not the root node.
/// - it only has one child.
/// - it does not have any votes.
fn maybe_delete_node(&mut self, hash: Hash256) -> Result<()> {
let should_delete = {
let node = self.get_node(hash)?.clone();

if let Some(parent_hash) = node.parent_hash {
if (node.children.len() == 1) && !node.has_votes() {
// Graft the child to it's grandparent.
let child_hash = {
let child_node = self.get_mut_node(node.children[0])?;
child_node.parent_hash = node.parent_hash;
let child_hash = node.children[0];

child_node.block_hash
};
// Graft the single descendant `node` to the `parent` of node.
self.get_mut_node(child_hash)?.parent_hash = Some(parent_hash);

// Graft the grandparent to it's grandchild.
let parent_node = self.get_mut_node(parent_hash)?;
parent_node.replace_child(node.block_hash, child_hash)?;
// Detach `node` from `parent`, replacing it with `child`.
self.get_mut_node(parent_hash)?
.replace_child(hash, child_hash)?;

true
} else {
@@ -385,7 +416,7 @@ where
}

fn add_weightless_node(&mut self, slot: Slot, hash: Hash256) -> Result<()> {
if slot >= self.root_slot() && !self.nodes.contains_key(&hash) {
if slot > self.root_slot() && !self.nodes.contains_key(&hash) {
let node = Node {
block_hash: hash,
..Node::default()
@@ -393,6 +424,8 @@

self.add_node(node)?;

// Read the `parent_hash` from the newly created node. If it has a parent (i.e., it's
// not the root), see if it is superfluous.
if let Some(parent_hash) = self.get_node(hash)?.parent_hash {
self.maybe_delete_node(parent_hash)?;
}
@@ -401,75 +434,108 @@
Ok(())
}

/// Add `node` to the reduced tree, returning an error if `node` is not rooted in the tree.
fn add_node(&mut self, mut node: Node) -> Result<()> {
// Find the highest (by slot) ancestor of the given hash/block that is in the reduced tree.
let mut prev_in_tree = {
let hash = self
.find_prev_in_tree(node.block_hash)
.ok_or_else(|| Error::NotInTree(node.block_hash))?;
self.get_mut_node(hash)?.clone()
};

let mut added = false;
// Find the highest (by slot) ancestor of the given node in the reduced tree.
//
// If this node has no ancestor in the tree, exit early.
let mut prev_in_tree = self
.find_prev_in_tree(node.block_hash)
.ok_or_else(|| Error::NotInTree(node.block_hash))
.and_then(|hash| self.get_node(hash))?
.clone();

// If the ancestor of `node` has children, there are three possible operations:
//
// 1. Graft the `node` between two existing nodes.
// 2. Create another node that will be grafted between two existing nodes, then graft
// `node` to it.
// 3. Graft `node` to an existing node.
if !prev_in_tree.children.is_empty() {
for &child_hash in &prev_in_tree.children {
// 1. Graft the new node between two existing nodes.
//
// If `node` is a descendant of `prev_in_tree` but an ancestor of a child connected to
// `prev_in_tree`.
//
// This means that `node` can be grafted between `prev_in_tree` and the child that is a
// descendant of both `node` and `prev_in_tree`.
if self
.iter_ancestors(child_hash)?
.any(|(ancestor, _slot)| ancestor == node.block_hash)
{
let child = self.get_mut_node(child_hash)?;

// Graft `child` to `node`.
child.parent_hash = Some(node.block_hash);
// Graft `node` to `child`.
node.children.push(child_hash);
// Detach `child` from `prev_in_tree`, replacing it with `node`.
prev_in_tree.replace_child(child_hash, node.block_hash)?;
// Graft `node` to `prev_in_tree`.
node.parent_hash = Some(prev_in_tree.block_hash);

added = true;

break;
}
}

if !added {
// 2. Create another node that will be grafted between two existing nodes, then graft
// `node` to it.
//
// Note: given that `prev_in_tree` has children and that `node` is not an ancestor of
// any of the children of `prev_in_tree`, we know that `node` is on a different fork to
// all of the children of `prev_in_tree`.
if node.parent_hash.is_none() {
for &child_hash in &prev_in_tree.children {
// Find the highest (by slot) common ancestor between `node` and `child`.
//
// The common ancestor is the last block before `node` and `child` forked.
let ancestor_hash =
self.find_least_common_ancestor(node.block_hash, child_hash)?;
self.find_highest_common_ancestor(node.block_hash, child_hash)?;

// If the block before `node` and `child` forked is _not_ `prev_in_tree` we
// must add this new block into the tree (because it is a decision node
// between two forks).
if ancestor_hash != prev_in_tree.block_hash {
let child = self.get_mut_node(child_hash)?;

// Create a new `common_ancestor` node which represents the `ancestor_hash`
// block, has `prev_in_tree` as the parent and has both `node` and `child`
// as children.
let common_ancestor = Node {
block_hash: ancestor_hash,
parent_hash: Some(prev_in_tree.block_hash),
children: vec![node.block_hash, child_hash],
..Node::default()
};

// Graft `child` and `node` to `common_ancestor`.
child.parent_hash = Some(common_ancestor.block_hash);
node.parent_hash = Some(common_ancestor.block_hash);

prev_in_tree.replace_child(child_hash, ancestor_hash)?;
// Detach `child` from `prev_in_tree`, replacing it with `common_ancestor`.
prev_in_tree.replace_child(child_hash, common_ancestor.block_hash)?;

// Store the new `common_ancestor` node.
self.nodes
.insert(common_ancestor.block_hash, common_ancestor);

added = true;

break;
}
}
}
}

if !added {
if node.parent_hash.is_none() {
// 3. Graft `node` to an existing node.
//
// Graft `node` to `prev_in_tree` and `prev_in_tree` to `node`
node.parent_hash = Some(prev_in_tree.block_hash);
prev_in_tree.children.push(node.block_hash);
}

// Update `prev_in_tree`. A mutable reference was not maintained to satisfy the borrow
// checker.
//
// This is not an ideal solution and results in unnecessary memory copies -- a better
// solution is certainly possible.
// checker. Perhaps there's a better way?
self.nodes.insert(prev_in_tree.block_hash, prev_in_tree);
self.nodes.insert(node.block_hash, node);

@@ -485,62 +551,112 @@
.and_then(|(root, _slot)| Some(root))
}

/// For the given `child` block hash, return the block's ancestor at the given `target` slot.
fn find_ancestor_at_slot(&self, child: Hash256, target: Slot) -> Result<Hash256> {
let (root, slot) = self
.iter_ancestors(child)?
.find(|(_block, slot)| *slot <= target)
.ok_or_else(|| Error::NotInTree(child))?;

// Explicitly check that the slot is the target in the case that the given child has a slot
// above target.
if slot == target {
Ok(root)
} else {
Err(Error::NotInTree(child))
}
}

/// For the two given block roots (`a_root` and `b_root`), find the first block they share in
/// the tree. Viz, find the block that these two distinct blocks forked from.
fn find_least_common_ancestor(&self, a_root: Hash256, b_root: Hash256) -> Result<Hash256> {
// If the blocks behind `a_root` and `b_root` are not at the same slot, take the highest
// block (by slot) down to be equal with the lower slot.
//
// The result is two roots which identify two blocks at the same height.
let (a_root, b_root) = {
let a = self.get_block(a_root)?;
let b = self.get_block(b_root)?;
fn find_highest_common_ancestor(&self, a_root: Hash256, b_root: Hash256) -> Result<Hash256> {
let mut a_iter = self.iter_ancestors(a_root)?;
let mut b_iter = self.iter_ancestors(b_root)?;

if a.slot > b.slot {
(self.find_ancestor_at_slot(a_root, b.slot)?, b_root)
} else if b.slot > a.slot {
(a_root, self.find_ancestor_at_slot(b_root, a.slot)?)
} else {
(a_root, b_root)
// Combines the `next()` fns on the `a_iter` and `b_iter` and returns the roots of two
// blocks at the same slot, or `None` if we have gone past genesis or the root of this tree.
let mut iter_blocks_at_same_height = || -> Option<(Hash256, Hash256)> {
match (a_iter.next(), b_iter.next()) {
(Some((mut a_root, a_slot)), Some((mut b_root, b_slot))) => {
// If either of the slots are lower than the root of this tree, exit early.
if a_slot < self.root.1 || b_slot < self.root.1 {
None
} else {
if a_slot < b_slot {
for _ in a_slot.as_u64()..b_slot.as_u64() {
b_root = b_iter.next()?.0;
}
} else if a_slot > b_slot {
for _ in b_slot.as_u64()..a_slot.as_u64() {
a_root = a_iter.next()?.0;
}
}

Some((a_root, b_root))
}
}
_ => None,
}
};

let ((a_root, _a_slot), (_b_root, _b_slot)) = self
.iter_ancestors(a_root)?
.zip(self.iter_ancestors(b_root)?)
.find(|((a_root, _), (b_root, _))| a_root == b_root)
.ok_or_else(|| Error::NoCommonAncestor((a_root, b_root)))?;

Ok(a_root)
loop {
match iter_blocks_at_same_height() {
Some((a_root, b_root)) if a_root == b_root => break Ok(a_root),
Some(_) => (),
None => break Err(Error::NoCommonAncestor((a_root, b_root))),
}
}
}

fn iter_ancestors(&self, child: Hash256) -> Result<BestBlockRootsIterator<E, T>> {
fn iter_ancestors(&self, child: Hash256) -> Result<BlockRootsIterator<E, T>> {
let block = self.get_block(child)?;
let state = self.get_state(block.state_root)?;

Ok(BestBlockRootsIterator::owned(
Ok(BlockRootsIterator::owned(
self.store.clone(),
state,
block.slot - 1,
))
}

/// Verify the integrity of `self`. Returns `Ok(())` if the tree has integrity, otherwise returns `Err(description)`.
///
/// Tries to detect the following erroneous conditions:
///
/// - Dangling references inside the tree.
/// - Any scenario where there's not exactly one root node.
///
/// ## Notes
///
/// Computationally intensive, likely only useful during testing.
pub fn verify_integrity(&self) -> std::result::Result<(), String> {
let num_root_nodes = self
.nodes
.iter()
.filter(|(_key, node)| node.parent_hash.is_none())
.count();

if num_root_nodes != 1 {
return Err(format!(
"Tree has {} roots, should have exactly one.",
num_root_nodes
));
}

let verify_node_exists = |key: Hash256, msg: String| -> std::result::Result<(), String> {
if self.nodes.contains_key(&key) {
Ok(())
} else {
Err(msg)
}
};

// Iterate through all the nodes and ensure all references they store are valid.
self.nodes
.iter()
.map(|(_key, node)| {
if let Some(parent_hash) = node.parent_hash {
verify_node_exists(parent_hash, "parent must exist".to_string())?;
}

node.children
.iter()
.map(|child| verify_node_exists(*child, "child_must_exist".to_string()))
.collect::<std::result::Result<(), String>>()?;

verify_node_exists(node.block_hash, "block hash must exist".to_string())?;

Ok(())
})
.collect::<std::result::Result<(), String>>()?;

Ok(())
}

fn get_node(&self, hash: Hash256) -> Result<&Node> {
self.nodes
.get(&hash)
@@ -595,6 +711,18 @@ impl Node {
Ok(())
}

pub fn remove_child(&mut self, child: Hash256) -> Result<()> {
let i = self
.children
.iter()
.position(|&c| c == child)
.ok_or_else(|| Error::MissingChild(child))?;

self.children.remove(i);

Ok(())
}

pub fn remove_voter(&mut self, voter: usize) -> Option<usize> {
let i = self.voters.iter().position(|&v| v == voter)?;
Some(self.voters.remove(i))
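Aside (not part of the diff): a hedged, standalone sketch of the lockstep ancestor walk that the new `find_highest_common_ancestor` above performs, using plain `(root, slot)` slices in place of the store-backed `BlockRootsIterator`. All names and values are illustrative; the `root_slot` bound models how the real implementation never searches back past the root of the reduced tree (the finalized block).

```rust
type Root = u64; // stand-in for Hash256
type Slot = u64;

/// Walks two ancestor chains (highest slot first) in lockstep and returns the
/// first root they share, or `None` if either chain is exhausted or the walk
/// would pass `root_slot`.
fn highest_common_ancestor(
    a: &[(Root, Slot)],
    b: &[(Root, Slot)],
    root_slot: Slot,
) -> Option<Root> {
    let mut ai = a.iter().copied();
    let mut bi = b.iter().copied();
    let mut ca = ai.next()?;
    let mut cb = bi.next()?;
    loop {
        // Never search back past the root of the tree.
        if ca.1 < root_slot || cb.1 < root_slot {
            return None;
        }
        if ca.1 > cb.1 {
            // `a` is ahead (skip slots make the chains uneven): walk it back.
            ca = ai.next()?;
        } else if cb.1 > ca.1 {
            // `b` is ahead: walk it back.
            cb = bi.next()?;
        } else if ca.0 == cb.0 {
            // Same slot, same root: highest common ancestor found.
            return Some(ca.0);
        } else {
            // Same slot, different blocks: step both chains back one ancestor.
            ca = ai.next()?;
            cb = bi.next()?;
        }
    }
}

fn main() {
    // Two forks that share ancestors at slots 2 and below.
    let a = [(14, 5), (13, 4), (12, 3), (2, 2), (1, 1)];
    let b = [(24, 4), (23, 3), (2, 2), (1, 1)];
    assert_eq!(highest_common_ancestor(&a, &b, 1), Some(2));
}
```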
eth2/lmd_ghost/tests/test.rs (new file, 359 lines)
@@ -0,0 +1,359 @@
#![cfg(not(debug_assertions))]

#[macro_use]
extern crate lazy_static;

use beacon_chain::test_utils::{
AttestationStrategy, BeaconChainHarness as BaseBeaconChainHarness, BlockStrategy,
};
use lmd_ghost::{LmdGhost, ThreadSafeReducedTree as BaseThreadSafeReducedTree};
use rand::{prelude::*, rngs::StdRng};
use std::sync::Arc;
use store::{
iter::{AncestorIter, BestBlockRootsIterator},
MemoryStore, Store,
};
use types::{BeaconBlock, EthSpec, Hash256, MinimalEthSpec, Slot};

// Should ideally be divisible by 3.
pub const VALIDATOR_COUNT: usize = 3 * 8;

type TestEthSpec = MinimalEthSpec;
type ThreadSafeReducedTree = BaseThreadSafeReducedTree<MemoryStore, TestEthSpec>;
type BeaconChainHarness = BaseBeaconChainHarness<ThreadSafeReducedTree, TestEthSpec>;
type RootAndSlot = (Hash256, Slot);

lazy_static! {
/// A lazy-static instance of a `BeaconChainHarness` that contains two forks.
///
/// Reduces test setup time by providing a common harness.
static ref FORKED_HARNESS: ForkedHarness = ForkedHarness::new();
}

/// Contains a `BeaconChainHarness` that has two forks, caused by a validator skipping a slot and
/// then some validators building on one head and some on the other.
///
/// Care should be taken to ensure that the `ForkedHarness` does not expose any interior mutability
/// from it's fields. This would cause cross-contamination between tests when used with
/// `lazy_static`.
struct ForkedHarness {
/// Private (not `pub`) because the `BeaconChainHarness` has interior mutability. We
/// don't expose it to avoid contamination between tests.
harness: BeaconChainHarness,
pub genesis_block_root: Hash256,
pub genesis_block: BeaconBlock,
pub honest_head: RootAndSlot,
pub faulty_head: RootAndSlot,
pub honest_roots: Vec<RootAndSlot>,
pub faulty_roots: Vec<RootAndSlot>,
}

impl ForkedHarness {
/// A new standard instance of with constant parameters.
pub fn new() -> Self {
// let (harness, honest_roots, faulty_roots) = get_harness_containing_two_forks();
let harness = BeaconChainHarness::new(VALIDATOR_COUNT);

// Move past the zero slot.
harness.advance_slot();

let delay = TestEthSpec::default_spec().min_attestation_inclusion_delay as usize;

let initial_blocks = delay + 5;

// Build an initial chain where all validators agree.
harness.extend_chain(
initial_blocks,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
);

let two_thirds = (VALIDATOR_COUNT / 3) * 2;
let honest_validators: Vec<usize> = (0..two_thirds).collect();
let faulty_validators: Vec<usize> = (two_thirds..VALIDATOR_COUNT).collect();
let honest_fork_blocks = delay + 5;
let faulty_fork_blocks = delay + 5;

let (honest_head, faulty_head) = harness.generate_two_forks_by_skipping_a_block(
&honest_validators,
&faulty_validators,
honest_fork_blocks,
faulty_fork_blocks,
);

let mut honest_roots =
get_ancestor_roots::<TestEthSpec, _>(harness.chain.store.clone(), honest_head);

honest_roots.insert(
0,
(honest_head, get_slot_for_block_root(&harness, honest_head)),
);

let mut faulty_roots =
get_ancestor_roots::<TestEthSpec, _>(harness.chain.store.clone(), faulty_head);

faulty_roots.insert(
0,
(faulty_head, get_slot_for_block_root(&harness, faulty_head)),
);

let genesis_block_root = harness.chain.genesis_block_root;
let genesis_block = harness
.chain
.store
.get::<BeaconBlock>(&genesis_block_root)
.expect("Genesis block should exist")
.expect("DB should not error");

Self {
harness,
genesis_block_root,
genesis_block,
honest_head: *honest_roots.last().expect("Chain cannot be empty"),
faulty_head: *faulty_roots.last().expect("Chain cannot be empty"),
honest_roots,
faulty_roots,
}
}

pub fn store_clone(&self) -> MemoryStore {
(*self.harness.chain.store).clone()
}

/// Return a brand-new, empty fork choice with a reference to `harness.store`.
pub fn new_fork_choice(&self) -> ThreadSafeReducedTree {
// Take a full clone of the store built by the harness.
//
// Taking a clone here ensures that each fork choice gets it's own store so there is no
// cross-contamination between tests.
let store: MemoryStore = self.store_clone();

ThreadSafeReducedTree::new(
Arc::new(store),
&self.genesis_block,
self.genesis_block_root,
)
}

pub fn all_block_roots(&self) -> Vec<RootAndSlot> {
let mut all_roots = self.honest_roots.clone();
all_roots.append(&mut self.faulty_roots.clone());

all_roots.dedup();

all_roots
}

pub fn weight_function(_validator_index: usize) -> Option<u64> {
Some(1)
}
}

/// Helper: returns all the ancestor roots and slots for a given block_root.
fn get_ancestor_roots<E: EthSpec, U: Store>(
store: Arc<U>,
block_root: Hash256,
) -> Vec<(Hash256, Slot)> {
let block = store
.get::<BeaconBlock>(&block_root)
.expect("block should exist")
.expect("store should not error");

<BeaconBlock as AncestorIter<_, BestBlockRootsIterator<E, _>>>::try_iter_ancestor_roots(
&block, store,
)
.expect("should be able to create ancestor iter")
.collect()
}

/// Helper: returns the slot for some block_root.
fn get_slot_for_block_root(harness: &BeaconChainHarness, block_root: Hash256) -> Slot {
harness
.chain
.store
.get::<BeaconBlock>(&block_root)
.expect("head block should exist")
.expect("DB should not error")
.slot
}

const RANDOM_ITERATIONS: usize = 50;
const RANDOM_ACTIONS_PER_ITERATION: usize = 100;

/// Create a single LMD instance and have one validator vote in reverse (highest to lowest slot)
/// down the chain.
#[test]
fn random_scenario() {
let harness = &FORKED_HARNESS;
let block_roots = harness.all_block_roots();
let validators: Vec<usize> = (0..VALIDATOR_COUNT).collect();
let mut rng = StdRng::seed_from_u64(9375205782030385); // Keyboard mash.

for _ in 0..RANDOM_ITERATIONS {
let lmd = harness.new_fork_choice();

for _ in 0..RANDOM_ACTIONS_PER_ITERATION {
let (root, slot) = block_roots[rng.next_u64() as usize % block_roots.len()];
let validator_index = validators[rng.next_u64() as usize % validators.len()];

lmd.process_attestation(validator_index, root, slot)
.expect("fork choice should accept randomly-placed attestations");

assert_eq!(
lmd.verify_integrity(),
Ok(()),
"New tree should have integrity"
);
}
}
}

/// Create a single LMD instance and have one validator vote in reverse (highest to lowest slot)
/// down the chain.
#[test]
fn single_voter_persistent_instance_reverse_order() {
let harness = &FORKED_HARNESS;

let lmd = harness.new_fork_choice();

assert_eq!(
lmd.verify_integrity(),
Ok(()),
"New tree should have integrity"
);

for (root, slot) in harness.honest_roots.iter().rev() {
lmd.process_attestation(0, *root, *slot)
.expect("fork choice should accept attestations to honest roots in reverse");

assert_eq!(
lmd.verify_integrity(),
Ok(()),
"Tree integrity should be maintained whilst processing attestations"
);
}

// The honest head should be selected.
let (head_root, head_slot) = harness.honest_roots.first().unwrap();
let (finalized_root, _) = harness.honest_roots.last().unwrap();

assert_eq!(
lmd.find_head(*head_slot, *finalized_root, ForkedHarness::weight_function),
Ok(*head_root),
"Honest head should be selected"
);
}

/// A single validator applies a single vote to each block in the honest fork, using a new tree
/// each time.
#[test]
fn single_voter_many_instance_honest_blocks_voting_forwards() {
let harness = &FORKED_HARNESS;

for (root, slot) in &harness.honest_roots {
let lmd = harness.new_fork_choice();
lmd.process_attestation(0, *root, *slot)
.expect("fork choice should accept attestations to honest roots");

assert_eq!(
lmd.verify_integrity(),
Ok(()),
"Tree integrity should be maintained whilst processing attestations"
);
}
}

/// Same as above, but in reverse order (votes on the highest honest block first).
#[test]
fn single_voter_many_instance_honest_blocks_voting_in_reverse() {
let harness = &FORKED_HARNESS;

// Same as above, but in reverse order (votes on the highest honest block first).
for (root, slot) in harness.honest_roots.iter().rev() {
let lmd = harness.new_fork_choice();
lmd.process_attestation(0, *root, *slot)
.expect("fork choice should accept attestations to honest roots in reverse");

assert_eq!(
lmd.verify_integrity(),
Ok(()),
"Tree integrity should be maintained whilst processing attestations"
);
}
}

/// A single validator applies a single vote to each block in the faulty fork, using a new tree
/// each time.
#[test]
fn single_voter_many_instance_faulty_blocks_voting_forwards() {
let harness = &FORKED_HARNESS;

for (root, slot) in &harness.faulty_roots {
let lmd = harness.new_fork_choice();
lmd.process_attestation(0, *root, *slot)
.expect("fork choice should accept attestations to faulty roots");

assert_eq!(
lmd.verify_integrity(),
Ok(()),
"Tree integrity should be maintained whilst processing attestations"
);
}
}

/// Same as above, but in reverse order (votes on the highest faulty block first).
#[test]
fn single_voter_many_instance_faulty_blocks_voting_in_reverse() {
let harness = &FORKED_HARNESS;

for (root, slot) in harness.faulty_roots.iter().rev() {
let lmd = harness.new_fork_choice();
lmd.process_attestation(0, *root, *slot)
.expect("fork choice should accept attestations to faulty roots in reverse");

assert_eq!(
lmd.verify_integrity(),
Ok(()),
"Tree integrity should be maintained whilst processing attestations"
);
}
}

/// Ensures that the finalized root can be set to all values in `roots`.
fn test_update_finalized_root(roots: &[(Hash256, Slot)]) {
let harness = &FORKED_HARNESS;

let lmd = harness.new_fork_choice();

for (root, _slot) in roots.iter().rev() {
let block = harness
.store_clone()
.get::<BeaconBlock>(root)
.expect("block should exist")
.expect("db should not error");
lmd.update_finalized_root(&block, *root)
.expect("finalized root should update for faulty fork");

assert_eq!(
lmd.verify_integrity(),
Ok(()),
"Tree integrity should be maintained after updating the finalized root"
);
}
}

/// Iterates from low-to-high slot through the faulty roots, updating the finalized root.
#[test]
fn update_finalized_root_faulty() {
let harness = &FORKED_HARNESS;

test_update_finalized_root(&harness.faulty_roots)
}

/// Iterates from low-to-high slot through the honest roots, updating the finalized root.
#[test]
fn update_finalized_root_honest() {
let harness = &FORKED_HARNESS;

test_update_finalized_root(&harness.honest_roots)
}