Add progress on reduced tree fork choice
This commit is contained in:
parent 4b4c9a98df
commit 4a3d54761a
@@ -1,6 +1,7 @@
[workspace]
members = [
    "eth2/fork_choice",
    "eth2/fork_choice_2",
    "eth2/operation_pool",
    "eth2/state_processing",
    "eth2/types",
135  beacon_node/store/src/iter.rs  Normal file
@@ -0,0 +1,135 @@
use crate::Store;
use std::sync::Arc;
use types::{BeaconBlock, BeaconState, BeaconStateError, EthSpec, Hash256, Slot};

/// Extends `BlockRootsIterator`, returning `BeaconBlock` instances, instead of their roots.
pub struct BlockIterator<T: EthSpec, U> {
    roots: BlockRootsIterator<T, U>,
}

impl<T: EthSpec, U: Store> BlockIterator<T, U> {
    /// Create a new iterator over all blocks in the given `beacon_state` and prior states.
    pub fn new(store: Arc<U>, beacon_state: BeaconState<T>, start_slot: Slot) -> Self {
        Self {
            roots: BlockRootsIterator::new(store, beacon_state, start_slot),
        }
    }
}

impl<T: EthSpec, U: Store> Iterator for BlockIterator<T, U> {
    type Item = BeaconBlock;

    fn next(&mut self) -> Option<Self::Item> {
        let (root, _slot) = self.roots.next()?;
        self.roots.store.get(&root).ok()?
    }
}

/// Iterates backwards through block roots.
///
/// Uses the `latest_block_roots` field of `BeaconState` as the source of block roots and will
/// perform a lookup on the `Store` for a prior `BeaconState` if `latest_block_roots` has been
/// exhausted.
///
/// Returns `None` for roots prior to genesis or when there is an error reading from `Store`.
pub struct BlockRootsIterator<T: EthSpec, U> {
    store: Arc<U>,
    beacon_state: BeaconState<T>,
    slot: Slot,
}

impl<T: EthSpec, U: Store> BlockRootsIterator<T, U> {
    /// Create a new iterator over all block roots in the given `beacon_state` and prior states.
    pub fn new(store: Arc<U>, beacon_state: BeaconState<T>, start_slot: Slot) -> Self {
        Self {
            slot: start_slot,
            beacon_state,
            store,
        }
    }
}

impl<T: EthSpec, U: Store> Iterator for BlockRootsIterator<T, U> {
    type Item = (Hash256, Slot);

    fn next(&mut self) -> Option<Self::Item> {
        if (self.slot == 0) || (self.slot > self.beacon_state.slot) {
            return None;
        }

        self.slot -= 1;

        match self.beacon_state.get_block_root(self.slot) {
            Ok(root) => Some((*root, self.slot)),
            Err(BeaconStateError::SlotOutOfBounds) => {
                // Read a `BeaconState` from the store that has access to prior historical root.
                self.beacon_state = {
                    // Load the earlier state from disk. Skip forward one slot, because a state
                    // doesn't return its own state root.
                    let new_state_root = self.beacon_state.get_state_root(self.slot + 1).ok()?;

                    self.store.get(&new_state_root).ok()?
                }?;

                let root = self.beacon_state.get_block_root(self.slot).ok()?;

                Some((*root, self.slot))
            }
            _ => None,
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::MemoryStore;
    use types::{test_utils::TestingBeaconStateBuilder, Keypair, MainnetEthSpec};

    fn get_state<T: EthSpec>() -> BeaconState<T> {
        let builder = TestingBeaconStateBuilder::from_single_keypair(
            0,
            &Keypair::random(),
            &T::default_spec(),
        );
        let (state, _keypairs) = builder.build();
        state
    }

    #[test]
    fn root_iter() {
        let store = Arc::new(MemoryStore::open());
        let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root();

        let mut state_a: BeaconState<MainnetEthSpec> = get_state();
        let mut state_b: BeaconState<MainnetEthSpec> = get_state();

        state_a.slot = Slot::from(slots_per_historical_root);
        state_b.slot = Slot::from(slots_per_historical_root * 2);

        let mut hashes = (0..).into_iter().map(|i| Hash256::from(i));

        for root in &mut state_a.latest_block_roots[..] {
            *root = hashes.next().unwrap()
        }
        for root in &mut state_b.latest_block_roots[..] {
            *root = hashes.next().unwrap()
        }

        let state_a_root = hashes.next().unwrap();
        state_b.latest_state_roots[0] = state_a_root;
        store.put(&state_a_root, &state_a).unwrap();

        let iter = BlockRootsIterator::new(store.clone(), state_b.clone(), state_b.slot - 1);
        let mut collected: Vec<(Hash256, Slot)> = iter.collect();
        collected.reverse();

        let expected_len = 2 * MainnetEthSpec::slots_per_historical_root() - 1;

        assert_eq!(collected.len(), expected_len);

        for i in 0..expected_len {
            assert_eq!(collected[i].0, Hash256::from(i as u64));
        }
    }
}
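A minimal usage sketch of `BlockRootsIterator` follows. It assumes a populated `MemoryStore` and a `BeaconState` whose prior states are saved under their state roots, exactly as in the `root_iter` test above; `ancestors_of` is a hypothetical helper, not part of this commit.

```rust
// Sketch only: walk block roots backwards from a state, as the reduced tree
// fork choice will need to do when resolving ancestors.
use std::sync::Arc;
use store::{iter::BlockRootsIterator, MemoryStore};
use types::{BeaconState, Hash256, MainnetEthSpec, Slot};

fn ancestors_of(
    store: Arc<MemoryStore>,
    state: BeaconState<MainnetEthSpec>,
) -> Vec<(Hash256, Slot)> {
    // Start one slot below the state's own slot; iteration stops at genesis
    // or when a prior state cannot be read from the store.
    let start_slot = state.slot - 1;
    BlockRootsIterator::new(store, state, start_slot).collect()
}
```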
@@ -14,6 +14,8 @@ mod impls;
mod leveldb_store;
mod memory_store;

pub mod iter;

pub use self::leveldb_store::LevelDB as DiskStore;
pub use self::memory_store::MemoryStore;
pub use errors::Error;
25  eth2/fork_choice/fork_choice/Cargo.toml  Normal file
@@ -0,0 +1,25 @@
[package]
name = "fork_choice"
version = "0.1.0"
authors = ["Age Manning <Age@AgeManning.com>"]
edition = "2018"

[[bench]]
name = "benches"
harness = false

[dependencies]
store = { path = "../../beacon_node/store" }
ssz = { path = "../utils/ssz" }
types = { path = "../types" }
log = "0.4.6"
bit-vec = "0.5.0"

[dev-dependencies]
criterion = "0.2"
hex = "0.3.2"
yaml-rust = "0.4.2"
bls = { path = "../utils/bls" }
slot_clock = { path = "../utils/slot_clock" }
beacon_chain = { path = "../../beacon_node/beacon_chain" }
env_logger = "0.6.0"
75  eth2/fork_choice/fork_choice/benches/benches.rs  Normal file
@@ -0,0 +1,75 @@
|
||||
use criterion::Criterion;
|
||||
use criterion::{criterion_group, criterion_main, Benchmark};
|
||||
use fork_choice::{test_utils::TestingForkChoiceBuilder, ForkChoice, OptimizedLMDGhost};
|
||||
use std::sync::Arc;
|
||||
use store::MemoryStore;
|
||||
use types::{ChainSpec, EthSpec, MainnetEthSpec};
|
||||
|
||||
pub type TestedForkChoice<T, U> = OptimizedLMDGhost<T, U>;
|
||||
pub type TestedEthSpec = MainnetEthSpec;
|
||||
|
||||
/// Helper function to set up a builder and spec.
|
||||
fn setup(
|
||||
validator_count: usize,
|
||||
chain_length: usize,
|
||||
) -> (
|
||||
TestingForkChoiceBuilder<MemoryStore, TestedEthSpec>,
|
||||
ChainSpec,
|
||||
) {
|
||||
let store = MemoryStore::open();
|
||||
let builder: TestingForkChoiceBuilder<MemoryStore, TestedEthSpec> =
|
||||
TestingForkChoiceBuilder::new(validator_count, chain_length, Arc::new(store));
|
||||
let spec = TestedEthSpec::default_spec();
|
||||
|
||||
(builder, spec)
|
||||
}
|
||||
|
||||
/// Benches adding blocks to fork_choice.
|
||||
fn add_block(c: &mut Criterion) {
|
||||
let validator_count = 16;
|
||||
let chain_length = 100;
|
||||
|
||||
let (builder, spec) = setup(validator_count, chain_length);
|
||||
|
||||
c.bench(
|
||||
&format!("{}_blocks", chain_length),
|
||||
Benchmark::new("add_blocks", move |b| {
|
||||
b.iter(|| {
|
||||
let mut fc = builder.build::<TestedForkChoice<MemoryStore, TestedEthSpec>>();
|
||||
for (root, block) in builder.chain.iter().skip(1) {
|
||||
fc.add_block(block, root, &spec).unwrap();
|
||||
}
|
||||
})
|
||||
})
|
||||
.sample_size(10),
|
||||
);
|
||||
}
|
||||
|
||||
/// Benches fork choice head finding.
|
||||
fn find_head(c: &mut Criterion) {
|
||||
let validator_count = 16;
|
||||
let chain_length = 64 * 2;
|
||||
|
||||
let (builder, spec) = setup(validator_count, chain_length);
|
||||
|
||||
let mut fc = builder.build::<TestedForkChoice<MemoryStore, TestedEthSpec>>();
|
||||
for (root, block) in builder.chain.iter().skip(1) {
|
||||
fc.add_block(block, root, &spec).unwrap();
|
||||
}
|
||||
|
||||
let head_root = builder.chain.last().unwrap().0;
|
||||
for i in 0..validator_count {
|
||||
fc.add_attestation(i as u64, &head_root, &spec).unwrap();
|
||||
}
|
||||
|
||||
c.bench(
|
||||
&format!("{}_blocks", chain_length),
|
||||
Benchmark::new("find_head", move |b| {
|
||||
b.iter(|| fc.find_head(&builder.genesis_root(), &spec).unwrap())
|
||||
})
|
||||
.sample_size(10),
|
||||
);
|
||||
}
|
||||
|
||||
criterion_group!(benches, add_block, find_head);
|
||||
criterion_main!(benches);
|
40  eth2/fork_choice/fork_choice/examples/example.rs  Normal file
@@ -0,0 +1,40 @@
use fork_choice::{test_utils::TestingForkChoiceBuilder, ForkChoice, OptimizedLMDGhost};
use std::sync::Arc;
use store::{MemoryStore, Store};
use types::{BeaconBlock, ChainSpec, EthSpec, Hash256, MainnetEthSpec};

fn main() {
    let validator_count = 16;
    let chain_length = 100;
    let repetitions = 50;

    let store = MemoryStore::open();
    let builder: TestingForkChoiceBuilder<MemoryStore, MainnetEthSpec> =
        TestingForkChoiceBuilder::new(validator_count, chain_length, Arc::new(store));

    let fork_choosers: Vec<OptimizedLMDGhost<MemoryStore, MainnetEthSpec>> = (0..repetitions)
        .into_iter()
        .map(|_| builder.build())
        .collect();

    let spec = &MainnetEthSpec::default_spec();

    println!("Running {} times...", repetitions);
    for fc in fork_choosers {
        do_thing(fc, &builder.chain, builder.genesis_root(), spec);
    }
}

#[inline(never)]
fn do_thing<F: ForkChoice<S>, S: Store>(
    mut fc: F,
    chain: &[(Hash256, BeaconBlock)],
    genesis_root: Hash256,
    spec: &ChainSpec,
) {
    for (root, block) in chain.iter().skip(1) {
        fc.add_block(block, root, spec).unwrap();
    }

    let _head = fc.find_head(&genesis_root, spec).unwrap();
}
@@ -0,0 +1,144 @@
|
||||
title: Fork-choice Tests
|
||||
summary: A collection of abstract fork-choice tests for bitwise lmd ghost.
|
||||
test_suite: Fork-Choice
|
||||
|
||||
test_cases:
|
||||
- blocks:
|
||||
- id: 'b0'
|
||||
parent: 'b0'
|
||||
- id: 'b1'
|
||||
parent: 'b0'
|
||||
- id: 'b2'
|
||||
parent: 'b1'
|
||||
- id: 'b3'
|
||||
parent: 'b1'
|
||||
weights:
|
||||
- b0: 0
|
||||
- b1: 0
|
||||
- b2: 5
|
||||
- b3: 10
|
||||
heads:
|
||||
- id: 'b3'
|
||||
# bitwise LMD ghost example. bitwise GHOST gives b2
|
||||
- blocks:
|
||||
- id: 'b0'
|
||||
parent: 'b0'
|
||||
- id: 'b1'
|
||||
parent: 'b0'
|
||||
- id: 'b2'
|
||||
parent: 'b0'
|
||||
- id: 'b3'
|
||||
parent: 'b0'
|
||||
weights:
|
||||
- b1: 5
|
||||
- b2: 4
|
||||
- b3: 3
|
||||
heads:
|
||||
- id: 'b2'
|
||||
- blocks:
|
||||
- id: 'b0'
|
||||
parent: 'b0'
|
||||
- id: 'b1'
|
||||
parent: 'b0'
|
||||
- id: 'b2'
|
||||
parent: 'b0'
|
||||
- id: 'b3'
|
||||
parent: 'b1'
|
||||
- id: 'b4'
|
||||
parent: 'b1'
|
||||
- id: 'b5'
|
||||
parent: 'b1'
|
||||
- id: 'b6'
|
||||
parent: 'b2'
|
||||
- id: 'b7'
|
||||
parent: 'b6'
|
||||
weights:
|
||||
- b0: 0
|
||||
- b1: 3
|
||||
- b2: 2
|
||||
- b3: 1
|
||||
- b4: 1
|
||||
- b5: 1
|
||||
- b6: 2
|
||||
- b7: 2
|
||||
heads:
|
||||
- id: 'b4'
|
||||
- blocks:
|
||||
- id: 'b0'
|
||||
parent: 'b0'
|
||||
- id: 'b1'
|
||||
parent: 'b0'
|
||||
- id: 'b2'
|
||||
parent: 'b0'
|
||||
- id: 'b3'
|
||||
parent: 'b0'
|
||||
- id: 'b4'
|
||||
parent: 'b1'
|
||||
- id: 'b5'
|
||||
parent: 'b1'
|
||||
- id: 'b6'
|
||||
parent: 'b2'
|
||||
- id: 'b7'
|
||||
parent: 'b2'
|
||||
- id: 'b8'
|
||||
parent: 'b3'
|
||||
- id: 'b9'
|
||||
parent: 'b3'
|
||||
weights:
|
||||
- b1: 2
|
||||
- b2: 1
|
||||
- b3: 1
|
||||
- b4: 7
|
||||
- b5: 5
|
||||
- b6: 2
|
||||
- b7: 4
|
||||
- b8: 4
|
||||
- b9: 2
|
||||
heads:
|
||||
- id: 'b4'
|
||||
- blocks:
|
||||
- id: 'b0'
|
||||
parent: 'b0'
|
||||
- id: 'b1'
|
||||
parent: 'b0'
|
||||
- id: 'b2'
|
||||
parent: 'b0'
|
||||
- id: 'b3'
|
||||
parent: 'b0'
|
||||
- id: 'b4'
|
||||
parent: 'b1'
|
||||
- id: 'b5'
|
||||
parent: 'b1'
|
||||
- id: 'b6'
|
||||
parent: 'b2'
|
||||
- id: 'b7'
|
||||
parent: 'b2'
|
||||
- id: 'b8'
|
||||
parent: 'b3'
|
||||
- id: 'b9'
|
||||
parent: 'b3'
|
||||
weights:
|
||||
- b1: 1
|
||||
- b2: 1
|
||||
- b3: 1
|
||||
- b4: 7
|
||||
- b5: 5
|
||||
- b6: 2
|
||||
- b7: 4
|
||||
- b8: 4
|
||||
- b9: 2
|
||||
heads:
|
||||
- id: 'b7'
|
||||
- blocks:
|
||||
- id: 'b0'
|
||||
parent: 'b0'
|
||||
- id: 'b1'
|
||||
parent: 'b0'
|
||||
- id: 'b2'
|
||||
parent: 'b0'
|
||||
weights:
|
||||
- b1: 0
|
||||
- b2: 0
|
||||
heads:
|
||||
- id: 'b1'
|
||||
|
@@ -0,0 +1,65 @@
|
||||
title: Fork-choice Tests
|
||||
summary: A collection of abstract fork-choice tests for lmd ghost.
|
||||
test_suite: Fork-Choice
|
||||
|
||||
test_cases:
|
||||
- blocks:
|
||||
- id: 'b0'
|
||||
parent: 'b0'
|
||||
- id: 'b1'
|
||||
parent: 'b0'
|
||||
- id: 'b2'
|
||||
parent: 'b1'
|
||||
- id: 'b3'
|
||||
parent: 'b1'
|
||||
weights:
|
||||
- b0: 0
|
||||
- b1: 0
|
||||
- b2: 5
|
||||
- b3: 10
|
||||
heads:
|
||||
- id: 'b3'
|
||||
# bitwise LMD ghost example. GHOST gives b1
|
||||
- blocks:
|
||||
- id: 'b0'
|
||||
parent: 'b0'
|
||||
- id: 'b1'
|
||||
parent: 'b0'
|
||||
- id: 'b2'
|
||||
parent: 'b0'
|
||||
- id: 'b3'
|
||||
parent: 'b0'
|
||||
weights:
|
||||
- b1: 5
|
||||
- b2: 4
|
||||
- b3: 3
|
||||
heads:
|
||||
- id: 'b1'
|
||||
# children with equal weights. Should choose the lower hash, b2
|
||||
- blocks:
|
||||
- id: 'b0'
|
||||
parent: 'b0'
|
||||
- id: 'b1'
|
||||
parent: 'b0'
|
||||
- id: 'b2'
|
||||
parent: 'b0'
|
||||
- id: 'b3'
|
||||
parent: 'b0'
|
||||
weights:
|
||||
- b1: 5
|
||||
- b2: 6
|
||||
- b3: 6
|
||||
heads:
|
||||
- id: 'b2'
|
||||
- blocks:
|
||||
- id: 'b0'
|
||||
parent: 'b0'
|
||||
- id: 'b1'
|
||||
parent: 'b0'
|
||||
- id: 'b2'
|
||||
parent: 'b0'
|
||||
weights:
|
||||
- b1: 0
|
||||
- b2: 0
|
||||
heads:
|
||||
- id: 'b1'
|
@@ -0,0 +1,51 @@
|
||||
title: Fork-choice Tests
|
||||
summary: A collection of abstract fork-choice tests to verify the longest chain fork-choice rule.
|
||||
test_suite: Fork-Choice
|
||||
|
||||
test_cases:
|
||||
- blocks:
|
||||
- id: 'b0'
|
||||
parent: 'b0'
|
||||
- id: 'b1'
|
||||
parent: 'b0'
|
||||
- id: 'b2'
|
||||
parent: 'b1'
|
||||
- id: 'b3'
|
||||
parent: 'b1'
|
||||
- id: 'b4'
|
||||
parent: 'b3'
|
||||
weights:
|
||||
- b0: 0
|
||||
- b1: 0
|
||||
- b2: 10
|
||||
- b3: 1
|
||||
heads:
|
||||
- id: 'b4'
|
||||
- blocks:
|
||||
- id: 'b0'
|
||||
parent: 'b0'
|
||||
- id: 'b1'
|
||||
parent: 'b0'
|
||||
- id: 'b2'
|
||||
parent: 'b1'
|
||||
- id: 'b3'
|
||||
parent: 'b2'
|
||||
- id: 'b4'
|
||||
parent: 'b3'
|
||||
- id: 'b5'
|
||||
parent: 'b0'
|
||||
- id: 'b6'
|
||||
parent: 'b5'
|
||||
- id: 'b7'
|
||||
parent: 'b6'
|
||||
- id: 'b8'
|
||||
parent: 'b7'
|
||||
- id: 'b9'
|
||||
parent: 'b8'
|
||||
weights:
|
||||
- b0: 5
|
||||
- b1: 20
|
||||
- b2: 10
|
||||
- b3: 10
|
||||
heads:
|
||||
- id: 'b9'
|
231  eth2/fork_choice/fork_choice/tests/tests.rs  Normal file
@@ -0,0 +1,231 @@
|
||||
#![cfg(not(debug_assertions))]
|
||||
/// Tests the available fork-choice algorithms
|
||||
pub use beacon_chain::BeaconChain;
|
||||
use bls::Signature;
|
||||
use store::MemoryStore;
|
||||
use store::Store;
|
||||
// use env_logger::{Builder, Env};
|
||||
use fork_choice::{BitwiseLMDGhost, ForkChoice, LongestChain, OptimizedLMDGhost, SlowLMDGhost};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use std::{fs::File, io::prelude::*, path::PathBuf};
|
||||
use types::test_utils::TestingBeaconStateBuilder;
|
||||
use types::{
|
||||
BeaconBlock, BeaconBlockBody, Eth1Data, EthSpec, Hash256, Keypair, MainnetEthSpec, Slot,
|
||||
};
|
||||
use yaml_rust::yaml;
|
||||
|
||||
// Note: we assume the block IDs are hex-encoded.
|
||||
|
||||
#[test]
|
||||
fn test_optimized_lmd_ghost() {
|
||||
// set up logging
|
||||
// Builder::from_env(Env::default().default_filter_or("trace")).init();
|
||||
|
||||
test_yaml_vectors::<OptimizedLMDGhost<MemoryStore, MainnetEthSpec>>(
|
||||
"tests/lmd_ghost_test_vectors.yaml",
|
||||
100,
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bitwise_lmd_ghost() {
|
||||
// set up logging
|
||||
//Builder::from_env(Env::default().default_filter_or("trace")).init();
|
||||
|
||||
test_yaml_vectors::<BitwiseLMDGhost<MemoryStore, MainnetEthSpec>>(
|
||||
"tests/bitwise_lmd_ghost_test_vectors.yaml",
|
||||
100,
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_slow_lmd_ghost() {
|
||||
test_yaml_vectors::<SlowLMDGhost<MemoryStore, MainnetEthSpec>>(
|
||||
"tests/lmd_ghost_test_vectors.yaml",
|
||||
100,
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_longest_chain() {
|
||||
test_yaml_vectors::<LongestChain<MemoryStore>>("tests/longest_chain_test_vectors.yaml", 100);
|
||||
}
|
||||
|
||||
// run a generic test over given YAML test vectors
|
||||
fn test_yaml_vectors<T: ForkChoice<MemoryStore>>(
|
||||
yaml_file_path: &str,
|
||||
emulated_validators: usize, // the number of validators used to give weights.
|
||||
) {
|
||||
// load test cases from yaml
|
||||
let test_cases = load_test_cases_from_yaml(yaml_file_path);
|
||||
|
||||
// default vars
|
||||
let spec = MainnetEthSpec::default_spec();
|
||||
let zero_hash = Hash256::zero();
|
||||
let eth1_data = Eth1Data {
|
||||
deposit_count: 0,
|
||||
deposit_root: zero_hash.clone(),
|
||||
block_hash: zero_hash.clone(),
|
||||
};
|
||||
let randao_reveal = Signature::empty_signature();
|
||||
let signature = Signature::empty_signature();
|
||||
let body = BeaconBlockBody {
|
||||
eth1_data,
|
||||
randao_reveal,
|
||||
graffiti: [0; 32],
|
||||
proposer_slashings: vec![],
|
||||
attester_slashings: vec![],
|
||||
attestations: vec![],
|
||||
deposits: vec![],
|
||||
voluntary_exits: vec![],
|
||||
transfers: vec![],
|
||||
};
|
||||
|
||||
// process the tests
|
||||
for test_case in test_cases {
|
||||
// setup a fresh test
|
||||
let (mut fork_choice, store, state_root) = setup_inital_state::<T>(emulated_validators);
|
||||
|
||||
// keep a hashmap of block_id's to block_hashes (random hashes to abstract block_id)
|
||||
//let mut block_id_map: HashMap<String, Hash256> = HashMap::new();
|
||||
// keep a list of hash to slot
|
||||
let mut block_slot: HashMap<Hash256, Slot> = HashMap::new();
|
||||
// assume the block tree is given to us in order.
|
||||
let mut genesis_hash = None;
|
||||
for block in test_case["blocks"].clone().into_vec().unwrap() {
|
||||
let block_id = block["id"].as_str().unwrap().to_string();
|
||||
let parent_id = block["parent"].as_str().unwrap().to_string();
|
||||
|
||||
// default params for genesis
|
||||
let block_hash = id_to_hash(&block_id);
|
||||
let mut slot = spec.genesis_slot;
|
||||
let previous_block_root = id_to_hash(&parent_id);
|
||||
|
||||
// set the slot and parent based on the YAML. Start with genesis;
|
||||
// if not the genesis, update slot
|
||||
if parent_id != block_id {
|
||||
// find parent slot
|
||||
slot = *(block_slot
|
||||
.get(&previous_block_root)
|
||||
.expect("Parent should have a slot number"))
|
||||
+ 1;
|
||||
} else {
|
||||
genesis_hash = Some(block_hash);
|
||||
}
|
||||
|
||||
// update slot mapping
|
||||
block_slot.insert(block_hash, slot);
|
||||
|
||||
// build the BeaconBlock
|
||||
let beacon_block = BeaconBlock {
|
||||
slot,
|
||||
previous_block_root,
|
||||
state_root: state_root.clone(),
|
||||
signature: signature.clone(),
|
||||
body: body.clone(),
|
||||
};
|
||||
|
||||
// Store the block.
|
||||
store.put(&block_hash, &beacon_block).unwrap();
|
||||
|
||||
// run add block for fork choice if not genesis
|
||||
if parent_id != block_id {
|
||||
fork_choice
|
||||
.add_block(&beacon_block, &block_hash, &spec)
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
// add the weights (attestations)
|
||||
let mut current_validator = 0;
|
||||
for id_map in test_case["weights"].clone().into_vec().unwrap() {
|
||||
// get the block id and weights
|
||||
for (map_id, map_weight) in id_map.as_hash().unwrap().iter() {
|
||||
let id = map_id.as_str().unwrap();
|
||||
let block_root = id_to_hash(&id.to_string());
|
||||
let weight = map_weight.as_i64().unwrap();
|
||||
// we assume a validator has a weight of 1 and add an attestation for each unit to
// achieve the correct weight
|
||||
for _ in 0..weight {
|
||||
assert!(
|
||||
current_validator <= emulated_validators,
|
||||
"Not enough validators to emulate weights"
|
||||
);
|
||||
fork_choice
|
||||
.add_attestation(current_validator as u64, &block_root, &spec)
|
||||
.unwrap();
|
||||
current_validator += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// everything is set up, run the fork choice, using genesis as the head
|
||||
let head = fork_choice
|
||||
.find_head(&genesis_hash.unwrap(), &spec)
|
||||
.unwrap();
|
||||
|
||||
// compare the result to the expected test
|
||||
let success = test_case["heads"]
|
||||
.clone()
|
||||
.into_vec()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.find(|heads| id_to_hash(&heads["id"].as_str().unwrap().to_string()) == head)
|
||||
.is_some();
|
||||
|
||||
println!("Head found: {}", head);
|
||||
assert!(success, "Did not find one of the possible heads");
|
||||
}
|
||||
}
|
||||
|
||||
// loads the test_cases from the supplied yaml file
|
||||
fn load_test_cases_from_yaml(file_path: &str) -> Vec<yaml_rust::Yaml> {
|
||||
// load the yaml
|
||||
let mut file = {
|
||||
let mut file_path_buf = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
|
||||
file_path_buf.push(file_path);
|
||||
File::open(file_path_buf).unwrap()
|
||||
};
|
||||
let mut yaml_str = String::new();
|
||||
file.read_to_string(&mut yaml_str).unwrap();
|
||||
let docs = yaml::YamlLoader::load_from_str(&yaml_str).unwrap();
|
||||
let doc = &docs[0];
|
||||
doc["test_cases"].as_vec().unwrap().clone()
|
||||
}
|
||||
|
||||
fn setup_inital_state<T>(
|
||||
// fork_choice_algo: &ForkChoiceAlgorithm,
|
||||
num_validators: usize
|
||||
) -> (T, Arc<MemoryStore>, Hash256)
|
||||
where
|
||||
T: ForkChoice<MemoryStore>,
|
||||
{
|
||||
let store = Arc::new(MemoryStore::open());
|
||||
|
||||
let fork_choice = ForkChoice::new(store.clone());
|
||||
let spec = MainnetEthSpec::default_spec();
|
||||
|
||||
let mut state_builder: TestingBeaconStateBuilder<MainnetEthSpec> =
|
||||
TestingBeaconStateBuilder::from_single_keypair(num_validators, &Keypair::random(), &spec);
|
||||
state_builder.build_caches(&spec).unwrap();
|
||||
let (state, _keypairs) = state_builder.build();
|
||||
|
||||
let state_root = state.canonical_root();
|
||||
store.put(&state_root, &state).unwrap();
|
||||
|
||||
// return initialised vars
|
||||
(fork_choice, store, state_root)
|
||||
}
|
||||
|
||||
// convert a block_id into a Hash256 -- assume the input is hex-encoded
|
||||
fn id_to_hash(id: &String) -> Hash256 {
|
||||
let bytes = hex::decode(id).expect("Block ID should be hex");
|
||||
|
||||
let len = std::cmp::min(bytes.len(), 32);
|
||||
let mut fixed_bytes = [0u8; 32];
|
||||
for (index, byte) in bytes.iter().take(32).enumerate() {
|
||||
fixed_bytes[32 - len + index] = *byte;
|
||||
}
|
||||
Hash256::from(fixed_bytes)
|
||||
}
|
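The harness above relies on `id_to_hash` right-aligning hex block IDs into a `Hash256`. A worked illustration follows; it is a sketch only, `demo_id_to_hash` is a hypothetical test name, and the `Hash256::as_bytes` accessor is assumed from the `ethereum-types` version in use.

```rust
// Sketch: "b1" decodes to the single byte 0xb1, which is written to
// fixed_bytes[31]; the 31 leading bytes stay zeroed.
#[test]
fn demo_id_to_hash() {
    let hash = id_to_hash(&"b1".to_string());

    assert_eq!(hash.as_bytes()[31], 0xb1);
    assert!(hash.as_bytes()[..31].iter().all(|byte| *byte == 0));
}
```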
@@ -19,7 +19,6 @@
pub mod bitwise_lmd_ghost;
pub mod longest_chain;
pub mod optimized_lmd_ghost;
pub mod reduced_tree;
pub mod slow_lmd_ghost;
pub mod test_utils;
@@ -1,315 +0,0 @@
|
||||
use std::collections::{BTreeMap, HashMap};
|
||||
use std::ops::Range;
|
||||
use types::Hash256;
|
||||
|
||||
pub const SKIP_LIST_LEN: usize = 16;
|
||||
|
||||
pub type Height = usize;
|
||||
pub type Slot = u64;
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
pub struct Node {
|
||||
pub parent_hash: Option<Hash256>,
|
||||
pub children: Vec<Hash256>,
|
||||
pub score: u64,
|
||||
pub height: Height,
|
||||
pub block_hash: Hash256,
|
||||
}
|
||||
|
||||
impl Node {
|
||||
fn does_not_have_children(&self) -> bool {
|
||||
self.children.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ReducedTree {
|
||||
store: Store,
|
||||
nodes: HashMap<Hash256, Node>,
|
||||
root: Hash256,
|
||||
slots_at_height: SortedList<Slot>,
|
||||
blocks_at_height: HashMap<Height, Vec<Hash256>>,
|
||||
}
|
||||
|
||||
impl ReducedTree {
|
||||
pub fn new(root: Hash256, height: Height) -> Self {
|
||||
let mut node: Node = Node::default();
|
||||
node.height = 0;
|
||||
|
||||
let mut nodes = HashMap::new();
|
||||
nodes.insert(root, Node::default());
|
||||
|
||||
let mut blocks_at_height = HashMap::new();
|
||||
blocks_at_height.insert(height, vec![root]);
|
||||
|
||||
Self {
|
||||
store: Store::default(),
|
||||
nodes,
|
||||
root,
|
||||
slots_at_height: SortedList::new(),
|
||||
blocks_at_height,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_node(&mut self, hash: Hash256, block_hash: Hash256) -> Option<()> {
|
||||
// TODO: resolve clone.
|
||||
let mut prev_in_tree = self
|
||||
.find_prev_in_tree(hash, 0..self.slots_at_height.len())?
|
||||
.clone();
|
||||
|
||||
let mut node = Node {
|
||||
block_hash,
|
||||
parent_hash: Some(prev_in_tree.block_hash),
|
||||
..Node::default()
|
||||
};
|
||||
|
||||
if prev_in_tree.does_not_have_children() {
|
||||
node.parent_hash = Some(prev_in_tree.block_hash);
|
||||
prev_in_tree.children.push(hash);
|
||||
} else {
|
||||
for child_hash in prev_in_tree.children {
|
||||
let ancestor_hash = self.find_least_common_ancestor(hash, child_hash)?;
|
||||
if ancestor_hash != prev_in_tree.block_hash {
|
||||
let child = self.nodes.get_mut(&child_hash)?;
|
||||
let common_ancestor = Node {
|
||||
block_hash: ancestor_hash,
|
||||
parent_hash: Some(prev_in_tree.block_hash),
|
||||
..Node::default()
|
||||
};
|
||||
child.parent_hash = Some(common_ancestor.block_hash);
|
||||
node.parent_hash = Some(common_ancestor.block_hash);
|
||||
|
||||
self.nodes
|
||||
.insert(common_ancestor.block_hash, common_ancestor);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.nodes.insert(hash, node);
|
||||
|
||||
Some(())
|
||||
}
|
||||
|
||||
fn find_prev_in_tree(&mut self, hash: Hash256, range: Range<Height>) -> Option<&mut Node> {
|
||||
if range.len() == 0 || range.end > self.slots_at_height.len() {
|
||||
None
|
||||
} else {
|
||||
let mid_height = range.len() / 2;
|
||||
let mid_slot = self.slot_at_height(mid_height)?;
|
||||
let mid_ancestor = self.find_ancestor_at_slot(hash, mid_slot)?;
|
||||
|
||||
if self.exists_above_height(hash, mid_height)? {
|
||||
if self.exists_between_heights(hash, mid_height..mid_height + 1)? {
|
||||
self.nodes.get_mut(&mid_ancestor)
|
||||
} else {
|
||||
self.find_prev_in_tree(hash, mid_height..range.end)
|
||||
}
|
||||
} else {
|
||||
self.find_prev_in_tree(hash, range.start..mid_height)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn exists_above_height(&self, hash: Hash256, height: Height) -> Option<bool> {
|
||||
let ancestor_at_height = self.find_ancestor_at_height(hash, height)?;
|
||||
let blocks_at_height = self.blocks_at_height.get(&height)?;
|
||||
|
||||
Some(blocks_at_height.contains(&ancestor_at_height))
|
||||
}
|
||||
|
||||
fn exists_between_heights(&self, hash: Hash256, range: Range<Height>) -> Option<bool> {
|
||||
let low_blocks = self.blocks_at_height.get(&range.start)?;
|
||||
let high_blocks = self.blocks_at_height.get(&range.end)?;
|
||||
|
||||
let low_ancestor = self.find_ancestor_at_height(hash, range.start)?;
|
||||
let high_ancestor = self.find_ancestor_at_height(hash, range.end)?;
|
||||
|
||||
Some(low_blocks.contains(&low_ancestor) && !high_blocks.contains(&high_ancestor))
|
||||
}
|
||||
|
||||
fn find_ancestor_at_height(&self, child: Hash256, height: Height) -> Option<Hash256> {
|
||||
self.find_ancestor_at_slot(child, self.slot_at_height(height)?)
|
||||
}
|
||||
|
||||
fn find_ancestor_at_slot(&self, child: Hash256, slot: Slot) -> Option<Hash256> {
|
||||
get_ancestor_hash_at_slot(slot, child, &self.store)
|
||||
}
|
||||
|
||||
fn find_least_common_ancestor(&self, a: Hash256, b: Hash256) -> Option<Hash256> {
|
||||
find_least_common_ancestor(a, b, &self.store)
|
||||
}
|
||||
|
||||
fn slot_at_height(&self, height: Height) -> Option<Slot> {
|
||||
self.slots_at_height.nth(height).cloned()
|
||||
}
|
||||
}
|
||||
|
||||
fn get_ancestor_hash_at_slot(slot: Slot, start: Hash256, store: &Store) -> Option<Hash256> {
|
||||
let mut block = store.get(&start)?;
|
||||
|
||||
loop {
|
||||
if slot >= block.slot {
|
||||
break None;
|
||||
} else {
|
||||
let delta = block.slot - slot;
|
||||
|
||||
if delta >= 1 << SKIP_LIST_LEN as u64 {
|
||||
block = store.get(&block.ancestor_skip_list[SKIP_LIST_LEN - 1])?;
|
||||
} else if delta.is_power_of_two() {
|
||||
break Some(block.ancestor_skip_list[delta.trailing_zeros() as usize]);
|
||||
} else {
|
||||
let i = delta.next_power_of_two().trailing_zeros().saturating_sub(1);
|
||||
block = store.get(&block.ancestor_skip_list[i as usize])?;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn find_least_common_ancestor(a_root: Hash256, b_root: Hash256, store: &Store) -> Option<Hash256> {
|
||||
let mut a = store.get(&a_root)?;
|
||||
let mut b = store.get(&b_root)?;
|
||||
|
||||
if a.slot > b.slot {
|
||||
a = store.get(&get_ancestor_hash_at_slot(b.slot, a_root, store)?)?;
|
||||
} else if b.slot > a.slot {
|
||||
b = store.get(&get_ancestor_hash_at_slot(a.slot, b_root, store)?)?;
|
||||
}
|
||||
|
||||
loop {
|
||||
if a.ancestor_skip_list[0] == b.ancestor_skip_list[0] {
|
||||
break Some(a.ancestor_skip_list[0]);
|
||||
} else if a.slot == 0 || b.slot == 0 {
|
||||
break None;
|
||||
} else {
|
||||
a = store.get(&a.ancestor_skip_list[0])?;
|
||||
b = store.get(&b.ancestor_skip_list[0])?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default, Clone, Debug)]
|
||||
pub struct Block {
|
||||
pub slot: Slot,
|
||||
ancestor_skip_list: [Hash256; SKIP_LIST_LEN],
|
||||
}
|
||||
|
||||
pub type Store = HashMap<Hash256, Block>;
|
||||
|
||||
pub struct SortedList<K>(BTreeMap<K, ()>);
|
||||
|
||||
impl<K: Ord> SortedList<K> {
|
||||
pub fn new() -> Self {
|
||||
SortedList(BTreeMap::new())
|
||||
}
|
||||
|
||||
pub fn insert(&mut self, key: K) {
|
||||
self.0.insert(key, ());
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.0.len()
|
||||
}
|
||||
|
||||
pub fn nth(&self, n: usize) -> Option<&K> {
|
||||
self.0.iter().nth(n).and_then(|(k, _v)| Some(k))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn new() {
|
||||
let genesis_root = Hash256::random();
|
||||
let genesis_slot = 0;
|
||||
|
||||
let _t = Tree::new(genesis_root, genesis_slot);
|
||||
}
|
||||
|
||||
/// Creates a new "hash" from the `u64`.
|
||||
///
|
||||
/// Does not _actually_ perform a hash, just generates bytes that are some serialization of
/// the `u64`.
|
||||
fn get_hash(i: u64) -> Hash256 {
|
||||
Hash256::from_low_u64_le(i)
|
||||
}
|
||||
|
||||
fn hash_to_u64(hash: Hash256) -> u64 {
|
||||
hash.to_low_u64_le()
|
||||
}
|
||||
|
||||
fn store_chain(store: &mut Store, roots: &[Hash256], slots: &[Slot]) {
|
||||
for i in 0..roots.len() {
|
||||
let mut block = Block::default();
|
||||
block.slot = slots[i];
|
||||
|
||||
// Build the skip list.
|
||||
for j in 0..SKIP_LIST_LEN {
|
||||
let skip = 2_usize.pow(j as u32);
|
||||
block.ancestor_skip_list[j as usize] = roots[i.saturating_sub(skip)];
|
||||
}
|
||||
|
||||
store.insert(roots[i as usize], block);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn common_ancestor() {
|
||||
let common_chain_len = (2_u64 << SKIP_LIST_LEN) - 3;
|
||||
let forked_blocks = 2_u64 << SKIP_LIST_LEN;
|
||||
|
||||
let common_roots: Vec<Hash256> = (0..common_chain_len).map(get_hash).collect();
|
||||
let common_slots: Vec<Slot> = (0..common_chain_len).collect();
|
||||
|
||||
let mut fork_a_roots = common_roots.clone();
|
||||
fork_a_roots.append(
|
||||
&mut (common_chain_len..common_chain_len + forked_blocks)
|
||||
.map(get_hash)
|
||||
.collect(),
|
||||
);
|
||||
let mut fork_a_slots = common_slots.clone();
|
||||
fork_a_slots.append(&mut (common_chain_len..common_chain_len + forked_blocks).collect());
|
||||
|
||||
let mut fork_b_roots = common_roots.clone();
|
||||
fork_b_roots.append(
|
||||
&mut (common_chain_len..common_chain_len + forked_blocks)
|
||||
.map(|i| get_hash(i * 10))
|
||||
.collect(),
|
||||
);
|
||||
let mut fork_b_slots = common_slots.clone();
|
||||
fork_b_slots.append(&mut (common_chain_len..common_chain_len + forked_blocks).collect());
|
||||
|
||||
let fork_a_head = *fork_a_roots.iter().last().unwrap();
|
||||
let fork_b_head = *fork_b_roots.iter().last().unwrap();
|
||||
|
||||
let mut store = Store::default();
|
||||
store_chain(&mut store, &fork_a_roots, &fork_a_slots);
|
||||
store_chain(&mut store, &fork_b_roots, &fork_b_slots);
|
||||
|
||||
assert_eq!(
|
||||
find_least_common_ancestor(fork_a_head, fork_b_head, &store)
|
||||
.and_then(|i| Some(hash_to_u64(i))),
|
||||
Some(hash_to_u64(*common_roots.iter().last().unwrap()))
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn get_at_slot() {
|
||||
let n = 2_u64.pow(SKIP_LIST_LEN as u32) * 2;
|
||||
let mut store = Store::default();
|
||||
|
||||
let roots: Vec<Hash256> = (0..n).map(get_hash).collect();
|
||||
let slots: Vec<Slot> = (0..n).collect();
|
||||
|
||||
store_chain(&mut store, &roots, &slots);
|
||||
|
||||
for i in 0..n - 1 {
|
||||
let key = roots.last().unwrap();
|
||||
|
||||
assert_eq!(
|
||||
get_ancestor_hash_at_slot(i as u64, *key, &store),
|
||||
Some(get_hash(i as u64))
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
21  eth2/fork_choice_2/Cargo.toml  Normal file
@@ -0,0 +1,21 @@
[package]
name = "fork_choice_2"
version = "0.1.0"
authors = ["Age Manning <Age@AgeManning.com>", "Paul Hauner <paul@sigmaprime.io>"]
edition = "2018"

[dependencies]
store = { path = "../../beacon_node/store" }
ssz = { path = "../utils/ssz" }
types = { path = "../types" }
log = "0.4.6"
bit-vec = "0.5.0"

[dev-dependencies]
criterion = "0.2"
hex = "0.3.2"
yaml-rust = "0.4.2"
bls = { path = "../utils/bls" }
slot_clock = { path = "../utils/slot_clock" }
beacon_chain = { path = "../../beacon_node/beacon_chain" }
env_logger = "0.6.0"
38  eth2/fork_choice_2/src/lib.rs  Normal file
@@ -0,0 +1,38 @@
pub mod reduced_tree;

use std::sync::Arc;
use store::Error as DBError;
use store::Store;
use types::{BeaconBlock, ChainSpec, Hash256, Slot};

type Result<T> = std::result::Result<T, Error>;

#[derive(Debug, PartialEq)]
pub enum Error {
    BackendError(String),
}

pub trait LmdGhostBackend<T> {
    fn new(store: Arc<T>) -> Self;

    fn process_message(
        &mut self,
        validator_index: usize,
        block_hash: Hash256,
        block_slot: Slot,
    ) -> Result<()>;

    fn find_head(&mut self) -> Result<Hash256>;
}

pub struct ForkChoice<T> {
    algorithm: T,
}

impl<T: LmdGhostBackend<T>> ForkChoice<T> {
    fn new(store: Arc<T>) -> Self {
        Self {
            algorithm: T::new(store),
        }
    }
}
590  eth2/fork_choice_2/src/reduced_tree.rs  Normal file
@@ -0,0 +1,590 @@
|
||||
use super::{Error as SuperError, LmdGhostBackend};
|
||||
use std::collections::{BTreeMap, HashMap};
|
||||
use std::marker::PhantomData;
|
||||
use std::sync::Arc;
|
||||
use store::{iter::BlockRootsIterator, Error as StoreError, Store};
|
||||
use types::{BeaconBlock, BeaconState, EthSpec, Hash256, Slot};
|
||||
|
||||
type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
pub const SKIP_LIST_LEN: usize = 16;
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum Error {
|
||||
MissingNode(Hash256),
|
||||
MissingBlock(Hash256),
|
||||
MissingState(Hash256),
|
||||
NotInTree(Hash256),
|
||||
NoCommonAncestor((Hash256, Hash256)),
|
||||
StoreError(StoreError),
|
||||
}
|
||||
|
||||
impl From<StoreError> for Error {
|
||||
fn from(e: StoreError) -> Error {
|
||||
Error::StoreError(e)
|
||||
}
|
||||
}
|
||||
|
||||
pub type Height = usize;
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
pub struct Node {
|
||||
pub parent_hash: Option<Hash256>,
|
||||
pub children: Vec<Hash256>,
|
||||
pub score: u64,
|
||||
pub height: Height,
|
||||
pub block_hash: Hash256,
|
||||
pub voters: Vec<usize>,
|
||||
}
|
||||
|
||||
impl Node {
|
||||
pub fn remove_voter(&mut self, voter: usize) -> Option<usize> {
|
||||
let i = self.voters.iter().position(|&v| v == voter)?;
|
||||
Some(self.voters.remove(i))
|
||||
}
|
||||
|
||||
pub fn add_voter(&mut self, voter: usize) {
|
||||
self.voters.push(voter);
|
||||
}
|
||||
|
||||
pub fn has_votes(&self) -> bool {
|
||||
!self.voters.is_empty()
|
||||
}
|
||||
|
||||
pub fn is_genesis(&self) -> bool {
|
||||
self.parent_hash.is_some()
|
||||
}
|
||||
}
|
||||
|
||||
impl Node {
|
||||
fn does_not_have_children(&self) -> bool {
|
||||
self.children.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct Vote {
|
||||
hash: Hash256,
|
||||
slot: Slot,
|
||||
}
|
||||
|
||||
pub struct ReducedTree<T, E> {
|
||||
store: Arc<T>,
|
||||
nodes: HashMap<Hash256, Node>,
|
||||
slots_at_height: SortedList<Slot>,
|
||||
blocks_at_height: HashMap<Height, Vec<Hash256>>,
|
||||
/// Maps validator indices to their latest votes.
|
||||
latest_votes: ElasticList<Option<Vote>>,
|
||||
_phantom: PhantomData<E>,
|
||||
}
|
||||
|
||||
impl<T, E> LmdGhostBackend<T> for ReducedTree<T, E>
|
||||
where
|
||||
T: Store,
|
||||
E: EthSpec,
|
||||
{
|
||||
fn new(store: Arc<T>) -> Self {
|
||||
Self::new(store)
|
||||
}
|
||||
|
||||
fn process_message(
|
||||
&mut self,
|
||||
validator_index: usize,
|
||||
block_hash: Hash256,
|
||||
block_slot: Slot,
|
||||
) -> std::result::Result<(), SuperError> {
|
||||
self.process_message(validator_index, block_hash, block_slot)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
fn find_head(&mut self) -> std::result::Result<Hash256, SuperError> {
|
||||
unimplemented!();
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Error> for SuperError {
|
||||
fn from(e: Error) -> SuperError {
|
||||
SuperError::BackendError(format!("{:?}", e))
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, E> ReducedTree<T, E>
|
||||
where
|
||||
T: Store,
|
||||
E: EthSpec,
|
||||
{
|
||||
pub fn new(store: Arc<T>) -> Self {
|
||||
Self {
|
||||
store,
|
||||
nodes: HashMap::new(),
|
||||
slots_at_height: SortedList::new(),
|
||||
blocks_at_height: HashMap::new(),
|
||||
latest_votes: ElasticList::default(),
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn process_message(
|
||||
&mut self,
|
||||
validator_index: usize,
|
||||
block_hash: Hash256,
|
||||
slot: Slot,
|
||||
) -> Result<()> {
|
||||
if let Some(previous_vote) = self.latest_votes.get(validator_index) {
|
||||
if previous_vote.slot > slot {
|
||||
// Given vote is earlier than known vote, nothing to do.
|
||||
return Ok(());
|
||||
} else if previous_vote.slot == slot && previous_vote.hash == block_hash {
|
||||
// Given vote is identical to known vote, nothing to do.
|
||||
return Ok(());
|
||||
} else if previous_vote.slot == slot && previous_vote.hash != block_hash {
|
||||
// Vote is an equivocation (double-vote), ignore it.
|
||||
//
|
||||
// TODO: flag this as slashable.
|
||||
return Ok(());
|
||||
} else {
|
||||
// Given vote is newer than or different from the current vote, replace the current vote.
|
||||
self.remove_latest_message(validator_index)?;
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: add new vote.
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn remove_latest_message(&mut self, validator_index: usize) -> Result<()> {
|
||||
if let Some(vote) = self.latest_votes.get(validator_index) {
|
||||
let should_delete = {
|
||||
let node = self.get_mut_node(vote.hash)?;
|
||||
|
||||
node.remove_voter(validator_index);
|
||||
|
||||
if let Some(parent_hash) = node.parent_hash {
|
||||
if node.has_votes() {
|
||||
// A node with votes is never removed.
|
||||
false
|
||||
} else if node.children.len() > 1 {
|
||||
// A node with more than one child is never removed.
|
||||
false
|
||||
} else if node.children.len() == 1 {
|
||||
// A node which has only one child may be removed.
|
||||
//
|
||||
// Load the child of the node and set its parent to be the parent of this
// node (viz., graft the node's child to the node's parent).
|
||||
let child = self
|
||||
.nodes
|
||||
.get_mut(&node.children[0])
|
||||
.ok_or_else(|| Error::MissingNode(node.children[0]))?;
|
||||
|
||||
child.parent_hash = node.parent_hash;
|
||||
|
||||
true
|
||||
} else if node.children.len() == 0 {
|
||||
// A node which has no children may be deleted, and potentially its parent
// too.
|
||||
self.maybe_delete_node(parent_hash)?;
|
||||
|
||||
true
|
||||
} else {
|
||||
// It is impossible for a node to have a number of children that is not 0, 1 or
|
||||
// greater than one.
|
||||
//
|
||||
// This code is strictly unnecessary; however, we keep it for readability.
|
||||
unreachable!();
|
||||
}
|
||||
} else {
|
||||
// A node without a parent is the genesis/finalized node and should never be removed.
|
||||
false
|
||||
}
|
||||
};
|
||||
|
||||
if should_delete {
|
||||
self.nodes.remove(&vote.hash);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn maybe_delete_node(&mut self, hash: Hash256) -> Result<()> {
|
||||
let should_delete = {
|
||||
let node = self.get_node(hash)?;
|
||||
|
||||
if let Some(parent_hash) = node.parent_hash {
|
||||
if (node.children.len() == 1) && !node.has_votes() {
|
||||
let child_node = self.get_mut_node(node.children[0])?;
|
||||
|
||||
child_node.parent_hash = node.parent_hash;
|
||||
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
} else {
|
||||
// A node without a parent is the genesis node and should not be deleted.
|
||||
false
|
||||
}
|
||||
};
|
||||
|
||||
if should_delete {
|
||||
self.nodes.remove(&hash);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn add_latest_message(&mut self, validator_index: usize, hash: Hash256) -> Result<()> {
|
||||
if let Ok(node) = self.get_mut_node(hash) {
|
||||
node.add_voter(validator_index);
|
||||
} else {
|
||||
self.add_node(hash, vec![validator_index])?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn add_node(&mut self, hash: Hash256, voters: Vec<usize>) -> Result<()> {
|
||||
// Find the highest (by slot) ancestor of the given hash/block that is in the reduced tree.
|
||||
let mut prev_in_tree = {
|
||||
let hash = self
|
||||
.find_prev_in_tree(hash)
|
||||
.ok_or_else(|| Error::NotInTree(hash))?;
|
||||
self.get_mut_node(hash)?.clone()
|
||||
};
|
||||
|
||||
let mut node = Node {
|
||||
block_hash: hash,
|
||||
parent_hash: Some(prev_in_tree.block_hash),
|
||||
voters,
|
||||
..Node::default()
|
||||
};
|
||||
|
||||
if prev_in_tree.does_not_have_children() {
|
||||
node.parent_hash = Some(prev_in_tree.block_hash);
|
||||
prev_in_tree.children.push(hash);
|
||||
} else {
|
||||
for &child_hash in &prev_in_tree.children {
|
||||
let ancestor_hash = self.find_least_common_ancestor(hash, child_hash)?;
|
||||
if ancestor_hash != prev_in_tree.block_hash {
|
||||
let child = self.get_mut_node(child_hash)?;
|
||||
let common_ancestor = Node {
|
||||
block_hash: ancestor_hash,
|
||||
parent_hash: Some(prev_in_tree.block_hash),
|
||||
..Node::default()
|
||||
};
|
||||
child.parent_hash = Some(common_ancestor.block_hash);
|
||||
node.parent_hash = Some(common_ancestor.block_hash);
|
||||
|
||||
self.nodes
|
||||
.insert(common_ancestor.block_hash, common_ancestor);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Update `prev_in_tree`. A mutable reference was not maintained to satisfy the borrow
|
||||
// checker.
|
||||
//
|
||||
// This is not an ideal solution and results in unnecessary memory copies -- a better
|
||||
// solution is certainly possible.
|
||||
self.nodes.insert(prev_in_tree.block_hash, prev_in_tree);
|
||||
self.nodes.insert(hash, node);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// For the given block `hash`, find its highest (by slot) ancestor that exists in the reduced
/// tree.
|
||||
fn find_prev_in_tree(&mut self, hash: Hash256) -> Option<Hash256> {
|
||||
self.iter_ancestors(hash)
|
||||
.ok()?
|
||||
.find(|(root, _slit)| self.get_node(*root).is_ok())
|
||||
.and_then(|(root, _slot)| Some(root))
|
||||
}
|
||||
|
||||
/// For the given `child` block hash, return the block's ancestor at the given `target` slot.
|
||||
fn find_ancestor_at_slot(&self, child: Hash256, target: Slot) -> Result<Hash256> {
|
||||
let (root, slot) = self
|
||||
.iter_ancestors(child)?
|
||||
.find(|(_block, slot)| *slot <= target)
|
||||
.ok_or_else(|| Error::NotInTree(child))?;
|
||||
|
||||
// Explicitly check that the slot is the target in the case that the given child has a slot
|
||||
// above target.
|
||||
if slot == target {
|
||||
Ok(root)
|
||||
} else {
|
||||
Err(Error::NotInTree(child))
|
||||
}
|
||||
}
|
||||
|
||||
/// For the two given block roots (`a_root` and `b_root`), find the first block they share in
|
||||
/// the tree. Viz, find the block that these two distinct blocks forked from.
|
||||
fn find_least_common_ancestor(&self, a_root: Hash256, b_root: Hash256) -> Result<Hash256> {
|
||||
// If the blocks behind `a_root` and `b_root` are not at the same slot, take the highest
|
||||
// block (by slot) down to be equal with the lower slot.
|
||||
//
|
||||
// The result is two roots which identify two blocks at the same height.
|
||||
let (a_root, b_root) = {
|
||||
let a = self.get_block(a_root)?;
|
||||
let b = self.get_block(b_root)?;
|
||||
|
||||
if a.slot > b.slot {
|
||||
(self.find_ancestor_at_slot(a_root, b.slot)?, b_root)
|
||||
} else if b.slot > a.slot {
|
||||
(a_root, self.find_ancestor_at_slot(b_root, a.slot)?)
|
||||
} else {
|
||||
(a_root, b_root)
|
||||
}
|
||||
};
|
||||
|
||||
let ((a_root, _a_slot), (_b_root, _b_slot)) = self
|
||||
.iter_ancestors(a_root)?
|
||||
.zip(self.iter_ancestors(b_root)?)
|
||||
.find(|((a_root, _), (b_root, _))| a_root == b_root)
|
||||
.ok_or_else(|| Error::NoCommonAncestor((a_root, b_root)))?;
|
||||
|
||||
Ok(a_root)
|
||||
}
|
||||
|
||||
fn iter_ancestors(&self, child: Hash256) -> Result<BlockRootsIterator<E, T>> {
|
||||
let block = self.get_block(child)?;
|
||||
let state = self.get_state(block.state_root)?;
|
||||
|
||||
Ok(BlockRootsIterator::new(
|
||||
self.store.clone(),
|
||||
state,
|
||||
block.slot,
|
||||
))
|
||||
}
|
||||
|
||||
fn get_node(&self, hash: Hash256) -> Result<&Node> {
|
||||
self.nodes
|
||||
.get(&hash)
|
||||
.ok_or_else(|| Error::MissingNode(hash))
|
||||
}
|
||||
|
||||
fn get_mut_node(&mut self, hash: Hash256) -> Result<&mut Node> {
|
||||
self.nodes
|
||||
.get_mut(&hash)
|
||||
.ok_or_else(|| Error::MissingNode(hash))
|
||||
}
|
||||
|
||||
fn get_block(&self, block_root: Hash256) -> Result<BeaconBlock> {
|
||||
self.store
|
||||
.get::<BeaconBlock>(&block_root)?
|
||||
.ok_or_else(|| Error::MissingBlock(block_root))
|
||||
}
|
||||
|
||||
fn get_state(&self, state_root: Hash256) -> Result<BeaconState<E>> {
|
||||
self.store
|
||||
.get::<BeaconState<E>>(&state_root)?
|
||||
.ok_or_else(|| Error::MissingState(state_root))
|
||||
}
|
||||
|
||||
/*
|
||||
fn exists_above_height(&self, hash: Hash256, height: Height) -> Option<bool> {
|
||||
let ancestor_at_height = self.find_ancestor_at_height(hash, height)?;
|
||||
let blocks_at_height = self.blocks_at_height.get(&height)?;
|
||||
|
||||
Some(blocks_at_height.contains(&ancestor_at_height))
|
||||
}
|
||||
|
||||
fn exists_between_heights(&self, hash: Hash256, range: Range<Height>) -> Option<bool> {
|
||||
let low_blocks = self.blocks_at_height.get(&range.start)?;
|
||||
let high_blocks = self.blocks_at_height.get(&range.end)?;
|
||||
|
||||
let low_ancestor = self.find_ancestor_at_height(hash, range.start)?;
|
||||
let high_ancestor = self.find_ancestor_at_height(hash, range.end)?;
|
||||
|
||||
Some(low_blocks.contains(&low_ancestor) && !high_blocks.contains(&high_ancestor))
|
||||
}
|
||||
|
||||
fn find_ancestor_at_height(&self, child: Hash256, height: Height) -> Option<Hash256> {
|
||||
self.find_ancestor_at_slot(child, self.slot_at_height(height)?)
|
||||
}
|
||||
|
||||
fn slot_at_height(&self, height: Height) -> Option<Slot> {
|
||||
self.slots_at_height.nth(height).cloned()
|
||||
}
|
||||
*/
|
||||
}
|
||||
|
||||
pub struct SortedList<K>(BTreeMap<K, ()>);
|
||||
|
||||
impl<K: Ord> SortedList<K> {
|
||||
pub fn new() -> Self {
|
||||
SortedList(BTreeMap::new())
|
||||
}
|
||||
|
||||
pub fn insert(&mut self, key: K) {
|
||||
self.0.insert(key, ());
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.0.len()
|
||||
}
|
||||
|
||||
pub fn nth(&self, n: usize) -> Option<&K> {
|
||||
self.0.iter().nth(n).and_then(|(k, _v)| Some(k))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
pub struct ElasticList<T>(Vec<T>);
|
||||
|
||||
impl<T> ElasticList<T>
|
||||
where
|
||||
T: Default,
|
||||
{
|
||||
fn ensure(&mut self, i: usize) {
|
||||
if self.0.len() <= i {
|
||||
self.0.resize_with(i + 1, Default::default);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get(&mut self, i: usize) -> &T {
|
||||
self.ensure(i);
|
||||
&self.0[i]
|
||||
}
|
||||
|
||||
pub fn get_mut(&mut self, i: usize) -> &mut T {
|
||||
self.ensure(i);
|
||||
&mut self.0[i]
|
||||
}
|
||||
|
||||
pub fn insert(&mut self, i: usize, element: T) {
|
||||
self.ensure(i);
|
||||
self.0[i] = element;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
#[derive(Default, Clone, Debug)]
|
||||
pub struct Block {
|
||||
pub slot: Slot,
|
||||
ancestor_skip_list: [Hash256; SKIP_LIST_LEN],
|
||||
}
|
||||
|
||||
pub type Store = HashMap<Hash256, Block>;
|
||||
|
||||
pub struct SortedList<K>(BTreeMap<K, ()>);
|
||||
|
||||
impl<K: Ord> SortedList<K> {
|
||||
pub fn new() -> Self {
|
||||
SortedList(BTreeMap::new())
|
||||
}
|
||||
|
||||
pub fn insert(&mut self, key: K) {
|
||||
self.0.insert(key, ());
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.0.len()
|
||||
}
|
||||
|
||||
pub fn nth(&self, n: usize) -> Option<&K> {
|
||||
self.0.iter().nth(n).and_then(|(k, _v)| Some(k))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn new() {
|
||||
let genesis_root = Hash256::random();
|
||||
let genesis_slot = 0;
|
||||
|
||||
let _t = Tree::new(genesis_root, genesis_slot);
|
||||
}
|
||||
|
||||
/// Creates a new "hash" from the `u64`.
|
||||
///
|
||||
/// Does not _actually_ perform a hash, just generates bytes that are some serialization of the
|
||||
/// the `u64`.
|
||||
fn get_hash(i: u64) -> Hash256 {
|
||||
Hash256::from_low_u64_le(i)
|
||||
}
|
||||
|
||||
fn hash_to_u64(hash: Hash256) -> u64 {
|
||||
hash.to_low_u64_le()
|
||||
}
|
||||
|
||||
fn store_chain(store: &mut Store, roots: &[Hash256], slots: &[Slot]) {
|
||||
for i in 0..roots.len() {
|
||||
let mut block = Block::default();
|
||||
block.slot = slots[i];
|
||||
|
||||
// Build the skip list.
|
||||
for j in 0..SKIP_LIST_LEN {
|
||||
let skip = 2_usize.pow(j as u32);
|
||||
block.ancestor_skip_list[j as usize] = roots[i.saturating_sub(skip)];
|
||||
}
|
||||
|
||||
store.insert(roots[i as usize], block);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn common_ancestor() {
|
||||
let common_chain_len = (2_u64 << SKIP_LIST_LEN) - 3;
|
||||
let forked_blocks = 2_u64 << SKIP_LIST_LEN;
|
||||
|
||||
let common_roots: Vec<Hash256> = (0..common_chain_len).map(get_hash).collect();
|
||||
let common_slots: Vec<Slot> = (0..common_chain_len).collect();
|
||||
|
||||
let mut fork_a_roots = common_roots.clone();
|
||||
fork_a_roots.append(
|
||||
&mut (common_chain_len..common_chain_len + forked_blocks)
|
||||
.map(get_hash)
|
||||
.collect(),
|
||||
);
|
||||
let mut fork_a_slots = common_slots.clone();
|
||||
fork_a_slots.append(&mut (common_chain_len..common_chain_len + forked_blocks).collect());
|
||||
|
||||
let mut fork_b_roots = common_roots.clone();
|
||||
fork_b_roots.append(
|
||||
&mut (common_chain_len..common_chain_len + forked_blocks)
|
||||
.map(|i| get_hash(i * 10))
|
||||
.collect(),
|
||||
);
|
||||
let mut fork_b_slots = common_slots.clone();
|
||||
fork_b_slots.append(&mut (common_chain_len..common_chain_len + forked_blocks).collect());
|
||||
|
||||
let fork_a_head = *fork_a_roots.iter().last().unwrap();
|
||||
let fork_b_head = *fork_b_roots.iter().last().unwrap();
|
||||
|
||||
let mut store = Store::default();
|
||||
store_chain(&mut store, &fork_a_roots, &fork_a_slots);
|
||||
store_chain(&mut store, &fork_b_roots, &fork_b_slots);
|
||||
|
||||
assert_eq!(
|
||||
find_least_common_ancestor(fork_a_head, fork_b_head, &store)
|
||||
.and_then(|i| Some(hash_to_u64(i))),
|
||||
Some(hash_to_u64(*common_roots.iter().last().unwrap()))
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn get_at_slot() {
|
||||
let n = 2_u64.pow(SKIP_LIST_LEN as u32) * 2;
|
||||
let mut store = Store::default();
|
||||
|
||||
let roots: Vec<Hash256> = (0..n).map(get_hash).collect();
|
||||
let slots: Vec<Slot> = (0..n).collect();
|
||||
|
||||
store_chain(&mut store, &roots, &slots);
|
||||
|
||||
for i in 0..n - 1 {
|
||||
let key = roots.last().unwrap();
|
||||
|
||||
assert_eq!(
|
||||
get_ancestor_hash_at_slot(i as u64, *key, &store),
|
||||
Some(get_hash(i as u64))
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
*/
|
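One small building block in the file above is `ElasticList`, which backs `latest_votes` so validator indices can be used directly without bounds checks. A minimal sketch of its grow-on-access behaviour follows, assuming the type remains public as `fork_choice_2::reduced_tree::ElasticList`.

```rust
// Sketch only: ElasticList resizes its backing Vec with default values
// whenever an index beyond the current length is touched.
use fork_choice_2::reduced_tree::ElasticList;

fn main() {
    let mut latest: ElasticList<Option<u64>> = ElasticList::default();

    // Reading index 7 grows the list to 8 `None`s and returns the default.
    assert!(latest.get(7).is_none());

    // Writing past the current end grows first, then overwrites.
    latest.insert(12, Some(42));
    assert_eq!(*latest.get(12), Some(42));
}
```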