Add proto_array fork choice (#804)

* Start implementing proto_array

* Add progress

* Add unfinished progress

* Add further progress

* Add progress

* Add tree filtering

* Add half-finished modifications

* Add refactored version

* Tidy, add incomplete LmdGhost impl

* Move impls in LmdGhost trait def

* Remove old reduced_tree fork choice

* Combine two functions in to `compute_deltas`

* Start testing

* Add more compute_deltas tests

* Add fork choice testing

* Add more fork choice testing

* Add more fork choice tests

* Add more testing to proto-array

* Remove old tests

* Modify tests

* Add more tests

* Add more testing

* Add comments and fixes

* Re-organise crate

* Tidy, finish pruning tests

* Add ssz encoding, other pub fns

* Rename lmd_ghost > proto_array_fork_choice

* Integrate proto_array into lighthouse

* Add first pass at fixing filter

* Clean out old comments

* Add more comments

* Attempt to fix prune error

* Adjust TODO

* Fix test compile errors

* Add extra justification change check

* Update cargo.lock

* Fix fork choice test compile errors

* Mostly remove ffg_update_required

* Fix bug with epoch of attestation votes

* Start adding new test format

* Make fork choice tests declarative

* Create test def concept

* Move test defs into crate

* Add binary, re-org crate

* Shuffle files

* Start adding ffg tests

* Add more fork choice tests

* Add fork choice JSON dumping

* Add more detail to best node error

* Ensure fin+just checkpoints are from the same block

* Rename JustificationManager

* Move checkpoint manager into own file

* Tidy

* Add targeted logging for sneaky sync bug

* Fix justified balances bug

* Add cache metrics

* Add metrics for log levels

* Fix bug in checkpoint manager

* Fix compile error in fork choice tests

* Ignore duplicate blocks in fork choice

* Add block to fork choice before db

* Rename on_new_block fn

* Fix spec inconsistency in `CheckpointManager`

* Remove BlockRootTree

* Remove old reduced_tree code fragment

* Add API endpoint for fork choice

* Add more ffg tests

* Remove block_root_tree remnants

* Ensure effective balances are used

* Remove old debugging code, fix API fault

* Add check to ensure parent block is in fork choice

* Update readme dates

* Fix readme

* Tidy checkpoint manager

* Remove fork choice yaml files from repo

* Remove fork choice yaml from repo

* General tidy

* Address majority of Michael's comments

* Tidy bin/lib business

* Remove dangling file

* Undo changes for rpc/handler from master

* Revert "Undo changes for rpc/handler from master"

This reverts commit 876edff0e4a501aafbb47113454852826dcc24e8.

Co-authored-by: Age Manning <Age@AgeManning.com>
This commit is contained in:
Paul Hauner 2020-01-29 15:05:00 +11:00 committed by GitHub
parent cd401147ea
commit b771bbb60c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
48 changed files with 3469 additions and 2332 deletions

42
Cargo.lock generated
View File

@ -215,11 +215,11 @@ dependencies = [
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"lighthouse_bootstrap 0.1.0",
"lighthouse_metrics 0.1.0",
"lmd_ghost 0.1.0",
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"merkle_proof 0.1.0",
"operation_pool 0.1.0",
"parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"proto_array_fork_choice 0.1.0",
"rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
"rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)",
@ -515,7 +515,6 @@ dependencies = [
"futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
"genesis 0.1.0",
"lighthouse_bootstrap 0.1.0",
"lmd_ghost 0.1.0",
"network 0.1.0",
"parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"prometheus 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
@ -2395,27 +2394,6 @@ name = "linked-hash-map"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "lmd_ghost"
version = "0.1.0"
dependencies = [
"beacon_chain 0.1.0",
"bls 0.1.0",
"criterion 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
"eth2_ssz 0.1.2",
"eth2_ssz_derive 0.1.0",
"hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"itertools 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
"slot_clock 0.1.0",
"store 0.1.0",
"types 0.1.0",
"yaml-rust 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "lock_api"
version = "0.1.5"
@ -2461,6 +2439,8 @@ dependencies = [
name = "logging"
version = "0.1.0"
dependencies = [
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"lighthouse_metrics 0.1.0",
"slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
"slog-term 2.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -3026,6 +3006,20 @@ dependencies = [
"spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "proto_array_fork_choice"
version = "0.1.0"
dependencies = [
"eth2_ssz 0.1.2",
"eth2_ssz_derive 0.1.0",
"itertools 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_yaml 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)",
"types 0.1.0",
]
[[package]]
name = "protobuf"
version = "2.8.1"
@ -3354,6 +3348,7 @@ dependencies = [
"eth2_ssz 0.1.2",
"futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
"hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"proto_array_fork_choice 0.1.0",
"reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)",
"rest_api 0.1.0",
"serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)",
@ -3933,7 +3928,6 @@ dependencies = [
"integer-sqrt 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"itertools 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"lmd_ghost 0.1.0",
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"merkle_proof 0.1.0",
"rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",

View File

@ -1,6 +1,6 @@
[workspace]
members = [
"eth2/lmd_ghost",
"eth2/proto_array_fork_choice",
"eth2/operation_pool",
"eth2/state_processing",
"eth2/types",

View File

@ -49,11 +49,10 @@ Current development overview:
- ~~**April 2019**: Initial single-client testnets.~~
- ~~**September 2019**: Inter-operability with other Ethereum 2.0 clients.~~
- **Q4 2019**: `lighthouse-0.0.1` release: All major phase 0
features implemented.
- **Q4 2019**: Public, multi-client testnet with user-facing functionality.
- **Q4 2019**: Third-party security review.
- **Q1 2020**: Production Beacon Chain testnet (tentative).
- ~~ **Q1 2020**: `lighthouse-0.1.0` release: All major phase 0 features implemented.~~
- **Q1 2020**: Public, multi-client testnet with user-facing functionality.
- **Q2 2020**: Third-party security review.
- **Q3 2020**: Production Beacon Chain testnet (tentative).
## Documentation

View File

@ -33,7 +33,6 @@ eth2_ssz_derive = "0.1.0"
state_processing = { path = "../../eth2/state_processing" }
tree_hash = "0.1.0"
types = { path = "../../eth2/types" }
lmd_ghost = { path = "../../eth2/lmd_ghost" }
eth1 = { path = "../eth1" }
websocket_server = { path = "../websocket_server" }
futures = "0.1.25"
@ -41,6 +40,7 @@ exit-future = "0.1.3"
genesis = { path = "../genesis" }
integer-sqrt = "0.1"
rand = "0.7.2"
proto_array_fork_choice = { path = "../../eth2/proto_array_fork_choice" }
[dev-dependencies]
tempfile = "3.1.0"

View File

@ -8,7 +8,6 @@ use crate::head_tracker::HeadTracker;
use crate::metrics;
use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY};
use crate::timeout_rw_lock::TimeoutRwLock;
use lmd_ghost::LmdGhost;
use operation_pool::{OperationPool, PersistedOperationPool};
use slog::{debug, error, info, trace, warn, Logger};
use slot_clock::SlotClock;
@ -32,7 +31,7 @@ use std::time::{Duration, Instant};
use store::iter::{
BlockRootsIterator, ReverseBlockRootIterator, ReverseStateRootIterator, StateRootsIterator,
};
use store::{BlockRootTree, Error as DBError, Migrate, Store};
use store::{Error as DBError, Migrate, Store};
use tree_hash::TreeHash;
use types::*;
@ -59,8 +58,11 @@ const HEAD_LOCK_TIMEOUT: Duration = Duration::from_secs(1);
pub enum BlockProcessingOutcome {
/// Block was valid and imported into the block graph.
Processed { block_root: Hash256 },
/// The blocks parent_root is unknown.
ParentUnknown { parent: Hash256 },
/// The parent block was unknown.
ParentUnknown {
parent: Hash256,
reference_location: &'static str,
},
/// The block slot is greater than the present slot.
FutureSlot {
present_slot: Slot,
@ -116,7 +118,6 @@ pub trait BeaconChainTypes: Send + Sync + 'static {
type Store: store::Store<Self::EthSpec>;
type StoreMigrator: store::Migrate<Self::Store, Self::EthSpec>;
type SlotClock: slot_clock::SlotClock;
type LmdGhost: LmdGhost<Self::Store, Self::EthSpec>;
type Eth1Chain: Eth1ChainBackend<Self::EthSpec, Self::Store>;
type EthSpec: types::EthSpec;
type EventHandler: EventHandler<Self::EthSpec>;
@ -150,8 +151,6 @@ pub struct BeaconChain<T: BeaconChainTypes> {
pub(crate) head_tracker: HeadTracker,
/// Provides a small cache of `BeaconState` and `BeaconBlock`.
pub(crate) checkpoint_cache: CheckPointCache<T::EthSpec>,
/// Cache of block roots for all known forks post-finalization.
pub block_root_tree: Arc<BlockRootTree>,
/// Logging to CLI, etc.
pub(crate) log: Logger,
}
@ -192,7 +191,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
ssz_head_tracker: self.head_tracker.to_ssz_container(),
fork_choice: self.fork_choice.as_ssz_container(),
eth1_cache: self.eth1_chain.as_ref().map(|x| x.as_ssz_container()),
block_root_tree: self.block_root_tree.as_ssz_container(),
};
let key = Hash256::from_slice(&BEACON_CHAIN_DB_KEY.as_bytes());
@ -1063,14 +1061,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
{
// Provide the attestation to fork choice, updating the validator latest messages but
// _without_ finding and updating the head.
if let Err(e) = self
.fork_choice
.process_attestation(&state, &attestation, block)
{
if let Err(e) = self.fork_choice.process_attestation(&state, &attestation) {
error!(
self.log,
"Add attestation to fork choice failed";
"fork_choice_integrity" => format!("{:?}", self.fork_choice.verify_integrity()),
"beacon_block_root" => format!("{}", attestation.data.beacon_block_root),
"error" => format!("{:?}", e)
);
@ -1232,6 +1226,23 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
});
}
// Reject any block if its parent is not known to fork choice.
//
// A block that is not in fork choice is either:
//
// - Not yet imported: we should reject this block because we should only import a child
// after its parent has been fully imported.
// - Pre-finalized: if the parent block is _prior_ to finalization, we should ignore it
// because it will revert finalization. Note that the finalized block is stored in fork
// choice, so we will not reject any child of the finalized block (this is relevant during
// genesis).
if !self.fork_choice.contains_block(&block.parent_root) {
return Ok(BlockProcessingOutcome::ParentUnknown {
parent: block.parent_root,
reference_location: "fork_choice",
});
}
let block_root_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_BLOCK_ROOT);
let block_root = block.canonical_root();
@ -1252,8 +1263,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
}
// Check if the block is already known. We know it is post-finalization, so it is
// sufficient to check the block root tree.
if self.block_root_tree.is_known_block_root(&block_root) {
// sufficient to check the fork choice.
if self.fork_choice.contains_block(&block_root) {
return Ok(BlockProcessingOutcome::BlockIsAlreadyKnown);
}
@ -1269,6 +1280,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
None => {
return Ok(BlockProcessingOutcome::ParentUnknown {
parent: block.parent_root,
reference_location: "database",
});
}
};
@ -1363,6 +1375,24 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
});
}
let fork_choice_register_timer =
metrics::start_timer(&metrics::BLOCK_PROCESSING_FORK_CHOICE_REGISTER);
// Register the new block with the fork choice service.
if let Err(e) = self
.fork_choice
.process_block(self, &state, &block, block_root)
{
error!(
self.log,
"Add block to fork choice failed";
"block_root" => format!("{}", block_root),
"error" => format!("{:?}", e),
)
}
metrics::stop_timer(fork_choice_register_timer);
let db_write_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_WRITE);
// Store all the states between the parent block state and this blocks slot before storing
@ -1392,30 +1422,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
metrics::stop_timer(db_write_timer);
self.block_root_tree
.add_block_root(block_root, block.parent_root, block.slot)?;
self.head_tracker.register_block(block_root, &block);
let fork_choice_register_timer =
metrics::start_timer(&metrics::BLOCK_PROCESSING_FORK_CHOICE_REGISTER);
// Register the new block with the fork choice service.
if let Err(e) = self
.fork_choice
.process_block(self, &state, &block, block_root)
{
error!(
self.log,
"Add block to fork choice failed";
"fork_choice_integrity" => format!("{:?}", self.fork_choice.verify_integrity()),
"block_root" => format!("{}", block_root),
"error" => format!("{:?}", e),
)
}
metrics::stop_timer(fork_choice_register_timer);
metrics::inc_counter(&metrics::BLOCK_PROCESSING_SUCCESSES);
metrics::observe(
&metrics::OPERATIONS_PER_BLOCK_ATTESTATION,
@ -1706,8 +1714,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
new_epoch: new_finalized_epoch,
})
} else {
self.fork_choice
.process_finalization(&finalized_block, finalized_block_root)?;
self.fork_choice.prune()?;
let finalized_state = self
.get_state_caching_only_with_committee_caches(
@ -1726,12 +1733,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
max_finality_distance,
);
// Prune in-memory block root tree.
self.block_root_tree.prune_to(
finalized_block_root,
self.heads().into_iter().map(|(block_root, _)| block_root),
);
let _ = self.event_handler.register(EventKind::BeaconFinalization {
epoch: new_finalized_epoch,
root: finalized_block_root,

View File

@ -9,54 +9,35 @@ use crate::{
ForkChoice,
};
use eth1::Config as Eth1Config;
use lmd_ghost::{LmdGhost, ThreadSafeReducedTree};
use operation_pool::OperationPool;
use proto_array_fork_choice::ProtoArrayForkChoice;
use slog::{info, Logger};
use slot_clock::{SlotClock, TestingSlotClock};
use std::marker::PhantomData;
use std::sync::Arc;
use std::time::Duration;
use store::{BlockRootTree, Store};
use store::Store;
use types::{BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256, Slot};
/// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing
/// functionality and only exists to satisfy the type system.
pub struct Witness<
TStore,
TStoreMigrator,
TSlotClock,
TLmdGhost,
TEth1Backend,
TEthSpec,
TEventHandler,
>(
pub struct Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>(
PhantomData<(
TStore,
TStoreMigrator,
TSlotClock,
TLmdGhost,
TEth1Backend,
TEthSpec,
TEventHandler,
)>,
);
impl<TStore, TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
BeaconChainTypes
for Witness<
TStore,
TStoreMigrator,
TSlotClock,
TLmdGhost,
TEth1Backend,
TEthSpec,
TEventHandler,
>
impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler> BeaconChainTypes
for Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
where
TStore: Store<TEthSpec> + 'static,
TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
TSlotClock: SlotClock + 'static,
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static,
@ -64,7 +45,6 @@ where
type Store = TStore;
type StoreMigrator = TStoreMigrator;
type SlotClock = TSlotClock;
type LmdGhost = TLmdGhost;
type Eth1Chain = TEth1Backend;
type EthSpec = TEthSpec;
type EventHandler = TEventHandler;
@ -92,28 +72,18 @@ pub struct BeaconChainBuilder<T: BeaconChainTypes> {
slot_clock: Option<T::SlotClock>,
persisted_beacon_chain: Option<PersistedBeaconChain<T>>,
head_tracker: Option<HeadTracker>,
block_root_tree: Option<Arc<BlockRootTree>>,
spec: ChainSpec,
log: Option<Logger>,
}
impl<TStore, TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
BeaconChainBuilder<
Witness<
TStore,
TStoreMigrator,
TSlotClock,
TLmdGhost,
TEth1Backend,
TEthSpec,
TEventHandler,
>,
Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>,
>
where
TStore: Store<TEthSpec> + 'static,
TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
TSlotClock: SlotClock + 'static,
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static,
@ -135,7 +105,6 @@ where
slot_clock: None,
persisted_beacon_chain: None,
head_tracker: None,
block_root_tree: None,
spec: TEthSpec::default_spec(),
log: None,
}
@ -194,15 +163,7 @@ where
let key = Hash256::from_slice(&BEACON_CHAIN_DB_KEY.as_bytes());
let p: PersistedBeaconChain<
Witness<
TStore,
TStoreMigrator,
TSlotClock,
TLmdGhost,
TEth1Backend,
TEthSpec,
TEventHandler,
>,
Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>,
> = match store.get(&key) {
Err(e) => {
return Err(format!(
@ -230,7 +191,6 @@ where
Some(cache) => Some(Eth1Chain::from_ssz_container(cache, config, store, log)?),
None => None,
};
self.block_root_tree = Some(Arc::new(p.block_root_tree.clone().into()));
self.persisted_beacon_chain = Some(p);
Ok(self)
@ -273,11 +233,6 @@ where
)
})?;
self.block_root_tree = Some(Arc::new(BlockRootTree::new(
beacon_block_root,
beacon_block.slot,
)));
self.finalized_checkpoint = Some(CheckPoint {
beacon_block_root,
beacon_block,
@ -327,15 +282,7 @@ where
self,
) -> Result<
BeaconChain<
Witness<
TStore,
TStoreMigrator,
TSlotClock,
TLmdGhost,
TEth1Backend,
TEthSpec,
TEventHandler,
>,
Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>,
>,
String,
> {
@ -387,9 +334,6 @@ where
.event_handler
.ok_or_else(|| "Cannot build without an event handler".to_string())?,
head_tracker: self.head_tracker.unwrap_or_default(),
block_root_tree: self
.block_root_tree
.ok_or_else(|| "Cannot build without a block root tree".to_string())?,
checkpoint_cache: CheckPointCache::default(),
log: log.clone(),
};
@ -412,15 +356,7 @@ where
impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
BeaconChainBuilder<
Witness<
TStore,
TStoreMigrator,
TSlotClock,
ThreadSafeReducedTree<TStore, TEthSpec>,
TEth1Backend,
TEthSpec,
TEventHandler,
>,
Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>,
>
where
TStore: Store<TEthSpec> + 'static,
@ -435,23 +371,9 @@ where
/// If this builder is being "resumed" from disk, then rebuild the last fork choice stored to
/// the database. Otherwise, create a new, empty fork choice.
pub fn reduced_tree_fork_choice(mut self) -> Result<Self, String> {
let store = self
.store
.clone()
.ok_or_else(|| "reduced_tree_fork_choice requires a store")?;
let block_root_tree = self
.block_root_tree
.clone()
.ok_or_else(|| "reduced_tree_fork_choice requires a block root tree")?;
let fork_choice = if let Some(persisted_beacon_chain) = &self.persisted_beacon_chain {
ForkChoice::from_ssz_container(
persisted_beacon_chain.fork_choice.clone(),
store,
block_root_tree,
)
.map_err(|e| format!("Unable to decode fork choice from db: {:?}", e))?
ForkChoice::from_ssz_container(persisted_beacon_chain.fork_choice.clone())
.map_err(|e| format!("Unable to decode fork choice from db: {:?}", e))?
} else {
let finalized_checkpoint = &self
.finalized_checkpoint
@ -461,14 +383,22 @@ where
.genesis_block_root
.ok_or_else(|| "fork_choice_backend requires a genesis_block_root")?;
let backend = ThreadSafeReducedTree::new(
store,
block_root_tree,
&finalized_checkpoint.beacon_block,
let backend = ProtoArrayForkChoice::new(
finalized_checkpoint.beacon_block.slot,
// Note: here we set the `justified_epoch` to be the same as the epoch of the
// finalized checkpoint. Whilst this finalized checkpoint may actually point to
// a _later_ justified checkpoint, that checkpoint won't yet exist in the fork
// choice.
finalized_checkpoint.beacon_state.current_epoch(),
finalized_checkpoint.beacon_state.current_epoch(),
finalized_checkpoint.beacon_block_root,
);
)?;
ForkChoice::new(backend, genesis_block_root, self.spec.genesis_slot)
ForkChoice::new(
backend,
genesis_block_root,
&finalized_checkpoint.beacon_state,
)
};
self.fork_choice = Some(fork_choice);
@ -477,13 +407,12 @@ where
}
}
impl<TStore, TStoreMigrator, TSlotClock, TLmdGhost, TEthSpec, TEventHandler>
impl<TStore, TStoreMigrator, TSlotClock, TEthSpec, TEventHandler>
BeaconChainBuilder<
Witness<
TStore,
TStoreMigrator,
TSlotClock,
TLmdGhost,
CachingEth1Backend<TEthSpec, TStore>,
TEthSpec,
TEventHandler,
@ -493,7 +422,6 @@ where
TStore: Store<TEthSpec> + 'static,
TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
TSlotClock: SlotClock + 'static,
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,
TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static,
{
@ -529,22 +457,13 @@ where
}
}
impl<TStore, TStoreMigrator, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
impl<TStore, TStoreMigrator, TEth1Backend, TEthSpec, TEventHandler>
BeaconChainBuilder<
Witness<
TStore,
TStoreMigrator,
TestingSlotClock,
TLmdGhost,
TEth1Backend,
TEthSpec,
TEventHandler,
>,
Witness<TStore, TStoreMigrator, TestingSlotClock, TEth1Backend, TEthSpec, TEventHandler>,
>
where
TStore: Store<TEthSpec> + 'static,
TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static,
@ -570,13 +489,12 @@ where
}
}
impl<TStore, TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec>
impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec>
BeaconChainBuilder<
Witness<
TStore,
TStoreMigrator,
TSlotClock,
TLmdGhost,
TEth1Backend,
TEthSpec,
NullEventHandler<TEthSpec>,
@ -586,7 +504,6 @@ where
TStore: Store<TEthSpec> + 'static,
TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
TSlotClock: SlotClock + 'static,
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
TEthSpec: EthSpec + 'static,
{

View File

@ -6,7 +6,6 @@ use state_processing::per_block_processing::errors::AttestationValidationError;
use state_processing::BlockProcessingError;
use state_processing::SlotProcessingError;
use std::time::Duration;
use store::block_root_tree::BlockRootTreeError;
use types::*;
macro_rules! easy_from_to {
@ -51,13 +50,11 @@ pub enum BeaconChainError {
InvariantViolated(String),
SszTypesError(SszTypesError),
CanonicalHeadLockTimeout,
BlockRootTreeError(BlockRootTreeError),
}
easy_from_to!(SlotProcessingError, BeaconChainError);
easy_from_to!(AttestationValidationError, BeaconChainError);
easy_from_to!(SszTypesError, BeaconChainError);
easy_from_to!(BlockRootTreeError, BeaconChainError);
#[derive(Debug, PartialEq)]
pub enum BlockProductionError {

View File

@ -1,13 +1,14 @@
mod checkpoint_manager;
use crate::{errors::BeaconChainError, metrics, BeaconChain, BeaconChainTypes};
use lmd_ghost::LmdGhost;
use parking_lot::RwLock;
use checkpoint_manager::{get_effective_balances, CheckpointManager, CheckpointWithBalances};
use parking_lot::{RwLock, RwLockReadGuard};
use proto_array_fork_choice::{core::ProtoArray, ProtoArrayForkChoice};
use ssz_derive::{Decode, Encode};
use state_processing::{common::get_attesting_indices, per_slot_processing};
use std::sync::Arc;
use store::{BlockRootTree, Error as StoreError, Store};
use types::{
Attestation, BeaconBlock, BeaconState, BeaconStateError, Checkpoint, EthSpec, Hash256, Slot,
};
use state_processing::common::get_attesting_indices;
use std::marker::PhantomData;
use store::Error as StoreError;
use types::{Attestation, BeaconBlock, BeaconState, BeaconStateError, Epoch, Hash256};
type Result<T> = std::result::Result<T, Error>;
@ -19,27 +20,29 @@ pub enum Error {
BeaconStateError(BeaconStateError),
StoreError(StoreError),
BeaconChainError(Box<BeaconChainError>),
UnknownBlockSlot(Hash256),
UnknownJustifiedBlock(Hash256),
UnknownJustifiedState(Hash256),
UnableToJsonEncode(String),
}
pub struct ForkChoice<T: BeaconChainTypes> {
backend: T::LmdGhost,
backend: ProtoArrayForkChoice,
/// Used for resolving the `0x00..00` alias back to genesis.
///
/// Does not necessarily need to be the _actual_ genesis, it suffices to be the finalized root
/// whenever the struct was instantiated.
genesis_block_root: Hash256,
/// The fork choice rule's current view of the justified checkpoint.
justified_checkpoint: RwLock<Checkpoint>,
/// The best justified checkpoint we've seen, which may be ahead of `justified_checkpoint`.
best_justified_checkpoint: RwLock<Checkpoint>,
checkpoint_manager: RwLock<CheckpointManager>,
_phantom: PhantomData<T>,
}
impl<T: BeaconChainTypes> PartialEq for ForkChoice<T> {
/// This implementation ignores the `store`.
fn eq(&self, other: &Self) -> bool {
self.backend == other.backend
&& self.genesis_block_root == other.genesis_block_root
&& *self.justified_checkpoint.read() == *other.justified_checkpoint.read()
&& *self.best_justified_checkpoint.read() == *other.best_justified_checkpoint.read()
&& *self.checkpoint_manager.read() == *other.checkpoint_manager.read()
}
}
@ -48,122 +51,48 @@ impl<T: BeaconChainTypes> ForkChoice<T> {
///
/// "Genesis" does not necessarily need to be the absolute genesis, it can be some finalized
/// block.
pub fn new(backend: T::LmdGhost, genesis_block_root: Hash256, genesis_slot: Slot) -> Self {
let justified_checkpoint = Checkpoint {
epoch: genesis_slot.epoch(T::EthSpec::slots_per_epoch()),
pub fn new(
backend: ProtoArrayForkChoice,
genesis_block_root: Hash256,
genesis_state: &BeaconState<T::EthSpec>,
) -> Self {
let genesis_checkpoint = CheckpointWithBalances {
epoch: genesis_state.current_epoch(),
root: genesis_block_root,
balances: get_effective_balances(genesis_state),
};
Self {
backend,
genesis_block_root,
justified_checkpoint: RwLock::new(justified_checkpoint.clone()),
best_justified_checkpoint: RwLock::new(justified_checkpoint),
checkpoint_manager: RwLock::new(CheckpointManager::new(genesis_checkpoint)),
_phantom: PhantomData,
}
}
/// Determine whether the fork choice's view of the justified checkpoint should be updated.
///
/// To prevent the bouncing attack, an update is allowed only in these conditions:
///
/// * We're in the first SAFE_SLOTS_TO_UPDATE_JUSTIFIED slots of the epoch, or
/// * The new justified checkpoint is a descendant of the current justified checkpoint
fn should_update_justified_checkpoint(
&self,
chain: &BeaconChain<T>,
new_justified_checkpoint: &Checkpoint,
) -> Result<bool> {
if Self::compute_slots_since_epoch_start(chain.slot()?)
< chain.spec.safe_slots_to_update_justified
{
return Ok(true);
}
let justified_checkpoint = self.justified_checkpoint.read().clone();
let current_justified_block = chain
.get_block(&justified_checkpoint.root)?
.ok_or_else(|| Error::MissingBlock(justified_checkpoint.root))?;
let new_justified_block = chain
.get_block(&new_justified_checkpoint.root)?
.ok_or_else(|| Error::MissingBlock(new_justified_checkpoint.root))?;
let slots_per_epoch = T::EthSpec::slots_per_epoch();
Ok(
new_justified_block.slot > justified_checkpoint.epoch.start_slot(slots_per_epoch)
&& chain.get_ancestor_block_root(
new_justified_checkpoint.root,
current_justified_block.slot,
)? == Some(justified_checkpoint.root),
)
}
/// Calculate how far `slot` lies from the start of its epoch.
fn compute_slots_since_epoch_start(slot: Slot) -> u64 {
let slots_per_epoch = T::EthSpec::slots_per_epoch();
(slot - slot.epoch(slots_per_epoch).start_slot(slots_per_epoch)).as_u64()
}
/// Run the fork choice rule to determine the head.
pub fn find_head(&self, chain: &BeaconChain<T>) -> Result<Hash256> {
let timer = metrics::start_timer(&metrics::FORK_CHOICE_FIND_HEAD_TIMES);
let (start_state, start_block_root, start_block_slot) = {
// Check if we should update our view of the justified checkpoint.
// Doing this check here should be quasi-equivalent to the update in the `on_tick`
// function of the spec, so long as `find_head` is called at least once during the first
// SAFE_SLOTS_TO_UPDATE_JUSTIFIED slots.
let best_justified_checkpoint = self.best_justified_checkpoint.read();
if self.should_update_justified_checkpoint(chain, &best_justified_checkpoint)? {
*self.justified_checkpoint.write() = best_justified_checkpoint.clone();
}
let current_justified_checkpoint = self.justified_checkpoint.read().clone();
let (block_root, block_justified_slot) = (
current_justified_checkpoint.root,
current_justified_checkpoint
.epoch
.start_slot(T::EthSpec::slots_per_epoch()),
);
let block = chain
.store
.get::<BeaconBlock<T::EthSpec>>(&block_root)?
.ok_or_else(|| Error::MissingBlock(block_root))?;
// Resolve the `0x00..00` alias back to genesis
let block_root = if block_root == Hash256::zero() {
let remove_alias = |root| {
if root == Hash256::zero() {
self.genesis_block_root
} else {
block_root
};
let mut state: BeaconState<T::EthSpec> = chain
.get_state_caching_only_with_committee_caches(&block.state_root, Some(block.slot))?
.ok_or_else(|| Error::MissingState(block.state_root))?;
// Fast-forward the state to the start slot of the epoch where it was justified.
for _ in block.slot.as_u64()..block_justified_slot.as_u64() {
per_slot_processing(&mut state, None, &chain.spec)
.map_err(BeaconChainError::SlotProcessingError)?
root
}
(state, block_root, block_justified_slot)
};
// A function that returns the weight for some validator index.
let weight = |validator_index: usize| -> Option<u64> {
start_state
.validators
.get(validator_index)
.map(|v| v.effective_balance)
};
let mut manager = self.checkpoint_manager.write();
manager.maybe_update(chain.slot()?, chain)?;
let result = self
.backend
.find_head(start_block_slot, start_block_root, weight)
.find_head(
manager.current.justified.epoch,
remove_alias(manager.current.justified.root),
manager.current.finalized.epoch,
&manager.current.justified.balances,
)
.map_err(Into::into);
metrics::stop_timer(timer);
@ -171,6 +100,11 @@ impl<T: BeaconChainTypes> ForkChoice<T> {
result
}
/// Returns true if the given block is known to fork choice.
///
/// Delegates directly to the proto-array backend's `contains_block`; performs no
/// database lookup, so a `false` result only means the block is absent from the
/// in-memory fork choice structure.
pub fn contains_block(&self, block_root: &Hash256) -> bool {
self.backend.contains_block(block_root)
}
/// Process all attestations in the given `block`.
///
/// Assumes the block (and therefore its attestations) are valid. It is a logic error to
@ -183,36 +117,35 @@ impl<T: BeaconChainTypes> ForkChoice<T> {
block_root: Hash256,
) -> Result<()> {
let timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_BLOCK_TIMES);
// Note: we never count the block as a latest message, only attestations.
//
// I (Paul H) do not have an explicit reference to this, but I derive it from this
// document:
//
// https://github.com/ethereum/eth2.0-specs/blob/v0.7.0/specs/core/0_fork-choice.md
for attestation in &block.body.attestations {
// If the `data.beacon_block_root` block is not known to us, simply ignore the latest
// vote.
if let Some(block) = chain.get_block_caching(&attestation.data.beacon_block_root)? {
self.process_attestation(state, attestation, &block)?;
}
}
// Check if we should update our view of the justified checkpoint
if state.current_justified_checkpoint.epoch > self.justified_checkpoint.read().epoch {
*self.best_justified_checkpoint.write() = state.current_justified_checkpoint.clone();
self.checkpoint_manager
.write()
.process_state(block_root, state, chain, &self.backend)?;
self.checkpoint_manager
.write()
.maybe_update(chain.slot()?, chain)?;
// Note: we never count the block as a latest message, only attestations.
for attestation in &block.body.attestations {
// If the `data.beacon_block_root` block is not known to the fork choice, simply ignore
// the vote.
if self
.should_update_justified_checkpoint(chain, &state.current_justified_checkpoint)?
.backend
.contains_block(&attestation.data.beacon_block_root)
{
*self.justified_checkpoint.write() = state.current_justified_checkpoint.clone();
self.process_attestation(state, attestation)?;
}
}
// This does not apply a vote to the block, it just makes fork choice aware of the block so
// it can still be identified as the head even if it doesn't have any votes.
//
// A case where a block without any votes can be the head is where it is the only child of
// a block that has the majority of votes applied to it.
self.backend.process_block(block, block_root)?;
self.backend.process_block(
block.slot,
block_root,
block.parent_root,
state.current_justified_checkpoint.epoch,
state.finalized_checkpoint.epoch,
)?;
metrics::stop_timer(timer);
@ -226,7 +159,6 @@ impl<T: BeaconChainTypes> ForkChoice<T> {
&self,
state: &BeaconState<T::EthSpec>,
attestation: &Attestation<T::EthSpec>,
block: &BeaconBlock<T::EthSpec>,
) -> Result<()> {
let timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES);
@ -252,8 +184,11 @@ impl<T: BeaconChainTypes> ForkChoice<T> {
get_attesting_indices(state, &attestation.data, &attestation.aggregation_bits)?;
for validator_index in validator_indices {
self.backend
.process_attestation(validator_index, block_hash, block.slot)?;
self.backend.process_attestation(
validator_index,
block_hash,
attestation.data.target.epoch,
)?;
}
}
@ -265,38 +200,29 @@ impl<T: BeaconChainTypes> ForkChoice<T> {
/// Returns the latest message for a given validator, if any.
///
/// Returns `(block_root, block_slot)`.
pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)> {
pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Epoch)> {
self.backend.latest_message(validator_index)
}
/// Runs an integrity verification function on the underlying fork choice algorithm.
///
/// Returns `Ok(())` if the underlying fork choice has maintained it's integrity,
/// `Err(description)` otherwise.
pub fn verify_integrity(&self) -> core::result::Result<(), String> {
self.backend.verify_integrity()
/// Trigger a prune on the underlying fork choice backend.
pub fn prune(&self) -> Result<()> {
let finalized_root = self.checkpoint_manager.read().current.finalized.root;
self.backend.maybe_prune(finalized_root).map_err(Into::into)
}
/// Inform the fork choice that the given block (and corresponding root) have been finalized so
/// it may prune it's storage.
/// Returns a read-lock to the core `ProtoArray` struct.
///
/// `finalized_block_root` must be the root of `finalized_block`.
pub fn process_finalization(
&self,
finalized_block: &BeaconBlock<T::EthSpec>,
finalized_block_root: Hash256,
) -> Result<()> {
self.backend
.update_finalized_root(finalized_block, finalized_block_root)
.map_err(Into::into)
/// Should only be used when encoding/decoding during troubleshooting.
pub fn core_proto_array(&self) -> RwLockReadGuard<ProtoArray> {
self.backend.core_proto_array()
}
/// Returns a `SszForkChoice` which contains the current state of `Self`.
pub fn as_ssz_container(&self) -> SszForkChoice {
SszForkChoice {
genesis_block_root: self.genesis_block_root,
justified_checkpoint: self.justified_checkpoint.read().clone(),
best_justified_checkpoint: self.best_justified_checkpoint.read().clone(),
genesis_block_root: self.genesis_block_root.clone(),
checkpoint_manager: self.checkpoint_manager.read().clone(),
backend_bytes: self.backend.as_bytes(),
}
}
@ -304,18 +230,14 @@ impl<T: BeaconChainTypes> ForkChoice<T> {
/// Instantiates `Self` from a prior `SszForkChoice`.
///
/// The created `Self` will have the same state as the `Self` that created the `SszForkChoice`.
pub fn from_ssz_container(
ssz_container: SszForkChoice,
store: Arc<T::Store>,
block_root_tree: Arc<BlockRootTree>,
) -> Result<Self> {
let backend = LmdGhost::from_bytes(&ssz_container.backend_bytes, store, block_root_tree)?;
pub fn from_ssz_container(ssz_container: SszForkChoice) -> Result<Self> {
let backend = ProtoArrayForkChoice::from_bytes(&ssz_container.backend_bytes)?;
Ok(Self {
backend,
genesis_block_root: ssz_container.genesis_block_root,
justified_checkpoint: RwLock::new(ssz_container.justified_checkpoint),
best_justified_checkpoint: RwLock::new(ssz_container.best_justified_checkpoint),
checkpoint_manager: RwLock::new(ssz_container.checkpoint_manager),
_phantom: PhantomData,
})
}
}
@ -326,8 +248,7 @@ impl<T: BeaconChainTypes> ForkChoice<T> {
#[derive(Encode, Decode, Clone)]
pub struct SszForkChoice {
genesis_block_root: Hash256,
justified_checkpoint: Checkpoint,
best_justified_checkpoint: Checkpoint,
checkpoint_manager: CheckpointManager,
backend_bytes: Vec<u8>,
}

View File

@ -0,0 +1,340 @@
use super::Error;
use crate::{metrics, BeaconChain, BeaconChainTypes};
use proto_array_fork_choice::ProtoArrayForkChoice;
use ssz_derive::{Decode, Encode};
use types::{BeaconState, Checkpoint, Epoch, EthSpec, Hash256, Slot};
const MAX_BALANCE_CACHE_SIZE: usize = 4;
/// An item that is stored in the `BalancesCache`.
#[derive(PartialEq, Clone, Encode, Decode)]
struct CacheItem {
/// The block root at which `self.balances` are valid.
block_root: Hash256,
/// The `state.balances` list.
///
/// Populated via `get_effective_balances`, so inactive validators carry a balance of zero.
balances: Vec<u64>,
}
/// Provides a cache to avoid reading `BeaconState` from disk when updating the current justified
/// checkpoint.
///
/// It should store a mapping of `epoch_boundary_block_root -> state.balances`.
#[derive(PartialEq, Clone, Default, Encode, Decode)]
struct BalancesCache {
/// Oldest-first list of cached entries, capped at `MAX_BALANCE_CACHE_SIZE` (eviction removes
/// index 0).
items: Vec<CacheItem>,
}
impl BalancesCache {
    /// Inspect the given `state` and determine the root of the block at the first slot of
    /// `state.current_epoch`. If there is not already some entry for the given block root, then
    /// add `state.balances` to the cache.
    pub fn process_state<E: EthSpec>(
        &mut self,
        block_root: Hash256,
        state: &BeaconState<E>,
    ) -> Result<(), Error> {
        // Balances are only interesting for states at the start of an epoch, because this is
        // where the `current_justified_checkpoint.root` will point.
        if !Self::is_first_block_in_epoch(block_root, state)? {
            return Ok(());
        }

        let boundary_slot = state.current_epoch().start_slot(E::slots_per_epoch());
        let boundary_root = if state.slot == boundary_slot {
            block_root
        } else {
            // This call remains sensible as long as `state.block_roots` is larger than a single
            // epoch.
            *state.get_block_root(boundary_slot)?
        };

        // Only insert when the boundary root is not already cached, evicting the oldest entry
        // when the cache is at capacity.
        if self.position(boundary_root).is_none() {
            if self.items.len() == MAX_BALANCE_CACHE_SIZE {
                self.items.remove(0);
            }
            self.items.push(CacheItem {
                block_root: boundary_root,
                balances: get_effective_balances(state),
            });
        }

        Ok(())
    }

    /// Returns `true` if the given `block_root` is the first/only block to have been processed in
    /// the epoch of the given `state`.
    ///
    /// We can determine this by looking back through `state.block_roots`: any slot in the current
    /// epoch holding a different root means an earlier block exists.
    fn is_first_block_in_epoch<E: EthSpec>(
        block_root: Hash256,
        state: &BeaconState<E>,
    ) -> Result<bool, Error> {
        for slot in state.current_epoch().slot_iter(E::slots_per_epoch()) {
            // Only slots prior to the state's own slot are meaningful.
            if slot >= state.slot {
                break;
            }
            // A different root earlier in the epoch means `block_root` was not first.
            if *state.get_block_root(slot)? != block_root {
                return Ok(false);
            }
        }
        Ok(true)
    }

    /// Returns the index of the cache entry for `block_root`, if present.
    fn position(&self, block_root: Hash256) -> Option<usize> {
        self.items
            .iter()
            .position(|item| item.block_root == block_root)
    }

    /// Get the balances for the given `block_root`, if any.
    ///
    /// If some balances are found, they are removed from the cache.
    pub fn get(&mut self, block_root: Hash256) -> Option<Vec<u64>> {
        self.position(block_root)
            .map(|i| self.items.remove(i).balances)
    }
}
/// Returns the effective balances for every validator in the given `state`.
///
/// Any validator who is not active in the epoch of the given `state` is assigned a balance of
/// zero.
pub fn get_effective_balances<T: EthSpec>(state: &BeaconState<T>) -> Vec<u64> {
    // Hoist the epoch lookup out of the per-validator closure.
    let epoch = state.current_epoch();
    state
        .validators
        .iter()
        .map(|v| if v.is_active_at(epoch) { v.effective_balance } else { 0 })
        .collect()
}
/// A `types::Checkpoint` that also stores the validator balances from a `BeaconState`.
///
/// Useful because we need to track the justified checkpoint balances.
#[derive(PartialEq, Clone, Encode, Decode)]
pub struct CheckpointWithBalances {
/// The checkpoint epoch.
pub epoch: Epoch,
/// The checkpoint block root.
pub root: Hash256,
/// These are the balances of the state with `self.root`.
///
/// Importantly, these are _not_ the balances of the first state that we saw that has
/// `self.epoch` and `self.root` as `state.current_justified_checkpoint`. These are the
/// balances of the state from the block with `state.current_justified_checkpoint.root`.
pub balances: Vec<u64>,
}
/// Convert to a plain `types::Checkpoint`, discarding the balances.
///
/// Implemented as `From` rather than a hand-written `Into`: the standard library's blanket
/// `impl<T, U: From<T>> Into<U> for T` then provides `Into<Checkpoint>` automatically, so all
/// existing `.into()` call-sites continue to work.
impl From<CheckpointWithBalances> for Checkpoint {
    fn from(c: CheckpointWithBalances) -> Self {
        Checkpoint {
            epoch: c.epoch,
            root: c.root,
        }
    }
}
/// A pair of checkpoints, representing `state.current_justified_checkpoint` and
/// `state.finalized_checkpoint` for some `BeaconState`.
#[derive(PartialEq, Clone, Encode, Decode)]
pub struct FFGCheckpoints {
/// The justified checkpoint, including the balances of the justified state.
pub justified: CheckpointWithBalances,
/// The finalized checkpoint (balances are not required for finalization).
pub finalized: Checkpoint,
}
/// A struct to manage the justified and finalized checkpoints to be used for `ForkChoice`.
///
/// This struct exists to manage the `should_update_justified_checkpoint` logic in the fork choice
/// section of the spec:
///
/// https://github.com/ethereum/eth2.0-specs/blob/dev/specs/phase0/fork-choice.md#should_update_justified_checkpoint
#[derive(PartialEq, Clone, Encode, Decode)]
pub struct CheckpointManager {
/// The current FFG checkpoints that should be used for finding the head.
pub current: FFGCheckpoints,
/// The best-known checkpoints that should be moved to `self.current` when the time is right.
best: FFGCheckpoints,
/// The epoch at which `self.current` should become `self.best`, if any.
update_at: Option<Epoch>,
/// A cache used to try and avoid DB reads when updating `self.current` and `self.best`.
balances_cache: BalancesCache,
}
impl CheckpointManager {
    /// Create a new checkpoint cache from `genesis_checkpoint` derived from the genesis block.
    ///
    /// Both `current` and `best` start at the genesis checkpoint, with no update scheduled.
    pub fn new(genesis_checkpoint: CheckpointWithBalances) -> Self {
        let ffg_checkpoint = FFGCheckpoints {
            justified: genesis_checkpoint.clone(),
            finalized: genesis_checkpoint.into(),
        };
        Self {
            current: ffg_checkpoint.clone(),
            best: ffg_checkpoint,
            update_at: None,
            balances_cache: BalancesCache::default(),
        }
    }

    /// Potentially updates `self.current`, if the conditions are correct.
    ///
    /// Should be called before running the fork choice `find_head` function to ensure
    /// `self.current` is up-to-date.
    pub fn maybe_update<T: BeaconChainTypes>(
        &mut self,
        current_slot: Slot,
        chain: &BeaconChain<T>,
    ) -> Result<(), Error> {
        // Nothing to do unless the best-known justified checkpoint is strictly newer than the
        // one currently in use.
        if self.best.justified.epoch <= self.current.justified.epoch {
            return Ok(());
        }

        let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());

        match self.update_at {
            // No update scheduled: adopt `best` immediately when we are still within the "safe"
            // window at the start of the epoch, otherwise defer the switch to the next epoch.
            //
            // (The previous version re-checked `best.justified.epoch >
            // current.justified.epoch` inside this arm; that was redundant with the early-return
            // above and has been removed.)
            None => {
                if Self::compute_slots_since_epoch_start::<T>(current_slot)
                    < chain.spec.safe_slots_to_update_justified
                {
                    self.current = self.best.clone();
                } else {
                    self.update_at = Some(current_epoch + 1)
                }
            }
            // A scheduled update has come due: adopt `best` and clear the schedule.
            Some(epoch) if epoch <= current_epoch => {
                self.current = self.best.clone();
                self.update_at = None
            }
            // A scheduled update exists but is not yet due.
            _ => {}
        }

        Ok(())
    }

    /// Checks the given `state` (must correspond to the given `block_root`) to see if it contains
    /// a `current_justified_checkpoint` that is better than `self.best.justified`. If so, the
    /// value is updated.
    ///
    /// Note: `self.current` is only updated here when the candidate checkpoint descends from the
    /// current justified checkpoint (in which case the switch is always safe); otherwise the
    /// update is left for `maybe_update`.
    pub fn process_state<T: BeaconChainTypes>(
        &mut self,
        block_root: Hash256,
        state: &BeaconState<T::EthSpec>,
        chain: &BeaconChain<T>,
        proto_array: &ProtoArrayForkChoice,
    ) -> Result<(), Error> {
        // Only proceed if the new checkpoint is better than our current checkpoint.
        if state.current_justified_checkpoint.epoch > self.current.justified.epoch
            && state.finalized_checkpoint.epoch >= self.current.finalized.epoch
        {
            let candidate = FFGCheckpoints {
                justified: CheckpointWithBalances {
                    epoch: state.current_justified_checkpoint.epoch,
                    root: state.current_justified_checkpoint.root,
                    balances: self
                        .get_balances_for_block(state.current_justified_checkpoint.root, chain)?,
                },
                finalized: state.finalized_checkpoint.clone(),
            };

            // Using the given `state`, determine its ancestor at the slot of our current justified
            // epoch. Later, this will be compared to the root of the current justified checkpoint
            // to determine if this state is descendant of our current justified state.
            let new_checkpoint_ancestor = Self::get_block_root_at_slot(
                state,
                chain,
                candidate.justified.root,
                self.current
                    .justified
                    .epoch
                    .start_slot(T::EthSpec::slots_per_epoch()),
            )?;

            let candidate_justified_block_slot = proto_array
                .block_slot(&candidate.justified.root)
                .ok_or_else(|| Error::UnknownBlockSlot(candidate.justified.root))?;

            // If the candidate descends from the current justified checkpoint and its justified
            // block's slot is at or after the start of its epoch, it is always safe to adopt it
            // immediately.
            if new_checkpoint_ancestor == Some(self.current.justified.root)
                && candidate_justified_block_slot
                    >= candidate
                        .justified
                        .epoch
                        .start_slot(T::EthSpec::slots_per_epoch())
            {
                self.current = candidate.clone()
            }

            // Always update the best checkpoint, if the candidate is better.
            if candidate.justified.epoch > self.best.justified.epoch {
                self.best = candidate;
            }

            // Add the state's balances to the balances cache to avoid a state read later.
            self.balances_cache.process_state(block_root, state)?;
        }

        Ok(())
    }

    /// Fetch the effective balances for the state at the given `block_root`, preferring the
    /// in-memory cache over a database read.
    fn get_balances_for_block<T: BeaconChainTypes>(
        &mut self,
        block_root: Hash256,
        chain: &BeaconChain<T>,
    ) -> Result<Vec<u64>, Error> {
        if let Some(balances) = self.balances_cache.get(block_root) {
            metrics::inc_counter(&metrics::BALANCES_CACHE_HITS);
            Ok(balances)
        } else {
            metrics::inc_counter(&metrics::BALANCES_CACHE_MISSES);

            // Cache miss: load the block, then its post-state, from the store.
            let block = chain
                .get_block_caching(&block_root)?
                .ok_or_else(|| Error::UnknownJustifiedBlock(block_root))?;

            let state = chain
                .get_state_caching_only_with_committee_caches(&block.state_root, Some(block.slot))?
                .ok_or_else(|| Error::UnknownJustifiedState(block.state_root))?;

            Ok(get_effective_balances(&state))
        }
    }

    /// Attempts to get the block root for the given `slot`.
    ///
    /// First, the `state` is used to see if the slot is within the distance of its historical
    /// lists. Then, the `chain` is used which will anchor the search at the given
    /// `justified_root`.
    fn get_block_root_at_slot<T: BeaconChainTypes>(
        state: &BeaconState<T::EthSpec>,
        chain: &BeaconChain<T>,
        justified_root: Hash256,
        slot: Slot,
    ) -> Result<Option<Hash256>, Error> {
        match state.get_block_root(slot) {
            Ok(root) => Ok(Some(*root)),
            // The slot is outside `state.block_roots`; fall back to walking the chain back from
            // `justified_root`.
            Err(_) => chain
                .get_ancestor_block_root(justified_root, slot)
                .map_err(Into::into),
        }
    }

    /// Calculate how far `slot` lies from the start of its epoch.
    fn compute_slots_since_epoch_start<T: BeaconChainTypes>(slot: Slot) -> u64 {
        let slots_per_epoch = T::EthSpec::slots_per_epoch();

        (slot - slot.epoch(slots_per_epoch).start_slot(slots_per_epoch)).as_u64()
    }
}

View File

@ -24,7 +24,6 @@ pub use self::errors::{BeaconChainError, BlockProductionError};
pub use eth1_chain::{Eth1Chain, Eth1ChainBackend};
pub use events::EventHandler;
pub use fork_choice::ForkChoice;
pub use lmd_ghost;
pub use metrics::scrape_for_metrics;
pub use parking_lot;
pub use slot_clock;

View File

@ -138,6 +138,10 @@ lazy_static! {
"beacon_fork_choice_process_attestation_seconds",
"Time taken to add an attestation to fork choice"
);
pub static ref BALANCES_CACHE_HITS: Result<IntCounter> =
try_create_int_counter("beacon_balances_cache_hits_total", "Count of times balances cache fulfils request");
pub static ref BALANCES_CACHE_MISSES: Result<IntCounter> =
try_create_int_counter("beacon_balances_cache_misses_total", "Count of times balances cache fulfils request");
/*
* Persisting BeaconChain to disk

View File

@ -5,7 +5,7 @@ use crate::{BeaconChainTypes, CheckPoint};
use operation_pool::PersistedOperationPool;
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use store::{DBColumn, Error as StoreError, SimpleStoreItem, SszBlockRootTree};
use store::{DBColumn, Error as StoreError, SimpleStoreItem};
use types::Hash256;
/// 32-byte key for accessing the `PersistedBeaconChain`.
@ -20,7 +20,6 @@ pub struct PersistedBeaconChain<T: BeaconChainTypes> {
pub ssz_head_tracker: SszHeadTracker,
pub fork_choice: SszForkChoice,
pub eth1_cache: Option<SszEth1>,
pub block_root_tree: SszBlockRootTree,
}
impl<T: BeaconChainTypes> SimpleStoreItem for PersistedBeaconChain<T> {

View File

@ -6,7 +6,6 @@ use crate::{
};
use eth1::Config as Eth1Config;
use genesis::interop_genesis_state;
use lmd_ghost::ThreadSafeReducedTree;
use rayon::prelude::*;
use sloggers::{terminal::TerminalLoggerBuilder, types::Severity, Build};
use slot_clock::TestingSlotClock;
@ -35,7 +34,6 @@ pub type BaseHarnessType<TStore, TStoreMigrator, TEthSpec> = Witness<
TStore,
TStoreMigrator,
TestingSlotClock,
ThreadSafeReducedTree<TStore, TEthSpec>,
CachingEth1Backend<TEthSpec, TStore>,
TEthSpec,
NullEventHandler<TEthSpec>,

View File

@ -391,7 +391,7 @@ fn free_attestations_added_to_fork_choice_some_none() {
if slot <= num_blocks_produced && slot != 0 {
assert_eq!(
latest_message.unwrap().1,
slot,
slot.epoch(MinimalEthSpec::slots_per_epoch()),
"Latest message slot for {} should be equal to slot {}.",
validator,
slot
@ -483,7 +483,7 @@ fn free_attestations_added_to_fork_choice_all_updated() {
assert_eq!(
latest_message.unwrap().1,
slot,
slot.epoch(MinimalEthSpec::slots_per_epoch()),
"Latest message slot should be equal to attester duty."
);

View File

@ -33,7 +33,6 @@ exit-future = "0.1.4"
futures = "0.1.29"
reqwest = "0.9.22"
url = "2.1.0"
lmd_ghost = { path = "../../eth2/lmd_ghost" }
eth1 = { path = "../eth1" }
genesis = { path = "../genesis" }
environment = { path = "../../lighthouse/environment" }

View File

@ -4,7 +4,6 @@ use crate::Client;
use beacon_chain::{
builder::{BeaconChainBuilder, Witness},
eth1_chain::CachingEth1Backend,
lmd_ghost::ThreadSafeReducedTree,
slot_clock::{SlotClock, SystemTimeSlotClock},
store::{
migrate::{BackgroundMigrator, Migrate, NullMigrator},
@ -21,7 +20,6 @@ use genesis::{
generate_deterministic_keypairs, interop_genesis_state, state_from_ssz_file, Eth1GenesisService,
};
use lighthouse_bootstrap::Bootstrapper;
use lmd_ghost::LmdGhost;
use network::{NetworkConfig, NetworkMessage, Service as NetworkService};
use slog::info;
use ssz::Decode;
@ -67,23 +65,14 @@ pub struct ClientBuilder<T: BeaconChainTypes> {
eth_spec_instance: T::EthSpec,
}
impl<TStore, TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
ClientBuilder<
Witness<
TStore,
TStoreMigrator,
TSlotClock,
TLmdGhost,
TEth1Backend,
TEthSpec,
TEventHandler,
>,
Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>,
>
where
TStore: Store<TEthSpec> + 'static,
TStoreMigrator: store::Migrate<TStore, TEthSpec>,
TSlotClock: SlotClock + Clone + 'static,
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static,
@ -367,17 +356,8 @@ where
/// If type inference errors are being raised, see the comment on the definition of `Self`.
pub fn build(
self,
) -> Client<
Witness<
TStore,
TStoreMigrator,
TSlotClock,
TLmdGhost,
TEth1Backend,
TEthSpec,
TEventHandler,
>,
> {
) -> Client<Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>>
{
Client {
beacon_chain: self.beacon_chain,
libp2p_network: self.libp2p_network,
@ -390,15 +370,7 @@ where
impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
ClientBuilder<
Witness<
TStore,
TStoreMigrator,
TSlotClock,
ThreadSafeReducedTree<TStore, TEthSpec>,
TEth1Backend,
TEthSpec,
TEventHandler,
>,
Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>,
>
where
TStore: Store<TEthSpec> + 'static,
@ -435,13 +407,12 @@ where
}
}
impl<TStore, TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec>
impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec>
ClientBuilder<
Witness<
TStore,
TStoreMigrator,
TSlotClock,
TLmdGhost,
TEth1Backend,
TEthSpec,
WebSocketSender<TEthSpec>,
@ -451,7 +422,6 @@ where
TStore: Store<TEthSpec> + 'static,
TStoreMigrator: store::Migrate<TStore, TEthSpec>,
TSlotClock: SlotClock + 'static,
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
TEthSpec: EthSpec + 'static,
{
@ -485,13 +455,12 @@ where
}
}
impl<TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
ClientBuilder<
Witness<
DiskStore<TEthSpec>,
TStoreMigrator,
TSlotClock,
TLmdGhost,
TEth1Backend,
TEthSpec,
TEventHandler,
@ -500,7 +469,6 @@ impl<TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandle
where
TSlotClock: SlotClock + 'static,
TStoreMigrator: store::Migrate<DiskStore<TEthSpec>, TEthSpec> + 'static,
TLmdGhost: LmdGhost<DiskStore<TEthSpec>, TEthSpec> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, DiskStore<TEthSpec>> + 'static,
TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static,
@ -535,13 +503,12 @@ where
}
}
impl<TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
ClientBuilder<
Witness<
SimpleDiskStore<TEthSpec>,
TStoreMigrator,
TSlotClock,
TLmdGhost,
TEth1Backend,
TEthSpec,
TEventHandler,
@ -550,7 +517,6 @@ impl<TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandle
where
TSlotClock: SlotClock + 'static,
TStoreMigrator: store::Migrate<SimpleDiskStore<TEthSpec>, TEthSpec> + 'static,
TLmdGhost: LmdGhost<SimpleDiskStore<TEthSpec>, TEthSpec> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, SimpleDiskStore<TEthSpec>> + 'static,
TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static,
@ -564,13 +530,12 @@ where
}
}
impl<TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
impl<TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
ClientBuilder<
Witness<
MemoryStore<TEthSpec>,
NullMigrator,
TSlotClock,
TLmdGhost,
TEth1Backend,
TEthSpec,
TEventHandler,
@ -578,7 +543,6 @@ impl<TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
>
where
TSlotClock: SlotClock + 'static,
TLmdGhost: LmdGhost<MemoryStore<TEthSpec>, TEthSpec> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, MemoryStore<TEthSpec>> + 'static,
TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static,
@ -594,13 +558,12 @@ where
}
}
impl<TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
impl<TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
ClientBuilder<
Witness<
DiskStore<TEthSpec>,
BackgroundMigrator<TEthSpec>,
TSlotClock,
TLmdGhost,
TEth1Backend,
TEthSpec,
TEventHandler,
@ -608,7 +571,6 @@ impl<TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
>
where
TSlotClock: SlotClock + 'static,
TLmdGhost: LmdGhost<DiskStore<TEthSpec>, TEthSpec> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, DiskStore<TEthSpec>> + 'static,
TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static,
@ -622,13 +584,12 @@ where
}
}
impl<TStore, TStoreMigrator, TSlotClock, TLmdGhost, TEthSpec, TEventHandler>
impl<TStore, TStoreMigrator, TSlotClock, TEthSpec, TEventHandler>
ClientBuilder<
Witness<
TStore,
TStoreMigrator,
TSlotClock,
TLmdGhost,
CachingEth1Backend<TEthSpec, TStore>,
TEthSpec,
TEventHandler,
@ -638,7 +599,6 @@ where
TStore: Store<TEthSpec> + 'static,
TStoreMigrator: store::Migrate<TStore, TEthSpec>,
TSlotClock: SlotClock + 'static,
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,
TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static,
{
@ -724,22 +684,13 @@ where
}
}
impl<TStore, TStoreMigrator, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
impl<TStore, TStoreMigrator, TEth1Backend, TEthSpec, TEventHandler>
ClientBuilder<
Witness<
TStore,
TStoreMigrator,
SystemTimeSlotClock,
TLmdGhost,
TEth1Backend,
TEthSpec,
TEventHandler,
>,
Witness<TStore, TStoreMigrator, SystemTimeSlotClock, TEth1Backend, TEthSpec, TEventHandler>,
>
where
TStore: Store<TEthSpec> + 'static,
TStoreMigrator: store::Migrate<TStore, TEthSpec>,
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static,

View File

@ -12,7 +12,7 @@ use libp2p::core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeError};
use libp2p::swarm::protocols_handler::{
KeepAlive, ProtocolsHandler, ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol,
};
use slog::{crit, debug, error};
use slog::{crit, debug, error, warn};
use smallvec::SmallVec;
use std::collections::hash_map::Entry;
use std::time::{Duration, Instant};
@ -319,8 +319,12 @@ where
substream: out,
request,
};
self.outbound_substreams
.insert(id, (awaiting_stream, delay_key));
if let Some(_) = self
.outbound_substreams
.insert(id, (awaiting_stream, delay_key))
{
warn!(self.log, "Duplicate outbound substream id"; "id" => format!("{:?}", id));
}
}
_ => { // a response is not expected, drop the stream for all other requests
}

View File

@ -70,7 +70,7 @@ fn process_batch<T: BeaconChainTypes>(
);
successful_block_import = true;
}
BlockProcessingOutcome::ParentUnknown { parent } => {
BlockProcessingOutcome::ParentUnknown { parent, .. } => {
// blocks should be sequential and all parents should exist
warn!(
log, "Parent block is unknown";

View File

@ -307,8 +307,8 @@ impl<T: BeaconChainTypes> ChainCollection<T> {
self.finalized_chains.retain(|chain| {
if chain.target_head_slot <= local_finalized_slot
|| beacon_chain
.block_root_tree
.is_known_block_root(&chain.target_head_root)
.fork_choice
.contains_block(&chain.target_head_root)
{
debug!(log, "Purging out of finalized chain"; "start_slot" => chain.start_slot, "end_slot" => chain.target_head_slot);
chain.status_peers(network);
@ -320,8 +320,8 @@ impl<T: BeaconChainTypes> ChainCollection<T> {
self.head_chains.retain(|chain| {
if chain.target_head_slot <= local_finalized_slot
|| beacon_chain
.block_root_tree
.is_known_block_root(&chain.target_head_root)
.fork_choice
.contains_block(&chain.target_head_root)
{
debug!(log, "Purging out of date head chain"; "start_slot" => chain.start_slot, "end_slot" => chain.target_head_slot);
chain.status_peers(network);

View File

@ -150,9 +150,7 @@ impl<T: BeaconChainTypes> RangeSync<T> {
self.chains.purge_outdated_chains(network, &self.log);
if remote_finalized_slot > local_info.head_slot
&& !chain
.block_root_tree
.is_known_block_root(&remote.finalized_root)
&& !chain.fork_choice.contains_block(&remote.finalized_root)
{
debug!(self.log, "Finalization sync peer joined"; "peer_id" => format!("{:?}", peer_id));
// Finalized chain search

View File

@ -0,0 +1,15 @@
use crate::response_builder::ResponseBuilder;
use crate::ApiResult;
use beacon_chain::{BeaconChain, BeaconChainTypes};
use hyper::{Body, Request};
use std::sync::Arc;
/// Returns the `proto_array` fork choice struct, encoded as JSON.
///
/// Useful for debugging or advanced inspection of the chain.
pub fn get_fork_choice<T: BeaconChainTypes>(
    req: Request<Body>,
    beacon_chain: Arc<BeaconChain<T>>,
) -> ApiResult {
    // Construct the responder first so a malformed request errors before we take the
    // fork choice read-lock.
    let responder = ResponseBuilder::new(&req)?;
    let proto_array = beacon_chain.fork_choice.core_proto_array();
    responder.body_no_ssz(&*proto_array)
}

View File

@ -4,6 +4,7 @@ mod macros;
extern crate lazy_static;
extern crate network as client_network;
mod advanced;
mod beacon;
pub mod config;
mod consensus;

View File

@ -1,6 +1,6 @@
use crate::{
beacon, consensus, error::ApiError, helpers, metrics, network, node, spec, validator, BoxFut,
NetworkChannel,
advanced, beacon, consensus, error::ApiError, helpers, metrics, network, node, spec, validator,
BoxFut, NetworkChannel,
};
use beacon_chain::{BeaconChain, BeaconChainTypes};
use client_network::Service as NetworkService;
@ -147,6 +147,11 @@ pub fn route<T: BeaconChainTypes>(
into_boxfut(spec::get_eth2_config::<T>(req, eth2_config))
}
// Methods for advanced parameters
(&Method::GET, "/advanced/fork_choice") => {
into_boxfut(advanced::get_fork_choice::<T>(req, beacon_chain))
}
(&Method::GET, "/metrics") => into_boxfut(metrics::get_prometheus::<T>(
req,
beacon_chain,

View File

@ -792,6 +792,30 @@ fn get_committees() {
assert_eq!(result, expected, "result should be as expected");
}
#[test]
fn get_fork_choice() {
let mut env = build_env();
let node = build_node(&mut env, testing_client_config());
let remote_node = node.remote_node().expect("should produce remote node");
let fork_choice = env
.runtime()
.block_on(remote_node.http.advanced().get_fork_choice())
.expect("should not error when getting fork choice");
assert_eq!(
fork_choice,
*node
.client
.beacon_chain()
.expect("node should have beacon chain")
.fork_choice
.core_proto_array(),
"result should be as expected"
);
}
fn compare_validator_response<T: EthSpec>(
state: &BeaconState<T>,
response: &ValidatorResponse,

View File

@ -11,7 +11,7 @@ pub use eth2_config::Eth2Config;
use beacon_chain::{
builder::Witness, eth1_chain::CachingEth1Backend, events::WebSocketSender,
lmd_ghost::ThreadSafeReducedTree, slot_clock::SystemTimeSlotClock,
slot_clock::SystemTimeSlotClock,
};
use clap::ArgMatches;
use config::get_configs;
@ -28,7 +28,6 @@ pub type ProductionClient<E> = Client<
DiskStore<E>,
BackgroundMigrator<E>,
SystemTimeSlotClock,
ThreadSafeReducedTree<DiskStore<E>, E>,
CachingEth1Backend<E, DiskStore<E>>,
E,
WebSocketSender<E>,

View File

@ -1,363 +0,0 @@
use itertools::Itertools;
use parking_lot::RwLock;
use ssz_derive::{Decode, Encode};
use std::collections::{HashMap, HashSet};
use std::iter::{self, FromIterator};
use types::{Hash256, Slot};
/// In-memory cache of all block roots post-finalization. Includes short-lived forks.
///
/// Used by fork choice to avoid reconstructing hot states just for their block roots.
// NOTE: could possibly be streamlined by combining with the head tracker and/or fork choice
#[derive(Debug)]
pub struct BlockRootTree {
/// Map from block root to its node. The interior `RwLock` allows mutation through `&self`.
nodes: RwLock<HashMap<Hash256, Node>>,
}
/// Manual `Clone`: the `RwLock` wrapper cannot be derived, so deep-copy the node map under a
/// read-lock and re-wrap it.
impl Clone for BlockRootTree {
    fn clone(&self) -> Self {
        let nodes = self.nodes.read().clone();
        Self {
            nodes: RwLock::new(nodes),
        }
    }
}
/// Errors arising from `BlockRootTree` operations.
#[derive(Debug, PartialEq)]
pub enum BlockRootTreeError {
/// The supplied previous (parent) block root was not present in the tree.
PrevUnknown(Hash256),
}
/// Data for a single `block_root` in the tree.
#[derive(Debug, Clone, Encode, Decode)]
struct Node {
/// Hash of the preceding block (should be the parent block).
///
/// A `previous` of `Hash256::zero` indicates the root of the tree.
// NOTE(review): `Hash256::zero()` doubles as a sentinel here; a genuine zero parent root would
// be indistinguishable from the tree root — presumed impossible in practice, confirm.
previous: Hash256,
/// Slot of this node's block.
slot: Slot,
}
impl BlockRootTree {
/// Create a new block root tree where `(root_hash, root_slot)` is considered finalized.
///
/// All subsequent blocks added should descend from the root block.
pub fn new(root_hash: Hash256, root_slot: Slot) -> Self {
Self {
// Seed the map with the single finalized root; `previous == Hash256::zero()` marks it as
// the root of the tree.
nodes: RwLock::new(HashMap::from_iter(iter::once((
root_hash,
Node {
previous: Hash256::zero(),
slot: root_slot,
},
)))),
}
}
/// Check if `block_root` exists in the tree.
pub fn is_known_block_root(&self, block_root: &Hash256) -> bool {
self.nodes.read().contains_key(block_root)
}
/// Add a new `block_root` to the tree.
///
/// Will return an error if `prev_block_root` doesn't exist in the tree.
pub fn add_block_root(
&self,
block_root: Hash256,
prev_block_root: Hash256,
block_slot: Slot,
) -> Result<(), BlockRootTreeError> {
let mut nodes = self.nodes.write();
if nodes.contains_key(&prev_block_root) {
nodes.insert(
block_root,
Node {
previous: prev_block_root,
slot: block_slot,
},
);
Ok(())
} else {
Err(BlockRootTreeError::PrevUnknown(prev_block_root))
}
}
/// Create a reverse iterator from `block_root` (inclusive).
///
/// Will skip slots, see `every_slot_iter_from` for a non-skipping variant.
pub fn iter_from(&self, block_root: Hash256) -> BlockRootTreeIter {
BlockRootTreeIter {
tree: self,
current_block_root: block_root,
}
}
/// Create a reverse iterator that yields a block root for every slot.
///
/// E.g. if slot 6 is skipped, this iterator will return the block root from slot 5 at slot 6.
pub fn every_slot_iter_from<'a>(
&'a self,
block_root: Hash256,
) -> impl Iterator<Item = (Hash256, Slot)> + 'a {
let mut block_roots = self.iter_from(block_root).peekable();
// Include the value for the first `block_root` if any, then fill in the skipped slots
// between each pair of previous block roots by duplicating the older root.
block_roots
.peek()
.cloned()
.into_iter()
.chain(block_roots.tuple_windows().flat_map(
|((_, high_slot), (low_hash, low_slot))| {
(low_slot.as_u64()..high_slot.as_u64())
.rev()
.map(move |slot| (low_hash, Slot::new(slot)))
},
))
}
/// Prune the tree.
///
/// Only keep block roots descended from `finalized_root`, which lie on a chain leading
/// to one of the heads contained in `heads`.
pub fn prune_to(&self, finalized_root: Hash256, heads: impl IntoIterator<Item = Hash256>) {
let mut keep = HashSet::new();
keep.insert(finalized_root);
for head_block_root in heads.into_iter() {
// Iterate backwards until we reach a portion of the chain that we've already decided
// to keep. This also discards the pre-finalization block roots.
let mut keep_head = false;
let head_blocks = self
.iter_from(head_block_root)
.map(|(block_root, _)| block_root)
.inspect(|block_root| {
if block_root == &finalized_root {
keep_head = true;
}
})
.take_while(|block_root| !keep.contains(&block_root))
.collect::<HashSet<_>>();
// If the head descends from the finalized root, keep it. Else throw it out.
if keep_head {
keep.extend(head_blocks);
}
}
self.nodes
.write()
.retain(|block_root, _| keep.contains(block_root));
}
pub fn as_ssz_container(&self) -> SszBlockRootTree {
SszBlockRootTree {
nodes: Vec::from_iter(self.nodes.read().clone()),
}
}
}
/// Simple (skipping) iterator for `BlockRootTree`.
#[derive(Debug)]
pub struct BlockRootTreeIter<'a> {
    /// The tree being walked.
    tree: &'a BlockRootTree,
    /// The root to yield next; the zero hash terminates iteration.
    current_block_root: Hash256,
}
impl<'a> Iterator for BlockRootTreeIter<'a> {
    type Item = (Hash256, Slot);

    /// Yields `(block_root, slot)` pairs walking backwards through the tree,
    /// stopping at the zero hash (the sentinel parent of the tree root) or at the
    /// first root not present in the tree.
    fn next(&mut self) -> Option<Self::Item> {
        // Genesis: the zero hash marks the end of the chain.
        if self.current_block_root.is_zero() {
            return None;
        }

        let block_root = self.current_block_root;
        let nodes = self.tree.nodes.read();
        let node = nodes.get(&block_root)?;
        self.current_block_root = node.previous;
        Some((block_root, node.slot))
    }
}
/// Serializable version of `BlockRootTree` that can be persisted to disk.
#[derive(Debug, Clone, Encode, Decode)]
pub struct SszBlockRootTree {
    /// Flattened `(block_root, node)` pairs; order is not significant, as both
    /// conversions go through a `HashMap`.
    nodes: Vec<(Hash256, Node)>,
}
/// Conversion back from the persisted form.
///
/// Implemented as `From` (rather than a direct `Into`, as before) per the standard
/// idiom (clippy: `from_over_into`): the blanket impl provides
/// `Into<BlockRootTree> for SszBlockRootTree` for free, so existing `.into()` call
/// sites are unaffected.
impl From<SszBlockRootTree> for BlockRootTree {
    fn from(ssz_tree: SszBlockRootTree) -> BlockRootTree {
        BlockRootTree {
            nodes: RwLock::new(HashMap::from_iter(ssz_tree.nodes)),
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;

    /// Builds a `Hash256` whose low eight bytes are `x` (big-endian), for readable fixtures.
    fn int_hash(x: u64) -> Hash256 {
        Hash256::from_low_u64_be(x)
    }

    /// Asserts the skipping iterator from `start_block_root` yields exactly `expected`.
    fn check_iter_from(
        block_tree: &BlockRootTree,
        start_block_root: Hash256,
        expected: &[(Hash256, Slot)],
    ) {
        assert_eq!(
            &block_tree.iter_from(start_block_root).collect::<Vec<_>>()[..],
            expected
        );
    }

    /// Asserts the every-slot iterator from `start_block_root` yields exactly `expected`.
    fn check_every_slot_iter_from(
        block_tree: &BlockRootTree,
        start_block_root: Hash256,
        expected: &[(Hash256, Slot)],
    ) {
        assert_eq!(
            &block_tree
                .every_slot_iter_from(start_block_root)
                .collect::<Vec<_>>()[..],
            expected
        );
    }

    /// A linear chain with one block per slot: both iterators agree, and pruning to the
    /// single head is a no-op.
    #[test]
    fn single_chain() {
        let block_tree = BlockRootTree::new(int_hash(1), Slot::new(1));
        for i in 2..100 {
            block_tree
                .add_block_root(int_hash(i), int_hash(i - 1), Slot::new(i))
                .expect("add_block_root ok");

            let expected = (1..=i)
                .rev()
                .map(|j| (int_hash(j), Slot::new(j)))
                .collect::<Vec<_>>();

            check_iter_from(&block_tree, int_hash(i), &expected);
            check_every_slot_iter_from(&block_tree, int_hash(i), &expected);

            // Still OK after pruning.
            block_tree.prune_to(int_hash(1), vec![int_hash(i)]);

            check_iter_from(&block_tree, int_hash(i), &expected);
            check_every_slot_iter_from(&block_tree, int_hash(i), &expected);
        }
    }

    /// A linear chain with a block only every second slot: the every-slot iterator must
    /// fill each gap by repeating the nearest older block root.
    #[test]
    fn skips_of_2() {
        let block_tree = BlockRootTree::new(int_hash(1), Slot::new(1));
        let step_length = 2u64;
        for i in (1 + step_length..100).step_by(step_length as usize) {
            block_tree
                .add_block_root(int_hash(i), int_hash(i - step_length), Slot::new(i))
                .expect("add_block_root ok");

            let sparse_expected = (1..=i)
                .rev()
                .step_by(step_length as usize)
                .map(|j| (int_hash(j), Slot::new(j)))
                .collect_vec();
            let every_slot_expected = (1..=i)
                .rev()
                .map(|j| {
                    // The nearest actual block at or below slot `j`.
                    let nearest = 1 + (j - 1) / step_length * step_length;
                    (int_hash(nearest), Slot::new(j))
                })
                .collect_vec();

            check_iter_from(&block_tree, int_hash(i), &sparse_expected);
            check_every_slot_iter_from(&block_tree, int_hash(i), &every_slot_expected);

            // Still OK after pruning.
            block_tree.prune_to(int_hash(1), vec![int_hash(i)]);

            check_iter_from(&block_tree, int_hash(i), &sparse_expected);
            check_every_slot_iter_from(&block_tree, int_hash(i), &every_slot_expected);
        }
    }

    /// Two equal-length forks sharing only the root block: pruning must keep exactly the
    /// chains leading to the requested heads, and advancing the finalized root onto one
    /// fork must discard the other entirely.
    #[test]
    fn prune_small_fork() {
        let tree = BlockRootTree::new(int_hash(1), Slot::new(1));

        // Space between fork hash values
        let offset = 1000;
        let num_blocks = 50;

        let fork1_start = 2;
        let fork2_start = 2 + offset;

        tree.add_block_root(int_hash(fork1_start), int_hash(1), Slot::new(2))
            .expect("add first block of left fork");
        tree.add_block_root(int_hash(fork2_start), int_hash(1), Slot::new(2))
            .expect("add first block of right fork");

        for i in 3..num_blocks {
            tree.add_block_root(int_hash(i), int_hash(i - 1), Slot::new(i))
                .expect("add block to left fork");
            tree.add_block_root(int_hash(i + offset), int_hash(i + offset - 1), Slot::new(i))
                .expect("add block to right fork");
        }

        let root = (int_hash(1), Slot::new(1));

        // Expected iterator output for each fork, highest slot first, ending at the root.
        let (all_fork1_blocks, all_fork2_blocks): (Vec<_>, Vec<_>) = (2..num_blocks)
            .rev()
            .map(|i| {
                (
                    (int_hash(i), Slot::new(i)),
                    (int_hash(i + offset), Slot::new(i)),
                )
            })
            .chain(iter::once((root, root)))
            .unzip();

        let fork1_head = int_hash(num_blocks - 1);
        let fork2_head = int_hash(num_blocks + offset - 1);

        // Check that pruning with both heads preserves both chains.
        let both_tree = tree.clone();
        both_tree.prune_to(root.0, vec![fork1_head, fork2_head]);
        check_iter_from(&both_tree, fork1_head, &all_fork1_blocks);
        check_iter_from(&both_tree, fork2_head, &all_fork2_blocks);

        // Check that pruning to either of the single chains leaves just that chain in the tree.
        let fork1_tree = tree.clone();
        fork1_tree.prune_to(root.0, vec![fork1_head]);
        check_iter_from(&fork1_tree, fork1_head, &all_fork1_blocks);
        check_iter_from(&fork1_tree, fork2_head, &[]);

        let fork2_tree = tree.clone();
        fork2_tree.prune_to(root.0, vec![fork2_head]);
        check_iter_from(&fork2_tree, fork1_head, &[]);
        check_iter_from(&fork2_tree, fork2_head, &all_fork2_blocks);

        // Check that advancing the finalized root onto one side completely removes the other
        // side.
        let fin_tree = tree;
        let prune_point = num_blocks / 2;
        let remaining_fork1_blocks = all_fork1_blocks
            .into_iter()
            .take_while(|(_, slot)| *slot >= prune_point)
            .collect_vec();
        fin_tree.prune_to(int_hash(prune_point), vec![fork1_head, fork2_head]);
        check_iter_from(&fin_tree, fork1_head, &remaining_fork1_blocks);
        check_iter_from(&fin_tree, fork2_head, &[]);
    }

    /// Iterating from the zero hash yields nothing, even though a node with that root
    /// exists — zero is the iterator's termination sentinel.
    #[test]
    fn iter_zero() {
        let block_tree = BlockRootTree::new(int_hash(0), Slot::new(0));
        assert_eq!(block_tree.iter_from(int_hash(0)).count(), 0);
        assert_eq!(block_tree.every_slot_iter_from(int_hash(0)).count(), 0);
    }
}

View File

@ -11,7 +11,6 @@
extern crate lazy_static;
mod block_at_slot;
pub mod block_root_tree;
pub mod chunked_iter;
pub mod chunked_vector;
pub mod config;
@ -29,7 +28,6 @@ pub mod migrate;
use std::sync::Arc;
pub use self::block_root_tree::{BlockRootTree, SszBlockRootTree};
pub use self::config::StoreConfig;
pub use self::hot_cold_store::HotColdDB as DiskStore;
pub use self::leveldb_store::LevelDB as SimpleDiskStore;

View File

@ -1,24 +0,0 @@
[package]
name = "lmd_ghost"
version = "0.1.0"
authors = ["Age Manning <Age@AgeManning.com>", "Paul Hauner <paul@sigmaprime.io>"]
edition = "2018"
[dependencies]
parking_lot = "0.9.0"
store = { path = "../../beacon_node/store" }
types = { path = "../types" }
itertools = "0.8.1"
eth2_ssz = "0.1.2"
eth2_ssz_derive = "0.1.0"
[dev-dependencies]
criterion = "0.3.0"
hex = "0.3"
yaml-rust = "0.4.3"
bls = { path = "../utils/bls" }
slot_clock = { path = "../utils/slot_clock" }
beacon_chain = { path = "../../beacon_node/beacon_chain" }
env_logger = "0.7.1"
lazy_static = "1.4.0"
rand = "0.7.2"

View File

@ -1,69 +0,0 @@
mod reduced_tree;
use std::sync::Arc;
use store::{BlockRootTree, Store};
use types::{BeaconBlock, EthSpec, Hash256, Slot};
pub use reduced_tree::ThreadSafeReducedTree;
/// Convenience alias: fork choice operations report failure as a `String` description.
pub type Result<T> = std::result::Result<T, String>;

// Note: the `PartialEq` bound is only required for testing. If it becomes a serious annoyance we
// can remove it.
/// The interface an LMD-GHOST fork choice implementation must provide.
pub trait LmdGhost<S: Store<E>, E: EthSpec>: PartialEq + Send + Sync + Sized {
    /// Create a new instance, with the given `store` and `finalized_root`.
    fn new(
        store: Arc<S>,
        block_root_tree: Arc<BlockRootTree>,
        finalized_block: &BeaconBlock<E>,
        finalized_root: Hash256,
    ) -> Self;

    /// Process an attestation message from some validator that attests to some `block_hash`
    /// representing a block at some `block_slot`.
    fn process_attestation(
        &self,
        validator_index: usize,
        block_hash: Hash256,
        block_slot: Slot,
    ) -> Result<()>;

    /// Process a block that was seen on the network.
    fn process_block(&self, block: &BeaconBlock<E>, block_hash: Hash256) -> Result<()>;

    /// Returns the head of the chain, starting the search at `start_block_root` and moving upwards
    /// (in block height).
    ///
    /// `weight` maps a validator index to its voting weight; the meaning of a `None`
    /// weight is left to the implementation.
    fn find_head<F>(
        &self,
        start_block_slot: Slot,
        start_block_root: Hash256,
        weight: F,
    ) -> Result<Hash256>
    where
        F: Fn(usize) -> Option<u64> + Copy;

    /// Provide an indication that the blockchain has been finalized at the given `finalized_block`.
    ///
    /// `finalized_block_root` must be the root of `finalized_block`.
    fn update_finalized_root(
        &self,
        finalized_block: &BeaconBlock<E>,
        finalized_block_root: Hash256,
    ) -> Result<()>;

    /// Returns the latest message for a given validator index.
    fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)>;

    /// Runs an integrity verification function on fork choice algorithm.
    ///
    /// Returns `Ok(())` if the underlying fork choice has maintained its integrity,
    /// `Err(description)` otherwise.
    fn verify_integrity(&self) -> Result<()>;

    /// Encode the `LmdGhost` instance to bytes.
    fn as_bytes(&self) -> Vec<u8>;

    /// Create a new `LmdGhost` instance given a `store` and encoded bytes.
    fn from_bytes(bytes: &[u8], store: Arc<S>, block_root_tree: Arc<BlockRootTree>)
        -> Result<Self>;
}

File diff suppressed because it is too large Load Diff

View File

@ -1,400 +0,0 @@
#![cfg(not(debug_assertions))]
#[macro_use]
extern crate lazy_static;
use beacon_chain::test_utils::{
generate_deterministic_keypairs, AttestationStrategy,
BeaconChainHarness as BaseBeaconChainHarness, BlockStrategy, HarnessType,
};
use lmd_ghost::{LmdGhost, ThreadSafeReducedTree as BaseThreadSafeReducedTree};
use rand::{prelude::*, rngs::StdRng};
use std::sync::Arc;
use store::{iter::AncestorIter, MemoryStore, Store};
use types::{BeaconBlock, EthSpec, Hash256, MinimalEthSpec, Slot};
// Should ideally be divisible by 3.
pub const VALIDATOR_COUNT: usize = 3 * 8;

// The spec used throughout these tests.
type TestEthSpec = MinimalEthSpec;
// The fork choice implementation under test, backed by an in-memory store.
type ThreadSafeReducedTree = BaseThreadSafeReducedTree<MemoryStore<TestEthSpec>, TestEthSpec>;
type BeaconChainHarness = BaseBeaconChainHarness<HarnessType<TestEthSpec>>;
// Convenience alias for `(block_root, slot)` pairs.
type RootAndSlot = (Hash256, Slot);

lazy_static! {
    /// A lazy-static instance of a `BeaconChainHarness` that contains two forks.
    ///
    /// Reduces test setup time by providing a common harness.
    static ref FORKED_HARNESS: ForkedHarness = ForkedHarness::new();
}
/// Contains a `BeaconChainHarness` that has two forks, caused by a validator skipping a slot and
/// then some validators building on one head and some on the other.
///
/// Care should be taken to ensure that the `ForkedHarness` does not expose any interior mutability
/// from it's fields. This would cause cross-contamination between tests when used with
/// `lazy_static`.
struct ForkedHarness {
    /// Private (not `pub`) because the `BeaconChainHarness` has interior mutability. We
    /// don't expose it to avoid contamination between tests.
    harness: BeaconChainHarness,
    pub genesis_block_root: Hash256,
    pub genesis_block: BeaconBlock<TestEthSpec>,
    // NOTE(review): `new` populates these two fields from `*_roots.last()`, which is the
    // *lowest*-slot root (the lists are head-first) — confirm that holding the oldest
    // root, despite the `_head` names, is intended.
    pub honest_head: RootAndSlot,
    pub faulty_head: RootAndSlot,
    /// Honest roots in reverse order (slot high to low)
    pub honest_roots: Vec<RootAndSlot>,
    /// Faulty roots in reverse order (slot high to low)
    pub faulty_roots: Vec<RootAndSlot>,
}
impl ForkedHarness {
    /// A new standard instance with constant parameters.
    pub fn new() -> Self {
        let harness = BeaconChainHarness::new(
            MinimalEthSpec,
            generate_deterministic_keypairs(VALIDATOR_COUNT),
        );

        // Move past the zero slot.
        harness.advance_slot();

        let delay = TestEthSpec::default_spec().min_attestation_inclusion_delay as usize;
        let initial_blocks = delay + 5;

        // Build an initial chain where all validators agree.
        harness.extend_chain(
            initial_blocks,
            BlockStrategy::OnCanonicalHead,
            AttestationStrategy::AllValidators,
        );

        // Split validators: the first two-thirds are "honest", the rest "faulty".
        let two_thirds = (VALIDATOR_COUNT / 3) * 2;
        let honest_validators: Vec<usize> = (0..two_thirds).collect();
        let faulty_validators: Vec<usize> = (two_thirds..VALIDATOR_COUNT).collect();
        let honest_fork_blocks = delay + 5;
        let faulty_fork_blocks = delay + 5;

        let (honest_head, faulty_head) = harness.generate_two_forks_by_skipping_a_block(
            &honest_validators,
            &faulty_validators,
            honest_fork_blocks,
            faulty_fork_blocks,
        );

        // Prepend each head to its ancestor-root list, so index 0 is the head (highest
        // slot) and the last element is the oldest root.
        let mut honest_roots = get_ancestor_roots(harness.chain.store.clone(), honest_head);

        honest_roots.insert(
            0,
            (honest_head, get_slot_for_block_root(&harness, honest_head)),
        );

        let mut faulty_roots = get_ancestor_roots(harness.chain.store.clone(), faulty_head);

        faulty_roots.insert(
            0,
            (faulty_head, get_slot_for_block_root(&harness, faulty_head)),
        );

        let genesis_block_root = harness.chain.genesis_block_root;
        let genesis_block = harness
            .chain
            .store
            .get::<BeaconBlock<TestEthSpec>>(&genesis_block_root)
            .expect("Genesis block should exist")
            .expect("DB should not error");

        Self {
            harness,
            genesis_block_root,
            genesis_block,
            // NOTE(review): `.last()` is the lowest-slot root (lists are head-first), so
            // these fields hold the oldest roots, not the fork tips — confirm intent.
            honest_head: *honest_roots.last().expect("Chain cannot be empty"),
            faulty_head: *faulty_roots.last().expect("Chain cannot be empty"),
            honest_roots,
            faulty_roots,
        }
    }

    /// Take a deep clone of the harness's in-memory store.
    pub fn store_clone(&self) -> MemoryStore<TestEthSpec> {
        (*self.harness.chain.store).clone()
    }

    /// Return a brand-new, empty fork choice with a reference to `harness.store`.
    pub fn new_fork_choice(&self) -> ThreadSafeReducedTree {
        // Take a full clone of the store built by the harness.
        //
        // Taking a clone here ensures that each fork choice gets it's own store so there is no
        // cross-contamination between tests.
        let store: MemoryStore<TestEthSpec> = self.store_clone();
        ThreadSafeReducedTree::new(
            Arc::new(store),
            self.harness.chain.block_root_tree.clone(),
            &self.genesis_block,
            self.genesis_block_root,
        )
    }

    /// All honest roots followed by all faulty roots.
    ///
    /// NOTE(review): `dedup` only removes *consecutive* duplicates, so shared ancestor
    /// roots present in both lists are only collapsed where the two lists abut —
    /// confirm this is the intended behaviour.
    pub fn all_block_roots(&self) -> Vec<RootAndSlot> {
        let mut all_roots = self.honest_roots.clone();
        all_roots.append(&mut self.faulty_roots.clone());

        all_roots.dedup();

        all_roots
    }

    /// Weight function assigning every validator a weight of `1`.
    pub fn weight_function(_validator_index: usize) -> Option<u64> {
        Some(1)
    }
}
/// Helper: returns all the ancestor roots and slots for a given block_root.
fn get_ancestor_roots<U: Store<TestEthSpec>>(
    store: Arc<U>,
    block_root: Hash256,
) -> Vec<(Hash256, Slot)> {
    let block = store
        .get::<BeaconBlock<TestEthSpec>>(&block_root)
        .expect("block should exist")
        .expect("store should not error");

    // The iterator yields ancestors only; callers prepend `block_root` itself
    // (see `ForkedHarness::new`).
    <BeaconBlock<TestEthSpec> as AncestorIter<_, _, _>>::try_iter_ancestor_roots(&block, store)
        .expect("should be able to create ancestor iter")
        .collect()
}
/// Helper: returns the slot for some block_root.
fn get_slot_for_block_root(harness: &BeaconChainHarness, block_root: Hash256) -> Slot {
    // Fetch the full block from the store and read its slot.
    let block = harness
        .chain
        .store
        .get::<BeaconBlock<TestEthSpec>>(&block_root)
        .expect("head block should exist")
        .expect("DB should not error");
    block.slot
}
/// Number of fresh fork choice instances exercised by `random_scenario`.
const RANDOM_ITERATIONS: usize = 50;
/// Number of random attestations applied to each instance.
const RANDOM_ACTIONS_PER_ITERATION: usize = 100;
/// Repeatedly creates a fresh fork choice and applies a burst of attestations from random
/// validators to random blocks, verifying tree integrity after every attestation.
///
/// (The previous doc comment here was a copy of the "single voter in reverse" test's
/// comment and did not describe this randomized test.)
#[test]
fn random_scenario() {
    let harness = &FORKED_HARNESS;
    let block_roots = harness.all_block_roots();
    let validators: Vec<usize> = (0..VALIDATOR_COUNT).collect();
    let mut rng = StdRng::seed_from_u64(9375205782030385); // Keyboard mash.

    for _ in 0..RANDOM_ITERATIONS {
        let lmd = harness.new_fork_choice();

        for _ in 0..RANDOM_ACTIONS_PER_ITERATION {
            // Pick a random (root, slot) pair and a random validator.
            let (root, slot) = block_roots[rng.next_u64() as usize % block_roots.len()];
            let validator_index = validators[rng.next_u64() as usize % validators.len()];

            lmd.process_attestation(validator_index, root, slot)
                .expect("fork choice should accept randomly-placed attestations");

            assert_eq!(
                lmd.verify_integrity(),
                Ok(()),
                "New tree should have integrity"
            );
        }
    }
}
/// Create a single LMD instance and have one validator vote in reverse (highest to lowest slot)
/// down the chain.
#[test]
fn single_voter_persistent_instance_reverse_order() {
    let harness = &FORKED_HARNESS;

    let lmd = harness.new_fork_choice();

    assert_eq!(
        lmd.verify_integrity(),
        Ok(()),
        "New tree should have integrity"
    );

    // `honest_roots` is ordered highest-slot first, so this votes in reverse.
    for (root, slot) in &harness.honest_roots {
        lmd.process_attestation(0, *root, *slot)
            .expect("fork choice should accept attestations to honest roots in reverse");

        assert_eq!(
            lmd.verify_integrity(),
            Ok(()),
            "Tree integrity should be maintained whilst processing attestations"
        );
    }

    // The honest head should be selected.
    let (head_root, _) = harness.honest_roots.first().unwrap();
    let (finalized_root, finalized_slot) = harness.honest_roots.last().unwrap();

    assert_eq!(
        lmd.find_head(
            *finalized_slot,
            *finalized_root,
            ForkedHarness::weight_function
        ),
        Ok(*head_root),
        "Honest head should be selected"
    );
}
/// A single validator applies a single vote to each block in the honest fork, using a new tree
/// each time.
#[test]
fn single_voter_many_instance_honest_blocks_voting_forwards() {
    let harness = &FORKED_HARNESS;

    // `.rev()` walks lowest slot first, i.e. forwards through the chain.
    for (root, slot) in harness.honest_roots.iter().rev() {
        let lmd = harness.new_fork_choice();
        lmd.process_attestation(0, *root, *slot)
            .expect("fork choice should accept attestations to honest roots");

        assert_eq!(
            lmd.verify_integrity(),
            Ok(()),
            "Tree integrity should be maintained whilst processing attestations"
        );
    }
}
/// Same as above, but in reverse order (votes on the highest honest block first).
#[test]
fn single_voter_many_instance_honest_blocks_voting_in_reverse() {
    let harness = &FORKED_HARNESS;

    // Same as above, but in reverse order (votes on the highest honest block first):
    // `honest_roots` is already ordered highest-slot first.
    for (root, slot) in &harness.honest_roots {
        let lmd = harness.new_fork_choice();
        lmd.process_attestation(0, *root, *slot)
            .expect("fork choice should accept attestations to honest roots in reverse");

        assert_eq!(
            lmd.verify_integrity(),
            Ok(()),
            "Tree integrity should be maintained whilst processing attestations"
        );
    }
}
/// A single validator applies a single vote to each block in the faulty fork, using a new tree
/// each time.
#[test]
fn single_voter_many_instance_faulty_blocks_voting_forwards() {
    let harness = &FORKED_HARNESS;

    // `.rev()` walks lowest slot first, i.e. forwards through the chain.
    for (root, slot) in harness.faulty_roots.iter().rev() {
        let lmd = harness.new_fork_choice();
        lmd.process_attestation(0, *root, *slot)
            .expect("fork choice should accept attestations to faulty roots");

        assert_eq!(
            lmd.verify_integrity(),
            Ok(()),
            "Tree integrity should be maintained whilst processing attestations"
        );
    }
}
/// Same as above, but in reverse order (votes on the highest faulty block first).
#[test]
fn single_voter_many_instance_faulty_blocks_voting_in_reverse() {
    let harness = &FORKED_HARNESS;

    // `faulty_roots` is already ordered highest-slot first.
    for (root, slot) in &harness.faulty_roots {
        let lmd = harness.new_fork_choice();
        lmd.process_attestation(0, *root, *slot)
            .expect("fork choice should accept attestations to faulty roots in reverse");

        assert_eq!(
            lmd.verify_integrity(),
            Ok(()),
            "Tree integrity should be maintained whilst processing attestations"
        );
    }
}
/// Ensure that votes with slots before the justified slot are not counted.
#[test]
fn discard_votes_before_justified_slot() {
    let harness = &FORKED_HARNESS;

    let lmd = harness.new_fork_choice();
    // NOTE(review): this takes `honest_roots.last()`, the lowest-slot honest root, and
    // treats it as genesis — confirm it always coincides with the genesis block.
    let (genesis_root, genesis_slot) = *harness.honest_roots.last().unwrap();

    // Add attestations from all validators for all honest blocks.
    for (root, slot) in harness.honest_roots.iter().rev() {
        for i in 0..VALIDATOR_COUNT {
            lmd.process_attestation(i, *root, *slot)
                .expect("should accept attestations in increasing order");
        }

        // Head starting from 0 checkpoint (genesis) should be current root
        assert_eq!(
            lmd.find_head(genesis_slot, genesis_root, ForkedHarness::weight_function),
            Ok(*root),
            "Honest head should be selected"
        );

        // Head from one slot after genesis should still be genesis, because the successor
        // block of the genesis block has slot `genesis_slot + 1` which isn't greater than
        // the slot we're starting from. This is a very artificial test, but one that's easy to
        // describe.
        assert_eq!(
            lmd.find_head(
                genesis_slot + 1,
                genesis_root,
                ForkedHarness::weight_function
            ),
            Ok(genesis_root)
        );
    }
}
/// Ensures that the finalized root can be set to all values in `roots`.
///
/// `roots` is walked in reverse (lowest slot first), so the finalized root only ever
/// advances. Tree integrity is verified after every update.
fn test_update_finalized_root(roots: &[(Hash256, Slot)]) {
    let harness = &FORKED_HARNESS;

    let lmd = harness.new_fork_choice();
    // Clone the store once, outside the loop: `store_clone` duplicates the entire
    // `MemoryStore`, and the previous per-iteration clone was pure waste since the
    // loop only reads from it.
    let store = harness.store_clone();

    for (root, _slot) in roots.iter().rev() {
        let block = store
            .get::<BeaconBlock<TestEthSpec>>(root)
            .expect("block should exist")
            .expect("db should not error");

        // Message corrected: this helper is used for both the honest and faulty forks,
        // so the old "for faulty fork" wording was misleading.
        lmd.update_finalized_root(&block, *root)
            .expect("finalized root should update");

        assert_eq!(
            lmd.verify_integrity(),
            Ok(()),
            "Tree integrity should be maintained after updating the finalized root"
        );
    }
}
/// Iterates from low-to-high slot through the faulty roots, updating the finalized root.
#[test]
fn update_finalized_root_faulty() {
    let harness = &FORKED_HARNESS;

    // `faulty_roots` is high-to-low; the helper reverses it to walk low-to-high.
    test_update_finalized_root(&harness.faulty_roots)
}
/// Iterates from low-to-high slot through the honest roots, updating the finalized root.
#[test]
fn update_finalized_root_honest() {
    let harness = &FORKED_HARNESS;

    // `honest_roots` is high-to-low; the helper reverses it to walk low-to-high.
    test_update_finalized_root(&harness.honest_roots)
}

View File

@ -0,0 +1 @@
*.yaml

View File

@ -0,0 +1,19 @@
[package]
name = "proto_array_fork_choice"
version = "0.1.0"
authors = ["Paul Hauner <paul@sigmaprime.io>"]
edition = "2018"
[[bin]]
name = "proto_array_fork_choice"
path = "src/bin.rs"
[dependencies]
parking_lot = "0.9.0"
types = { path = "../types" }
itertools = "0.8.1"
eth2_ssz = "0.1.2"
eth2_ssz_derive = "0.1.0"
serde = "1.0.102"
serde_derive = "1.0.102"
serde_yaml = "0.8.11"

View File

@ -0,0 +1,15 @@
use proto_array_fork_choice::fork_choice_test_definition::*;
use serde_yaml;
use std::fs::File;
/// Generates each fork choice test definition and dumps it as YAML into the current
/// working directory.
fn main() {
    write_test_def_to_yaml("votes.yaml", get_votes_test_definition());
    write_test_def_to_yaml("no_votes.yaml", get_no_votes_test_definition());
    write_test_def_to_yaml("ffg_01.yaml", get_ffg_case_01_test_definition());
    write_test_def_to_yaml("ffg_02.yaml", get_ffg_case_02_test_definition());
}
/// Serializes `def` as YAML into a new file at `filename`, overwriting any existing file.
///
/// Panics on failure — this is a one-shot generator binary, but the panic messages now
/// include the offending filename (and error) so a failure is diagnosable.
fn write_test_def_to_yaml(filename: &str, def: ForkChoiceTestDefinition) {
    let file = File::create(filename)
        .unwrap_or_else(|e| panic!("Should be able to create file {}: {:?}", filename, e));
    serde_yaml::to_writer(file, &def)
        .unwrap_or_else(|e| panic!("Should be able to write YAML to {}: {:?}", filename, e));
}

View File

@ -0,0 +1,33 @@
use types::{Epoch, Hash256};
/// Errors returned by the proto-array fork choice.
///
/// Index-carrying variants report the offending proto-array index; root-carrying
/// variants report the offending block root.
#[derive(Clone, PartialEq, Debug)]
pub enum Error {
    FinalizedNodeUnknown(Hash256),
    JustifiedNodeUnknown(Hash256),
    InvalidFinalizedRootChange,
    InvalidNodeIndex(usize),
    InvalidParentIndex(usize),
    InvalidBestChildIndex(usize),
    InvalidJustifiedIndex(usize),
    InvalidBestDescendant(usize),
    InvalidParentDelta(usize),
    InvalidNodeDelta(usize),
    DeltaOverflow(usize),
    IndexOverflow(&'static str),
    /// The lengths of the `deltas` and `indices` collections disagree.
    InvalidDeltaLen {
        deltas: usize,
        indices: usize,
    },
    /// A new finalized epoch was older than the current one (finalization may only advance).
    RevertedFinalizedEpoch {
        current_finalized_epoch: Epoch,
        new_finalized_epoch: Epoch,
    },
    /// Details of a computed head block that failed the justified/finalized checks;
    /// carries both the expected checkpoints and the head's actual ones.
    InvalidBestNode {
        start_root: Hash256,
        justified_epoch: Epoch,
        finalized_epoch: Epoch,
        head_root: Hash256,
        head_justified_epoch: Epoch,
        head_finalized_epoch: Epoch,
    },
}

View File

@ -0,0 +1,181 @@
mod ffg_updates;
mod no_votes;
mod votes;
use crate::proto_array_fork_choice::ProtoArrayForkChoice;
use serde_derive::{Deserialize, Serialize};
use types::{Epoch, Hash256, Slot};
pub use ffg_updates::*;
pub use no_votes::*;
pub use votes::*;
/// A single step in a declarative fork choice test: either a mutation of the fork
/// choice (process a block/attestation, prune) or an assertion about `find_head`.
///
/// Executed by `ForkChoiceTestDefinition::run`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum Operation {
    /// Run `find_head` with the given parameters and assert it returns `expected_head`.
    FindHead {
        justified_epoch: Epoch,
        justified_root: Hash256,
        finalized_epoch: Epoch,
        justified_state_balances: Vec<u64>,
        expected_head: Hash256,
    },
    /// Run `find_head` with the given parameters and assert it returns an error.
    InvalidFindHead {
        justified_epoch: Epoch,
        justified_root: Hash256,
        finalized_epoch: Epoch,
        justified_state_balances: Vec<u64>,
    },
    /// Register a block with the fork choice.
    ProcessBlock {
        slot: Slot,
        root: Hash256,
        parent_root: Hash256,
        justified_epoch: Epoch,
        finalized_epoch: Epoch,
    },
    /// Register an attestation (vote) for `block_root` by `validator_index`.
    ProcessAttestation {
        validator_index: usize,
        block_root: Hash256,
        target_epoch: Epoch,
    },
    /// Set the prune threshold, prune at `finalized_root`, then assert the number of
    /// nodes remaining in the proto-array.
    Prune {
        finalized_root: Hash256,
        prune_threshold: usize,
        expected_len: usize,
    },
}
/// A declarative fork choice test: the initial finalized/justified state plus an
/// ordered list of operations to apply and assertions to check (see `run`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ForkChoiceTestDefinition {
    /// Slot of the finalized block the fork choice is initialized with.
    pub finalized_block_slot: Slot,
    pub justified_epoch: Epoch,
    pub finalized_epoch: Epoch,
    /// Root of the finalized block the fork choice is initialized with.
    pub finalized_root: Hash256,
    /// Operations, applied in order by `run`.
    pub operations: Vec<Operation>,
}
impl ForkChoiceTestDefinition {
    /// Runs every operation in `self.operations`, in order, against a fresh
    /// `ProtoArrayForkChoice` seeded from the definition's finalized/justified info.
    ///
    /// Panics (naming the offending operation index) on the first failure.
    pub fn run(self) {
        let fork_choice = ProtoArrayForkChoice::new(
            self.finalized_block_slot,
            self.justified_epoch,
            self.finalized_epoch,
            self.finalized_root,
        )
        .expect("should create fork choice struct");

        for (op_index, op) in self.operations.into_iter().enumerate() {
            // Clone so the original `op` remains available for assertion messages.
            match op.clone() {
                Operation::FindHead {
                    justified_epoch,
                    justified_root,
                    finalized_epoch,
                    justified_state_balances,
                    expected_head,
                } => {
                    let head = fork_choice
                        .find_head(
                            justified_epoch,
                            justified_root,
                            finalized_epoch,
                            &justified_state_balances,
                        )
                        // `unwrap_or_else` avoids building the panic message on the
                        // success path (clippy: `expect_fun_call`) and includes the error.
                        .unwrap_or_else(|e| {
                            panic!("find_head op at index {} returned error: {:?}", op_index, e)
                        });

                    assert_eq!(
                        head, expected_head,
                        "Operation at index {} failed checks. Operation: {:?}",
                        op_index, op
                    );
                    check_bytes_round_trip(&fork_choice);
                }
                Operation::InvalidFindHead {
                    justified_epoch,
                    justified_root,
                    finalized_epoch,
                    justified_state_balances,
                } => {
                    let result = fork_choice.find_head(
                        justified_epoch,
                        justified_root,
                        finalized_epoch,
                        &justified_state_balances,
                    );

                    assert!(
                        result.is_err(),
                        "InvalidFindHead op at index {} should have returned an error. Operation: {:?}",
                        op_index,
                        op
                    );
                    check_bytes_round_trip(&fork_choice);
                }
                Operation::ProcessBlock {
                    slot,
                    root,
                    parent_root,
                    justified_epoch,
                    finalized_epoch,
                } => {
                    fork_choice
                        .process_block(slot, root, parent_root, justified_epoch, finalized_epoch)
                        .unwrap_or_else(|e| {
                            panic!(
                                "process_block op at index {} returned error: {:?}",
                                op_index, e
                            )
                        });
                    check_bytes_round_trip(&fork_choice);
                }
                Operation::ProcessAttestation {
                    validator_index,
                    block_root,
                    target_epoch,
                } => {
                    fork_choice
                        .process_attestation(validator_index, block_root, target_epoch)
                        .unwrap_or_else(|e| {
                            panic!(
                                "process_attestation op at index {} returned error: {:?}",
                                op_index, e
                            )
                        });
                    check_bytes_round_trip(&fork_choice);
                }
                Operation::Prune {
                    finalized_root,
                    prune_threshold,
                    expected_len,
                } => {
                    fork_choice.set_prune_threshold(prune_threshold);
                    fork_choice
                        .maybe_prune(finalized_root)
                        // FIX: the old message was a plain `expect` string containing a
                        // `{}` that was never interpolated, and named the wrong op.
                        .unwrap_or_else(|e| {
                            panic!("prune op at index {} returned error: {:?}", op_index, e)
                        });

                    // Ensure the tree was pruned to the expected number of nodes (the old
                    // comment claimed "no pruning happened", which contradicted the check).
                    //
                    // NOTE(review): unlike the other arms, this one does not call
                    // `check_bytes_round_trip` — confirm whether that is intentional.
                    assert_eq!(
                        fork_choice.len(),
                        expected_len,
                        "Prune op at index {} failed with {} instead of {}",
                        op_index,
                        fork_choice.len(),
                        expected_len
                    );
                }
            }
        }
    }
}
/// Gives a `Hash256` whose low eight bytes are the big-endian encoding of `i`.
///
/// Note: `get_hash(0)` yields the zero hash (used as the genesis/finalized root in
/// these tests); every other input yields a non-zero hash. (The previous comment
/// incorrectly singled out `usize::max_value` as the zero case.)
fn get_hash(i: u64) -> Hash256 {
    Hash256::from_low_u64_be(i)
}
/// Asserts that `original` survives an `as_bytes` / `from_bytes` round trip unchanged.
fn check_bytes_round_trip(original: &ProtoArrayForkChoice) {
    let bytes = original.as_bytes();
    let decoded =
        ProtoArrayForkChoice::from_bytes(&bytes).expect("fork choice should decode from bytes");

    // `assert!` with explicit `==` rather than `assert_eq!` — presumably because
    // `ProtoArrayForkChoice` lacks a `Debug` impl; confirm.
    assert!(
        *original == decoded,
        "fork choice should encode and decode without change"
    );
}

View File

@ -0,0 +1,452 @@
use super::*;
/// Declarative test covering FFG (justification/finalization) filtering on a linear
/// chain: blocks carry increasing justified/finalized epochs, and `find_head` is
/// checked from several different justified checkpoints.
pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition {
    let balances = vec![1; 2];
    let mut ops = vec![];

    // Ensure that the head starts at the finalized block.
    ops.push(Operation::FindHead {
        justified_epoch: Epoch::new(0),
        justified_root: get_hash(0),
        finalized_epoch: Epoch::new(0),
        justified_state_balances: balances.clone(),
        expected_head: get_hash(0),
    });

    // Build the following tree (stick? lol).
    //
    //            0 <- just: 0, fin: 0
    //            |
    //            1 <- just: 0, fin: 0
    //            |
    //            2 <- just: 1, fin: 0
    //            |
    //            3 <- just: 2, fin: 1
    ops.push(Operation::ProcessBlock {
        slot: Slot::new(1),
        root: get_hash(1),
        parent_root: get_hash(0),
        justified_epoch: Epoch::new(0),
        finalized_epoch: Epoch::new(0),
    });
    ops.push(Operation::ProcessBlock {
        slot: Slot::new(2),
        root: get_hash(2),
        parent_root: get_hash(1),
        justified_epoch: Epoch::new(1),
        finalized_epoch: Epoch::new(0),
    });
    ops.push(Operation::ProcessBlock {
        slot: Slot::new(3),
        root: get_hash(3),
        parent_root: get_hash(2),
        justified_epoch: Epoch::new(2),
        finalized_epoch: Epoch::new(1),
    });

    // Ensure that with justified epoch 0 we find 3
    //
    //            0 <- start
    //            |
    //            1
    //            |
    //            2
    //            |
    //            3 <- head
    ops.push(Operation::FindHead {
        justified_epoch: Epoch::new(0),
        justified_root: get_hash(0),
        finalized_epoch: Epoch::new(0),
        justified_state_balances: balances.clone(),
        expected_head: get_hash(3),
    });

    // Ensure that with justified epoch 1 we find 2
    //
    //            0
    //            |
    //            1
    //            |
    //            2 <- start
    //            |
    //            3 <- head
    ops.push(Operation::FindHead {
        justified_epoch: Epoch::new(1),
        justified_root: get_hash(2),
        finalized_epoch: Epoch::new(0),
        justified_state_balances: balances.clone(),
        expected_head: get_hash(2),
    });

    // Ensure that with justified epoch 2 we find 3
    //
    //            0
    //            |
    //            1
    //            |
    //            2
    //            |
    //            3 <- start + head
    ops.push(Operation::FindHead {
        justified_epoch: Epoch::new(2),
        justified_root: get_hash(3),
        finalized_epoch: Epoch::new(1),
        justified_state_balances: balances.clone(),
        expected_head: get_hash(3),
    });

    // END OF TESTS

    // NOTE(review): the definition-level epochs here are (just: 1, fin: 1) while the
    // first `FindHead` op uses (just: 0, fin: 0) — confirm this mismatch is intended.
    ForkChoiceTestDefinition {
        finalized_block_slot: Slot::new(0),
        justified_epoch: Epoch::new(1),
        finalized_epoch: Epoch::new(1),
        finalized_root: get_hash(0),
        operations: ops,
    }
}
/// Returns a fork choice test definition exercising FFG filtering across two competing
/// branches whose blocks carry differing justified/finalized epochs.
pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition {
let balances = vec![1; 2];
let mut ops = vec![];
// Ensure that the head starts at the finalized block.
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(1),
justified_root: get_hash(0),
finalized_epoch: Epoch::new(1),
justified_state_balances: balances.clone(),
expected_head: get_hash(0),
});
// Build the following tree.
//
// 0
// / \
// just: 0, fin: 0 -> 1 2 <- just: 0, fin: 0
// | |
// just: 1, fin: 0 -> 3 4 <- just: 0, fin: 0
// | |
// just: 1, fin: 0 -> 5 6 <- just: 0, fin: 0
// | |
// just: 1, fin: 0 -> 7 8 <- just: 1, fin: 0
// | |
// just: 2, fin: 0 -> 9 10 <- just: 2, fin: 0
// Left branch
ops.push(Operation::ProcessBlock {
slot: Slot::new(1),
root: get_hash(1),
parent_root: get_hash(0),
justified_epoch: Epoch::new(0),
finalized_epoch: Epoch::new(0),
});
ops.push(Operation::ProcessBlock {
slot: Slot::new(2),
root: get_hash(3),
parent_root: get_hash(1),
justified_epoch: Epoch::new(1),
finalized_epoch: Epoch::new(0),
});
ops.push(Operation::ProcessBlock {
slot: Slot::new(3),
root: get_hash(5),
parent_root: get_hash(3),
justified_epoch: Epoch::new(1),
finalized_epoch: Epoch::new(0),
});
ops.push(Operation::ProcessBlock {
slot: Slot::new(4),
root: get_hash(7),
parent_root: get_hash(5),
justified_epoch: Epoch::new(1),
finalized_epoch: Epoch::new(0),
});
ops.push(Operation::ProcessBlock {
slot: Slot::new(4),
root: get_hash(9),
parent_root: get_hash(7),
justified_epoch: Epoch::new(2),
finalized_epoch: Epoch::new(0),
});
// Right branch
ops.push(Operation::ProcessBlock {
slot: Slot::new(1),
root: get_hash(2),
parent_root: get_hash(0),
justified_epoch: Epoch::new(0),
finalized_epoch: Epoch::new(0),
});
ops.push(Operation::ProcessBlock {
slot: Slot::new(2),
root: get_hash(4),
parent_root: get_hash(2),
justified_epoch: Epoch::new(0),
finalized_epoch: Epoch::new(0),
});
ops.push(Operation::ProcessBlock {
slot: Slot::new(3),
root: get_hash(6),
parent_root: get_hash(4),
justified_epoch: Epoch::new(0),
finalized_epoch: Epoch::new(0),
});
ops.push(Operation::ProcessBlock {
slot: Slot::new(4),
root: get_hash(8),
parent_root: get_hash(6),
justified_epoch: Epoch::new(1),
finalized_epoch: Epoch::new(0),
});
ops.push(Operation::ProcessBlock {
slot: Slot::new(4),
root: get_hash(10),
parent_root: get_hash(8),
justified_epoch: Epoch::new(2),
finalized_epoch: Epoch::new(0),
});
// Ensure that if we start at 0 we find 10 (just: 0, fin: 0).
//
// 0 <-- start
// / \
// 1 2
// | |
// 3 4
// | |
// 5 6
// | |
// 7 8
// | |
// 9 10 <-- head
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(0),
justified_root: get_hash(0),
finalized_epoch: Epoch::new(0),
justified_state_balances: balances.clone(),
expected_head: get_hash(10),
});
// Same as above, but with justified epoch 2.
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(2),
justified_root: get_hash(0),
finalized_epoch: Epoch::new(0),
justified_state_balances: balances.clone(),
expected_head: get_hash(10),
});
// Same as above, but with justified epoch 3 (should be invalid).
ops.push(Operation::InvalidFindHead {
justified_epoch: Epoch::new(3),
justified_root: get_hash(0),
finalized_epoch: Epoch::new(0),
justified_state_balances: balances.clone(),
});
// Add a vote to 1.
//
// 0
// / \
// +1 vote -> 1 2
// | |
// 3 4
// | |
// 5 6
// | |
// 7 8
// | |
// 9 10
ops.push(Operation::ProcessAttestation {
validator_index: 0,
block_root: get_hash(1),
target_epoch: Epoch::new(0),
});
// Ensure that if we start at 0 we find 9 (just: 0, fin: 0).
//
// 0 <-- start
// / \
// 1 2
// | |
// 3 4
// | |
// 5 6
// | |
// 7 8
// | |
// head -> 9 10
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(0),
justified_root: get_hash(0),
finalized_epoch: Epoch::new(0),
justified_state_balances: balances.clone(),
expected_head: get_hash(9),
});
// Same as above but justified epoch 2.
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(2),
justified_root: get_hash(0),
finalized_epoch: Epoch::new(0),
justified_state_balances: balances.clone(),
expected_head: get_hash(9),
});
// Same as above but justified epoch 3 (should fail).
ops.push(Operation::InvalidFindHead {
justified_epoch: Epoch::new(3),
justified_root: get_hash(0),
finalized_epoch: Epoch::new(0),
justified_state_balances: balances.clone(),
});
// Add a vote to 2.
//
// 0
// / \
// 1 2 <- +1 vote
// | |
// 3 4
// | |
// 5 6
// | |
// 7 8
// | |
// 9 10
ops.push(Operation::ProcessAttestation {
validator_index: 1,
block_root: get_hash(2),
target_epoch: Epoch::new(0),
});
// Ensure that if we start at 0 we find 10 (just: 0, fin: 0).
//
// 0 <-- start
// / \
// 1 2
// | |
// 3 4
// | |
// 5 6
// | |
// 7 8
// | |
// 9 10 <-- head
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(0),
justified_root: get_hash(0),
finalized_epoch: Epoch::new(0),
justified_state_balances: balances.clone(),
expected_head: get_hash(10),
});
// Same as above but justified epoch 2.
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(2),
justified_root: get_hash(0),
finalized_epoch: Epoch::new(0),
justified_state_balances: balances.clone(),
expected_head: get_hash(10),
});
// Same as above but justified epoch 3 (should fail).
ops.push(Operation::InvalidFindHead {
justified_epoch: Epoch::new(3),
justified_root: get_hash(0),
finalized_epoch: Epoch::new(0),
justified_state_balances: balances.clone(),
});
// Ensure that if we start at 1 we find 9 (just: 0, fin: 0).
//
// 0
// / \
// start-> 1 2
// | |
// 3 4
// | |
// 5 6
// | |
// 7 8
// | |
// head -> 9 10
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(0),
justified_root: get_hash(1),
finalized_epoch: Epoch::new(0),
justified_state_balances: balances.clone(),
expected_head: get_hash(9),
});
// Same as above but justified epoch 2.
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(2),
justified_root: get_hash(1),
finalized_epoch: Epoch::new(0),
justified_state_balances: balances.clone(),
expected_head: get_hash(9),
});
// Same as above but justified epoch 3 (should fail).
ops.push(Operation::InvalidFindHead {
justified_epoch: Epoch::new(3),
justified_root: get_hash(1),
finalized_epoch: Epoch::new(0),
justified_state_balances: balances.clone(),
});
// Ensure that if we start at 2 we find 10 (just: 0, fin: 0).
//
// 0
// / \
// 1 2 <- start
// | |
// 3 4
// | |
// 5 6
// | |
// 7 8
// | |
// 9 10 <- head
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(0),
justified_root: get_hash(2),
finalized_epoch: Epoch::new(0),
justified_state_balances: balances.clone(),
expected_head: get_hash(10),
});
// Same as above but justified epoch 2.
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(2),
justified_root: get_hash(2),
finalized_epoch: Epoch::new(0),
justified_state_balances: balances.clone(),
expected_head: get_hash(10),
});
// Same as above but justified epoch 3 (should fail).
ops.push(Operation::InvalidFindHead {
justified_epoch: Epoch::new(3),
justified_root: get_hash(2),
finalized_epoch: Epoch::new(0),
justified_state_balances: balances.clone(),
});
// END OF TESTS
ForkChoiceTestDefinition {
finalized_block_slot: Slot::new(0),
justified_epoch: Epoch::new(1),
finalized_epoch: Epoch::new(1),
finalized_root: get_hash(0),
operations: ops,
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Runs the FFG case 01 scenario end-to-end.
    #[test]
    fn ffg_case_01() {
        get_ffg_case_01_test_definition().run();
    }

    /// Runs the FFG case 02 scenario end-to-end.
    #[test]
    fn ffg_case_02() {
        get_ffg_case_02_test_definition().run();
    }
}

View File

@ -0,0 +1,237 @@
use super::*;
/// Returns a fork choice test definition in which no attestations are processed (all
/// validator balances are zero), exercising block insertion and justified-epoch filtering.
pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition {
let balances = vec![0; 16];
let operations = vec![
// Check that the head is the finalized block.
Operation::FindHead {
justified_epoch: Epoch::new(1),
justified_root: Hash256::zero(),
finalized_epoch: Epoch::new(1),
justified_state_balances: balances.clone(),
expected_head: Hash256::zero(),
},
// Add block 2
//
// 0
// /
// 2
Operation::ProcessBlock {
slot: Slot::new(0),
root: get_hash(2),
parent_root: get_hash(0),
justified_epoch: Epoch::new(1),
finalized_epoch: Epoch::new(1),
},
// Ensure the head is 2
//
// 0
// /
// 2 <- head
Operation::FindHead {
justified_epoch: Epoch::new(1),
justified_root: Hash256::zero(),
finalized_epoch: Epoch::new(1),
justified_state_balances: balances.clone(),
expected_head: get_hash(2),
},
// Add block 1
//
// 0
// / \
// 2 1
Operation::ProcessBlock {
slot: Slot::new(0),
root: get_hash(1),
parent_root: get_hash(0),
justified_epoch: Epoch::new(1),
finalized_epoch: Epoch::new(1),
},
// Ensure the head is still 2
//
// 0
// / \
// head-> 2 1
Operation::FindHead {
justified_epoch: Epoch::new(1),
justified_root: Hash256::zero(),
finalized_epoch: Epoch::new(1),
justified_state_balances: balances.clone(),
expected_head: get_hash(2),
},
// Add block 3
//
// 0
// / \
// 2 1
// |
// 3
Operation::ProcessBlock {
slot: Slot::new(0),
root: get_hash(3),
parent_root: get_hash(1),
justified_epoch: Epoch::new(1),
finalized_epoch: Epoch::new(1),
},
// Ensure 2 is still the head
//
// 0
// / \
// head-> 2 1
// |
// 3
Operation::FindHead {
justified_epoch: Epoch::new(1),
justified_root: Hash256::zero(),
finalized_epoch: Epoch::new(1),
justified_state_balances: balances.clone(),
expected_head: get_hash(2),
},
// Add block 4
//
// 0
// / \
// 2 1
// | |
// 4 3
Operation::ProcessBlock {
slot: Slot::new(0),
root: get_hash(4),
parent_root: get_hash(2),
justified_epoch: Epoch::new(1),
finalized_epoch: Epoch::new(1),
},
// Ensure the head is 4.
//
// 0
// / \
// 2 1
// | |
// head-> 4 3
Operation::FindHead {
justified_epoch: Epoch::new(1),
justified_root: Hash256::zero(),
finalized_epoch: Epoch::new(1),
justified_state_balances: balances.clone(),
expected_head: get_hash(4),
},
// Add block 5 with a justified epoch of 2
//
// 0
// / \
// 2 1
// | |
// 4 3
// |
// 5 <- justified epoch = 2
Operation::ProcessBlock {
slot: Slot::new(0),
root: get_hash(5),
parent_root: get_hash(4),
justified_epoch: Epoch::new(2),
finalized_epoch: Epoch::new(1),
},
// Ensure the head is still 4 whilst the justified epoch is 1 (5 is filtered out because
// its justified epoch of 2 does not match).
//
// 0
// / \
// 2 1
// | |
// head-> 4 3
// |
// 5
Operation::FindHead {
justified_epoch: Epoch::new(1),
justified_root: Hash256::zero(),
finalized_epoch: Epoch::new(1),
justified_state_balances: balances.clone(),
expected_head: get_hash(4),
},
// Ensure there is an error when starting from a block that has the wrong justified epoch.
//
// 0
// / \
// 2 1
// | |
// 4 3
// |
// 5 <- starting from 5 with justified epoch 1 should error.
Operation::InvalidFindHead {
justified_epoch: Epoch::new(1),
justified_root: get_hash(5),
finalized_epoch: Epoch::new(1),
justified_state_balances: balances.clone(),
},
// Set the justified epoch to 2 and the start block to 5 and ensure 5 is the head.
//
// 0
// / \
// 2 1
// | |
// 4 3
// |
// 5 <- head
Operation::FindHead {
justified_epoch: Epoch::new(2),
justified_root: get_hash(5),
finalized_epoch: Epoch::new(1),
justified_state_balances: balances.clone(),
expected_head: get_hash(5),
},
// Add block 6
//
// 0
// / \
// 2 1
// | |
// 4 3
// |
// 5
// |
// 6
Operation::ProcessBlock {
slot: Slot::new(0),
root: get_hash(6),
parent_root: get_hash(5),
justified_epoch: Epoch::new(2),
finalized_epoch: Epoch::new(1),
},
// Ensure 6 is the head
//
// 0
// / \
// 2 1
// | |
// 4 3
// |
// 5
// |
// 6 <- head
Operation::FindHead {
justified_epoch: Epoch::new(2),
justified_root: get_hash(5),
finalized_epoch: Epoch::new(1),
justified_state_balances: balances.clone(),
expected_head: get_hash(6),
},
];
ForkChoiceTestDefinition {
finalized_block_slot: Slot::new(0),
justified_epoch: Epoch::new(1),
finalized_epoch: Epoch::new(1),
finalized_root: get_hash(0),
operations,
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Runs the no-votes scenario end-to-end.
    #[test]
    fn test() {
        get_no_votes_test_definition().run();
    }
}

View File

@ -0,0 +1,698 @@
use super::*;
/// Returns a fork choice test definition exercising votes: attestations are added and moved
/// between blocks, validator balances change, and the tree is pruned.
pub fn get_votes_test_definition() -> ForkChoiceTestDefinition {
let mut balances = vec![1; 2];
let mut ops = vec![];
// Ensure that the head starts at the finalized block.
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(1),
justified_root: get_hash(0),
finalized_epoch: Epoch::new(1),
justified_state_balances: balances.clone(),
expected_head: get_hash(0),
});
// Add a block with a hash of 2.
//
// 0
// /
// 2
ops.push(Operation::ProcessBlock {
slot: Slot::new(0),
root: get_hash(2),
parent_root: get_hash(0),
justified_epoch: Epoch::new(1),
finalized_epoch: Epoch::new(1),
});
// Ensure that the head is 2
//
// 0
// /
// head-> 2
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(1),
justified_root: get_hash(0),
finalized_epoch: Epoch::new(1),
justified_state_balances: balances.clone(),
expected_head: get_hash(2),
});
// Add a block with a hash of 1 that comes off the genesis block (this is a fork compared
// to the previous block).
//
// 0
// / \
// 2 1
ops.push(Operation::ProcessBlock {
slot: Slot::new(0),
root: get_hash(1),
parent_root: get_hash(0),
justified_epoch: Epoch::new(1),
finalized_epoch: Epoch::new(1),
});
// Ensure that the head is still 2
//
// 0
// / \
// head-> 2 1
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(1),
justified_root: get_hash(0),
finalized_epoch: Epoch::new(1),
justified_state_balances: balances.clone(),
expected_head: get_hash(2),
});
// Add a vote to block 1
//
// 0
// / \
// 2 1 <- +vote
ops.push(Operation::ProcessAttestation {
validator_index: 0,
block_root: get_hash(1),
target_epoch: Epoch::new(2),
});
// Ensure that the head is now 1, because 1 has a vote.
//
// 0
// / \
// 2 1 <- head
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(1),
justified_root: get_hash(0),
finalized_epoch: Epoch::new(1),
justified_state_balances: balances.clone(),
expected_head: get_hash(1),
});
// Add a vote to block 2
//
// 0
// / \
// +vote-> 2 1
ops.push(Operation::ProcessAttestation {
validator_index: 1,
block_root: get_hash(2),
target_epoch: Epoch::new(2),
});
// Ensure that the head is 2 since 1 and 2 both have a vote
//
// 0
// / \
// head-> 2 1
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(1),
justified_root: get_hash(0),
finalized_epoch: Epoch::new(1),
justified_state_balances: balances.clone(),
expected_head: get_hash(2),
});
// Add block 3.
//
// 0
// / \
// 2 1
// |
// 3
ops.push(Operation::ProcessBlock {
slot: Slot::new(0),
root: get_hash(3),
parent_root: get_hash(1),
justified_epoch: Epoch::new(1),
finalized_epoch: Epoch::new(1),
});
// Ensure that the head is still 2
//
// 0
// / \
// head-> 2 1
// |
// 3
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(1),
justified_root: get_hash(0),
finalized_epoch: Epoch::new(1),
justified_state_balances: balances.clone(),
expected_head: get_hash(2),
});
// Move validator #0 vote from 1 to 3
//
// 0
// / \
// 2 1 <- -vote
// |
// 3 <- +vote
ops.push(Operation::ProcessAttestation {
validator_index: 0,
block_root: get_hash(3),
target_epoch: Epoch::new(3),
});
// Ensure that the head is still 2
//
// 0
// / \
// head-> 2 1
// |
// 3
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(1),
justified_root: get_hash(0),
finalized_epoch: Epoch::new(1),
justified_state_balances: balances.clone(),
expected_head: get_hash(2),
});
// Move validator #1 vote from 2 to 1 (this is an equivocation, but fork choice doesn't
// care)
//
// 0
// / \
// -vote-> 2 1 <- +vote
// |
// 3
ops.push(Operation::ProcessAttestation {
validator_index: 1,
block_root: get_hash(1),
target_epoch: Epoch::new(3),
});
// Ensure that the head is now 3
//
// 0
// / \
// 2 1
// |
// 3 <- head
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(1),
justified_root: get_hash(0),
finalized_epoch: Epoch::new(1),
justified_state_balances: balances.clone(),
expected_head: get_hash(3),
});
// Add block 4.
//
// 0
// / \
// 2 1
// |
// 3
// |
// 4
ops.push(Operation::ProcessBlock {
slot: Slot::new(0),
root: get_hash(4),
parent_root: get_hash(3),
justified_epoch: Epoch::new(1),
finalized_epoch: Epoch::new(1),
});
// Ensure that the head is now 4
//
// 0
// / \
// 2 1
// |
// 3
// |
// 4 <- head
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(1),
justified_root: get_hash(0),
finalized_epoch: Epoch::new(1),
justified_state_balances: balances.clone(),
expected_head: get_hash(4),
});
// Add block 5, which has a justified epoch of 2.
//
// 0
// / \
// 2 1
// |
// 3
// |
// 4
// /
// 5 <- justified epoch = 2
ops.push(Operation::ProcessBlock {
slot: Slot::new(0),
root: get_hash(5),
parent_root: get_hash(4),
justified_epoch: Epoch::new(2),
finalized_epoch: Epoch::new(2),
});
// Ensure that 5 is filtered out and the head stays at 4.
//
// 0
// / \
// 2 1
// |
// 3
// |
// 4 <- head
// /
// 5
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(1),
justified_root: get_hash(0),
finalized_epoch: Epoch::new(1),
justified_state_balances: balances.clone(),
expected_head: get_hash(4),
});
// Add block 6, which has a justified epoch of 1.
//
// 0
// / \
// 2 1
// |
// 3
// |
// 4
// / \
// 5 6 <- justified epoch = 1
ops.push(Operation::ProcessBlock {
slot: Slot::new(0),
root: get_hash(6),
parent_root: get_hash(4),
justified_epoch: Epoch::new(1),
finalized_epoch: Epoch::new(1),
});
// Move both votes to 5.
//
// 0
// / \
// 2 1
// |
// 3
// |
// 4
// / \
// +2 vote-> 5 6
ops.push(Operation::ProcessAttestation {
validator_index: 0,
block_root: get_hash(5),
target_epoch: Epoch::new(4),
});
ops.push(Operation::ProcessAttestation {
validator_index: 1,
block_root: get_hash(5),
target_epoch: Epoch::new(4),
});
// Add blocks 7, 8 and 9. Adding these blocks helps test the `best_descendant`
// functionality.
//
// 0
// / \
// 2 1
// |
// 3
// |
// 4
// / \
// 5 6
// |
// 7
// |
// 8
// /
// 9
ops.push(Operation::ProcessBlock {
slot: Slot::new(0),
root: get_hash(7),
parent_root: get_hash(5),
justified_epoch: Epoch::new(2),
finalized_epoch: Epoch::new(2),
});
ops.push(Operation::ProcessBlock {
slot: Slot::new(0),
root: get_hash(8),
parent_root: get_hash(7),
justified_epoch: Epoch::new(2),
finalized_epoch: Epoch::new(2),
});
ops.push(Operation::ProcessBlock {
slot: Slot::new(0),
root: get_hash(9),
parent_root: get_hash(8),
justified_epoch: Epoch::new(2),
finalized_epoch: Epoch::new(2),
});
// Ensure that 6 is the head, even though 5 has all the votes. This is testing to ensure
// that 5 is filtered out due to a differing justified epoch.
//
// 0
// / \
// 2 1
// |
// 3
// |
// 4
// / \
// 5 6 <- head
// |
// 7
// |
// 8
// /
// 9
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(1),
justified_root: get_hash(0),
finalized_epoch: Epoch::new(1),
justified_state_balances: balances.clone(),
expected_head: get_hash(6),
});
// Change fork-choice justified epoch to 2, and the start block to 5 and ensure that 9 is
// the head.
//
// << Change justified epoch to 2 >>
//
// 0
// / \
// 2 1
// |
// 3
// |
// 4
// / \
// 5 6
// |
// 7
// |
// 8
// /
// head-> 9
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(2),
justified_root: get_hash(5),
finalized_epoch: Epoch::new(2),
justified_state_balances: balances.clone(),
expected_head: get_hash(9),
});
// Add two votes to block 9.
//
// 0
// / \
// 2 1
// |
// 3
// |
// 4
// / \
// 5 6
// |
// 7
// |
// 8
// /
// 9 <- +2 votes
ops.push(Operation::ProcessAttestation {
validator_index: 0,
block_root: get_hash(9),
target_epoch: Epoch::new(5),
});
ops.push(Operation::ProcessAttestation {
validator_index: 1,
block_root: get_hash(9),
target_epoch: Epoch::new(5),
});
// Add block 10
//
// 0
// / \
// 2 1
// |
// 3
// |
// 4
// / \
// 5 6
// |
// 7
// |
// 8
// / \
// 9 10
ops.push(Operation::ProcessBlock {
slot: Slot::new(0),
root: get_hash(10),
parent_root: get_hash(8),
justified_epoch: Epoch::new(2),
finalized_epoch: Epoch::new(2),
});
// Double-check the head is still 9 (no diagram this time)
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(2),
justified_root: get_hash(5),
finalized_epoch: Epoch::new(2),
justified_state_balances: balances.clone(),
expected_head: get_hash(9),
});
// Introduce 2 more validators into the system
balances = vec![1; 4];
// Have the two new validators vote for 10
//
// 0
// / \
// 2 1
// |
// 3
// |
// 4
// / \
// 5 6
// |
// 7
// |
// 8
// / \
// 9 10 <- +2 votes
ops.push(Operation::ProcessAttestation {
validator_index: 2,
block_root: get_hash(10),
target_epoch: Epoch::new(5),
});
ops.push(Operation::ProcessAttestation {
validator_index: 3,
block_root: get_hash(10),
target_epoch: Epoch::new(5),
});
// Check the head is now 10.
//
// 0
// / \
// 2 1
// |
// 3
// |
// 4
// / \
// 5 6
// |
// 7
// |
// 8
// / \
// 9 10 <- head
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(2),
justified_root: get_hash(5),
finalized_epoch: Epoch::new(2),
justified_state_balances: balances.clone(),
expected_head: get_hash(10),
});
// Set the balances of the last two validators to zero
balances = vec![1, 1, 0, 0];
// Check the head is 9 again.
//
// .
// .
// .
// |
// 8
// / \
// head-> 9 10
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(2),
justified_root: get_hash(5),
finalized_epoch: Epoch::new(2),
justified_state_balances: balances.clone(),
expected_head: get_hash(9),
});
// Set the balances of the last two validators back to 1
balances = vec![1; 4];
// Check the head is 10.
//
// .
// .
// .
// |
// 8
// / \
// 9 10 <- head
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(2),
justified_root: get_hash(5),
finalized_epoch: Epoch::new(2),
justified_state_balances: balances.clone(),
expected_head: get_hash(10),
});
// Remove the last two validators
balances = vec![1; 2];
// Check the head is 9 again.
//
// (prior blocks omitted for brevity)
// .
// .
// .
// |
// 8
// / \
// head-> 9 10
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(2),
justified_root: get_hash(5),
finalized_epoch: Epoch::new(2),
justified_state_balances: balances.clone(),
expected_head: get_hash(9),
});
// Ensure that pruning below the prune threshold does not prune.
ops.push(Operation::Prune {
finalized_root: get_hash(5),
prune_threshold: usize::max_value(),
expected_len: 11,
});
// Run find-head, ensure the no-op prune didn't change the head.
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(2),
justified_root: get_hash(5),
finalized_epoch: Epoch::new(2),
justified_state_balances: balances.clone(),
expected_head: get_hash(9),
});
// Ensure that pruning above the prune threshold does prune.
//
//
// 0
// / \
// 2 1
// |
// 3
// |
// 4
// -------pruned here ------
// 5 6
// |
// 7
// |
// 8
// / \
// 9 10
ops.push(Operation::Prune {
finalized_root: get_hash(5),
prune_threshold: 1,
expected_len: 6,
});
// Run find-head, ensure the prune didn't change the head.
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(2),
justified_root: get_hash(5),
finalized_epoch: Epoch::new(2),
justified_state_balances: balances.clone(),
expected_head: get_hash(9),
});
// Add block 11
//
// 5 6
// |
// 7
// |
// 8
// / \
// 9 10
// |
// 11
ops.push(Operation::ProcessBlock {
slot: Slot::new(0),
root: get_hash(11),
parent_root: get_hash(9),
justified_epoch: Epoch::new(2),
finalized_epoch: Epoch::new(2),
});
// Ensure the head is now 11
//
// 5 6
// |
// 7
// |
// 8
// / \
// 9 10
// |
// head-> 11
ops.push(Operation::FindHead {
justified_epoch: Epoch::new(2),
justified_root: get_hash(5),
finalized_epoch: Epoch::new(2),
justified_state_balances: balances.clone(),
expected_head: get_hash(11),
});
ForkChoiceTestDefinition {
finalized_block_slot: Slot::new(0),
justified_epoch: Epoch::new(1),
finalized_epoch: Epoch::new(1),
finalized_root: get_hash(0),
operations: ops,
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Runs the votes scenario end-to-end.
    #[test]
    fn test() {
        get_votes_test_definition().run();
    }
}

View File

@ -0,0 +1,12 @@
// Crate-internal modules.
mod error;
pub mod fork_choice_test_definition;
mod proto_array;
mod proto_array_fork_choice;
mod ssz_container;
// Primary public interface to the proto-array fork choice.
pub use crate::proto_array_fork_choice::ProtoArrayForkChoice;
pub use error::Error;
/// Re-exports the underlying `ProtoArray` structure for downstream components that need
/// direct access to it.
pub mod core {
pub use super::proto_array::ProtoArray;
}

View File

@ -0,0 +1,405 @@
use crate::error::Error;
use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode};
use std::collections::HashMap;
use types::{Epoch, Hash256, Slot};
#[derive(Clone, PartialEq, Debug, Encode, Decode, Serialize, Deserialize)]
pub struct ProtoNode {
/// The `slot` is not necessary for `ProtoArray`, it just exists so external components can
/// easily query the block slot. This is useful for upstream fork choice logic.
pub slot: Slot,
/// The block root of this node.
root: Hash256,
/// Index of this node's parent in `ProtoArray::nodes`. `None` when the parent is unknown
/// or has been pruned away (see `maybe_prune`).
parent: Option<usize>,
/// The justified epoch carried by this block.
justified_epoch: Epoch,
/// The finalized epoch carried by this block.
finalized_epoch: Epoch,
/// Accumulated weight applied to this node via `apply_score_changes`.
weight: u64,
/// Index of the best child of this node, if any (see
/// `maybe_update_best_child_and_descendant`).
best_child: Option<usize>,
/// Index of the best descendant reachable from this node, if any.
best_descendant: Option<usize>,
}
#[derive(PartialEq, Debug, Serialize, Deserialize)]
pub struct ProtoArray {
/// Do not attempt to prune the tree unless it has at least this many nodes. Small prunes
/// simply waste time.
pub prune_threshold: usize,
/// The justified epoch most recently supplied to `apply_score_changes`.
pub justified_epoch: Epoch,
/// The finalized epoch most recently supplied to `apply_score_changes`.
pub finalized_epoch: Epoch,
/// The nodes of the tree; a child is always stored after its parent.
pub nodes: Vec<ProtoNode>,
/// Maps a block root to its index in `self.nodes`.
pub indices: HashMap<Hash256, usize>,
}
impl ProtoArray {
/// Iterate backwards through the array, touching all nodes and their parents and potentially
/// the best-child of each parent.
///
/// The structure of the `self.nodes` array ensures that the child of each node is always
/// touched before its parent.
///
/// For each node, the following is done:
///
/// - Update the node's weight with the corresponding delta.
/// - Back-propagate each node's delta to its parents delta.
/// - Compare the current node with the parents best-child, updating it if the current node
/// should become the best child.
/// - If required, update the parents best-descendant with the current node or its best-descendant.
///
/// ## Errors
///
/// Returns an error if `deltas` does not contain exactly one entry per known node, if an
/// internal index is invalid, or if applying a delta would overflow a node's weight.
pub fn apply_score_changes(
&mut self,
mut deltas: Vec<i64>,
justified_epoch: Epoch,
finalized_epoch: Epoch,
) -> Result<(), Error> {
// `deltas` is indexed identically to `self.nodes`; its length must match the number of
// known nodes.
if deltas.len() != self.indices.len() {
return Err(Error::InvalidDeltaLen {
deltas: deltas.len(),
indices: self.indices.len(),
});
}
// Record the new justified/finalized epochs if either has changed.
if justified_epoch != self.justified_epoch || finalized_epoch != self.finalized_epoch {
self.justified_epoch = justified_epoch;
self.finalized_epoch = finalized_epoch;
}
// Iterate backwards through all indices in `self.nodes`.
for node_index in (0..self.nodes.len()).rev() {
let node = self
.nodes
.get_mut(node_index)
.ok_or_else(|| Error::InvalidNodeIndex(node_index))?;
// There is no need to adjust the balances or manage parent of the zero hash since it
// is an alias to the genesis block. The weight applied to the genesis block is
// irrelevant as we _always_ choose it and it's impossible for it to have a parent.
if node.root == Hash256::zero() {
continue;
}
let node_delta = deltas
.get(node_index)
.copied()
.ok_or_else(|| Error::InvalidNodeDelta(node_index))?;
// Apply the delta to the node.
if node_delta < 0 {
// Note: I am conflicted about whether to use `saturating_sub` or `checked_sub`
// here.
//
// I can't think of any valid reason why `node_delta.abs()` should be greater than
// `node.weight`, so I have chosen `checked_sub` to try and fail-fast if there is
// some error.
//
// However, I am not fully convinced that some valid case for `saturating_sub` does
// not exist.
node.weight = node
.weight
.checked_sub(node_delta.abs() as u64)
.ok_or_else(|| Error::DeltaOverflow(node_index))?;
} else {
node.weight = node
.weight
.checked_add(node_delta as u64)
.ok_or_else(|| Error::DeltaOverflow(node_index))?;
}
// If the node has a parent, try to update its best-child and best-descendant.
if let Some(parent_index) = node.parent {
let parent_delta = deltas
.get_mut(parent_index)
.ok_or_else(|| Error::InvalidParentDelta(parent_index))?;
// Back-propagate the nodes delta to its parent.
*parent_delta += node_delta;
self.maybe_update_best_child_and_descendant(parent_index, node_index)?;
}
}
Ok(())
}
/// Register a block with the fork choice.
///
/// It is only sane to supply a `None` parent for the genesis block.
///
/// If `root` is already known this is a no-op and `Ok(())` is returned.
pub fn on_block(
    &mut self,
    slot: Slot,
    root: Hash256,
    parent_opt: Option<Hash256>,
    justified_epoch: Epoch,
    finalized_epoch: Epoch,
) -> Result<(), Error> {
    // If the block is already known, simply ignore it.
    if self.indices.contains_key(&root) {
        return Ok(());
    }

    let node_index = self.nodes.len();

    // Resolve the parent root to a node index, if the parent is known. An unknown parent is
    // treated the same as no parent at all.
    let parent = parent_opt.and_then(|parent| self.indices.get(&parent).copied());

    self.indices.insert(root, node_index);
    // Move the new node into the array directly; the previous implementation cloned it
    // needlessly before pushing.
    self.nodes.push(ProtoNode {
        slot,
        root,
        parent,
        justified_epoch,
        finalized_epoch,
        weight: 0,
        best_child: None,
        best_descendant: None,
    });

    // A new node can only affect the best-child/best-descendant links of its parent.
    if let Some(parent_index) = parent {
        self.maybe_update_best_child_and_descendant(parent_index, node_index)?;
    }

    Ok(())
}
/// Follows the best-descendant links to find the best-block (i.e., head-block).
///
/// ## Notes
///
/// The result of this function is not guaranteed to be accurate if `Self::on_new_block` has
/// been called without a subsequent `Self::apply_score_changes` call. This is because
/// `on_new_block` does not attempt to walk backwards through the tree and update the
/// best-child/best-descendant links.
///
/// ## Errors
///
/// Returns an error if `justified_root` is unknown, if an internal index is invalid, or if
/// the candidate head fails the viability sanity check.
pub fn find_head(&self, justified_root: &Hash256) -> Result<Hash256, Error> {
    let justified_index = self
        .indices
        .get(justified_root)
        .copied()
        .ok_or_else(|| Error::JustifiedNodeUnknown(*justified_root))?;

    let justified_node = self
        .nodes
        .get(justified_index)
        .ok_or_else(|| Error::InvalidJustifiedIndex(justified_index))?;

    // A justified node with no best-descendant is its own head candidate. Using `unwrap_or`
    // here (rather than a lazy closure) is fine since `justified_index` is a `Copy` value.
    let best_descendant_index = justified_node.best_descendant.unwrap_or(justified_index);

    let best_node = self
        .nodes
        .get(best_descendant_index)
        .ok_or_else(|| Error::InvalidBestDescendant(best_descendant_index))?;

    // Perform a sanity check that the node is indeed valid to be the head.
    if !self.node_is_viable_for_head(&best_node) {
        return Err(Error::InvalidBestNode {
            start_root: *justified_root,
            justified_epoch: self.justified_epoch,
            finalized_epoch: self.finalized_epoch,
            // NOTE(review): these `head_*` fields describe the *justified* node rather than
            // the non-viable best node — confirm this is the intended error detail.
            head_root: justified_node.root,
            head_justified_epoch: justified_node.justified_epoch,
            head_finalized_epoch: justified_node.finalized_epoch,
        });
    }

    Ok(best_node.root)
}
/// Update the tree with new finalization information, dropping all nodes that precede the
/// node for `finalized_root` in `self.nodes`.
///
/// The prune is a no-op when the finalized node's index is below `self.prune_threshold`,
/// since small prunes cost more than they save.
///
/// # Errors
///
/// Returns errors if:
///
/// - The finalized root is unknown to `self.indices`.
/// - There is some internal error relating to invalid indices inside `self`.
pub fn maybe_prune(&mut self, finalized_root: Hash256) -> Result<(), Error> {
let finalized_index = *self
.indices
.get(&finalized_root)
.ok_or_else(|| Error::FinalizedNodeUnknown(finalized_root))?;
if finalized_index < self.prune_threshold {
// Pruning at small numbers incurs more cost than benefit.
return Ok(());
}
// Remove the `self.indices` key/values for all the to-be-deleted nodes.
for node_index in 0..finalized_index {
let root = &self
.nodes
.get(node_index)
.ok_or_else(|| Error::InvalidNodeIndex(node_index))?
.root;
self.indices.remove(root);
}
// Drop all the nodes prior to finalization.
self.nodes = self.nodes.split_off(finalized_index);
// Adjust the indices map. Every surviving node moved down by exactly `finalized_index`.
for (_root, index) in self.indices.iter_mut() {
*index = index
.checked_sub(finalized_index)
.ok_or_else(|| Error::IndexOverflow("indices"))?;
}
// Iterate through all the existing nodes and adjust their indices to match the new layout
// of `self.nodes`.
for node in self.nodes.iter_mut() {
if let Some(parent) = node.parent {
// If `node.parent` is less than `finalized_index`, set it to `None`.
node.parent = parent.checked_sub(finalized_index);
}
if let Some(best_child) = node.best_child {
node.best_child = Some(
best_child
.checked_sub(finalized_index)
.ok_or_else(|| Error::IndexOverflow("best_child"))?,
);
}
if let Some(best_descendant) = node.best_descendant {
node.best_descendant = Some(
best_descendant
.checked_sub(finalized_index)
.ok_or_else(|| Error::IndexOverflow("best_descendant"))?,
);
}
}
Ok(())
}
/// Observe the parent at `parent_index` with respect to the child at `child_index` and
/// potentially modify the `parent.best_child` and `parent.best_descendant` values.
///
/// ## Detail
///
/// There are four outcomes:
///
/// - The child is already the best child but it's now invalid due to a FFG change and should be removed.
/// - The child is already the best child and the parent is updated with the new
/// best-descendant.
/// - The child is not the best child but becomes the best child.
/// - The child is not the best child and does not become the best child.
///
/// ## Errors
///
/// Returns an error if either index, or any best-child/best-descendant index encountered
/// along the way, does not resolve to a node.
fn maybe_update_best_child_and_descendant(
&mut self,
parent_index: usize,
child_index: usize,
) -> Result<(), Error> {
let child = self
.nodes
.get(child_index)
.ok_or_else(|| Error::InvalidNodeIndex(child_index))?;
let parent = self
.nodes
.get(parent_index)
.ok_or_else(|| Error::InvalidNodeIndex(parent_index))?;
let child_leads_to_viable_head = self.node_leads_to_viable_head(&child)?;
// These three variables are aliases to the three options that we may set the
// `parent.best_child` and `parent.best_descendant` to.
//
// I use the aliases to assist readability.
let change_to_none = (None, None);
let change_to_child = (
Some(child_index),
// A child with no best-descendant is its own best-descendant.
child.best_descendant.or(Some(child_index)),
);
let no_change = (parent.best_child, parent.best_descendant);
let (new_best_child, new_best_descendant) =
if let Some(best_child_index) = parent.best_child {
if best_child_index == child_index && !child_leads_to_viable_head {
// If the child is already the best-child of the parent but it's not viable for
// the head, remove it.
change_to_none
} else if best_child_index == child_index {
// If the child is the best-child already, set it again to ensure that the
// best-descendant of the parent is updated.
change_to_child
} else {
let best_child = self
.nodes
.get(best_child_index)
.ok_or_else(|| Error::InvalidBestDescendant(best_child_index))?;
let best_child_leads_to_viable_head =
self.node_leads_to_viable_head(&best_child)?;
if child_leads_to_viable_head && !best_child_leads_to_viable_head {
// The child leads to a viable head, but the current best-child doesn't.
change_to_child
} else if !child_leads_to_viable_head && best_child_leads_to_viable_head {
// The best child leads to a viable head, but the child doesn't.
no_change
} else if child.weight == best_child.weight {
// Tie-breaker of equal weights by root.
//
// Note the `>=`: when the roots are also equal the child wins, matching the
// re-selection behavior above.
if child.root >= best_child.root {
change_to_child
} else {
no_change
}
} else {
// Choose the winner by weight.
if child.weight >= best_child.weight {
change_to_child
} else {
no_change
}
}
}
} else {
if child_leads_to_viable_head {
// There is no current best-child and the child is viable.
change_to_child
} else {
// There is no current best-child but the child is not viable.
no_change
}
};
let parent = self
.nodes
.get_mut(parent_index)
.ok_or_else(|| Error::InvalidNodeIndex(parent_index))?;
parent.best_child = new_best_child;
parent.best_descendant = new_best_descendant;
Ok(())
}
/// Indicates if the node itself is viable for the head, or if it's best descendant is viable
/// for the head.
fn node_leads_to_viable_head(&self, node: &ProtoNode) -> Result<bool, Error> {
let best_descendant_is_viable_for_head =
if let Some(best_descendant_index) = node.best_descendant {
let best_descendant = self
.nodes
.get(best_descendant_index)
.ok_or_else(|| Error::InvalidBestDescendant(best_descendant_index))?;
self.node_is_viable_for_head(best_descendant)
} else {
false
};
Ok(best_descendant_is_viable_for_head || self.node_is_viable_for_head(node))
}
/// This is the equivalent to the `filter_block_tree` function in the eth2 spec:
///
/// https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/fork-choice.md#filter_block_tree
///
/// Any node that has a different finalized or justified epoch should not be viable for the
/// head.
fn node_is_viable_for_head(&self, node: &ProtoNode) -> bool {
    // Epoch zero acts as a wildcard (e.g., prior to the first justification/finalization).
    let justification_ok =
        self.justified_epoch == Epoch::new(0) || node.justified_epoch == self.justified_epoch;
    let finalization_ok =
        self.finalized_epoch == Epoch::new(0) || node.finalized_epoch == self.finalized_epoch;

    justification_ok && finalization_ok
}
}

View File

@ -0,0 +1,697 @@
use crate::error::Error;
use crate::proto_array::ProtoArray;
use crate::ssz_container::SszContainer;
use parking_lot::{RwLock, RwLockReadGuard};
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use std::collections::HashMap;
use types::{Epoch, Hash256, Slot};
pub const DEFAULT_PRUNE_THRESHOLD: usize = 256;
/// Tracks the most-recently-applied and the pending LMD vote for a single validator.
#[derive(Default, PartialEq, Clone, Encode, Decode)]
pub struct VoteTracker {
    // The block root this validator's vote currently counts towards (the zero hash if the
    // vote has never been applied).
    current_root: Hash256,
    // The block root of the validator's latest attestation; applied (and copied into
    // `current_root`) the next time deltas are computed.
    next_root: Hash256,
    // The target epoch of the latest attestation; used to ignore attestations that are not
    // newer than the stored one.
    next_epoch: Epoch,
}
/// A Vec-wrapper which will grow to match any request.
///
/// E.g., a `get` or `insert` to an out-of-bounds element will cause the Vec to grow (using
/// Default) to the smallest size required to fulfill the request.
#[derive(Default, Clone, Debug, PartialEq)]
pub struct ElasticList<T>(pub Vec<T>);

impl<T> ElasticList<T>
where
    T: Default,
{
    /// Grows the underlying `Vec` (filling with `T::default()`) until `index` is in-bounds.
    fn ensure(&mut self, index: usize) {
        if index >= self.0.len() {
            self.0.resize_with(index + 1, Default::default);
        }
    }

    /// Returns a mutable reference to element `index`, growing the list first if required.
    pub fn get_mut(&mut self, index: usize) -> &mut T {
        self.ensure(index);
        &mut self.0[index]
    }

    /// Iterates mutably over the existing elements (does not grow the list).
    pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut T> {
        self.0.iter_mut()
    }
}
/// Thread-safe wrapper pairing the core `ProtoArray` with per-validator vote tracking and the
/// justified-state balances that weight those votes.
pub struct ProtoArrayForkChoice {
    // The core array of blocks and their fork-choice weights.
    pub(crate) proto_array: RwLock<ProtoArray>,
    // One `VoteTracker` per validator index; grows on demand.
    pub(crate) votes: RwLock<ElasticList<VoteTracker>>,
    // Balances from the justified state; replaced on each `find_head` call.
    pub(crate) balances: RwLock<Vec<u64>>,
}
impl PartialEq for ProtoArrayForkChoice {
    /// Two instances are equal when their proto-arrays, vote trackers and balances all match.
    fn eq(&self, other: &Self) -> bool {
        // Compare one component at a time so each pair of read-locks is released before the
        // next is taken, short-circuiting on the first mismatch.
        if *self.proto_array.read() != *other.proto_array.read() {
            return false;
        }
        if *self.votes.read() != *other.votes.read() {
            return false;
        }
        *self.balances.read() == *other.balances.read()
    }
}
impl ProtoArrayForkChoice {
    /// Instantiates a fork choice that contains only the given finalized block.
    ///
    /// The finalized block is inserted into the underlying `ProtoArray` as the anchor node
    /// (i.e., it has no parent).
    pub fn new(
        finalized_block_slot: Slot,
        justified_epoch: Epoch,
        finalized_epoch: Epoch,
        finalized_root: Hash256,
    ) -> Result<Self, String> {
        let mut proto_array = ProtoArray {
            prune_threshold: DEFAULT_PRUNE_THRESHOLD,
            justified_epoch,
            finalized_epoch,
            nodes: Vec::with_capacity(1),
            indices: HashMap::with_capacity(1),
        };

        proto_array
            .on_block(
                finalized_block_slot,
                finalized_root,
                // The finalized block has no parent known to fork choice.
                None,
                justified_epoch,
                finalized_epoch,
            )
            .map_err(|e| format!("Failed to add finalized block to proto_array: {:?}", e))?;

        Ok(Self {
            proto_array: RwLock::new(proto_array),
            votes: RwLock::new(ElasticList::default()),
            balances: RwLock::new(vec![]),
        })
    }

    /// Registers an attestation as the pending ("next") vote for `validator_index`.
    ///
    /// The vote is stored only if it is from a later epoch than the currently stored vote, or
    /// if no vote has ever been stored. It is applied to block weights during the next call
    /// to `find_head`.
    pub fn process_attestation(
        &self,
        validator_index: usize,
        block_root: Hash256,
        target_epoch: Epoch,
    ) -> Result<(), String> {
        let mut votes = self.votes.write();
        let vote = votes.get_mut(validator_index);

        if target_epoch > vote.next_epoch || *vote == VoteTracker::default() {
            vote.next_root = block_root;
            vote.next_epoch = target_epoch;
        }

        Ok(())
    }

    /// Adds a block to the underlying `ProtoArray`.
    pub fn process_block(
        &self,
        slot: Slot,
        block_root: Hash256,
        parent_root: Hash256,
        justified_epoch: Epoch,
        finalized_epoch: Epoch,
    ) -> Result<(), String> {
        self.proto_array
            .write()
            .on_block(
                slot,
                block_root,
                Some(parent_root),
                justified_epoch,
                finalized_epoch,
            )
            .map_err(|e| format!("process_block_error: {:?}", e))
    }

    /// Applies all pending votes and balance changes, then returns the head block root.
    ///
    /// `justified_state_balances` are the effective balances of the current justified state;
    /// they replace the previously stored balances once the vote deltas have been applied.
    pub fn find_head(
        &self,
        justified_epoch: Epoch,
        justified_root: Hash256,
        finalized_epoch: Epoch,
        justified_state_balances: &[u64],
    ) -> Result<Hash256, String> {
        let mut proto_array = self.proto_array.write();
        let mut votes = self.votes.write();
        let mut old_balances = self.balances.write();

        let new_balances = justified_state_balances;

        let deltas = compute_deltas(
            &proto_array.indices,
            &mut votes,
            &old_balances,
            &new_balances,
        )
        .map_err(|e| format!("find_head compute_deltas failed: {:?}", e))?;

        proto_array
            .apply_score_changes(deltas, justified_epoch, finalized_epoch)
            .map_err(|e| format!("find_head apply_score_changes failed: {:?}", e))?;

        // Only persist the balances once the score changes have been successfully applied.
        *old_balances = new_balances.to_vec();

        proto_array
            .find_head(&justified_root)
            .map_err(|e| format!("find_head failed: {:?}", e))
    }

    /// Prunes all nodes preceding `finalized_root`, if the prune threshold is exceeded.
    pub fn maybe_prune(&self, finalized_root: Hash256) -> Result<(), String> {
        self.proto_array
            .write()
            .maybe_prune(finalized_root)
            .map_err(|e| format!("maybe_prune failed: {:?}", e))
    }

    /// Sets the number of prune-able nodes required before a prune actually occurs.
    pub fn set_prune_threshold(&self, prune_threshold: usize) {
        self.proto_array.write().prune_threshold = prune_threshold;
    }

    /// Returns the number of blocks known to fork choice.
    pub fn len(&self) -> usize {
        self.proto_array.read().nodes.len()
    }

    /// Returns `true` if no blocks are known to fork choice.
    pub fn is_empty(&self) -> bool {
        self.proto_array.read().nodes.is_empty()
    }

    /// Returns `true` if `block_root` is known to fork choice.
    pub fn contains_block(&self, block_root: &Hash256) -> bool {
        self.proto_array.read().indices.contains_key(block_root)
    }

    /// Returns the slot of the given block, if it is known to fork choice.
    pub fn block_slot(&self, block_root: &Hash256) -> Option<Slot> {
        let proto_array = self.proto_array.read();

        let i = proto_array.indices.get(block_root)?;
        let block = proto_array.nodes.get(*i)?;

        Some(block.slot)
    }

    /// Returns the pending `(block_root, target_epoch)` vote for a validator, if any.
    pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Epoch)> {
        let votes = self.votes.read();

        if validator_index < votes.0.len() {
            let vote = &votes.0[validator_index];

            if *vote == VoteTracker::default() {
                // A default tracker indicates the validator has never voted.
                None
            } else {
                Some((vote.next_root, vote.next_epoch))
            }
        } else {
            None
        }
    }

    /// Serializes `self` as SSZ bytes (see `from_bytes` for the inverse).
    pub fn as_bytes(&self) -> Vec<u8> {
        SszContainer::from(self).as_ssz_bytes()
    }

    /// Deserializes an instance previously encoded with `as_bytes`.
    pub fn from_bytes(bytes: &[u8]) -> Result<Self, String> {
        SszContainer::from_ssz_bytes(bytes)
            .map(Into::into)
            .map_err(|e| format!("Failed to decode ProtoArrayForkChoice: {:?}", e))
    }

    /// Returns a read-lock to core `ProtoArray` struct.
    ///
    /// Should only be used when encoding/decoding during troubleshooting.
    pub fn core_proto_array(&self) -> RwLockReadGuard<ProtoArray> {
        self.proto_array.read()
    }
}
/// Returns a list of `deltas`, where there is one delta for each of the indices in
/// `0..indices.len()`.
///
/// The deltas are formed by a change between `old_balances` and `new_balances`, and/or a change
/// of vote in `votes`.
///
/// ## Errors
///
/// - If a value in `indices` is greater than or equal to `indices.len()`.
/// - If some `Hash256` in `votes` is not a key in `indices` (except for `Hash256::zero()`, this is
///   always valid).
fn compute_deltas(
    indices: &HashMap<Hash256, usize>,
    votes: &mut ElasticList<VoteTracker>,
    old_balances: &[u64],
    new_balances: &[u64],
) -> Result<Vec<i64>, Error> {
    let mut deltas = vec![0_i64; indices.len()];

    for (val_index, vote) in votes.iter_mut().enumerate() {
        // There is no need to create a score change if the validator has never voted or both
        // their votes are for the zero hash (alias to the genesis block).
        if vote.current_root == Hash256::zero() && vote.next_root == Hash256::zero() {
            continue;
        }

        // If the validator was not included in the _old_ balances (i.e., it did not exist yet)
        // then say its balance was zero.
        let old_balance = old_balances.get(val_index).copied().unwrap_or(0);

        // If the validator's vote is not known in the _new_ balances, then use a balance of
        // zero.
        //
        // It is possible that there is a vote for an unknown validator if we change our
        // justified state to a new state with a higher epoch that is on a different fork
        // because that fork may have on-boarded fewer validators than the prior fork.
        let new_balance = new_balances.get(val_index).copied().unwrap_or(0);

        if vote.current_root != vote.next_root || old_balance != new_balance {
            // We ignore the vote if it is not known in `indices`. We assume that it is outside
            // of our tree (i.e., pre-finalization) and therefore not interesting.
            if let Some(current_delta_index) = indices.get(&vote.current_root).copied() {
                let delta = deltas
                    .get(current_delta_index)
                    .ok_or_else(|| Error::InvalidNodeDelta(current_delta_index))?
                    .checked_sub(old_balance as i64)
                    .ok_or_else(|| Error::DeltaOverflow(current_delta_index))?;

                // Array access safe due to check on previous line.
                deltas[current_delta_index] = delta;
            }

            // We ignore the vote if it is not known in `indices`. We assume that it is outside
            // of our tree (i.e., pre-finalization) and therefore not interesting.
            if let Some(next_delta_index) = indices.get(&vote.next_root).copied() {
                let delta = deltas
                    .get(next_delta_index)
                    .ok_or_else(|| Error::InvalidNodeDelta(next_delta_index))?
                    .checked_add(new_balance as i64)
                    .ok_or_else(|| Error::DeltaOverflow(next_delta_index))?;

                // Array access safe due to check on previous line.
                deltas[next_delta_index] = delta;
            }

            // The vote has now been fully applied; the pending vote becomes the current one.
            vote.current_root = vote.next_root;
        }
    }

    Ok(deltas)
}
#[cfg(test)]
mod test_compute_deltas {
    use super::*;

    /// Gives a hash that is not the zero hash (unless i is `usize::max_value()`).
    fn hash_from_index(i: usize) -> Hash256 {
        Hash256::from_low_u64_be(i as u64 + 1)
    }

    /// Asserts that `compute_deltas` migrated every `current_root` to its `next_root`.
    fn assert_votes_updated(votes: ElasticList<VoteTracker>) {
        for vote in votes.0 {
            assert_eq!(
                vote.current_root, vote.next_root,
                "the vote should have been updated"
            );
        }
    }

    #[test]
    fn zero_hash() {
        let validator_count: usize = 16;

        let mut indices = HashMap::new();
        let mut votes = ElasticList::default();
        let mut old_balances = vec![];
        let mut new_balances = vec![];

        for i in 0..validator_count {
            indices.insert(hash_from_index(i), i);
            votes.0.push(VoteTracker {
                current_root: Hash256::zero(),
                next_root: Hash256::zero(),
                next_epoch: Epoch::new(0),
            });
            old_balances.push(0);
            new_balances.push(0);
        }

        let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances)
            .expect("should compute deltas");

        assert_eq!(
            deltas.len(),
            validator_count,
            "deltas should have expected length"
        );
        assert_eq!(
            deltas,
            vec![0; validator_count],
            "deltas should all be zero"
        );

        assert_votes_updated(votes);
    }

    #[test]
    fn all_voted_the_same() {
        const BALANCE: u64 = 42;

        let validator_count: usize = 16;

        let mut indices = HashMap::new();
        let mut votes = ElasticList::default();
        let mut old_balances = vec![];
        let mut new_balances = vec![];

        for i in 0..validator_count {
            indices.insert(hash_from_index(i), i);
            votes.0.push(VoteTracker {
                current_root: Hash256::zero(),
                next_root: hash_from_index(0),
                next_epoch: Epoch::new(0),
            });
            old_balances.push(BALANCE);
            new_balances.push(BALANCE);
        }

        let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances)
            .expect("should compute deltas");

        assert_eq!(
            deltas.len(),
            validator_count,
            "deltas should have expected length"
        );

        for (i, delta) in deltas.into_iter().enumerate() {
            if i == 0 {
                assert_eq!(
                    delta,
                    BALANCE as i64 * validator_count as i64,
                    "zero'th root should have a delta"
                );
            } else {
                assert_eq!(delta, 0, "all other deltas should be zero");
            }
        }

        assert_votes_updated(votes);
    }

    #[test]
    fn different_votes() {
        const BALANCE: u64 = 42;

        let validator_count: usize = 16;

        let mut indices = HashMap::new();
        let mut votes = ElasticList::default();
        let mut old_balances = vec![];
        let mut new_balances = vec![];

        for i in 0..validator_count {
            indices.insert(hash_from_index(i), i);
            votes.0.push(VoteTracker {
                current_root: Hash256::zero(),
                next_root: hash_from_index(i),
                next_epoch: Epoch::new(0),
            });
            old_balances.push(BALANCE);
            new_balances.push(BALANCE);
        }

        let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances)
            .expect("should compute deltas");

        assert_eq!(
            deltas.len(),
            validator_count,
            "deltas should have expected length"
        );

        for delta in deltas.into_iter() {
            assert_eq!(
                delta, BALANCE as i64,
                "each root should have the same delta"
            );
        }

        assert_votes_updated(votes);
    }

    #[test]
    fn moving_votes() {
        const BALANCE: u64 = 42;

        let validator_count: usize = 16;

        let mut indices = HashMap::new();
        let mut votes = ElasticList::default();
        let mut old_balances = vec![];
        let mut new_balances = vec![];

        for i in 0..validator_count {
            indices.insert(hash_from_index(i), i);
            votes.0.push(VoteTracker {
                current_root: hash_from_index(0),
                next_root: hash_from_index(1),
                next_epoch: Epoch::new(0),
            });
            old_balances.push(BALANCE);
            new_balances.push(BALANCE);
        }

        let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances)
            .expect("should compute deltas");

        assert_eq!(
            deltas.len(),
            validator_count,
            "deltas should have expected length"
        );

        let total_delta = BALANCE as i64 * validator_count as i64;

        for (i, delta) in deltas.into_iter().enumerate() {
            if i == 0 {
                assert_eq!(
                    delta,
                    0 - total_delta,
                    "zero'th root should have a negative delta"
                );
            } else if i == 1 {
                assert_eq!(delta, total_delta, "first root should have positive delta");
            } else {
                assert_eq!(delta, 0, "all other deltas should be zero");
            }
        }

        assert_votes_updated(votes);
    }

    #[test]
    fn move_out_of_tree() {
        const BALANCE: u64 = 42;

        let mut indices = HashMap::new();
        let mut votes = ElasticList::default();

        // There is only one block.
        indices.insert(hash_from_index(1), 0);

        // There are two validators.
        let old_balances = vec![BALANCE; 2];
        let new_balances = vec![BALANCE; 2];

        // One validator moves their vote from the block to the zero hash.
        votes.0.push(VoteTracker {
            current_root: hash_from_index(1),
            next_root: Hash256::zero(),
            next_epoch: Epoch::new(0),
        });

        // One validator moves their vote from the block to something outside the tree.
        votes.0.push(VoteTracker {
            current_root: hash_from_index(1),
            next_root: Hash256::from_low_u64_be(1337),
            next_epoch: Epoch::new(0),
        });

        let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances)
            .expect("should compute deltas");

        assert_eq!(deltas.len(), 1, "deltas should have expected length");
        assert_eq!(
            deltas[0],
            0 - BALANCE as i64 * 2,
            "the block should have lost both balances"
        );

        assert_votes_updated(votes);
    }

    #[test]
    fn changing_balances() {
        const OLD_BALANCE: u64 = 42;
        const NEW_BALANCE: u64 = OLD_BALANCE * 2;

        let validator_count: usize = 16;

        let mut indices = HashMap::new();
        let mut votes = ElasticList::default();
        let mut old_balances = vec![];
        let mut new_balances = vec![];

        for i in 0..validator_count {
            indices.insert(hash_from_index(i), i);
            votes.0.push(VoteTracker {
                current_root: hash_from_index(0),
                next_root: hash_from_index(1),
                next_epoch: Epoch::new(0),
            });
            old_balances.push(OLD_BALANCE);
            new_balances.push(NEW_BALANCE);
        }

        let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances)
            .expect("should compute deltas");

        assert_eq!(
            deltas.len(),
            validator_count,
            "deltas should have expected length"
        );

        for (i, delta) in deltas.into_iter().enumerate() {
            if i == 0 {
                assert_eq!(
                    delta,
                    0 - OLD_BALANCE as i64 * validator_count as i64,
                    "zero'th root should have a negative delta"
                );
            } else if i == 1 {
                assert_eq!(
                    delta,
                    NEW_BALANCE as i64 * validator_count as i64,
                    "first root should have positive delta"
                );
            } else {
                assert_eq!(delta, 0, "all other deltas should be zero");
            }
        }

        assert_votes_updated(votes);
    }

    #[test]
    fn validator_appears() {
        const BALANCE: u64 = 42;

        let mut indices = HashMap::new();
        let mut votes = ElasticList::default();

        // There are two blocks.
        indices.insert(hash_from_index(1), 0);
        indices.insert(hash_from_index(2), 1);

        // There is only one validator in the old balances.
        let old_balances = vec![BALANCE; 1];
        // There are two validators in the new balances.
        let new_balances = vec![BALANCE; 2];

        // Both validators move their votes from block 1 to block 2.
        for _ in 0..2 {
            votes.0.push(VoteTracker {
                current_root: hash_from_index(1),
                next_root: hash_from_index(2),
                next_epoch: Epoch::new(0),
            });
        }

        let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances)
            .expect("should compute deltas");

        assert_eq!(deltas.len(), 2, "deltas should have expected length");
        assert_eq!(
            deltas[0],
            0 - BALANCE as i64,
            "block 1 should have only lost one balance"
        );
        assert_eq!(
            deltas[1],
            2 * BALANCE as i64,
            "block 2 should have gained two balances"
        );

        assert_votes_updated(votes);
    }

    #[test]
    fn validator_disappears() {
        const BALANCE: u64 = 42;

        let mut indices = HashMap::new();
        let mut votes = ElasticList::default();

        // There are two blocks.
        indices.insert(hash_from_index(1), 0);
        indices.insert(hash_from_index(2), 1);

        // There are two validators in the old balances.
        let old_balances = vec![BALANCE; 2];
        // There is only one validator in the new balances.
        let new_balances = vec![BALANCE; 1];

        // Both validators move their votes from block 1 to block 2.
        for _ in 0..2 {
            votes.0.push(VoteTracker {
                current_root: hash_from_index(1),
                next_root: hash_from_index(2),
                next_epoch: Epoch::new(0),
            });
        }

        let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances)
            .expect("should compute deltas");

        assert_eq!(deltas.len(), 2, "deltas should have expected length");
        assert_eq!(
            deltas[0],
            0 - BALANCE as i64 * 2,
            "block 1 should have lost both balances"
        );
        assert_eq!(
            deltas[1], BALANCE as i64,
            "block 2 should have only gained one balance"
        );

        assert_votes_updated(votes);
    }
}

View File

@ -0,0 +1,54 @@
use crate::{
proto_array::{ProtoArray, ProtoNode},
proto_array_fork_choice::{ElasticList, ProtoArrayForkChoice, VoteTracker},
};
use parking_lot::RwLock;
use ssz_derive::{Decode, Encode};
use std::collections::HashMap;
use std::iter::FromIterator;
use types::{Epoch, Hash256};
/// Flat, SSZ-encodable snapshot of a `ProtoArrayForkChoice` (see the `From` impls for the
/// conversions in each direction).
#[derive(Encode, Decode)]
pub struct SszContainer {
    // Per-validator vote trackers (the inner `Vec` of the `ElasticList`).
    votes: Vec<VoteTracker>,
    // Justified-state balances, one per validator index.
    balances: Vec<u64>,
    // Fields copied from the inner `ProtoArray`.
    prune_threshold: usize,
    justified_epoch: Epoch,
    finalized_epoch: Epoch,
    nodes: Vec<ProtoNode>,
    // The `HashMap` of block root -> node index, flattened into pairs for SSZ.
    indices: Vec<(Hash256, usize)>,
}
impl From<&ProtoArrayForkChoice> for SszContainer {
fn from(from: &ProtoArrayForkChoice) -> Self {
let proto_array = from.proto_array.read();
Self {
votes: from.votes.read().0.clone(),
balances: from.balances.read().clone(),
prune_threshold: proto_array.prune_threshold,
justified_epoch: proto_array.justified_epoch,
finalized_epoch: proto_array.finalized_epoch,
nodes: proto_array.nodes.clone(),
indices: proto_array.indices.iter().map(|(k, v)| (*k, *v)).collect(),
}
}
}
impl From<SszContainer> for ProtoArrayForkChoice {
fn from(from: SszContainer) -> Self {
let proto_array = ProtoArray {
prune_threshold: from.prune_threshold,
justified_epoch: from.justified_epoch,
finalized_epoch: from.finalized_epoch,
nodes: from.nodes,
indices: HashMap::from_iter(from.indices.into_iter()),
};
Self {
proto_array: RwLock::new(proto_array),
votes: RwLock::new(ElasticList(from.votes)),
balances: RwLock::new(from.balances),
}
}
}

View File

@ -18,7 +18,6 @@ serde_yaml = "0.8.11"
eth2_ssz = "0.1.2"
beacon_chain = { path = "../../beacon_node/beacon_chain" }
store = { path = "../../beacon_node/store" }
lmd_ghost = { path = "../lmd_ghost" }
[dependencies]

View File

@ -7,3 +7,5 @@ edition = "2018"
[dependencies]
slog = "2.5.2"
slog-term = "2.4.2"
lighthouse_metrics = { path = "../lighthouse_metrics" }
lazy_static = "1.4.0"

View File

@ -1,7 +1,24 @@
#[macro_use]
extern crate lazy_static;
use lighthouse_metrics::{
inc_counter, try_create_int_counter, IntCounter, Result as MetricsResult,
};
use std::io::{Result, Write};
pub const MAX_MESSAGE_WIDTH: usize = 40;
lazy_static! {
pub static ref INFOS_TOTAL: MetricsResult<IntCounter> =
try_create_int_counter("info_total", "Count of infos logged");
pub static ref WARNS_TOTAL: MetricsResult<IntCounter> =
try_create_int_counter("warn_total", "Count of warns logged");
pub static ref ERRORS_TOTAL: MetricsResult<IntCounter> =
try_create_int_counter("error_total", "Count of errors logged");
pub static ref CRITS_TOTAL: MetricsResult<IntCounter> =
try_create_int_counter("crit_total", "Count of crits logged");
}
pub struct AlignedTermDecorator {
wrapped: slog_term::TermDecorator,
message_width: usize,
@ -19,14 +36,22 @@ impl AlignedTermDecorator {
impl slog_term::Decorator for AlignedTermDecorator {
fn with_record<F>(
&self,
_record: &slog::Record,
record: &slog::Record,
_logger_values: &slog::OwnedKVList,
f: F,
) -> Result<()>
where
F: FnOnce(&mut dyn slog_term::RecordDecorator) -> std::io::Result<()>,
{
self.wrapped.with_record(_record, _logger_values, |deco| {
match record.level() {
slog::Level::Info => inc_counter(&INFOS_TOTAL),
slog::Level::Warning => inc_counter(&WARNS_TOTAL),
slog::Level::Error => inc_counter(&ERRORS_TOTAL),
slog::Level::Critical => inc_counter(&CRITS_TOTAL),
_ => (),
}
self.wrapped.with_record(record, _logger_values, |deco| {
f(&mut AlignedRecordDecorator::new(deco, self.message_width))
})
}

View File

@ -17,3 +17,4 @@ hex = "0.3"
eth2_ssz = { path = "../../../eth2/utils/ssz" }
serde_json = "^1.0"
eth2_config = { path = "../../../eth2/utils/eth2_config" }
proto_array_fork_choice = { path = "../../../eth2/proto_array_fork_choice" }

View File

@ -5,6 +5,7 @@
use eth2_config::Eth2Config;
use futures::{future, Future, IntoFuture};
use proto_array_fork_choice::core::ProtoArray;
use reqwest::{
r#async::{Client, ClientBuilder, Response},
StatusCode,
@ -101,6 +102,10 @@ impl<E: EthSpec> HttpClient<E> {
Node(self.clone())
}
pub fn advanced(&self) -> Advanced<E> {
Advanced(self.clone())
}
fn url(&self, path: &str) -> Result<Url, Error> {
self.url.join(path).map_err(|e| e.into())
}
@ -536,6 +541,27 @@ impl<E: EthSpec> Node<E> {
}
}
/// Provides the functions on the `/advanced` endpoint of the node.
#[derive(Clone)]
pub struct Advanced<E>(HttpClient<E>);

impl<E: EthSpec> Advanced<E> {
    /// Builds a URL beneath the node's `advanced/` path segment.
    fn url(&self, path: &str) -> Result<Url, Error> {
        self.0
            .url("advanced/")
            .and_then(move |url| url.join(path).map_err(Error::from))
            .map_err(Into::into)
    }

    /// Gets the core `ProtoArray` struct from the node.
    pub fn get_fork_choice(&self) -> impl Future<Item = ProtoArray, Error = Error> {
        // Clone the client so the future owns it (it may outlive `self`).
        let client = self.0.clone();
        self.url("fork_choice")
            .into_future()
            .and_then(move |url| client.json_get(url, vec![]))
    }
}
#[derive(Deserialize)]
#[serde(bound = "T: EthSpec")]
pub struct BlockResponse<T: EthSpec> {