Merge branch 'master' into simple-cached-tree-hash

Paul Hauner 2019-04-08 19:50:55 +10:00
commit 19465268ec
GPG Key ID: D362883A9218FCC6
108 changed files with 6820 additions and 1528 deletions

View File

@@ -8,8 +8,12 @@ before_install:
  - sudo chown -R $USER /usr/local/include/google
script:
  - cargo build --verbose --all
  - cargo build --verbose --release --all
  - cargo test --verbose --all
  - cargo test --verbose --release --all
  - cargo fmt --all -- --check
  # No clippy until later...
  #- cargo clippy
rust:
  - stable
  - beta
@@ -20,3 +24,4 @@ matrix:
  fast_finish: true
install:
  - rustup component add rustfmt
  - rustup component add clippy

View File

@@ -3,6 +3,7 @@ members = [
    "eth2/attester",
    "eth2/block_proposer",
    "eth2/fork_choice",
    "eth2/operation_pool",
    "eth2/state_processing",
    "eth2/state_processing/yaml_utils",
    "eth2/types",

View File

@@ -15,3 +15,7 @@ RUN git clone https://github.com/google/protobuf.git && \
RUN mkdir /cargocache && chmod -R ugo+rwX /cargocache
ENV CARGO_HOME /cargocache
RUN rustup component add rustfmt clippy

Jenkinsfile vendored
View File

@@ -1,20 +1,29 @@
 pipeline {
     agent {
         dockerfile {
             filename 'Dockerfile'
             args '-v cargo-cache:/cargocache:rw -e "CARGO_HOME=/cargocache"'
         }
     }
     stages {
         stage('Build') {
             steps {
-                sh 'cargo build'
+                sh 'cargo build --verbose --all'
+                sh 'cargo build --verbose --all --release'
+            }
+        }
+        stage('Check') {
+            steps {
+                sh 'cargo fmt --all -- --check'
+                // No clippy until later...
+                //sh 'cargo clippy'
             }
         }
         stage('Test') {
             steps {
-                sh 'cargo test --all'
+                sh 'cargo test --verbose --all'
+                sh 'cargo test --verbose --all --release'
             }
         }
     }
 }

View File

@@ -112,6 +112,7 @@ A few basic steps are needed to get set up:
5. Install build dependencies (Arch packages are listed here, your distribution will likely be similar):
   - `clang`: required by RocksDB.
   - `protobuf`: required for protobuf serialization (gRPC).
   - `cmake`: required for building protobuf
6. Navigate to the working directory.
7. Run the test by using command `cargo test --all`. By running, it will pass all the required test cases.
If you are doing it for the first time, then you can grab a coffee in the meantime. Usually, it takes time

View File

@@ -9,11 +9,12 @@ types = { path = "../eth2/types" }
client = { path = "client" }
version = { path = "version" }
clap = "2.32.0"
-slog = "^2.2.3"
+slog = { version = "^2.2.3" , features = ["max_level_trace", "release_max_level_debug"] }
slog-term = "^2.4.0"
slog-async = "^2.3.0"
ctrlc = { version = "3.1.1", features = ["termination"] }
tokio = "0.1.15"
+tokio-timer = "0.2.10"
futures = "0.1.25"
exit-future = "0.1.3"
state_processing = { path = "../eth2/state_processing" }
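The `slog` dependency above now opts into compile-time level filtering (`max_level_trace`, `release_max_level_debug`). A minimal, illustrative sketch of what those features mean in practice, assuming only the `slog`, `slog-term` and `slog-async` crates listed in this manifest (this is not code from the repository):

```rust
use slog::Drain;

fn main() {
    // Terminal drain wired through slog-async, matching the slog-* crates above.
    let decorator = slog_term::TermDecorator::new().build();
    let drain = slog_term::FullFormat::new(decorator).build().fuse();
    let drain = slog_async::Async::new(drain).build().fuse();
    let log = slog::Logger::root(drain, slog::o!());

    // With `max_level_trace`, this statement is compiled in for debug builds...
    slog::trace!(log, "tracing a hot path");
    // ...while `release_max_level_debug` keeps debug!/info!/warn!/error! (but not
    // trace!) in `--release` builds.
    slog::debug!(log, "debug-level message");
}
```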

View File

@@ -15,6 +15,7 @@ hashing = { path = "../../eth2/utils/hashing" }
fork_choice = { path = "../../eth2/fork_choice" }
parking_lot = "0.7"
log = "0.4"
operation_pool = { path = "../../eth2/operation_pool" }
env_logger = "0.6"
serde = "1.0"
serde_derive = "1.0"

View File

@@ -1,218 +0,0 @@
use ssz::TreeHash;
use state_processing::per_block_processing::validate_attestation_without_signature;
use std::collections::{HashMap, HashSet};
use types::*;
const PHASE_0_CUSTODY_BIT: bool = false;
/// Provides the functionality to:
///
/// - Recieve a `FreeAttestation` and aggregate it into an `Attestation` (or create a new if it
/// doesn't exist).
/// - Store all aggregated or created `Attestation`s.
/// - Produce a list of attestations that would be valid for inclusion in some `BeaconState` (and
/// therefore valid for inclusion in a `BeaconBlock`.
///
/// Note: `Attestations` are stored in memory and never deleted. This is not scalable and must be
/// rectified in a future revision.
#[derive(Default)]
pub struct AttestationAggregator {
store: HashMap<Vec<u8>, Attestation>,
}
pub struct Outcome {
pub valid: bool,
pub message: Message,
}
pub enum Message {
/// The free attestation was added to an existing attestation.
Aggregated,
/// The free attestation has already been aggregated to an existing attestation.
AggregationNotRequired,
/// The free attestation was transformed into a new attestation.
NewAttestationCreated,
/// The supplied `validator_index` is not in the committee for the given `shard` and `slot`.
BadValidatorIndex,
/// The given `signature` did not match the `pubkey` in the given
/// `state.validator_registry`.
BadSignature,
/// The given `slot` does not match the validators committee assignment.
BadSlot,
/// The given `shard` does not match the validators committee assignment, or is not included in
/// a committee for the given slot.
BadShard,
/// Attestation is from the epoch prior to this, ignoring.
TooOld,
}
macro_rules! valid_outcome {
($error: expr) => {
return Ok(Outcome {
valid: true,
message: $error,
});
};
}
macro_rules! invalid_outcome {
($error: expr) => {
return Ok(Outcome {
valid: false,
message: $error,
});
};
}
impl AttestationAggregator {
/// Instantiates a new AttestationAggregator with an empty database.
pub fn new() -> Self {
Self {
store: HashMap::new(),
}
}
/// Accepts some `FreeAttestation`, validates it and either aggregates it upon some existing
/// `Attestation` or produces a new `Attestation`.
///
/// The "validation" provided is not complete, instead the following points are checked:
/// - The given `validator_index` is in the committee for the given `shard` for the given
/// `slot`.
/// - The signature is verified against that of the validator at `validator_index`.
pub fn process_free_attestation(
&mut self,
state: &BeaconState,
free_attestation: &FreeAttestation,
spec: &ChainSpec,
) -> Result<Outcome, BeaconStateError> {
let duties =
match state.get_attestation_duties(free_attestation.validator_index as usize, spec) {
Err(BeaconStateError::EpochCacheUninitialized(e)) => {
panic!("Attempted to access unbuilt cache {:?}.", e)
}
Err(BeaconStateError::EpochOutOfBounds) => invalid_outcome!(Message::TooOld),
Err(BeaconStateError::ShardOutOfBounds) => invalid_outcome!(Message::BadShard),
Err(e) => return Err(e),
Ok(None) => invalid_outcome!(Message::BadValidatorIndex),
Ok(Some(attestation_duties)) => attestation_duties,
};
if free_attestation.data.slot != duties.slot {
invalid_outcome!(Message::BadSlot);
}
if free_attestation.data.shard != duties.shard {
invalid_outcome!(Message::BadShard);
}
let signable_message = AttestationDataAndCustodyBit {
data: free_attestation.data.clone(),
custody_bit: PHASE_0_CUSTODY_BIT,
}
.hash_tree_root();
let validator_record = match state
.validator_registry
.get(free_attestation.validator_index as usize)
{
None => invalid_outcome!(Message::BadValidatorIndex),
Some(validator_record) => validator_record,
};
if !free_attestation.signature.verify(
&signable_message,
spec.get_domain(state.current_epoch(spec), Domain::Attestation, &state.fork),
&validator_record.pubkey,
) {
invalid_outcome!(Message::BadSignature);
}
if let Some(existing_attestation) = self.store.get(&signable_message) {
if let Some(updated_attestation) = aggregate_attestation(
existing_attestation,
&free_attestation.signature,
duties.committee_index as usize,
) {
self.store.insert(signable_message, updated_attestation);
valid_outcome!(Message::Aggregated);
} else {
valid_outcome!(Message::AggregationNotRequired);
}
} else {
let mut aggregate_signature = AggregateSignature::new();
aggregate_signature.add(&free_attestation.signature);
let mut aggregation_bitfield = Bitfield::new();
aggregation_bitfield.set(duties.committee_index as usize, true);
let new_attestation = Attestation {
data: free_attestation.data.clone(),
aggregation_bitfield,
custody_bitfield: Bitfield::new(),
aggregate_signature,
};
self.store.insert(signable_message, new_attestation);
valid_outcome!(Message::NewAttestationCreated);
}
}
/// Returns all known attestations which are:
///
/// - Valid for the given state
/// - Not already in `state.latest_attestations`.
pub fn get_attestations_for_state(
&self,
state: &BeaconState,
spec: &ChainSpec,
) -> Vec<Attestation> {
let mut known_attestation_data: HashSet<AttestationData> = HashSet::new();
state
.previous_epoch_attestations
.iter()
.chain(state.current_epoch_attestations.iter())
.for_each(|attestation| {
known_attestation_data.insert(attestation.data.clone());
});
self.store
.values()
.filter_map(|attestation| {
if validate_attestation_without_signature(&state, attestation, spec).is_ok()
&& !known_attestation_data.contains(&attestation.data)
{
Some(attestation.clone())
} else {
None
}
})
.collect()
}
}
/// Produces a new `Attestation` where:
///
/// - `signature` is added to `Attestation.aggregate_signature`
/// - Attestation.aggregation_bitfield[committee_index]` is set to true.
fn aggregate_attestation(
existing_attestation: &Attestation,
signature: &Signature,
committee_index: usize,
) -> Option<Attestation> {
let already_signed = existing_attestation
.aggregation_bitfield
.get(committee_index)
.unwrap_or(false);
if already_signed {
None
} else {
let mut aggregation_bitfield = existing_attestation.aggregation_bitfield.clone();
aggregation_bitfield.set(committee_index, true);
let mut aggregate_signature = existing_attestation.aggregate_signature.clone();
aggregate_signature.add(&signature);
Some(Attestation {
aggregation_bitfield,
aggregate_signature,
..existing_attestation.clone()
})
}
}
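Although this file is deleted, its core aggregation rule — never aggregate a committee member's signature twice — carries over into the new operation pool. A standalone toy sketch of that rule, using stand-in types rather than the real `Attestation`/`Bitfield`/`AggregateSignature`:

```rust
/// Toy stand-ins for the aggregation bitfield and aggregate signature.
#[derive(Clone)]
struct ToyAttestation {
    aggregation_bits: Vec<bool>,
    signature_count: usize,
}

/// Mirrors the `aggregate_attestation` rule above: if the committee member has
/// already signed, return `None`; otherwise set their bit and fold in the signature.
fn aggregate(existing: &ToyAttestation, committee_index: usize) -> Option<ToyAttestation> {
    // Treat an out-of-range index the same as "already signed": refuse to aggregate.
    let already_signed = existing
        .aggregation_bits
        .get(committee_index)
        .copied()
        .unwrap_or(true);
    if already_signed {
        return None;
    }
    let mut updated = existing.clone();
    updated.aggregation_bits[committee_index] = true;
    updated.signature_count += 1;
    Some(updated)
}

fn main() {
    let fresh = ToyAttestation { aggregation_bits: vec![false; 4], signature_count: 0 };
    let once = aggregate(&fresh, 2).expect("first signature aggregates");
    assert_eq!(once.signature_count, 1);
    // A second signature for the same committee index is rejected.
    assert!(aggregate(&once, 2).is_none());
}
```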

View File

@@ -1,4 +1,3 @@
-use crate::attestation_aggregator::{AttestationAggregator, Outcome as AggregationOutcome};
 use crate::checkpoint::CheckPoint;
 use crate::errors::{BeaconChainError as Error, BlockProductionError};
 use db::{
@@ -7,9 +6,15 @@ use db::{
 };
 use fork_choice::{ForkChoice, ForkChoiceError};
 use log::{debug, trace};
+use operation_pool::DepositInsertStatus;
+use operation_pool::OperationPool;
 use parking_lot::{RwLock, RwLockReadGuard};
 use slot_clock::SlotClock;
 use ssz::ssz_encode;
+use state_processing::per_block_processing::errors::{
+    AttestationValidationError, AttesterSlashingValidationError, DepositValidationError,
+    ExitValidationError, ProposerSlashingValidationError, TransferValidationError,
+};
 use state_processing::{
     per_block_processing, per_block_processing_without_verifying_block_signature,
     per_slot_processing, BlockProcessingError, SlotProcessingError,
@@ -26,7 +31,10 @@ pub enum ValidBlock {
#[derive(Debug, PartialEq)]
pub enum InvalidBlock {
    /// The block slot is greater than the present slot.
-    FutureSlot,
+    FutureSlot {
+        present_slot: Slot,
+        block_slot: Slot,
+    },
    /// The block state_root does not match the generated state.
    StateRootMismatch,
    /// The blocks parent_root is unknown.
@@ -46,16 +54,40 @@ pub enum BlockProcessingOutcome {
    InvalidBlock(InvalidBlock),
}
impl BlockProcessingOutcome {
/// Returns `true` if the block was objectively invalid and we should disregard the peer who
/// sent it.
pub fn is_invalid(&self) -> bool {
match self {
BlockProcessingOutcome::ValidBlock(_) => false,
BlockProcessingOutcome::InvalidBlock(r) => match r {
InvalidBlock::FutureSlot { .. } => true,
InvalidBlock::StateRootMismatch => true,
InvalidBlock::ParentUnknown => false,
InvalidBlock::SlotProcessingError(_) => false,
InvalidBlock::PerBlockProcessingError(e) => match e {
BlockProcessingError::Invalid(_) => true,
BlockProcessingError::BeaconStateError(_) => false,
},
},
}
}
/// Returns `true` if the block was successfully processed and can be removed from any import
/// queues or temporary storage.
pub fn sucessfully_processed(&self) -> bool {
match self {
BlockProcessingOutcome::ValidBlock(_) => true,
_ => false,
}
}
}
pub struct BeaconChain<T: ClientDB + Sized, U: SlotClock, F: ForkChoice> {
    pub block_store: Arc<BeaconBlockStore<T>>,
    pub state_store: Arc<BeaconStateStore<T>>,
    pub slot_clock: U,
-    pub attestation_aggregator: RwLock<AttestationAggregator>,
-    pub deposits_for_inclusion: RwLock<Vec<Deposit>>,
-    pub exits_for_inclusion: RwLock<Vec<VoluntaryExit>>,
-    pub transfers_for_inclusion: RwLock<Vec<Transfer>>,
-    pub proposer_slashings_for_inclusion: RwLock<Vec<ProposerSlashing>>,
-    pub attester_slashings_for_inclusion: RwLock<Vec<AttesterSlashing>>,
+    pub op_pool: OperationPool,
    canonical_head: RwLock<CheckPoint>,
    finalized_head: RwLock<CheckPoint>,
    pub state: RwLock<BeaconState>,
@@ -97,23 +129,14 @@ where
            genesis_state.clone(),
            state_root,
        ));

-        let attestation_aggregator = RwLock::new(AttestationAggregator::new());
-
-        genesis_state.build_epoch_cache(RelativeEpoch::Previous, &spec)?;
-        genesis_state.build_epoch_cache(RelativeEpoch::Current, &spec)?;
-        genesis_state.build_epoch_cache(RelativeEpoch::NextWithoutRegistryChange, &spec)?;
-        genesis_state.build_epoch_cache(RelativeEpoch::NextWithRegistryChange, &spec)?;
+        genesis_state.build_all_caches(&spec)?;

        Ok(Self {
            block_store,
            state_store,
            slot_clock,
-            attestation_aggregator,
-            deposits_for_inclusion: RwLock::new(vec![]),
-            exits_for_inclusion: RwLock::new(vec![]),
-            transfers_for_inclusion: RwLock::new(vec![]),
-            proposer_slashings_for_inclusion: RwLock::new(vec![]),
-            attester_slashings_for_inclusion: RwLock::new(vec![]),
+            op_pool: OperationPool::new(),
            state: RwLock::new(genesis_state),
            finalized_head,
            canonical_head,
@@ -122,6 +145,126 @@ where
        })
    }
/// Returns the beacon block body for each beacon block root in `roots`.
///
/// Fails if any root in `roots` does not have a corresponding block.
pub fn get_block_bodies(&self, roots: &[Hash256]) -> Result<Vec<BeaconBlockBody>, Error> {
let bodies: Result<Vec<BeaconBlockBody>, _> = roots
.iter()
.map(|root| match self.get_block(root)? {
Some(block) => Ok(block.body),
None => Err(Error::DBInconsistent("Missing block".into())),
})
.collect();
Ok(bodies?)
}
/// Returns the beacon block header for each beacon block root in `roots`.
///
/// Fails if any root in `roots` does not have a corresponding block.
pub fn get_block_headers(&self, roots: &[Hash256]) -> Result<Vec<BeaconBlockHeader>, Error> {
let headers: Result<Vec<BeaconBlockHeader>, _> = roots
.iter()
.map(|root| match self.get_block(root)? {
Some(block) => Ok(block.block_header()),
None => Err(Error::DBInconsistent("Missing block".into())),
})
.collect();
Ok(headers?)
}
/// Returns `count `beacon block roots, starting from `start_slot` with an
/// interval of `skip` slots between each root.
///
/// ## Errors:
///
/// - `SlotOutOfBounds`: Unable to return the full specified range.
/// - `SlotOutOfBounds`: Unable to load a state from the DB.
/// - `SlotOutOfBounds`: Start slot is higher than the first slot.
/// - Other: BeaconState` is inconsistent.
pub fn get_block_roots(
&self,
earliest_slot: Slot,
count: usize,
skip: usize,
) -> Result<Vec<Hash256>, Error> {
let spec = &self.spec;
let step_by = Slot::from(skip + 1);
let mut roots: Vec<Hash256> = vec![];
// The state for reading block roots. Will be updated with an older state if slots go too
// far back in history.
let mut state = self.state.read().clone();
// The final slot in this series, will be reduced by `skip` each loop iteration.
let mut slot = earliest_slot + Slot::from(count * (skip + 1)) - 1;
// If the highest slot requested is that of the current state insert the root of the
// head block, unless the head block's slot is not matching.
if slot == state.slot && self.head().beacon_block.slot == slot {
roots.push(self.head().beacon_block_root);
slot -= step_by;
} else if slot >= state.slot {
return Err(BeaconStateError::SlotOutOfBounds.into());
}
loop {
// If the slot is within the range of the current state's block roots, append the root
// to the output vec.
//
// If we get `SlotOutOfBounds` error, load the oldest available historic
// state from the DB.
match state.get_block_root(slot, spec) {
Ok(root) => {
if slot < earliest_slot {
break;
} else {
roots.push(*root);
slot -= step_by;
}
}
Err(BeaconStateError::SlotOutOfBounds) => {
// Read the earliest historic state in the current slot.
let earliest_historic_slot =
state.slot - Slot::from(spec.slots_per_historical_root);
// Load the earlier state from disk.
let new_state_root = state.get_state_root(earliest_historic_slot, spec)?;
// Break if the DB is unable to load the state.
state = match self.state_store.get_deserialized(&new_state_root) {
Ok(Some(state)) => state,
_ => break,
}
}
Err(e) => return Err(e.into()),
};
}
// Return the results if they pass a sanity check.
if (slot <= earliest_slot) && (roots.len() == count) {
// Reverse the ordering of the roots. We extracted them in reverse order to make it
// simpler to lookup historic states.
//
// This is a potential optimisation target.
Ok(roots.iter().rev().cloned().collect())
} else {
Err(BeaconStateError::SlotOutOfBounds.into())
}
}
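A standalone sketch of the slot arithmetic that `get_block_roots` walks; it reproduces only the `count`/`skip` stepping from the code above, not the historic-state fallback, and the function name and types here are illustrative only:

```rust
/// Illustrative only: the slots visited by a `get_block_roots(earliest_slot, count, skip)`
/// style walk. Start at the highest slot in the range, step down by `skip + 1`,
/// then reverse into ascending order.
fn requested_slots(earliest_slot: u64, count: u64, skip: u64) -> Vec<u64> {
    if count == 0 {
        return vec![];
    }
    let step_by = skip + 1;
    let mut slot = earliest_slot + count * step_by - 1;
    let mut slots = vec![];
    while slot >= earliest_slot {
        slots.push(slot);
        if slot < step_by {
            break; // avoid unsigned underflow at the bottom of the range
        }
        slot -= step_by;
    }
    slots.reverse();
    slots
}

fn main() {
    // skip = 0 is a plain contiguous range.
    assert_eq!(requested_slots(10, 4, 0), vec![10, 11, 12, 13]);
    // skip = 1 walks every second slot, anchored at the top of the range.
    assert_eq!(requested_slots(10, 4, 1), vec![11, 13, 15, 17]);
    println!("ok");
}
```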
/// Returns the block at the given root, if any.
///
/// ## Errors
///
/// May return a database error.
pub fn get_block(&self, block_root: &Hash256) -> Result<Option<BeaconBlock>, Error> {
Ok(self.block_store.get_deserialized(block_root)?)
}
    /// Update the canonical head to some new values.
    pub fn update_canonical_head(
        &self,
@@ -147,12 +290,72 @@ where
    /// fork-choice rule).
    ///
    /// It is important to note that the `beacon_state` returned may not match the present slot. It
-    /// is the state as it was when the head block was recieved, which could be some slots prior to
+    /// is the state as it was when the head block was received, which could be some slots prior to
    /// now.
    pub fn head(&self) -> RwLockReadGuard<CheckPoint> {
        self.canonical_head.read()
    }
/// Updates the canonical `BeaconState` with the supplied state.
///
/// Advances the chain forward to the present slot. This method is better than just setting
/// state and calling `catchup_state` as it will not result in an old state being installed and
/// then having it iteratively updated -- in such a case it's possible for another thread to
/// find the state at an old slot.
pub fn update_state(&self, mut state: BeaconState) -> Result<(), Error> {
let latest_block_header = self.head().beacon_block.block_header();
let present_slot = match self.slot_clock.present_slot() {
Ok(Some(slot)) => slot,
_ => return Err(Error::UnableToReadSlot),
};
// If required, transition the new state to the present slot.
for _ in state.slot.as_u64()..present_slot.as_u64() {
per_slot_processing(&mut state, &latest_block_header, &self.spec)?;
}
state.build_all_caches(&self.spec)?;
*self.state.write() = state;
Ok(())
}
/// Ensures the current canonical `BeaconState` has been transitioned to match the `slot_clock`.
pub fn catchup_state(&self) -> Result<(), Error> {
let latest_block_header = self.head().beacon_block.block_header();
let present_slot = match self.slot_clock.present_slot() {
Ok(Some(slot)) => slot,
_ => return Err(Error::UnableToReadSlot),
};
let mut state = self.state.write();
// If required, transition the new state to the present slot.
for _ in state.slot.as_u64()..present_slot.as_u64() {
// Ensure the next epoch state caches are built in case of an epoch transition.
state.build_epoch_cache(RelativeEpoch::NextWithoutRegistryChange, &self.spec)?;
state.build_epoch_cache(RelativeEpoch::NextWithRegistryChange, &self.spec)?;
per_slot_processing(&mut *state, &latest_block_header, &self.spec)?;
}
state.build_all_caches(&self.spec)?;
Ok(())
}
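Both `update_state` and `catchup_state` above share the same catch-up loop: apply per-slot processing until the state's slot matches the wall-clock slot. A toy, self-contained sketch of that loop shape; `ToyState` and the stub `per_slot_processing` are stand-ins, not the real types:

```rust
/// Stand-in state: just a slot counter plus a record of the slots we processed.
struct ToyState {
    slot: u64,
    processed: Vec<u64>,
}

/// Stub for the real `per_slot_processing`, which also rotates roots, caches, etc.
fn per_slot_processing(state: &mut ToyState) {
    state.processed.push(state.slot);
    state.slot += 1;
}

/// The loop shape used by `update_state`/`catchup_state`: advance one slot at a
/// time until the state matches the present slot.
fn catchup(state: &mut ToyState, present_slot: u64) {
    for _ in state.slot..present_slot {
        per_slot_processing(state);
    }
}

fn main() {
    let mut state = ToyState { slot: 3, processed: vec![] };
    catchup(&mut state, 7);
    assert_eq!(state.slot, 7);
    assert_eq!(state.processed, vec![3, 4, 5, 6]);
}
```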
/// Build all of the caches on the current state.
///
/// Ideally this shouldn't be required, however we leave it here for testing.
pub fn ensure_state_caches_are_built(&self) -> Result<(), Error> {
self.state.write().build_all_caches(&self.spec)?;
Ok(())
}
    /// Update the justified head to some new values.
    pub fn update_finalized_head(
        &self,
@@ -176,28 +379,6 @@ where
        self.finalized_head.read()
    }
/// Advance the `self.state` `BeaconState` to the supplied slot.
///
/// This will perform per_slot and per_epoch processing as required.
///
/// The `previous_block_root` will be set to the root of the current head block (as determined
/// by the fork-choice rule).
///
/// It is important to note that this is _not_ the state corresponding to the canonical head
/// block, instead it is that state which may or may not have had additional per slot/epoch
/// processing applied to it.
pub fn advance_state(&self, slot: Slot) -> Result<(), SlotProcessingError> {
let state_slot = self.state.read().slot;
let latest_block_header = self.head().beacon_block.block_header();
for _ in state_slot.as_u64()..slot.as_u64() {
per_slot_processing(&mut *self.state.write(), &latest_block_header, &self.spec)?;
}
Ok(())
}
    /// Returns the validator index (if any) for the given public key.
    ///
    /// Information is retrieved from the present `beacon_state.validator_registry`.
@@ -232,6 +413,20 @@ where
        }
    }
/// Reads the slot clock (see `self.read_slot_clock()` and returns the number of slots since
/// genesis.
pub fn slots_since_genesis(&self) -> Option<SlotHeight> {
let now = self.read_slot_clock()?;
if now < self.spec.genesis_slot {
None
} else {
Some(SlotHeight::from(
now.as_u64() - self.spec.genesis_slot.as_u64(),
))
}
}
    /// Returns slot of the present state.
    ///
    /// This is distinct to `read_slot_clock`, which reads from the actual system clock. If
@@ -246,7 +441,10 @@ where
    /// Information is read from the present `beacon_state` shuffling, so only information from the
    /// present and prior epoch is available.
    pub fn block_proposer(&self, slot: Slot) -> Result<usize, BeaconStateError> {
-        trace!("BeaconChain::block_proposer: slot: {}", slot);
+        self.state
+            .write()
+            .build_epoch_cache(RelativeEpoch::Current, &self.spec)?;
        let index = self.state.read().get_beacon_proposer_index(
            slot,
            RelativeEpoch::Current,
@@ -281,21 +479,36 @@ where
    /// Produce an `AttestationData` that is valid for the present `slot` and given `shard`.
    pub fn produce_attestation_data(&self, shard: u64) -> Result<AttestationData, Error> {
-        trace!("BeaconChain::produce_attestation_data: shard: {}", shard);
-        let source_epoch = self.state.read().current_justified_epoch;
-        let source_root = *self.state.read().get_block_root(
-            source_epoch.start_slot(self.spec.slots_per_epoch),
-            &self.spec,
-        )?;
+        trace!("BeaconChain::produce_attestation: shard: {}", shard);
+        let state = self.state.read();

-        let target_root = *self.state.read().get_block_root(
-            self.state
-                .read()
-                .slot
-                .epoch(self.spec.slots_per_epoch)
-                .start_slot(self.spec.slots_per_epoch),
-            &self.spec,
-        )?;
+        let current_epoch_start_slot = self
+            .state
+            .read()
+            .slot
+            .epoch(self.spec.slots_per_epoch)
+            .start_slot(self.spec.slots_per_epoch);
+        let target_root = if state.slot == current_epoch_start_slot {
+            // If we're on the first slot of the state's epoch.
+            if self.head().beacon_block.slot == state.slot {
+                // If the current head block is from the current slot, use its block root.
+                self.head().beacon_block_root
+            } else {
+                // If the current head block is not from this slot, use the slot from the previous
+                // epoch.
+                *self.state.read().get_block_root(
+                    current_epoch_start_slot - self.spec.slots_per_epoch,
+                    &self.spec,
+                )?
+            }
+        } else {
+            // If we're not on the first slot of the epoch.
+            *self
+                .state
+                .read()
+                .get_block_root(current_epoch_start_slot, &self.spec)?
+        };

        Ok(AttestationData {
            slot: self.state.read().slot,
@@ -303,256 +516,61 @@ where
            beacon_block_root: self.head().beacon_block_root,
            target_root,
            crosslink_data_root: Hash256::zero(),
-            previous_crosslink: Crosslink {
-                epoch: self.state.read().slot.epoch(self.spec.slots_per_epoch),
-                crosslink_data_root: Hash256::zero(),
-            },
-            source_epoch,
-            source_root,
+            previous_crosslink: state.latest_crosslinks[shard as usize].clone(),
+            source_epoch: state.current_justified_epoch,
+            source_root: state.current_justified_root,
        })
    }
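A standalone sketch of the `target_root` selection introduced above, with slots as plain integers; the enum and function names here are illustrative only, not part of the chain code:

```rust
#[derive(Debug, PartialEq)]
enum TargetRootSource {
    HeadBlockRoot,
    BlockRootAtSlot(u64),
}

/// Decide which block root an attestation should target:
/// - mid-epoch: the root at the first slot of the current epoch,
/// - first slot of the epoch with an up-to-date head: the head block root itself,
/// - first slot of the epoch otherwise: the root at the previous epoch's start.
fn target_root_source(state_slot: u64, head_block_slot: u64, slots_per_epoch: u64) -> TargetRootSource {
    let current_epoch_start_slot = (state_slot / slots_per_epoch) * slots_per_epoch;
    if state_slot == current_epoch_start_slot {
        if head_block_slot == state_slot {
            TargetRootSource::HeadBlockRoot
        } else {
            TargetRootSource::BlockRootAtSlot(current_epoch_start_slot - slots_per_epoch)
        }
    } else {
        TargetRootSource::BlockRootAtSlot(current_epoch_start_slot)
    }
}

fn main() {
    // Slot 65 with slots_per_epoch = 64 is mid-epoch: target the epoch start (slot 64).
    assert_eq!(target_root_source(65, 65, 64), TargetRootSource::BlockRootAtSlot(64));
    // Slot 64 with a head block from slot 64: target the head block root itself.
    assert_eq!(target_root_source(64, 64, 64), TargetRootSource::HeadBlockRoot);
    // Slot 64 with an older head block: fall back to the previous epoch start (slot 0).
    assert_eq!(target_root_source(64, 63, 64), TargetRootSource::BlockRootAtSlot(0));
}
```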
-    /// Validate a `FreeAttestation` and either:
-    ///
-    /// - Create a new `Attestation`.
-    /// - Aggregate it to an existing `Attestation`.
-    pub fn process_free_attestation(
-        &self,
-        free_attestation: FreeAttestation,
-    ) -> Result<AggregationOutcome, Error> {
-        let aggregation_outcome = self
-            .attestation_aggregator
-            .write()
-            .process_free_attestation(&self.state.read(), &free_attestation, &self.spec)?;
-
-        // return if the attestation is invalid
-        if !aggregation_outcome.valid {
-            return Ok(aggregation_outcome);
-        }
-
-        // valid attestation, proceed with fork-choice logic
-        self.fork_choice.write().add_attestation(
-            free_attestation.validator_index,
-            &free_attestation.data.beacon_block_root,
-            &self.spec,
-        )?;
-
-        Ok(aggregation_outcome)
+    /// Accept a new attestation from the network.
+    ///
+    /// If valid, the attestation is added to the `op_pool` and aggregated with another attestation
+    /// if possible.
+    pub fn process_attestation(
+        &self,
+        attestation: Attestation,
+    ) -> Result<(), AttestationValidationError> {
+        self.op_pool
+            .insert_attestation(attestation, &*self.state.read(), &self.spec)
    }

    /// Accept some deposit and queue it for inclusion in an appropriate block.
-    pub fn receive_deposit_for_inclusion(&self, deposit: Deposit) {
-        // TODO: deposits are not checked for validity; check them.
-        //
-        // https://github.com/sigp/lighthouse/issues/276
-        self.deposits_for_inclusion.write().push(deposit);
-    }
-
-    /// Return a vec of deposits suitable for inclusion in some block.
-    pub fn get_deposits_for_block(&self) -> Vec<Deposit> {
-        // TODO: deposits are indiscriminately included; check them for validity.
-        //
-        // https://github.com/sigp/lighthouse/issues/275
-        self.deposits_for_inclusion.read().clone()
-    }
-
-    /// Takes a list of `Deposits` that were included in recent blocks and removes them from the
-    /// inclusion queue.
-    ///
-    /// This ensures that `Deposits` are not included twice in successive blocks.
-    pub fn set_deposits_as_included(&self, included_deposits: &[Deposit]) {
-        // TODO: method does not take forks into account; consider this.
-        //
-        // https://github.com/sigp/lighthouse/issues/275
-        let mut indices_to_delete = vec![];
-
-        for included in included_deposits {
-            for (i, for_inclusion) in self.deposits_for_inclusion.read().iter().enumerate() {
-                if included == for_inclusion {
-                    indices_to_delete.push(i);
-                }
-            }
-        }
-
-        let deposits_for_inclusion = &mut self.deposits_for_inclusion.write();
-        for i in indices_to_delete {
-            deposits_for_inclusion.remove(i);
-        }
+    pub fn process_deposit(
+        &self,
+        deposit: Deposit,
+    ) -> Result<DepositInsertStatus, DepositValidationError> {
+        self.op_pool
+            .insert_deposit(deposit, &*self.state.read(), &self.spec)
    }

    /// Accept some exit and queue it for inclusion in an appropriate block.
-    pub fn receive_exit_for_inclusion(&self, exit: VoluntaryExit) {
-        // TODO: exits are not checked for validity; check them.
-        //
-        // https://github.com/sigp/lighthouse/issues/276
-        self.exits_for_inclusion.write().push(exit);
-    }
-
-    /// Return a vec of exits suitable for inclusion in some block.
-    pub fn get_exits_for_block(&self) -> Vec<VoluntaryExit> {
-        // TODO: exits are indiscriminately included; check them for validity.
-        //
-        // https://github.com/sigp/lighthouse/issues/275
-        self.exits_for_inclusion.read().clone()
-    }
-
-    /// Takes a list of `Deposits` that were included in recent blocks and removes them from the
-    /// inclusion queue.
-    ///
-    /// This ensures that `Deposits` are not included twice in successive blocks.
-    pub fn set_exits_as_included(&self, included_exits: &[VoluntaryExit]) {
-        // TODO: method does not take forks into account; consider this.
-        let mut indices_to_delete = vec![];
-
-        for included in included_exits {
-            for (i, for_inclusion) in self.exits_for_inclusion.read().iter().enumerate() {
-                if included == for_inclusion {
-                    indices_to_delete.push(i);
-                }
-            }
-        }
-
-        let exits_for_inclusion = &mut self.exits_for_inclusion.write();
-        for i in indices_to_delete {
-            exits_for_inclusion.remove(i);
-        }
+    pub fn process_voluntary_exit(&self, exit: VoluntaryExit) -> Result<(), ExitValidationError> {
+        self.op_pool
+            .insert_voluntary_exit(exit, &*self.state.read(), &self.spec)
    }

    /// Accept some transfer and queue it for inclusion in an appropriate block.
-    pub fn receive_transfer_for_inclusion(&self, transfer: Transfer) {
-        // TODO: transfers are not checked for validity; check them.
-        //
-        // https://github.com/sigp/lighthouse/issues/276
-        self.transfers_for_inclusion.write().push(transfer);
-    }
-
-    /// Return a vec of transfers suitable for inclusion in some block.
-    pub fn get_transfers_for_block(&self) -> Vec<Transfer> {
-        // TODO: transfers are indiscriminately included; check them for validity.
-        //
-        // https://github.com/sigp/lighthouse/issues/275
-        self.transfers_for_inclusion.read().clone()
-    }
-
-    /// Takes a list of `Deposits` that were included in recent blocks and removes them from the
-    /// inclusion queue.
-    ///
-    /// This ensures that `Deposits` are not included twice in successive blocks.
-    pub fn set_transfers_as_included(&self, included_transfers: &[Transfer]) {
-        // TODO: method does not take forks into account; consider this.
-        let mut indices_to_delete = vec![];
-
-        for included in included_transfers {
-            for (i, for_inclusion) in self.transfers_for_inclusion.read().iter().enumerate() {
-                if included == for_inclusion {
-                    indices_to_delete.push(i);
-                }
-            }
-        }
-
-        let transfers_for_inclusion = &mut self.transfers_for_inclusion.write();
-        for i in indices_to_delete {
-            transfers_for_inclusion.remove(i);
-        }
+    pub fn process_transfer(&self, transfer: Transfer) -> Result<(), TransferValidationError> {
+        self.op_pool
+            .insert_transfer(transfer, &*self.state.read(), &self.spec)
    }

    /// Accept some proposer slashing and queue it for inclusion in an appropriate block.
-    pub fn receive_proposer_slashing_for_inclusion(&self, proposer_slashing: ProposerSlashing) {
-        // TODO: proposer_slashings are not checked for validity; check them.
-        //
-        // https://github.com/sigp/lighthouse/issues/276
-        self.proposer_slashings_for_inclusion
-            .write()
-            .push(proposer_slashing);
-    }
-
-    /// Return a vec of proposer slashings suitable for inclusion in some block.
-    pub fn get_proposer_slashings_for_block(&self) -> Vec<ProposerSlashing> {
-        // TODO: proposer_slashings are indiscriminately included; check them for validity.
-        //
-        // https://github.com/sigp/lighthouse/issues/275
-        self.proposer_slashings_for_inclusion.read().clone()
-    }
-
-    /// Takes a list of `ProposerSlashings` that were included in recent blocks and removes them
-    /// from the inclusion queue.
-    ///
-    /// This ensures that `ProposerSlashings` are not included twice in successive blocks.
-    pub fn set_proposer_slashings_as_included(
+    pub fn process_proposer_slashing(
        &self,
-        included_proposer_slashings: &[ProposerSlashing],
-    ) {
-        // TODO: method does not take forks into account; consider this.
-        //
-        // https://github.com/sigp/lighthouse/issues/275
-        let mut indices_to_delete = vec![];
-
-        for included in included_proposer_slashings {
-            for (i, for_inclusion) in self
-                .proposer_slashings_for_inclusion
-                .read()
-                .iter()
-                .enumerate()
-            {
-                if included == for_inclusion {
-                    indices_to_delete.push(i);
-                }
-            }
-        }
-
-        let proposer_slashings_for_inclusion = &mut self.proposer_slashings_for_inclusion.write();
-        for i in indices_to_delete {
-            proposer_slashings_for_inclusion.remove(i);
-        }
+        proposer_slashing: ProposerSlashing,
+    ) -> Result<(), ProposerSlashingValidationError> {
+        self.op_pool
+            .insert_proposer_slashing(proposer_slashing, &*self.state.read(), &self.spec)
    }

    /// Accept some attester slashing and queue it for inclusion in an appropriate block.
-    pub fn receive_attester_slashing_for_inclusion(&self, attester_slashing: AttesterSlashing) {
-        // TODO: attester_slashings are not checked for validity; check them.
-        //
-        // https://github.com/sigp/lighthouse/issues/276
-        self.attester_slashings_for_inclusion
-            .write()
-            .push(attester_slashing);
-    }
-
-    /// Return a vec of attester slashings suitable for inclusion in some block.
-    pub fn get_attester_slashings_for_block(&self) -> Vec<AttesterSlashing> {
-        // TODO: attester_slashings are indiscriminately included; check them for validity.
-        //
-        // https://github.com/sigp/lighthouse/issues/275
-        self.attester_slashings_for_inclusion.read().clone()
-    }
-
-    /// Takes a list of `AttesterSlashings` that were included in recent blocks and removes them
-    /// from the inclusion queue.
-    ///
-    /// This ensures that `AttesterSlashings` are not included twice in successive blocks.
-    pub fn set_attester_slashings_as_included(
+    pub fn process_attester_slashing(
        &self,
-        included_attester_slashings: &[AttesterSlashing],
-    ) {
-        // TODO: method does not take forks into account; consider this.
-        //
-        // https://github.com/sigp/lighthouse/issues/275
-        let mut indices_to_delete = vec![];
-
-        for included in included_attester_slashings {
-            for (i, for_inclusion) in self
-                .attester_slashings_for_inclusion
-                .read()
-                .iter()
-                .enumerate()
-            {
-                if included == for_inclusion {
-                    indices_to_delete.push(i);
-                }
-            }
-        }
-
-        let attester_slashings_for_inclusion = &mut self.attester_slashings_for_inclusion.write();
-        for i in indices_to_delete {
-            attester_slashings_for_inclusion.remove(i);
-        }
+        attester_slashing: AttesterSlashing,
+    ) -> Result<(), AttesterSlashingValidationError> {
+        self.op_pool
+            .insert_attester_slashing(attester_slashing, &*self.state.read(), &self.spec)
    }
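For contrast with the deleted per-operation queues, here is a toy sketch of the pooling idea behind the new `op_pool` calls: validate on insert, deduplicate, and hand operations back at block-production time. The real `operation_pool` crate validates against a `BeaconState` and `ChainSpec`; this toy only illustrates the shape, and every name in it is a stand-in:

```rust
use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq)]
struct ToyExit {
    validator_index: u64,
    epoch: u64,
}

#[derive(Debug, PartialEq)]
enum ToyExitError {
    FromTheFuture,
}

/// Toy pool: one slot per validator index, so duplicates collapse automatically.
#[derive(Default)]
struct ToyOperationPool {
    exits: HashMap<u64, ToyExit>,
}

impl ToyOperationPool {
    /// Validate against the current epoch, then store (a later duplicate just overwrites).
    fn insert_exit(&mut self, exit: ToyExit, current_epoch: u64) -> Result<(), ToyExitError> {
        if exit.epoch > current_epoch {
            return Err(ToyExitError::FromTheFuture);
        }
        self.exits.insert(exit.validator_index, exit);
        Ok(())
    }

    /// Hand back up to `max` operations when a block is being produced.
    fn get_exits(&self, max: usize) -> Vec<ToyExit> {
        self.exits.values().take(max).cloned().collect()
    }
}

fn main() {
    let mut pool = ToyOperationPool::default();
    let exit = ToyExit { validator_index: 3, epoch: 10 };
    pool.insert_exit(exit.clone(), 12).unwrap();
    pool.insert_exit(exit, 12).unwrap(); // duplicate submission is harmless
    assert_eq!(pool.get_exits(16).len(), 1);
    assert_eq!(
        pool.insert_exit(ToyExit { validator_index: 4, epoch: 99 }, 12),
        Err(ToyExitError::FromTheFuture)
    );
}
```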
/// Accept some block and attempt to add it to block DAG. /// Accept some block and attempt to add it to block DAG.
@@ -567,7 +585,10 @@ where
        if block.slot > present_slot {
            return Ok(BlockProcessingOutcome::InvalidBlock(
-                InvalidBlock::FutureSlot,
+                InvalidBlock::FutureSlot {
+                    present_slot,
+                    block_slot: block.slot,
+                },
            ));
        }
@@ -594,10 +615,10 @@ where
        // TODO: check the block proposer signature BEFORE doing a state transition. This will
        // significantly lower exposure surface to DoS attacks.

-        // Transition the parent state to the present slot.
+        // Transition the parent state to the block slot.
        let mut state = parent_state;
        let previous_block_header = parent_block.block_header();
-        for _ in state.slot.as_u64()..present_slot.as_u64() {
+        for _ in state.slot.as_u64()..block.slot.as_u64() {
            if let Err(e) = per_slot_processing(&mut state, &previous_block_header, &self.spec) {
                return Ok(BlockProcessingOutcome::InvalidBlock(
                    InvalidBlock::SlotProcessingError(e),
@@ -625,13 +646,6 @@ where
        self.block_store.put(&block_root, &ssz_encode(&block)[..])?;
        self.state_store.put(&state_root, &ssz_encode(&state)[..])?;

-        // Update the inclusion queues so they aren't re-submitted.
-        self.set_deposits_as_included(&block.body.deposits[..]);
-        self.set_transfers_as_included(&block.body.transfers[..]);
-        self.set_exits_as_included(&block.body.voluntary_exits[..]);
-        self.set_proposer_slashings_as_included(&block.body.proposer_slashings[..]);
-        self.set_attester_slashings_as_included(&block.body.attester_slashings[..]);
-
        // run the fork_choice add_block logic
        self.fork_choice
            .write()
@@ -643,8 +657,9 @@ where
        // run instead.
        if self.head().beacon_block_root == parent_block_root {
            self.update_canonical_head(block.clone(), block_root, state.clone(), state_root);
-            // Update the local state variable.
-            *self.state.write() = state;
+
+            // Update the canonical `BeaconState`.
+            self.update_state(state)?;
        }

        Ok(BlockProcessingOutcome::ValidBlock(ValidBlock::Processed))
@@ -662,22 +677,17 @@ where
        let mut state = self.state.read().clone();

-        state.build_epoch_cache(RelativeEpoch::Current, &self.spec)?;
-
        trace!("Finding attestations for new block...");

-        let attestations = self
-            .attestation_aggregator
-            .read()
-            .get_attestations_for_state(&state, &self.spec);
-        trace!(
-            "Inserting {} attestation(s) into new block.",
-            attestations.len()
-        );
-
        let previous_block_root = *state
            .get_block_root(state.slot - 1, &self.spec)
            .map_err(|_| BlockProductionError::UnableToGetBlockRootFromState)?;

+        let (proposer_slashings, attester_slashings) =
+            self.op_pool.get_slashings(&*self.state.read(), &self.spec);
+
        let mut block = BeaconBlock {
            slot: state.slot,
            previous_block_root,
@@ -690,16 +700,23 @@ where
                    deposit_root: Hash256::zero(),
                    block_hash: Hash256::zero(),
                },
-                proposer_slashings: self.get_proposer_slashings_for_block(),
-                attester_slashings: self.get_attester_slashings_for_block(),
-                attestations,
-                deposits: self.get_deposits_for_block(),
-                voluntary_exits: self.get_exits_for_block(),
-                transfers: self.get_transfers_for_block(),
+                proposer_slashings,
+                attester_slashings,
+                attestations: self
+                    .op_pool
+                    .get_attestations(&*self.state.read(), &self.spec),
+                deposits: self.op_pool.get_deposits(&*self.state.read(), &self.spec),
+                voluntary_exits: self
+                    .op_pool
+                    .get_voluntary_exits(&*self.state.read(), &self.spec),
+                transfers: self.op_pool.get_transfers(&*self.state.read(), &self.spec),
            },
        };

-        trace!("BeaconChain::produce_block: updating state for new block.",);
+        debug!(
+            "Produced block with {} attestations, updating state.",
+            block.body.attestations.len()
+        );

        per_block_processing_without_verifying_block_signature(&mut state, &block, &self.spec)?;
@@ -732,12 +749,20 @@ where
            .ok_or_else(|| Error::MissingBeaconState(block.state_root))?;
        let state_root = state.canonical_root();

-        self.update_canonical_head(block, block_root, state, state_root);
+        self.update_canonical_head(block, block_root, state.clone(), state_root);
+
+        // Update the canonical `BeaconState`.
+        self.update_state(state)?;
        }

        Ok(())
    }
/// Returns `true` if the given block root has not been processed.
pub fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result<bool, Error> {
Ok(!self.block_store.exists(beacon_block_root)?)
}
/// Dumps the entire canonical chain, from the head to genesis to a vector for analysis. /// Dumps the entire canonical chain, from the head to genesis to a vector for analysis.
/// ///
/// This could be a very expensive operation and should only be done in testing/analysis /// This could be a very expensive operation and should only be done in testing/analysis

View File

@@ -3,7 +3,7 @@ use types::{BeaconBlock, BeaconState, Hash256};
/// Represents some block and it's associated state. Generally, this will be used for tracking the
/// head, justified head and finalized head.
-#[derive(Clone, Serialize)]
+#[derive(Clone, Serialize, PartialEq, Debug)]
pub struct CheckPoint {
    pub beacon_block: BeaconBlock,
    pub beacon_block_root: Hash256,

View File

@@ -1,5 +1,6 @@
use fork_choice::ForkChoiceError;
use state_processing::BlockProcessingError;
use state_processing::SlotProcessingError;
use types::*;

macro_rules! easy_from_to {
@@ -16,18 +17,24 @@ macro_rules! easy_from_to {
pub enum BeaconChainError {
    InsufficientValidators,
    BadRecentBlockRoots,
    UnableToReadSlot,
    BeaconStateError(BeaconStateError),
    DBInconsistent(String),
    DBError(String),
    ForkChoiceError(ForkChoiceError),
    MissingBeaconBlock(Hash256),
    MissingBeaconState(Hash256),
    SlotProcessingError(SlotProcessingError),
}

easy_from_to!(SlotProcessingError, BeaconChainError);

#[derive(Debug, PartialEq)]
pub enum BlockProductionError {
    UnableToGetBlockRootFromState,
    BlockProcessingError(BlockProcessingError),
    BeaconStateError(BeaconStateError),
}

easy_from_to!(BlockProcessingError, BlockProductionError);
easy_from_to!(BeaconStateError, BlockProductionError);

View File

@@ -28,15 +28,19 @@ pub fn initialise_beacon_chain(
    let block_store = Arc::new(BeaconBlockStore::new(db.clone()));
    let state_store = Arc::new(BeaconStateStore::new(db.clone()));

-    let state_builder = TestingBeaconStateBuilder::from_deterministic_keypairs(8, &spec);
+    let state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(8, &spec);
    let (genesis_state, _keypairs) = state_builder.build();

    let mut genesis_block = BeaconBlock::empty(&spec);
    genesis_block.state_root = Hash256::from_slice(&genesis_state.hash_tree_root());

    // Slot clock
-    let slot_clock = SystemTimeSlotClock::new(genesis_state.genesis_time, spec.seconds_per_slot)
-        .expect("Unable to load SystemTimeSlotClock");
+    let slot_clock = SystemTimeSlotClock::new(
+        spec.genesis_slot,
+        genesis_state.genesis_time,
+        spec.seconds_per_slot,
+    )
+    .expect("Unable to load SystemTimeSlotClock");
    // Choose the fork choice
    let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone());
@@ -65,15 +69,19 @@ pub fn initialise_test_beacon_chain(
    let block_store = Arc::new(BeaconBlockStore::new(db.clone()));
    let state_store = Arc::new(BeaconStateStore::new(db.clone()));

-    let state_builder = TestingBeaconStateBuilder::from_deterministic_keypairs(8, spec);
+    let state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(8, spec);
    let (genesis_state, _keypairs) = state_builder.build();

    let mut genesis_block = BeaconBlock::empty(spec);
    genesis_block.state_root = Hash256::from_slice(&genesis_state.hash_tree_root());

    // Slot clock
-    let slot_clock = SystemTimeSlotClock::new(genesis_state.genesis_time, spec.seconds_per_slot)
-        .expect("Unable to load SystemTimeSlotClock");
+    let slot_clock = SystemTimeSlotClock::new(
+        spec.genesis_slot,
+        genesis_state.genesis_time,
+        spec.seconds_per_slot,
+    )
+    .expect("Unable to load SystemTimeSlotClock");
    // Choose the fork choice
    let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone());

View File

@@ -1,14 +1,18 @@
-mod attestation_aggregator;
 mod beacon_chain;
 mod checkpoint;
 mod errors;
 pub mod initialise;
+pub mod test_utils;

 pub use self::beacon_chain::{BeaconChain, BlockProcessingOutcome, InvalidBlock, ValidBlock};
 pub use self::checkpoint::CheckPoint;
-pub use self::errors::BeaconChainError;
+pub use self::errors::{BeaconChainError, BlockProductionError};
 pub use db;
 pub use fork_choice;
 pub use parking_lot;
 pub use slot_clock;
+pub use state_processing::per_block_processing::errors::{
+    AttestationValidationError, AttesterSlashingValidationError, DepositValidationError,
+    ExitValidationError, ProposerSlashingValidationError, TransferValidationError,
+};
 pub use types;

View File

@@ -0,0 +1,3 @@
mod testing_beacon_chain_builder;
pub use testing_beacon_chain_builder::TestingBeaconChainBuilder;

View File

@@ -0,0 +1,50 @@
pub use crate::{BeaconChain, BeaconChainError, CheckPoint};
use db::{
stores::{BeaconBlockStore, BeaconStateStore},
MemoryDB,
};
use fork_choice::BitwiseLMDGhost;
use slot_clock::TestingSlotClock;
use ssz::TreeHash;
use std::sync::Arc;
use types::test_utils::TestingBeaconStateBuilder;
use types::*;
type TestingBeaconChain = BeaconChain<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>;
pub struct TestingBeaconChainBuilder {
state_builder: TestingBeaconStateBuilder,
}
impl TestingBeaconChainBuilder {
pub fn build(self, spec: &ChainSpec) -> TestingBeaconChain {
let db = Arc::new(MemoryDB::open());
let block_store = Arc::new(BeaconBlockStore::new(db.clone()));
let state_store = Arc::new(BeaconStateStore::new(db.clone()));
let slot_clock = TestingSlotClock::new(spec.genesis_slot.as_u64());
let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone());
let (genesis_state, _keypairs) = self.state_builder.build();
let mut genesis_block = BeaconBlock::empty(&spec);
genesis_block.state_root = Hash256::from_slice(&genesis_state.hash_tree_root());
// Create the Beacon Chain
BeaconChain::from_genesis(
state_store.clone(),
block_store.clone(),
slot_clock,
genesis_state,
genesis_block,
spec.clone(),
fork_choice,
)
.unwrap()
}
}
impl From<TestingBeaconStateBuilder> for TestingBeaconChainBuilder {
fn from(state_builder: TestingBeaconStateBuilder) -> TestingBeaconChainBuilder {
TestingBeaconChainBuilder { state_builder }
}
}
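A hedged usage sketch for the builder above, assuming this workspace's crates and that a spec constructor such as `ChainSpec::foundation()` is available (the assertion relies on the harness-style `TestingSlotClock` starting at the genesis slot):

```rust
use beacon_chain::test_utils::TestingBeaconChainBuilder;
use types::test_utils::TestingBeaconStateBuilder;
use types::ChainSpec;

fn main() {
    // `ChainSpec::foundation()` is assumed here; substitute whatever spec the caller has.
    let spec = ChainSpec::foundation();
    let state_builder = TestingBeaconStateBuilder::from_deterministic_keypairs(8, &spec);
    // The `From` impl above turns a state builder into a chain builder.
    let chain = TestingBeaconChainBuilder::from(state_builder).build(&spec);
    assert_eq!(chain.present_slot(), spec.genesis_slot);
}
```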

View File

@@ -47,6 +47,9 @@ test_cases:
  states:
    - slot: 63
      num_validators: 1003
      num_previous_epoch_attestations: 0
      # slots_per_epoch - attestation_inclusion_delay - skip_slots
      num_current_epoch_attestations: 57
      slashed_validators: [11, 12, 13, 14, 42]
      exited_validators: []
      exit_initiated_validators: [50]

View File

@@ -10,11 +10,11 @@ use log::debug;
 use rayon::prelude::*;
 use slot_clock::TestingSlotClock;
 use ssz::TreeHash;
-use std::collections::HashSet;
-use std::iter::FromIterator;
 use std::sync::Arc;
 use types::{test_utils::TestingBeaconStateBuilder, *};

+type TestingBeaconChain = BeaconChain<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>;
+
 /// The beacon chain harness simulates a single beacon node with `validator_count` validators connected
 /// to it. Each validator is provided a borrow to the beacon chain, where it may read
 /// information and submit blocks/attestations for processing.
@@ -23,7 +23,7 @@ use types::{test_utils::TestingBeaconStateBuilder, *};
 /// is not useful for testing that multiple beacon nodes can reach consensus.
 pub struct BeaconChainHarness {
     pub db: Arc<MemoryDB>,
-    pub beacon_chain: Arc<BeaconChain<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>>,
+    pub beacon_chain: Arc<TestingBeaconChain>,
     pub block_store: Arc<BeaconBlockStore<MemoryDB>>,
     pub state_store: Arc<BeaconStateStore<MemoryDB>>,
     pub validators: Vec<ValidatorHarness>,
@@ -36,19 +36,39 @@ impl BeaconChainHarness {
    /// - A keypair, `BlockProducer` and `Attester` for each validator.
    /// - A new BeaconChain struct where the given validators are in the genesis.
    pub fn new(spec: ChainSpec, validator_count: usize) -> Self {
+        let state_builder =
+            TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec);
+        Self::from_beacon_state_builder(state_builder, spec)
+    }
+
+    pub fn from_beacon_state_builder(
+        state_builder: TestingBeaconStateBuilder,
+        spec: ChainSpec,
+    ) -> Self {
        let db = Arc::new(MemoryDB::open());
        let block_store = Arc::new(BeaconBlockStore::new(db.clone()));
        let state_store = Arc::new(BeaconStateStore::new(db.clone()));
        let slot_clock = TestingSlotClock::new(spec.genesis_slot.as_u64());
        let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone());

-        let state_builder =
-            TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec);
-        let (genesis_state, keypairs) = state_builder.build();
+        let (mut genesis_state, keypairs) = state_builder.build();

        let mut genesis_block = BeaconBlock::empty(&spec);
        genesis_block.state_root = Hash256::from_slice(&genesis_state.hash_tree_root());

+        genesis_state
+            .build_epoch_cache(RelativeEpoch::Previous, &spec)
+            .unwrap();
+        genesis_state
+            .build_epoch_cache(RelativeEpoch::Current, &spec)
+            .unwrap();
+        genesis_state
+            .build_epoch_cache(RelativeEpoch::NextWithoutRegistryChange, &spec)
+            .unwrap();
+        genesis_state
+            .build_epoch_cache(RelativeEpoch::NextWithRegistryChange, &spec)
+            .unwrap();
+
        // Create the Beacon Chain
        let beacon_chain = Arc::new(
            BeaconChain::from_genesis(
@@ -109,55 +129,70 @@ impl BeaconChainHarness {
        );

        self.beacon_chain.slot_clock.set_slot(slot.as_u64());
-        self.beacon_chain.advance_state(slot).unwrap();
+        self.beacon_chain
+            .catchup_state()
+            .expect("Failed to catch state");

        slot
    }
/// Gather the `FreeAttestation`s from the valiators. pub fn gather_attesations(&mut self) -> Vec<Attestation> {
///
/// Note: validators will only produce attestations _once per slot_. So, if you call this twice
/// you'll only get attestations on the first run.
pub fn gather_free_attesations(&mut self) -> Vec<FreeAttestation> {
let present_slot = self.beacon_chain.present_slot(); let present_slot = self.beacon_chain.present_slot();
let state = self.beacon_chain.state.read();
let attesting_validators = self let mut attestations = vec![];
.beacon_chain
.state for committee in state
.read()
.get_crosslink_committees_at_slot(present_slot, &self.spec) .get_crosslink_committees_at_slot(present_slot, &self.spec)
.unwrap() .unwrap()
.iter() {
.fold(vec![], |mut acc, c| { for &validator in &committee.committee {
acc.append(&mut c.committee.clone()); let duties = state
acc .get_attestation_duties(validator, &self.spec)
}); .unwrap()
let attesting_validators: HashSet<usize> = .expect("Attesting validators by definition have duties");
HashSet::from_iter(attesting_validators.iter().cloned());
let free_attestations: Vec<FreeAttestation> = self // Obtain `AttestationData` from the beacon chain.
.validators let data = self
.par_iter_mut() .beacon_chain
.enumerate() .produce_attestation_data(duties.shard)
.filter_map(|(i, validator)| { .unwrap();
if attesting_validators.contains(&i) {
// Advance the validator slot.
validator.set_slot(present_slot);
// Prompt the validator to produce an attestation (if required). // Produce an aggregate signature with a single signature.
validator.produce_free_attestation().ok() let aggregate_signature = {
} else { let message = AttestationDataAndCustodyBit {
None data: data.clone(),
} custody_bit: false,
}) }
.collect(); .hash_tree_root();
let domain = self.spec.get_domain(
state.slot.epoch(self.spec.slots_per_epoch),
Domain::Attestation,
&state.fork,
);
let sig =
Signature::new(&message, domain, &self.validators[validator].keypair.sk);
debug!( let mut agg_sig = AggregateSignature::new();
"Gathered {} FreeAttestations for slot {}.", agg_sig.add(&sig);
free_attestations.len(),
present_slot
);
free_attestations agg_sig
};
let mut aggregation_bitfield = Bitfield::with_capacity(duties.committee_len);
let custody_bitfield = Bitfield::with_capacity(duties.committee_len);
aggregation_bitfield.set(duties.committee_index, true);
attestations.push(Attestation {
aggregation_bitfield,
data,
custody_bitfield,
aggregate_signature,
})
}
}
attestations
} }
/// Get the block from the proposer for the slot. /// Get the block from the proposer for the slot.
@ -176,6 +211,7 @@ impl BeaconChainHarness {
// Ensure the validators slot clock is accurate. // Ensure the validators slot clock is accurate.
self.validators[proposer].set_slot(present_slot); self.validators[proposer].set_slot(present_slot);
self.validators[proposer].produce_block().unwrap() self.validators[proposer].produce_block().unwrap()
} }
@ -183,33 +219,37 @@ impl BeaconChainHarness {
/// ///
/// This is the ideal scenario for the Beacon Chain, 100% honest participation from /// This is the ideal scenario for the Beacon Chain, 100% honest participation from
/// validators. /// validators.
pub fn advance_chain_with_block(&mut self) { pub fn advance_chain_with_block(&mut self) -> BeaconBlock {
self.increment_beacon_chain_slot(); self.increment_beacon_chain_slot();
// Produce a new block. // Produce a new block.
debug!("Producing block...");
let block = self.produce_block(); let block = self.produce_block();
debug!("Submitting block for processing..."); debug!("Submitting block for processing...");
match self.beacon_chain.process_block(block) { match self.beacon_chain.process_block(block.clone()) {
Ok(BlockProcessingOutcome::ValidBlock(_)) => {} Ok(BlockProcessingOutcome::ValidBlock(_)) => {}
other => panic!("block processing failed with {:?}", other), other => panic!("block processing failed with {:?}", other),
}; };
debug!("...block processed by BeaconChain."); debug!("...block processed by BeaconChain.");
debug!("Producing free attestations..."); debug!("Producing attestations...");
// Produce new attestations. // Produce new attestations.
let free_attestations = self.gather_free_attesations(); let attestations = self.gather_attesations();
debug!("Processing free attestations..."); debug!("Processing {} attestations...", attestations.len());
free_attestations.par_iter().for_each(|free_attestation| { attestations
self.beacon_chain .par_iter()
.process_free_attestation(free_attestation.clone()) .enumerate()
.unwrap(); .for_each(|(i, attestation)| {
}); self.beacon_chain
.process_attestation(attestation.clone())
.unwrap_or_else(|_| panic!("Attestation {} invalid: {:?}", i, attestation));
});
debug!("Free attestations processed."); debug!("Attestations processed.");
block
} }
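A minimal usage sketch of the honest-participation flow above (assumptions, not part of this diff: the `BeaconChainHarness::new(spec, validator_count)` constructor and the `ChainSpec::few_validators()` preset used elsewhere in this harness crate):

    // Drive the chain for one epoch with full, honest participation.
    let spec = ChainSpec::few_validators();
    let mut harness = BeaconChainHarness::new(spec.clone(), 8);
    for _ in 0..spec.slots_per_epoch {
        // Produces a block, imports it, then gathers and processes attestations.
        let block = harness.advance_chain_with_block();
        assert!(block.slot > spec.genesis_slot);
    }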
/// Signs a message using some validators secret key with the `Fork` info from the latest state /// Signs a message using some validators secret key with the `Fork` info from the latest state
@ -260,7 +300,7 @@ impl BeaconChainHarness {
/// If a new `ValidatorHarness` was created, the validator should become fully operational as /// If a new `ValidatorHarness` was created, the validator should become fully operational as
/// if the validator were created during `BeaconChainHarness` instantiation. /// if the validator were created during `BeaconChainHarness` instantiation.
pub fn add_deposit(&mut self, deposit: Deposit, keypair: Option<Keypair>) { pub fn add_deposit(&mut self, deposit: Deposit, keypair: Option<Keypair>) {
self.beacon_chain.receive_deposit_for_inclusion(deposit); self.beacon_chain.process_deposit(deposit).unwrap();
// If a keypair is present, add a new `ValidatorHarness` to the rig. // If a keypair is present, add a new `ValidatorHarness` to the rig.
if let Some(keypair) = keypair { if let Some(keypair) = keypair {
@ -276,24 +316,26 @@ impl BeaconChainHarness {
/// will stop receiving duties from the beacon chain and just do nothing when prompted to /// will stop receiving duties from the beacon chain and just do nothing when prompted to
/// produce/attest. /// produce/attest.
pub fn add_exit(&mut self, exit: VoluntaryExit) { pub fn add_exit(&mut self, exit: VoluntaryExit) {
self.beacon_chain.receive_exit_for_inclusion(exit); self.beacon_chain.process_voluntary_exit(exit).unwrap();
} }
/// Submit a transfer to the `BeaconChain` for inclusion in some block. /// Submit a transfer to the `BeaconChain` for inclusion in some block.
pub fn add_transfer(&mut self, transfer: Transfer) { pub fn add_transfer(&mut self, transfer: Transfer) {
self.beacon_chain.receive_transfer_for_inclusion(transfer); self.beacon_chain.process_transfer(transfer).unwrap();
} }
/// Submit a proposer slashing to the `BeaconChain` for inclusion in some block. /// Submit a proposer slashing to the `BeaconChain` for inclusion in some block.
pub fn add_proposer_slashing(&mut self, proposer_slashing: ProposerSlashing) { pub fn add_proposer_slashing(&mut self, proposer_slashing: ProposerSlashing) {
self.beacon_chain self.beacon_chain
.receive_proposer_slashing_for_inclusion(proposer_slashing); .process_proposer_slashing(proposer_slashing)
.unwrap();
} }
/// Submit an attester slashing to the `BeaconChain` for inclusion in some block. /// Submit an attester slashing to the `BeaconChain` for inclusion in some block.
pub fn add_attester_slashing(&mut self, attester_slashing: AttesterSlashing) { pub fn add_attester_slashing(&mut self, attester_slashing: AttesterSlashing) {
self.beacon_chain self.beacon_chain
.receive_attester_slashing_for_inclusion(attester_slashing); .process_attester_slashing(attester_slashing)
.unwrap();
} }
/// Executes the fork choice rule on the `BeaconChain`, selecting a new canonical head. /// Executes the fork choice rule on the `BeaconChain`, selecting a new canonical head.

@ -16,6 +16,10 @@ pub struct StateCheck {
pub slot: Slot, pub slot: Slot,
/// Checked against `beacon_state.validator_registry.len()`. /// Checked against `beacon_state.validator_registry.len()`.
pub num_validators: Option<usize>, pub num_validators: Option<usize>,
/// The number of pending attestations from the previous epoch that should be in the state.
pub num_previous_epoch_attestations: Option<usize>,
/// The number of pending attestations from the current epoch that should be in the state.
pub num_current_epoch_attestations: Option<usize>,
/// A list of validator indices which have been penalized. Must be in ascending order. /// A list of validator indices which have been penalized. Must be in ascending order.
pub slashed_validators: Option<Vec<u64>>, pub slashed_validators: Option<Vec<u64>>,
/// A list of validator indices which have been fully exited. Must be in ascending order. /// A list of validator indices which have been fully exited. Must be in ascending order.
@ -34,6 +38,8 @@ impl StateCheck {
Self { Self {
slot: Slot::from(as_u64(&yaml, "slot").expect("State must specify slot")), slot: Slot::from(as_u64(&yaml, "slot").expect("State must specify slot")),
num_validators: as_usize(&yaml, "num_validators"), num_validators: as_usize(&yaml, "num_validators"),
num_previous_epoch_attestations: as_usize(&yaml, "num_previous_epoch_attestations"),
num_current_epoch_attestations: as_usize(&yaml, "num_current_epoch_attestations"),
slashed_validators: as_vec_u64(&yaml, "slashed_validators"), slashed_validators: as_vec_u64(&yaml, "slashed_validators"),
exited_validators: as_vec_u64(&yaml, "exited_validators"), exited_validators: as_vec_u64(&yaml, "exited_validators"),
exit_initiated_validators: as_vec_u64(&yaml, "exit_initiated_validators"), exit_initiated_validators: as_vec_u64(&yaml, "exit_initiated_validators"),
@ -46,6 +52,7 @@ impl StateCheck {
/// # Panics /// # Panics
/// ///
/// Panics with an error message if any test fails. /// Panics with an error message if any test fails.
#[allow(clippy::cyclomatic_complexity)]
pub fn assert_valid(&self, state: &BeaconState, spec: &ChainSpec) { pub fn assert_valid(&self, state: &BeaconState, spec: &ChainSpec) {
let state_epoch = state.slot.epoch(spec.slots_per_epoch); let state_epoch = state.slot.epoch(spec.slots_per_epoch);
@ -58,6 +65,7 @@ impl StateCheck {
"State slot is invalid." "State slot is invalid."
); );
// Check the validator count
if let Some(num_validators) = self.num_validators { if let Some(num_validators) = self.num_validators {
assert_eq!( assert_eq!(
state.validator_registry.len(), state.validator_registry.len(),
@ -67,6 +75,26 @@ impl StateCheck {
info!("OK: num_validators = {}.", num_validators); info!("OK: num_validators = {}.", num_validators);
} }
// Check the previous epoch attestations
if let Some(n) = self.num_previous_epoch_attestations {
assert_eq!(
state.previous_epoch_attestations.len(),
n,
"previous epoch attestations count != expected."
);
info!("OK: num_previous_epoch_attestations = {}.", n);
}
// Check the current epoch attestations
if let Some(n) = self.num_current_epoch_attestations {
assert_eq!(
state.current_epoch_attestations.len(),
n,
"current epoch attestations count != expected."
);
info!("OK: num_current_epoch_attestations = {}.", n);
}
// Check for slashed validators. // Check for slashed validators.
if let Some(ref slashed_validators) = self.slashed_validators { if let Some(ref slashed_validators) = self.slashed_validators {
let actually_slashed_validators: Vec<u64> = state let actually_slashed_validators: Vec<u64> = state

@ -14,9 +14,6 @@ use slot_clock::SlotClock;
use std::sync::Arc; use std::sync::Arc;
use types::{AttestationData, BeaconBlock, FreeAttestation, Signature, Slot}; use types::{AttestationData, BeaconBlock, FreeAttestation, Signature, Slot};
// mod attester;
// mod producer;
/// Connect directly to a borrowed `BeaconChain` instance so an attester/producer can request/submit /// Connect directly to a borrowed `BeaconChain` instance so an attester/producer can request/submit
/// blocks/attestations. /// blocks/attestations.
/// ///
@ -42,11 +39,6 @@ impl<T: ClientDB, U: SlotClock, F: ForkChoice> DirectBeaconNode<T, U, F> {
pub fn last_published_block(&self) -> Option<BeaconBlock> { pub fn last_published_block(&self) -> Option<BeaconBlock> {
Some(self.published_blocks.read().last()?.clone()) Some(self.published_blocks.read().last()?.clone())
} }
/// Get the last published attestation (if any).
pub fn last_published_free_attestation(&self) -> Option<FreeAttestation> {
Some(self.published_attestations.read().last()?.clone())
}
} }
impl<T: ClientDB, U: SlotClock, F: ForkChoice> AttesterBeaconNode for DirectBeaconNode<T, U, F> { impl<T: ClientDB, U: SlotClock, F: ForkChoice> AttesterBeaconNode for DirectBeaconNode<T, U, F> {
@ -61,7 +53,7 @@ impl<T: ClientDB, U: SlotClock, F: ForkChoice> AttesterBeaconNode for DirectBeac
} }
} }
fn publish_attestation_data( fn publish_attestation(
&self, &self,
free_attestation: FreeAttestation, free_attestation: FreeAttestation,
) -> Result<AttestationPublishOutcome, NodeError> { ) -> Result<AttestationPublishOutcome, NodeError> {

@ -2,8 +2,7 @@ mod direct_beacon_node;
mod direct_duties; mod direct_duties;
mod local_signer; mod local_signer;
use attester::PollOutcome as AttestationPollOutcome; use attester::Attester;
use attester::{Attester, Error as AttestationPollError};
use beacon_chain::BeaconChain; use beacon_chain::BeaconChain;
use block_proposer::PollOutcome as BlockPollOutcome; use block_proposer::PollOutcome as BlockPollOutcome;
use block_proposer::{BlockProducer, Error as BlockPollError}; use block_proposer::{BlockProducer, Error as BlockPollError};
@ -14,7 +13,7 @@ use fork_choice::BitwiseLMDGhost;
use local_signer::LocalSigner; use local_signer::LocalSigner;
use slot_clock::TestingSlotClock; use slot_clock::TestingSlotClock;
use std::sync::Arc; use std::sync::Arc;
use types::{BeaconBlock, ChainSpec, FreeAttestation, Keypair, Slot}; use types::{BeaconBlock, ChainSpec, Keypair, Slot};
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub enum BlockProduceError { pub enum BlockProduceError {
@ -22,12 +21,6 @@ pub enum BlockProduceError {
PollError(BlockPollError), PollError(BlockPollError),
} }
#[derive(Debug, PartialEq)]
pub enum AttestationProduceError {
DidNotProduce(AttestationPollOutcome),
PollError(AttestationPollError),
}
type TestingBlockProducer = BlockProducer< type TestingBlockProducer = BlockProducer<
TestingSlotClock, TestingSlotClock,
DirectBeaconNode<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>, DirectBeaconNode<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
@ -117,21 +110,6 @@ impl ValidatorHarness {
.expect("Unable to obtain produced block.")) .expect("Unable to obtain produced block."))
} }
/// Run the `poll` function on the `Attester` and produce a `FreeAttestation`.
///
/// An error is returned if the attester refuses to attest.
pub fn produce_free_attestation(&mut self) -> Result<FreeAttestation, AttestationProduceError> {
match self.attester.poll() {
Ok(AttestationPollOutcome::AttestationProduced(_)) => {}
Ok(outcome) => return Err(AttestationProduceError::DidNotProduce(outcome)),
Err(error) => return Err(AttestationProduceError::PollError(error)),
};
Ok(self
.beacon_node
.last_published_free_attestation()
.expect("Unable to obtain produced attestation."))
}
/// Set the validators slot clock to the specified slot. /// Set the validators slot clock to the specified slot.
/// ///
/// The validators slot clock will always read this value until it is set to something else. /// The validators slot clock will always read this value until it is set to something else.

@ -14,6 +14,7 @@ types = { path = "../../eth2/types" }
slot_clock = { path = "../../eth2/utils/slot_clock" } slot_clock = { path = "../../eth2/utils/slot_clock" }
error-chain = "0.12.0" error-chain = "0.12.0"
slog = "^2.2.3" slog = "^2.2.3"
ssz = { path = "../../eth2/utils/ssz" }
tokio = "0.1.15" tokio = "0.1.15"
clap = "2.32.0" clap = "2.32.0"
dirs = "1.0.3" dirs = "1.0.3"

@ -10,6 +10,7 @@ use std::path::PathBuf;
use types::multiaddr::Protocol; use types::multiaddr::Protocol;
use types::multiaddr::ToMultiaddr; use types::multiaddr::ToMultiaddr;
use types::ChainSpec; use types::ChainSpec;
use types::Multiaddr;
/// Stores the client configuration for this Lighthouse instance. /// Stores the client configuration for this Lighthouse instance.
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
@ -76,7 +77,7 @@ impl ClientConfig {
} }
// Custom listening address ipv4/ipv6 // Custom listening address ipv4/ipv6
// TODO: Handle list of addresses // TODO: Handle list of addresses
if let Some(listen_address_str) = args.value_of("listen_address") { if let Some(listen_address_str) = args.value_of("listen-address") {
if let Ok(listen_address) = listen_address_str.parse::<IpAddr>() { if let Ok(listen_address) = listen_address_str.parse::<IpAddr>() {
let multiaddr = SocketAddr::new(listen_address, config.net_conf.listen_port) let multiaddr = SocketAddr::new(listen_address, config.net_conf.listen_port)
.to_multiaddr() .to_multiaddr()
@ -88,6 +89,17 @@ impl ClientConfig {
} }
} }
// Custom bootnodes
// TODO: Handle list of addresses
if let Some(boot_addresses_str) = args.value_of("boot-nodes") {
if let Ok(boot_address) = boot_addresses_str.parse::<Multiaddr>() {
config.net_conf.boot_nodes.append(&mut vec![boot_address]);
} else {
error!(log, "Invalid Bootnode multiaddress"; "Multiaddr" => boot_addresses_str);
return Err("Invalid IP Address");
}
}
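The `TODO` above could later accept a list; a sketch of one comma-separated approach (the delimiter and reuse of the existing error path are assumptions, not part of this change):

    // Hypothetical: parse a comma-separated list of bootnode multiaddrs.
    if let Some(boot_addresses_str) = args.value_of("boot-nodes") {
        for addr_str in boot_addresses_str.split(',') {
            match addr_str.parse::<Multiaddr>() {
                Ok(boot_address) => config.net_conf.boot_nodes.push(boot_address),
                Err(_) => {
                    error!(log, "Invalid Bootnode multiaddress"; "Multiaddr" => addr_str);
                    return Err("Invalid IP Address");
                }
            }
        }
    }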
/* Filesystem related arguments */ /* Filesystem related arguments */
// Custom datadir // Custom datadir

@ -8,28 +8,32 @@ pub mod notifier;
use beacon_chain::BeaconChain; use beacon_chain::BeaconChain;
pub use client_config::ClientConfig; pub use client_config::ClientConfig;
pub use client_types::ClientTypes; pub use client_types::ClientTypes;
use db::ClientDB;
use exit_future::Signal; use exit_future::Signal;
use fork_choice::ForkChoice;
use futures::{future::Future, Stream};
use network::Service as NetworkService; use network::Service as NetworkService;
use slog::o; use slog::{error, info, o};
use slot_clock::SlotClock;
use std::marker::PhantomData; use std::marker::PhantomData;
use std::sync::Arc; use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::runtime::TaskExecutor; use tokio::runtime::TaskExecutor;
use tokio::timer::Interval;
/// Main beacon node client service. This provides the connection and initialisation of the clients /// Main beacon node client service. This provides the connection and initialisation of the clients
/// sub-services in multiple threads. /// sub-services in multiple threads.
pub struct Client<T: ClientTypes> { pub struct Client<T: ClientTypes> {
/// Configuration for the lighthouse client. /// Configuration for the lighthouse client.
config: ClientConfig, _config: ClientConfig,
/// The beacon chain for the running client. /// The beacon chain for the running client.
beacon_chain: Arc<BeaconChain<T::DB, T::SlotClock, T::ForkChoice>>, _beacon_chain: Arc<BeaconChain<T::DB, T::SlotClock, T::ForkChoice>>,
/// Reference to the network service. /// Reference to the network service.
pub network: Arc<NetworkService>, pub network: Arc<NetworkService>,
/// Future to stop and begin shutdown of the Client. /// Signal to terminate the RPC server.
//TODO: Decide best way to handle shutdown pub rpc_exit_signal: Option<Signal>,
pub exit: exit_future::Exit, /// Signal to terminate the slot timer.
/// The sending future to call to terminate the Client. pub slot_timer_exit_signal: Option<Signal>,
//TODO: Decide best way to handle shutdown
pub exit_signal: Signal,
/// The clients logger. /// The clients logger.
log: slog::Logger, log: slog::Logger,
/// Marker to pin the beacon chain generics. /// Marker to pin the beacon chain generics.
@ -43,16 +47,43 @@ impl<TClientType: ClientTypes> Client<TClientType> {
log: slog::Logger, log: slog::Logger,
executor: &TaskExecutor, executor: &TaskExecutor,
) -> error::Result<Self> { ) -> error::Result<Self> {
let (exit_signal, exit) = exit_future::signal();
// generate a beacon chain // generate a beacon chain
let beacon_chain = TClientType::initialise_beacon_chain(&config); let beacon_chain = TClientType::initialise_beacon_chain(&config);
if beacon_chain.read_slot_clock().is_none() {
panic!("Cannot start client before genesis!")
}
// Block starting the client until we have caught the state up to the current slot.
//
// If we don't block here we create an initial scenario where we're unable to process any
// blocks and we're basically useless.
{
let state_slot = beacon_chain.state.read().slot;
let wall_clock_slot = beacon_chain.read_slot_clock().unwrap();
let slots_since_genesis = beacon_chain.slots_since_genesis().unwrap();
info!(
log,
"Initializing state";
"state_slot" => state_slot,
"wall_clock_slot" => wall_clock_slot,
"slots_since_genesis" => slots_since_genesis,
"catchup_distance" => wall_clock_slot - state_slot,
);
}
do_state_catchup(&beacon_chain, &log);
info!(
log,
"State initialized";
"state_slot" => beacon_chain.state.read().slot,
"wall_clock_slot" => beacon_chain.read_slot_clock().unwrap(),
);
// Start the network service, libp2p and syncing threads // Start the network service, libp2p and syncing threads
// TODO: Add beacon_chain reference to network parameters // TODO: Add beacon_chain reference to network parameters
let network_config = &config.net_conf; let network_config = &config.net_conf;
let network_logger = log.new(o!("Service" => "Network")); let network_logger = log.new(o!("Service" => "Network"));
let (network, _network_send) = NetworkService::new( let (network, network_send) = NetworkService::new(
beacon_chain.clone(), beacon_chain.clone(),
network_config, network_config,
executor, executor,
@ -60,18 +91,85 @@ impl<TClientType: ClientTypes> Client<TClientType> {
)?; )?;
// spawn the RPC server // spawn the RPC server
if config.rpc_conf.enabled { let rpc_exit_signal = if config.rpc_conf.enabled {
rpc::start_server(&config.rpc_conf, &log); Some(rpc::start_server(
&config.rpc_conf,
executor,
network_send,
beacon_chain.clone(),
&log,
))
} else {
None
};
let (slot_timer_exit_signal, exit) = exit_future::signal();
if let Ok(Some(duration_to_next_slot)) = beacon_chain.slot_clock.duration_to_next_slot() {
// set up the validator work interval - start at next slot and proceed every slot
let interval = {
// Set the interval to start at the next slot, and every slot after
let slot_duration = Duration::from_secs(config.spec.seconds_per_slot);
//TODO: Handle checked add correctly
Interval::new(Instant::now() + duration_to_next_slot, slot_duration)
};
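The `TODO` above could be addressed with `Instant::checked_add`; a sketch under that assumption, falling back to an immediate first tick if the addition would overflow:

    let start = Instant::now()
        .checked_add(duration_to_next_slot)
        .unwrap_or_else(Instant::now);
    let interval = Interval::new(start, slot_duration);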
let chain = beacon_chain.clone();
let log = log.new(o!("Service" => "SlotTimer"));
executor.spawn(
exit.until(
interval
.for_each(move |_| {
do_state_catchup(&chain, &log);
Ok(())
})
.map_err(|_| ()),
)
.map(|_| ()),
);
} }
Ok(Client { Ok(Client {
config, _config: config,
beacon_chain, _beacon_chain: beacon_chain,
exit, rpc_exit_signal,
exit_signal, slot_timer_exit_signal: Some(slot_timer_exit_signal),
log, log,
network, network,
phantom: PhantomData, phantom: PhantomData,
}) })
} }
} }
fn do_state_catchup<T, U, F>(chain: &Arc<BeaconChain<T, U, F>>, log: &slog::Logger)
where
T: ClientDB,
U: SlotClock,
F: ForkChoice,
{
if let Some(genesis_height) = chain.slots_since_genesis() {
let result = chain.catchup_state();
let common = o!(
"best_slot" => chain.head().beacon_block.slot,
"latest_block_root" => format!("{}", chain.head().beacon_block_root),
"wall_clock_slot" => chain.read_slot_clock().unwrap(),
"state_slot" => chain.state.read().slot,
"slots_since_genesis" => genesis_height,
);
match result {
Ok(_) => info!(
log,
"NewSlot";
common
),
Err(e) => error!(
log,
"StateCatchupFailed";
"error" => format!("{:?}", e),
common
),
};
}
}

@ -2,7 +2,7 @@ use crate::Client;
use crate::ClientTypes; use crate::ClientTypes;
use exit_future::Exit; use exit_future::Exit;
use futures::{Future, Stream}; use futures::{Future, Stream};
use slog::{debug, info, o}; use slog::{debug, o};
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
use tokio::runtime::TaskExecutor; use tokio::runtime::TaskExecutor;
@ -14,7 +14,7 @@ pub fn run<T: ClientTypes>(client: &Client<T>, executor: TaskExecutor, exit: Exi
// notification heartbeat // notification heartbeat
let interval = Interval::new(Instant::now(), Duration::from_secs(5)); let interval = Interval::new(Instant::now(), Duration::from_secs(5));
let log = client.log.new(o!("Service" => "Notifier")); let _log = client.log.new(o!("Service" => "Notifier"));
// TODO: Debugging only // TODO: Debugging only
let counter = Arc::new(Mutex::new(0)); let counter = Arc::new(Mutex::new(0));
@ -22,13 +22,13 @@ pub fn run<T: ClientTypes>(client: &Client<T>, executor: TaskExecutor, exit: Exi
// build heartbeat logic here // build heartbeat logic here
let heartbeat = move |_| { let heartbeat = move |_| {
info!(log, "Temp heartbeat output"); //debug!(log, "Temp heartbeat output");
//TODO: Remove this logic. Testing only //TODO: Remove this logic. Testing only
let mut count = counter.lock().unwrap(); let mut count = counter.lock().unwrap();
*count += 1; *count += 1;
if *count % 5 == 0 { if *count % 5 == 0 {
debug!(log, "Sending Message"); // debug!(log, "Sending Message");
network.send_message(); network.send_message();
} }

@ -5,6 +5,7 @@ authors = ["Age Manning <Age@AgeManning.com>"]
edition = "2018" edition = "2018"
[dependencies] [dependencies]
beacon_chain = { path = "../beacon_chain" }
# SigP repository until PR is merged # SigP repository until PR is merged
libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "b3c32d9a821ae6cc89079499cc6e8a6bab0bffc3" } libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "b3c32d9a821ae6cc89079499cc6e8a6bab0bffc3" }
types = { path = "../../eth2/types" } types = { path = "../../eth2/types" }

@ -12,8 +12,10 @@ use libp2p::{
tokio_io::{AsyncRead, AsyncWrite}, tokio_io::{AsyncRead, AsyncWrite},
NetworkBehaviour, PeerId, NetworkBehaviour, PeerId,
}; };
use slog::{debug, o}; use slog::{debug, o, trace, warn};
use types::Topic; use ssz::{ssz_encode, Decodable, DecodeError, Encodable, SszStream};
use types::{Attestation, BeaconBlock};
use types::{Topic, TopicHash};
/// Builds the network behaviour for the libp2p Swarm. /// Builds the network behaviour for the libp2p Swarm.
/// Implements gossipsub message routing. /// Implements gossipsub message routing.
@ -44,13 +46,30 @@ impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<GossipsubE
{ {
fn inject_event(&mut self, event: GossipsubEvent) { fn inject_event(&mut self, event: GossipsubEvent) {
match event { match event {
GossipsubEvent::Message(message) => { GossipsubEvent::Message(gs_msg) => {
let gs_message = String::from_utf8_lossy(&message.data); trace!(self.log, "Received GossipEvent"; "msg" => format!("{:?}", gs_msg));
// TODO: Remove this type - debug only
self.events let pubsub_message = match PubsubMessage::ssz_decode(&gs_msg.data, 0) {
.push(BehaviourEvent::Message(gs_message.to_string())) //TODO: Punish peer on error
Err(e) => {
warn!(
self.log,
"Received undecodable message from Peer {:?} error", gs_msg.source;
"error" => format!("{:?}", e)
);
return;
}
Ok((msg, _index)) => msg,
};
self.events.push(BehaviourEvent::GossipMessage {
source: gs_msg.source,
topics: gs_msg.topics,
message: Box::new(pubsub_message),
});
} }
_ => {} GossipsubEvent::Subscribed { .. } => {}
GossipsubEvent::Unsubscribed { .. } => {}
} }
} }
} }
@ -85,7 +104,8 @@ impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<IdentifyEv
); );
info.listen_addrs.truncate(20); info.listen_addrs.truncate(20);
} }
self.events.push(BehaviourEvent::Identified(peer_id, info)); self.events
.push(BehaviourEvent::Identified(peer_id, Box::new(info)));
} }
IdentifyEvent::Error { .. } => {} IdentifyEvent::Error { .. } => {}
IdentifyEvent::SendBack { .. } => {} IdentifyEvent::SendBack { .. } => {}
@ -144,13 +164,86 @@ impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
pub fn send_rpc(&mut self, peer_id: PeerId, rpc_event: RPCEvent) { pub fn send_rpc(&mut self, peer_id: PeerId, rpc_event: RPCEvent) {
self.serenity_rpc.send_rpc(peer_id, rpc_event); self.serenity_rpc.send_rpc(peer_id, rpc_event);
} }
/// Publishes a message on the pubsub (gossipsub) behaviour.
pub fn publish(&mut self, topics: Vec<Topic>, message: PubsubMessage) {
let message_bytes = ssz_encode(&message);
for topic in topics {
self.gossipsub.publish(topic, message_bytes.clone());
}
}
} }
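A usage sketch for `publish` (assumptions, not introduced by this diff: a constructed `Behaviour` named `behaviour`, a `block: BeaconBlock` in scope, and the `TopicBuilder` re-exported from the `types` crate):

    // Encode and gossip a newly produced block on a single topic.
    let topic = TopicBuilder::new("beacon_block".to_string()).build();
    behaviour.publish(vec![topic], PubsubMessage::Block(block));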
/// The types of events that can be obtained from polling the behaviour. /// The types of events that can be obtained from polling the behaviour.
pub enum BehaviourEvent { pub enum BehaviourEvent {
RPC(PeerId, RPCEvent), RPC(PeerId, RPCEvent),
PeerDialed(PeerId), PeerDialed(PeerId),
Identified(PeerId, IdentifyInfo), Identified(PeerId, Box<IdentifyInfo>),
// TODO: This is a stub at the moment // TODO: This is a stub at the moment
Message(String), GossipMessage {
source: PeerId,
topics: Vec<TopicHash>,
message: Box<PubsubMessage>,
},
}
/// Messages that are passed to and from the pubsub (Gossipsub) behaviour.
#[derive(Debug, Clone, PartialEq)]
pub enum PubsubMessage {
/// Gossipsub message providing notification of a new block.
Block(BeaconBlock),
/// Gossipsub message providing notification of a new attestation.
Attestation(Attestation),
}
//TODO: Correctly encode/decode enums. Prefixing with integer for now.
impl Encodable for PubsubMessage {
fn ssz_append(&self, s: &mut SszStream) {
match self {
PubsubMessage::Block(block_gossip) => {
0u32.ssz_append(s);
block_gossip.ssz_append(s);
}
PubsubMessage::Attestation(attestation_gossip) => {
1u32.ssz_append(s);
attestation_gossip.ssz_append(s);
}
}
}
}
impl Decodable for PubsubMessage {
fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError> {
let (id, index) = u32::ssz_decode(bytes, index)?;
match id {
0 => {
let (block, index) = BeaconBlock::ssz_decode(bytes, index)?;
Ok((PubsubMessage::Block(block), index))
}
1 => {
let (attestation, index) = Attestation::ssz_decode(bytes, index)?;
Ok((PubsubMessage::Attestation(attestation), index))
}
_ => Err(DecodeError::Invalid),
}
}
}
#[cfg(test)]
mod test {
use super::*;
use types::*;
#[test]
fn ssz_encoding() {
let original = PubsubMessage::Block(BeaconBlock::empty(&ChainSpec::foundation()));
let encoded = ssz_encode(&original);
println!("{:?}", encoded);
let (decoded, _i) = PubsubMessage::ssz_decode(&encoded, 0).unwrap();
assert_eq!(original, decoded);
}
} }

@ -8,12 +8,13 @@ pub mod error;
pub mod rpc; pub mod rpc;
mod service; mod service;
pub use behaviour::PubsubMessage;
pub use config::Config as NetworkConfig; pub use config::Config as NetworkConfig;
pub use libp2p::{ pub use libp2p::{
gossipsub::{GossipsubConfig, GossipsubConfigBuilder}, gossipsub::{GossipsubConfig, GossipsubConfigBuilder},
PeerId, PeerId,
}; };
pub use rpc::{HelloMessage, RPCEvent}; pub use rpc::RPCEvent;
pub use service::Libp2pEvent; pub use service::Libp2pEvent;
pub use service::Service; pub use service::Service;
pub use types::multiaddr; pub use types::multiaddr;

@ -1,3 +1,4 @@
use ssz::{Decodable, DecodeError, Encodable, SszStream};
/// Available RPC methods types and ids. /// Available RPC methods types and ids.
use ssz_derive::{Decode, Encode}; use ssz_derive::{Decode, Encode};
use types::{BeaconBlockBody, BeaconBlockHeader, Epoch, Hash256, Slot}; use types::{BeaconBlockBody, BeaconBlockHeader, Epoch, Hash256, Slot};
@ -53,13 +54,27 @@ impl Into<u16> for RPCMethod {
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub enum RPCRequest { pub enum RPCRequest {
Hello(HelloMessage), Hello(HelloMessage),
Goodbye(u64), Goodbye(GoodbyeReason),
BeaconBlockRoots(BeaconBlockRootsRequest), BeaconBlockRoots(BeaconBlockRootsRequest),
BeaconBlockHeaders(BeaconBlockHeadersRequest), BeaconBlockHeaders(BeaconBlockHeadersRequest),
BeaconBlockBodies(BeaconBlockBodiesRequest), BeaconBlockBodies(BeaconBlockBodiesRequest),
BeaconChainState(BeaconChainStateRequest), BeaconChainState(BeaconChainStateRequest),
} }
impl RPCRequest {
pub fn method_id(&self) -> u16 {
let method = match self {
RPCRequest::Hello(_) => RPCMethod::Hello,
RPCRequest::Goodbye(_) => RPCMethod::Goodbye,
RPCRequest::BeaconBlockRoots(_) => RPCMethod::BeaconBlockRoots,
RPCRequest::BeaconBlockHeaders(_) => RPCMethod::BeaconBlockHeaders,
RPCRequest::BeaconBlockBodies(_) => RPCMethod::BeaconBlockBodies,
RPCRequest::BeaconChainState(_) => RPCMethod::BeaconChainState,
};
method.into()
}
}
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub enum RPCResponse { pub enum RPCResponse {
Hello(HelloMessage), Hello(HelloMessage),
@ -69,6 +84,19 @@ pub enum RPCResponse {
BeaconChainState(BeaconChainStateResponse), BeaconChainState(BeaconChainStateResponse),
} }
impl RPCResponse {
pub fn method_id(&self) -> u16 {
let method = match self {
RPCResponse::Hello(_) => RPCMethod::Hello,
RPCResponse::BeaconBlockRoots(_) => RPCMethod::BeaconBlockRoots,
RPCResponse::BeaconBlockHeaders(_) => RPCMethod::BeaconBlockHeaders,
RPCResponse::BeaconBlockBodies(_) => RPCMethod::BeaconBlockBodies,
RPCResponse::BeaconChainState(_) => RPCMethod::BeaconChainState,
};
method.into()
}
}
/* Request/Response data structures for RPC methods */ /* Request/Response data structures for RPC methods */
/// The HELLO request/response handshake message. /// The HELLO request/response handshake message.
@ -86,76 +114,138 @@ pub struct HelloMessage {
pub best_slot: Slot, pub best_slot: Slot,
} }
/// The reason given for a `Goodbye` message.
///
/// Note: `GoodbyeReason::from(n)` resolves to `GoodbyeReason::Unknown` for any unrecognised `n`,
/// however `GoodbyeReason::Unknown.into()` converts into `0_u64`. Therefore de-serializing and then
/// re-serializing may not return the same bytes.
#[derive(Debug, Clone)]
pub enum GoodbyeReason {
ClientShutdown,
IrreleventNetwork,
Fault,
Unknown,
}
impl From<u64> for GoodbyeReason {
fn from(id: u64) -> GoodbyeReason {
match id {
1 => GoodbyeReason::ClientShutdown,
2 => GoodbyeReason::IrreleventNetwork,
3 => GoodbyeReason::Fault,
_ => GoodbyeReason::Unknown,
}
}
}
impl Into<u64> for GoodbyeReason {
fn into(self) -> u64 {
match self {
GoodbyeReason::Unknown => 0,
GoodbyeReason::ClientShutdown => 1,
GoodbyeReason::IrreleventNetwork => 2,
GoodbyeReason::Fault => 3,
}
}
}
impl Encodable for GoodbyeReason {
fn ssz_append(&self, s: &mut SszStream) {
let id: u64 = (*self).clone().into();
id.ssz_append(s);
}
}
impl Decodable for GoodbyeReason {
fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError> {
let (id, index) = u64::ssz_decode(bytes, index)?;
Ok((Self::from(id), index))
}
}
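A small illustration of the note above (a sketch only, assuming `ssz::ssz_encode` is brought into scope): an unrecognised code decodes to `Unknown`, which re-encodes as `0`, so the bytes do not round-trip.

    let original = ssz_encode(&7_u64);
    let (reason, _index) = GoodbyeReason::ssz_decode(&original, 0).unwrap();
    // `reason` is `GoodbyeReason::Unknown`; it re-encodes as the bytes for 0, not 7.
    assert_eq!(ssz_encode(&reason), ssz_encode(&0_u64));
    assert_ne!(ssz_encode(&reason), original);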
/// Request a number of beacon block roots from a peer. /// Request a number of beacon block roots from a peer.
#[derive(Encode, Decode, Clone, Debug)] #[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct BeaconBlockRootsRequest { pub struct BeaconBlockRootsRequest {
/// The starting slot of the requested blocks. /// The starting slot of the requested blocks.
start_slot: Slot, pub start_slot: Slot,
/// The number of blocks from the start slot. /// The number of blocks from the start slot.
count: u64, // this must be less than 32768. //TODO: Enforce this in the lower layers pub count: u64, // this must be less than 32768. //TODO: Enforce this in the lower layers
} }
/// Response containing a number of beacon block roots from a peer. /// Response containing a number of beacon block roots from a peer.
#[derive(Encode, Decode, Clone, Debug)] #[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct BeaconBlockRootsResponse { pub struct BeaconBlockRootsResponse {
/// List of requested blocks and associated slots. /// List of requested blocks and associated slots.
roots: Vec<BlockRootSlot>, pub roots: Vec<BlockRootSlot>,
}
impl BeaconBlockRootsResponse {
/// Returns `true` if each `self.roots[i].slot` is strictly higher than the preceding `self.roots[i - 1].slot`.
pub fn slots_are_ascending(&self) -> bool {
for i in 1..self.roots.len() {
if self.roots[i - 1].slot >= self.roots[i].slot {
return false;
}
}
true
}
} }
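For example (a sketch; `Hash256::zero()` and `Slot::new` are constructors from elsewhere in the codebase, not additions in this diff):

    let response = BeaconBlockRootsResponse {
        roots: vec![
            BlockRootSlot { block_root: Hash256::zero(), slot: Slot::new(1) },
            BlockRootSlot { block_root: Hash256::zero(), slot: Slot::new(2) },
        ],
    };
    assert!(response.slots_are_ascending());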
/// Contains a block root and associated slot. /// Contains a block root and associated slot.
#[derive(Encode, Decode, Clone, Debug)] #[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct BlockRootSlot { pub struct BlockRootSlot {
/// The block root. /// The block root.
block_root: Hash256, pub block_root: Hash256,
/// The block slot. /// The block slot.
slot: Slot, pub slot: Slot,
} }
/// Request a number of beacon block headers from a peer. /// Request a number of beacon block headers from a peer.
#[derive(Encode, Decode, Clone, Debug)] #[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct BeaconBlockHeadersRequest { pub struct BeaconBlockHeadersRequest {
/// The starting header hash of the requested headers. /// The starting header hash of the requested headers.
start_root: Hash256, pub start_root: Hash256,
/// The starting slot of the requested headers. /// The starting slot of the requested headers.
start_slot: Slot, pub start_slot: Slot,
/// The maximum number of headers that can be returned. /// The maximum number of headers that can be returned.
max_headers: u64, pub max_headers: u64,
/// The maximum number of slots to skip between blocks. /// The maximum number of slots to skip between blocks.
skip_slots: u64, pub skip_slots: u64,
} }
/// Response containing requested block headers. /// Response containing requested block headers.
#[derive(Encode, Decode, Clone, Debug)] #[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct BeaconBlockHeadersResponse { pub struct BeaconBlockHeadersResponse {
/// The list of requested beacon block headers. /// The list of requested beacon block headers.
headers: Vec<BeaconBlockHeader>, pub headers: Vec<BeaconBlockHeader>,
} }
/// Request a number of beacon block bodies from a peer. /// Request a number of beacon block bodies from a peer.
#[derive(Encode, Decode, Clone, Debug)] #[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct BeaconBlockBodiesRequest { pub struct BeaconBlockBodiesRequest {
/// The list of beacon block bodies being requested. /// The list of beacon block bodies being requested.
block_roots: Hash256, pub block_roots: Vec<Hash256>,
} }
/// Response containing the list of requested beacon block bodies. /// Response containing the list of requested beacon block bodies.
#[derive(Encode, Decode, Clone, Debug)] #[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct BeaconBlockBodiesResponse { pub struct BeaconBlockBodiesResponse {
/// The list of beacon block bodies being requested. /// The list of beacon block bodies being requested.
block_bodies: Vec<BeaconBlockBody>, pub block_bodies: Vec<BeaconBlockBody>,
} }
/// Request values for tree hashes which yield a block's `state_root`. /// Request values for tree hashes which yield a block's `state_root`.
#[derive(Encode, Decode, Clone, Debug)] #[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct BeaconChainStateRequest { pub struct BeaconChainStateRequest {
/// The tree hashes that a value is requested for. /// The tree hashes that a value is requested for.
hashes: Vec<Hash256>, pub hashes: Vec<Hash256>,
} }
/// Request values for tree hashes which yield a block's `state_root`. /// Request values for tree hashes which yield a block's `state_root`.
// Note: TBD // Note: TBD
#[derive(Encode, Decode, Clone, Debug)] #[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct BeaconChainStateResponse { pub struct BeaconChainStateResponse {
/// The values corresponding to the requested tree hashes. /// The values corresponding to the requested tree hashes.
values: bool, //TBD - stubbed with encodeable bool pub values: bool, //TBD - stubbed with encodeable bool
} }

@ -2,7 +2,7 @@
/// ///
/// This is purpose built for Ethereum 2.0 serenity and the protocol listens on /// This is purpose built for Ethereum 2.0 serenity and the protocol listens on
/// `/eth/serenity/rpc/1.0.0` /// `/eth/serenity/rpc/1.0.0`
mod methods; pub mod methods;
mod protocol; mod protocol;
use futures::prelude::*; use futures::prelude::*;
@ -12,7 +12,7 @@ use libp2p::core::swarm::{
}; };
use libp2p::{Multiaddr, PeerId}; use libp2p::{Multiaddr, PeerId};
pub use methods::{HelloMessage, RPCMethod, RPCRequest, RPCResponse}; pub use methods::{HelloMessage, RPCMethod, RPCRequest, RPCResponse};
pub use protocol::{RPCEvent, RPCProtocol}; pub use protocol::{RPCEvent, RPCProtocol, RequestId};
use slog::o; use slog::o;
use std::marker::PhantomData; use std::marker::PhantomData;
use tokio::io::{AsyncRead, AsyncWrite}; use tokio::io::{AsyncRead, AsyncWrite};
@ -26,7 +26,7 @@ pub struct Rpc<TSubstream> {
/// Pins the generic substream. /// Pins the generic substream.
marker: PhantomData<TSubstream>, marker: PhantomData<TSubstream>,
/// Slog logger for RPC behaviour. /// Slog logger for RPC behaviour.
log: slog::Logger, _log: slog::Logger,
} }
impl<TSubstream> Rpc<TSubstream> { impl<TSubstream> Rpc<TSubstream> {
@ -35,7 +35,7 @@ impl<TSubstream> Rpc<TSubstream> {
Rpc { Rpc {
events: Vec::new(), events: Vec::new(),
marker: PhantomData, marker: PhantomData,
log, _log: log,
} }
} }
@ -65,7 +65,7 @@ where
fn inject_connected(&mut self, peer_id: PeerId, connected_point: ConnectedPoint) { fn inject_connected(&mut self, peer_id: PeerId, connected_point: ConnectedPoint) {
// if initialised the connection, report this upwards to send the HELLO request // if initialised the connection, report this upwards to send the HELLO request
if let ConnectedPoint::Dialer { address: _ } = connected_point { if let ConnectedPoint::Dialer { .. } = connected_point {
self.events.push(NetworkBehaviourAction::GenerateEvent( self.events.push(NetworkBehaviourAction::GenerateEvent(
RPCMessage::PeerDialed(peer_id), RPCMessage::PeerDialed(peer_id),
)); ));

@ -1,6 +1,7 @@
use super::methods::*; use super::methods::*;
use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo};
use ssz::{ssz_encode, Decodable, Encodable, SszStream}; use ssz::{ssz_encode, Decodable, DecodeError as SSZDecodeError, Encodable, SszStream};
use std::hash::{Hash, Hasher};
use std::io; use std::io;
use std::iter; use std::iter;
use tokio::io::{AsyncRead, AsyncWrite}; use tokio::io::{AsyncRead, AsyncWrite};
@ -29,16 +30,71 @@ impl Default for RPCProtocol {
} }
} }
/// A monotonic counter for ordering `RPCRequest`s.
#[derive(Debug, Clone, Default)]
pub struct RequestId(u64);
impl RequestId {
/// Increment the request id.
pub fn increment(&mut self) {
self.0 += 1
}
/// Return the previous id.
pub fn previous(&self) -> Self {
Self(self.0 - 1)
}
}
impl Eq for RequestId {}
impl PartialEq for RequestId {
fn eq(&self, other: &RequestId) -> bool {
self.0 == other.0
}
}
impl Hash for RequestId {
fn hash<H: Hasher>(&self, state: &mut H) {
self.0.hash(state);
}
}
impl From<u64> for RequestId {
fn from(x: u64) -> RequestId {
RequestId(x)
}
}
impl Into<u64> for RequestId {
fn into(self) -> u64 {
self.0
}
}
impl Encodable for RequestId {
fn ssz_append(&self, s: &mut SszStream) {
self.0.ssz_append(s);
}
}
impl Decodable for RequestId {
fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), SSZDecodeError> {
let (id, index) = u64::ssz_decode(bytes, index)?;
Ok((Self::from(id), index))
}
}
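A brief sketch of the intended bookkeeping (illustrative only; when to increment is decided by the calling handler, not by this type):

    let mut next_id = RequestId::from(0);
    // Tag an outgoing request with `next_id.clone()`, then advance the counter.
    next_id.increment();
    // When the matching response arrives, `previous()` recovers the id just used.
    assert_eq!(next_id.previous(), RequestId::from(0));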
/// The RPC types which are sent/received in this protocol. /// The RPC types which are sent/received in this protocol.
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub enum RPCEvent { pub enum RPCEvent {
Request { Request {
id: u64, id: RequestId,
method_id: u16, method_id: u16,
body: RPCRequest, body: RPCRequest,
}, },
Response { Response {
id: u64, id: RequestId,
method_id: u16, //TODO: Remove and process decoding upstream method_id: u16, //TODO: Remove and process decoding upstream
result: RPCResponse, result: RPCResponse,
}, },
@ -54,17 +110,15 @@ impl UpgradeInfo for RPCEvent {
} }
} }
type FnDecodeRPCEvent = fn(Vec<u8>, ()) -> Result<RPCEvent, DecodeError>;
impl<TSocket> InboundUpgrade<TSocket> for RPCProtocol impl<TSocket> InboundUpgrade<TSocket> for RPCProtocol
where where
TSocket: AsyncRead + AsyncWrite, TSocket: AsyncRead + AsyncWrite,
{ {
type Output = RPCEvent; type Output = RPCEvent;
type Error = DecodeError; type Error = DecodeError;
type Future = upgrade::ReadOneThen< type Future = upgrade::ReadOneThen<upgrade::Negotiated<TSocket>, (), FnDecodeRPCEvent>;
upgrade::Negotiated<TSocket>,
(),
fn(Vec<u8>, ()) -> Result<RPCEvent, DecodeError>,
>;
fn upgrade_inbound(self, socket: upgrade::Negotiated<TSocket>, _: Self::Info) -> Self::Future { fn upgrade_inbound(self, socket: upgrade::Negotiated<TSocket>, _: Self::Info) -> Self::Future {
upgrade::read_one_then(socket, MAX_READ_SIZE, (), |packet, ()| Ok(decode(packet)?)) upgrade::read_one_then(socket, MAX_READ_SIZE, (), |packet, ()| Ok(decode(packet)?))
@ -75,7 +129,7 @@ fn decode(packet: Vec<u8>) -> Result<RPCEvent, DecodeError> {
// decode the header of the rpc // decode the header of the rpc
// request/response // request/response
let (request, index) = bool::ssz_decode(&packet, 0)?; let (request, index) = bool::ssz_decode(&packet, 0)?;
let (id, index) = u64::ssz_decode(&packet, index)?; let (id, index) = RequestId::ssz_decode(&packet, index)?;
let (method_id, index) = u16::ssz_decode(&packet, index)?; let (method_id, index) = u16::ssz_decode(&packet, index)?;
if request { if request {
@ -85,8 +139,8 @@ fn decode(packet: Vec<u8>) -> Result<RPCEvent, DecodeError> {
RPCRequest::Hello(hello_body) RPCRequest::Hello(hello_body)
} }
RPCMethod::Goodbye => { RPCMethod::Goodbye => {
let (goodbye_code, _index) = u64::ssz_decode(&packet, index)?; let (goodbye_reason, _index) = GoodbyeReason::ssz_decode(&packet, index)?;
RPCRequest::Goodbye(goodbye_code) RPCRequest::Goodbye(goodbye_reason)
} }
RPCMethod::BeaconBlockRoots => { RPCMethod::BeaconBlockRoots => {
let (block_roots_request, _index) = let (block_roots_request, _index) =

View File

@ -1,4 +1,4 @@
use crate::behaviour::{Behaviour, BehaviourEvent}; use crate::behaviour::{Behaviour, BehaviourEvent, PubsubMessage};
use crate::error; use crate::error;
use crate::multiaddr::Protocol; use crate::multiaddr::Protocol;
use crate::rpc::RPCEvent; use crate::rpc::RPCEvent;
@ -17,15 +17,18 @@ use libp2p::{core, secio, PeerId, Swarm, Transport};
use slog::{debug, info, trace, warn}; use slog::{debug, info, trace, warn};
use std::io::{Error, ErrorKind}; use std::io::{Error, ErrorKind};
use std::time::Duration; use std::time::Duration;
use types::TopicBuilder; use types::{TopicBuilder, TopicHash};
type Libp2pStream = Boxed<(PeerId, StreamMuxerBox), Error>;
type Libp2pBehaviour = Behaviour<Substream<StreamMuxerBox>>;
/// The configuration and state of the libp2p components for the beacon node. /// The configuration and state of the libp2p components for the beacon node.
pub struct Service { pub struct Service {
/// The libp2p Swarm handler. /// The libp2p Swarm handler.
//TODO: Make this private //TODO: Make this private
pub swarm: Swarm<Boxed<(PeerId, StreamMuxerBox), Error>, Behaviour<Substream<StreamMuxerBox>>>, pub swarm: Swarm<Libp2pStream, Libp2pBehaviour>,
/// This node's PeerId. /// This node's PeerId.
local_peer_id: PeerId, _local_peer_id: PeerId,
/// The libp2p logger handle. /// The libp2p logger handle.
pub log: slog::Logger, pub log: slog::Logger,
} }
@ -89,7 +92,7 @@ impl Service {
info!(log, "Subscribed to topics: {:?}", subscribed_topics); info!(log, "Subscribed to topics: {:?}", subscribed_topics);
Ok(Service { Ok(Service {
local_peer_id, _local_peer_id: local_peer_id,
swarm, swarm,
log, log,
}) })
@ -108,9 +111,17 @@ impl Stream for Service {
//Behaviour events //Behaviour events
Ok(Async::Ready(Some(event))) => match event { Ok(Async::Ready(Some(event))) => match event {
// TODO: Stub here for debugging // TODO: Stub here for debugging
BehaviourEvent::Message(m) => { BehaviourEvent::GossipMessage {
debug!(self.log, "Message received: {}", m); source,
return Ok(Async::Ready(Some(Libp2pEvent::Message(m)))); topics,
message,
} => {
trace!(self.log, "Pubsub message received: {:?}", message);
return Ok(Async::Ready(Some(Libp2pEvent::PubsubMessage {
source,
topics,
message,
})));
} }
BehaviourEvent::RPC(peer_id, event) => { BehaviourEvent::RPC(peer_id, event) => {
return Ok(Async::Ready(Some(Libp2pEvent::RPC(peer_id, event)))); return Ok(Async::Ready(Some(Libp2pEvent::RPC(peer_id, event))));
@ -171,7 +182,11 @@ pub enum Libp2pEvent {
/// Initiated the connection to a new peer. /// Initiated the connection to a new peer.
PeerDialed(PeerId), PeerDialed(PeerId),
/// Received information about a peer on the network. /// Received information about a peer on the network.
Identified(PeerId, IdentifyInfo), Identified(PeerId, Box<IdentifyInfo>),
// TODO: Pub-sub testing only. /// Received pubsub message.
Message(String), PubsubMessage {
source: PeerId,
topics: Vec<TopicHash>,
message: Box<PubsubMessage>,
},
} }

@ -4,12 +4,17 @@ version = "0.1.0"
authors = ["Age Manning <Age@AgeManning.com>"] authors = ["Age Manning <Age@AgeManning.com>"]
edition = "2018" edition = "2018"
[dev-dependencies]
test_harness = { path = "../beacon_chain/test_harness" }
sloggers = "0.3.2"
[dependencies] [dependencies]
beacon_chain = { path = "../beacon_chain" } beacon_chain = { path = "../beacon_chain" }
eth2-libp2p = { path = "../eth2-libp2p" } eth2-libp2p = { path = "../eth2-libp2p" }
version = { path = "../version" } version = { path = "../version" }
types = { path = "../../eth2/types" } types = { path = "../../eth2/types" }
slog = "2.4.1" slog = { version = "^2.2.3" , features = ["max_level_trace", "release_max_level_debug"] }
ssz = { path = "../../eth2/utils/ssz" }
futures = "0.1.25" futures = "0.1.25"
error-chain = "0.12.0" error-chain = "0.12.0"
crossbeam-channel = "0.3.8" crossbeam-channel = "0.3.8"

@ -5,8 +5,12 @@ use beacon_chain::{
parking_lot::RwLockReadGuard, parking_lot::RwLockReadGuard,
slot_clock::SlotClock, slot_clock::SlotClock,
types::{BeaconState, ChainSpec}, types::{BeaconState, ChainSpec},
CheckPoint, AttestationValidationError, CheckPoint,
}; };
use eth2_libp2p::rpc::HelloMessage;
use types::{Attestation, BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Epoch, Hash256, Slot};
pub use beacon_chain::{BeaconChainError, BlockProcessingOutcome, InvalidBlock};
/// The network's API to the beacon chain. /// The network's API to the beacon chain.
pub trait BeaconChain: Send + Sync { pub trait BeaconChain: Send + Sync {
@ -14,9 +18,48 @@ pub trait BeaconChain: Send + Sync {
fn get_state(&self) -> RwLockReadGuard<BeaconState>; fn get_state(&self) -> RwLockReadGuard<BeaconState>;
fn slot(&self) -> Slot;
fn head(&self) -> RwLockReadGuard<CheckPoint>; fn head(&self) -> RwLockReadGuard<CheckPoint>;
fn get_block(&self, block_root: &Hash256) -> Result<Option<BeaconBlock>, BeaconChainError>;
fn best_slot(&self) -> Slot;
fn best_block_root(&self) -> Hash256;
fn finalized_head(&self) -> RwLockReadGuard<CheckPoint>; fn finalized_head(&self) -> RwLockReadGuard<CheckPoint>;
fn finalized_epoch(&self) -> Epoch;
fn hello_message(&self) -> HelloMessage;
fn process_block(&self, block: BeaconBlock)
-> Result<BlockProcessingOutcome, BeaconChainError>;
fn process_attestation(
&self,
attestation: Attestation,
) -> Result<(), AttestationValidationError>;
fn get_block_roots(
&self,
start_slot: Slot,
count: usize,
skip: usize,
) -> Result<Vec<Hash256>, BeaconChainError>;
fn get_block_headers(
&self,
start_slot: Slot,
count: usize,
skip: usize,
) -> Result<Vec<BeaconBlockHeader>, BeaconChainError>;
fn get_block_bodies(&self, roots: &[Hash256])
-> Result<Vec<BeaconBlockBody>, BeaconChainError>;
fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result<bool, BeaconChainError>;
} }
impl<T, U, F> BeaconChain for RawBeaconChain<T, U, F> impl<T, U, F> BeaconChain for RawBeaconChain<T, U, F>
@ -33,11 +76,88 @@ where
self.state.read() self.state.read()
} }
fn slot(&self) -> Slot {
self.get_state().slot
}
fn head(&self) -> RwLockReadGuard<CheckPoint> { fn head(&self) -> RwLockReadGuard<CheckPoint> {
self.head() self.head()
} }
fn get_block(&self, block_root: &Hash256) -> Result<Option<BeaconBlock>, BeaconChainError> {
self.get_block(block_root)
}
fn finalized_epoch(&self) -> Epoch {
self.get_state().finalized_epoch
}
fn finalized_head(&self) -> RwLockReadGuard<CheckPoint> { fn finalized_head(&self) -> RwLockReadGuard<CheckPoint> {
self.finalized_head() self.finalized_head()
} }
fn best_slot(&self) -> Slot {
self.head().beacon_block.slot
}
fn best_block_root(&self) -> Hash256 {
self.head().beacon_block_root
}
fn hello_message(&self) -> HelloMessage {
let spec = self.get_spec();
let state = self.get_state();
HelloMessage {
network_id: spec.chain_id,
latest_finalized_root: state.finalized_root,
latest_finalized_epoch: state.finalized_epoch,
best_root: self.best_block_root(),
best_slot: self.best_slot(),
}
}
fn process_block(
&self,
block: BeaconBlock,
) -> Result<BlockProcessingOutcome, BeaconChainError> {
self.process_block(block)
}
fn process_attestation(
&self,
attestation: Attestation,
) -> Result<(), AttestationValidationError> {
self.process_attestation(attestation)
}
fn get_block_roots(
&self,
start_slot: Slot,
count: usize,
skip: usize,
) -> Result<Vec<Hash256>, BeaconChainError> {
self.get_block_roots(start_slot, count, skip)
}
fn get_block_headers(
&self,
start_slot: Slot,
count: usize,
skip: usize,
) -> Result<Vec<BeaconBlockHeader>, BeaconChainError> {
let roots = self.get_block_roots(start_slot, count, skip)?;
self.get_block_headers(&roots)
}
fn get_block_bodies(
&self,
roots: &[Hash256],
) -> Result<Vec<BeaconBlockBody>, BeaconChainError> {
self.get_block_bodies(roots)
}
fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result<bool, BeaconChainError> {
self.is_new_block_root(beacon_block_root)
}
} }

@ -1,9 +1,10 @@
/// This crate provides the network server for Lighthouse. /// This crate provides the network server for Lighthouse.
pub mod beacon_chain; pub mod beacon_chain;
pub mod error; pub mod error;
mod message_handler; pub mod message_handler;
mod service; pub mod service;
pub mod sync; pub mod sync;
pub use eth2_libp2p::NetworkConfig; pub use eth2_libp2p::NetworkConfig;
pub use service::NetworkMessage;
pub use service::Service; pub use service::Service;

@ -4,33 +4,29 @@ use crate::service::{NetworkMessage, OutgoingMessage};
use crate::sync::SimpleSync; use crate::sync::SimpleSync;
use crossbeam_channel::{unbounded as channel, Sender}; use crossbeam_channel::{unbounded as channel, Sender};
use eth2_libp2p::{ use eth2_libp2p::{
rpc::{RPCMethod, RPCRequest, RPCResponse}, behaviour::PubsubMessage,
HelloMessage, PeerId, RPCEvent, rpc::{methods::GoodbyeReason, RPCRequest, RPCResponse, RequestId},
PeerId, RPCEvent,
}; };
use futures::future; use futures::future;
use slog::warn; use slog::{debug, warn};
use slog::{debug, trace};
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
use std::time::{Duration, Instant}; use std::time::Instant;
/// Timeout for RPC requests. /// Timeout for RPC requests.
const REQUEST_TIMEOUT: Duration = Duration::from_secs(30); // const REQUEST_TIMEOUT: Duration = Duration::from_secs(30);
/// Timeout before banning a peer for non-identification. /// Timeout before banning a peer for non-identification.
const HELLO_TIMEOUT: Duration = Duration::from_secs(30); // const HELLO_TIMEOUT: Duration = Duration::from_secs(30);
/// Handles messages received from the network and client and organises syncing. /// Handles messages received from the network and client and organises syncing.
pub struct MessageHandler { pub struct MessageHandler {
/// Currently loaded and initialised beacon chain. /// Currently loaded and initialised beacon chain.
chain: Arc<BeaconChain>, _chain: Arc<BeaconChain>,
/// The syncing framework. /// The syncing framework.
sync: SimpleSync, sync: SimpleSync,
/// The network channel to relay messages to the Network service. /// The context required to send messages to, and process messages from peers.
network_send: crossbeam_channel::Sender<NetworkMessage>, network_context: NetworkContext,
/// A mapping of peers and the RPC id we have sent an RPC request to.
requests: HashMap<(PeerId, u64), Instant>,
/// A counter of request id for each peer.
request_ids: HashMap<PeerId, u64>,
/// The `MessageHandler` logger. /// The `MessageHandler` logger.
log: slog::Logger, log: slog::Logger,
} }
@ -44,8 +40,8 @@ pub enum HandlerMessage {
PeerDisconnected(PeerId), PeerDisconnected(PeerId),
/// An RPC response/request has been received. /// An RPC response/request has been received.
RPC(PeerId, RPCEvent), RPC(PeerId, RPCEvent),
/// A block has been imported. /// A gossip message has been received.
BlockImported(), //TODO: This comes from pub-sub - decide its contents PubsubMessage(PeerId, Box<PubsubMessage>),
} }
impl MessageHandler { impl MessageHandler {
@ -65,13 +61,9 @@ impl MessageHandler {
let sync = SimpleSync::new(beacon_chain.clone(), &log); let sync = SimpleSync::new(beacon_chain.clone(), &log);
let mut handler = MessageHandler { let mut handler = MessageHandler {
// TODO: The handler may not need a chain, perhaps only sync? _chain: beacon_chain.clone(),
chain: beacon_chain.clone(),
sync, sync,
network_send, network_context: NetworkContext::new(network_send, log.clone()),
requests: HashMap::new(),
request_ids: HashMap::new(),
log: log.clone(), log: log.clone(),
}; };
@ -93,13 +85,16 @@ impl MessageHandler {
match message { match message {
// we have initiated a connection to a peer // we have initiated a connection to a peer
HandlerMessage::PeerDialed(peer_id) => { HandlerMessage::PeerDialed(peer_id) => {
let id = self.generate_request_id(&peer_id); self.sync.on_connect(peer_id, &mut self.network_context);
self.send_hello(peer_id, id, true);
} }
// we have received an RPC message request/response // we have received an RPC message request/response
HandlerMessage::RPC(peer_id, rpc_event) => { HandlerMessage::RPC(peer_id, rpc_event) => {
self.handle_rpc_message(peer_id, rpc_event); self.handle_rpc_message(peer_id, rpc_event);
} }
// we have received a pubsub (gossip) message
HandlerMessage::PubsubMessage(peer_id, gossip) => {
self.handle_gossip(peer_id, *gossip);
}
//TODO: Handle all messages //TODO: Handle all messages
_ => {} _ => {}
} }
@ -117,109 +112,196 @@ impl MessageHandler {
} }
/// A new RPC request has been received from the network. /// A new RPC request has been received from the network.
fn handle_rpc_request(&mut self, peer_id: PeerId, id: u64, request: RPCRequest) { fn handle_rpc_request(&mut self, peer_id: PeerId, request_id: RequestId, request: RPCRequest) {
// TODO: process the `id`.
match request { match request {
RPCRequest::Hello(hello_message) => { RPCRequest::Hello(hello_message) => self.sync.on_hello_request(
self.handle_hello_request(peer_id, id, hello_message) peer_id,
request_id,
hello_message,
&mut self.network_context,
),
RPCRequest::Goodbye(goodbye_reason) => self.sync.on_goodbye(peer_id, goodbye_reason),
RPCRequest::BeaconBlockRoots(request) => self.sync.on_beacon_block_roots_request(
peer_id,
request_id,
request,
&mut self.network_context,
),
RPCRequest::BeaconBlockHeaders(request) => self.sync.on_beacon_block_headers_request(
peer_id,
request_id,
request,
&mut self.network_context,
),
RPCRequest::BeaconBlockBodies(request) => self.sync.on_beacon_block_bodies_request(
peer_id,
request_id,
request,
&mut self.network_context,
),
RPCRequest::BeaconChainState(_) => {
// We do not implement this endpoint, it is not required and will only likely be
// useful for light-client support in later phases.
warn!(self.log, "BeaconChainState RPC call is not supported.");
} }
// TODO: Handle all requests
_ => {}
} }
} }
/// An RPC response has been received from the network. /// An RPC response has been received from the network.
// we match on id and ignore responses past the timeout. // we match on id and ignore responses past the timeout.
fn handle_rpc_response(&mut self, peer_id: PeerId, id: u64, response: RPCResponse) { fn handle_rpc_response(&mut self, peer_id: PeerId, id: RequestId, response: RPCResponse) {
// if response id is related to a request, ignore (likely RPC timeout) // if response id is not related to a request, ignore (likely RPC timeout)
if self.requests.remove(&(peer_id.clone(), id)).is_none() { if self
debug!(self.log, "Unrecognized response from peer: {:?}", peer_id); .network_context
.outstanding_outgoing_request_ids
.remove(&(peer_id.clone(), id.clone()))
.is_none()
{
warn!(
self.log,
"Unknown ResponseId for incoming RPCRequest";
"peer" => format!("{:?}", peer_id),
"request_id" => format!("{:?}", id)
);
return; return;
} }
match response { match response {
RPCResponse::Hello(hello_message) => { RPCResponse::Hello(hello_message) => {
debug!(self.log, "Hello response received from peer: {:?}", peer_id); self.sync
self.validate_hello(peer_id, hello_message); .on_hello_response(peer_id, hello_message, &mut self.network_context);
}
RPCResponse::BeaconBlockRoots(response) => {
self.sync.on_beacon_block_roots_response(
peer_id,
response,
&mut self.network_context,
);
}
RPCResponse::BeaconBlockHeaders(response) => {
self.sync.on_beacon_block_headers_response(
peer_id,
response,
&mut self.network_context,
);
}
RPCResponse::BeaconBlockBodies(response) => {
self.sync.on_beacon_block_bodies_response(
peer_id,
response,
&mut self.network_context,
);
}
RPCResponse::BeaconChainState(_) => {
// We do not implement this endpoint, it is not required and will only likely be
// useful for light-client support in later phases.
//
// Theoretically, we shouldn't reach this code because we should never send a
// beacon state RPC request.
warn!(self.log, "BeaconChainState RPC call is not supported.");
} }
// TODO: Handle all responses
_ => {}
}
}
/// Handle a HELLO RPC request message.
fn handle_hello_request(&mut self, peer_id: PeerId, id: u64, hello_message: HelloMessage) {
// send back a HELLO message
self.send_hello(peer_id.clone(), id, false);
// validate the peer
self.validate_hello(peer_id, hello_message);
}
/// Validate a HELLO RPC message.
fn validate_hello(&mut self, peer_id: PeerId, message: HelloMessage) {
// validate the peer
if !self.sync.validate_peer(peer_id.clone(), message) {
debug!(
self.log,
"Peer dropped due to mismatching HELLO messages: {:?}", peer_id
);
//TODO: block/ban the peer
}
}
/* General RPC helper functions */
/// Generates a new request id for a peer.
fn generate_request_id(&mut self, peer_id: &PeerId) -> u64 {
// generate a unique id for the peer
let id = {
let borrowed_id = self.request_ids.entry(peer_id.clone()).or_insert_with(|| 0);
let id = borrowed_id.clone();
//increment the counter
*borrowed_id += 1;
id
}; };
// register RPC request
self.requests.insert((peer_id.clone(), id), Instant::now());
debug!(
self.log,
"Hello request registered with peer: {:?}", peer_id
);
id
} }
/// Sends a HELLO RPC request or response to a newly connected peer. /// Handles a gossip message received from the network over pubsub.
//TODO: The boolean determines if sending request/respond, will be cleaner in the RPC re-write fn handle_gossip(&mut self, peer_id: PeerId, gossip_message: PubsubMessage) {
fn send_hello(&mut self, peer_id: PeerId, id: u64, is_request: bool) { match gossip_message {
let rpc_event = if is_request { PubsubMessage::Block(message) => {
let _should_foward_on =
self.sync
.on_block_gossip(peer_id, message, &mut self.network_context);
}
PubsubMessage::Attestation(message) => {
self.sync
.on_attestation_gossip(peer_id, message, &mut self.network_context)
}
}
}
}
pub struct NetworkContext {
/// The network channel to relay messages to the Network service.
network_send: crossbeam_channel::Sender<NetworkMessage>,
/// A mapping of peers and the RPC id we have sent an RPC request to.
outstanding_outgoing_request_ids: HashMap<(PeerId, RequestId), Instant>,
/// Stores the next `RequestId` we should include on an outgoing `RPCRequest` to a `PeerId`.
outgoing_request_ids: HashMap<PeerId, RequestId>,
/// The `MessageHandler` logger.
log: slog::Logger,
}
impl NetworkContext {
pub fn new(network_send: crossbeam_channel::Sender<NetworkMessage>, log: slog::Logger) -> Self {
Self {
network_send,
outstanding_outgoing_request_ids: HashMap::new(),
outgoing_request_ids: HashMap::new(),
log,
}
}
pub fn disconnect(&mut self, peer_id: PeerId, reason: GoodbyeReason) {
self.send_rpc_request(peer_id, RPCRequest::Goodbye(reason))
// TODO: disconnect peers.
}
pub fn send_rpc_request(&mut self, peer_id: PeerId, rpc_request: RPCRequest) {
let id = self.generate_request_id(&peer_id);
self.outstanding_outgoing_request_ids
.insert((peer_id.clone(), id.clone()), Instant::now());
self.send_rpc_event(
peer_id,
RPCEvent::Request { RPCEvent::Request {
id, id,
method_id: RPCMethod::Hello.into(), method_id: rpc_request.method_id(),
body: RPCRequest::Hello(self.sync.generate_hello()), body: rpc_request,
} },
} else { );
RPCEvent::Response {
id,
method_id: RPCMethod::Hello.into(),
result: RPCResponse::Hello(self.sync.generate_hello()),
}
};
// send the hello request to the network
trace!(self.log, "Sending HELLO message to peer {:?}", peer_id);
self.send_rpc(peer_id, rpc_event);
} }
/// Sends an RPC request/response to the network server. pub fn send_rpc_response(
fn send_rpc(&self, peer_id: PeerId, rpc_event: RPCEvent) { &mut self,
peer_id: PeerId,
request_id: RequestId,
rpc_response: RPCResponse,
) {
self.send_rpc_event(
peer_id,
RPCEvent::Response {
id: request_id,
method_id: rpc_response.method_id(),
result: rpc_response,
},
);
}
fn send_rpc_event(&self, peer_id: PeerId, rpc_event: RPCEvent) {
self.send(peer_id, OutgoingMessage::RPC(rpc_event))
}
fn send(&self, peer_id: PeerId, outgoing_message: OutgoingMessage) {
self.network_send self.network_send
.send(NetworkMessage::Send( .send(NetworkMessage::Send(peer_id, outgoing_message))
peer_id,
OutgoingMessage::RPC(rpc_event),
))
.unwrap_or_else(|_| { .unwrap_or_else(|_| {
warn!( warn!(
self.log, self.log,
"Could not send RPC message to the network service" "Could not send RPC message to the network service"
) )
}); });
//
}
/// Returns the next `RequestId` for sending an `RPCRequest` to the `peer_id`.
fn generate_request_id(&mut self, peer_id: &PeerId) -> RequestId {
let next_id = self
.outgoing_request_ids
.entry(peer_id.clone())
.and_modify(|id| id.increment())
.or_insert_with(|| RequestId::from(1));
next_id.previous()
} }
} }
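// A minimal sketch (assuming `RequestId` implements the `Clone`, `PartialEq` and
// `Debug` traits that its use elsewhere in this diff suggests) of the per-peer id
// sequence produced by `generate_request_id`. Illustrative only; not part of the
// crate's test suite.
#[cfg(test)]
mod request_id_sketch {
    use super::*;

    #[test]
    fn ids_advance_per_peer() {
        let (network_send, _network_recv) = crossbeam_channel::unbounded();
        let log = slog::Logger::root(slog::Discard, slog::o!());
        let mut context = NetworkContext::new(network_send, log);

        let peer_a = PeerId::random();
        let peer_b = PeerId::random();

        // Each peer has its own counter: the first id for any peer starts from
        // the same value, while repeated requests to one peer advance it.
        let a_first = context.generate_request_id(&peer_a);
        let a_second = context.generate_request_id(&peer_a);
        let b_first = context.generate_request_id(&peer_b);

        assert_ne!(a_first, a_second);
        assert_eq!(a_first, b_first);
    }
}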

View File

@ -3,20 +3,21 @@ use crate::error;
use crate::message_handler::{HandlerMessage, MessageHandler}; use crate::message_handler::{HandlerMessage, MessageHandler};
use crate::NetworkConfig; use crate::NetworkConfig;
use crossbeam_channel::{unbounded as channel, Sender, TryRecvError}; use crossbeam_channel::{unbounded as channel, Sender, TryRecvError};
use eth2_libp2p::RPCEvent;
use eth2_libp2p::Service as LibP2PService; use eth2_libp2p::Service as LibP2PService;
use eth2_libp2p::{Libp2pEvent, PeerId}; use eth2_libp2p::{Libp2pEvent, PeerId};
use eth2_libp2p::{PubsubMessage, RPCEvent};
use futures::prelude::*; use futures::prelude::*;
use futures::sync::oneshot; use futures::sync::oneshot;
use futures::Stream; use futures::Stream;
use slog::{debug, info, o, trace}; use slog::{debug, info, o, trace};
use std::sync::Arc; use std::sync::Arc;
use tokio::runtime::TaskExecutor; use tokio::runtime::TaskExecutor;
use types::Topic;
/// Service that handles communication between internal services and the eth2_libp2p network service. /// Service that handles communication between internal services and the eth2_libp2p network service.
pub struct Service { pub struct Service {
//libp2p_service: Arc<Mutex<LibP2PService>>, //libp2p_service: Arc<Mutex<LibP2PService>>,
libp2p_exit: oneshot::Sender<()>, _libp2p_exit: oneshot::Sender<()>,
network_send: crossbeam_channel::Sender<NetworkMessage>, network_send: crossbeam_channel::Sender<NetworkMessage>,
//message_handler: MessageHandler, //message_handler: MessageHandler,
//message_handler_send: Sender<HandlerMessage>, //message_handler_send: Sender<HandlerMessage>,
@ -53,7 +54,7 @@ impl Service {
log, log,
)?; )?;
let network_service = Service { let network_service = Service {
libp2p_exit, _libp2p_exit: libp2p_exit,
network_send: network_send.clone(), network_send: network_send.clone(),
}; };
@ -99,6 +100,7 @@ fn spawn_service(
Ok(network_exit) Ok(network_exit)
} }
//TODO: Potentially handle channel errors
fn network_service( fn network_service(
mut libp2p_service: LibP2PService, mut libp2p_service: LibP2PService,
network_recv: crossbeam_channel::Receiver<NetworkMessage>, network_recv: crossbeam_channel::Receiver<NetworkMessage>,
@ -128,10 +130,15 @@ fn network_service(
"We have identified peer: {:?} with {:?}", peer_id, info "We have identified peer: {:?} with {:?}", peer_id, info
); );
} }
Libp2pEvent::Message(m) => debug!( Libp2pEvent::PubsubMessage {
libp2p_service.log, source, message, ..
"Network Service: Message received: {}", m } => {
), //TODO: Decide if we need to propagate the topic upwards. (Potentially for
//attestations)
message_handler_send
.send(HandlerMessage::PubsubMessage(source, message))
.map_err(|_| " failed to send pubsub message to handler")?;
}
}, },
Ok(Async::Ready(None)) => unreachable!("Stream never ends"), Ok(Async::Ready(None)) => unreachable!("Stream never ends"),
Ok(Async::NotReady) => break, Ok(Async::NotReady) => break,
@ -152,10 +159,14 @@ fn network_service(
libp2p_service.swarm.send_rpc(peer_id, rpc_event); libp2p_service.swarm.send_rpc(peer_id, rpc_event);
} }
OutgoingMessage::NotifierTest => { OutgoingMessage::NotifierTest => {
debug!(log, "Received message from notifier"); // debug!(log, "Received message from notifier");
} }
}; };
} }
Ok(NetworkMessage::Publish { topics, message }) => {
debug!(log, "Sending pubsub message on topics {:?}", topics);
libp2p_service.swarm.publish(topics, *message);
}
Err(TryRecvError::Empty) => break, Err(TryRecvError::Empty) => break,
Err(TryRecvError::Disconnected) => { Err(TryRecvError::Disconnected) => {
return Err(eth2_libp2p::error::Error::from( return Err(eth2_libp2p::error::Error::from(
@ -174,6 +185,11 @@ pub enum NetworkMessage {
/// Send a message to libp2p service. /// Send a message to libp2p service.
//TODO: Define typing for messages across the wire //TODO: Define typing for messages across the wire
Send(PeerId, OutgoingMessage), Send(PeerId, OutgoingMessage),
/// Publish a message to pubsub mechanism.
Publish {
topics: Vec<Topic>,
message: Box<PubsubMessage>,
},
} }
/// Type of outgoing messages that can be sent through the network service. /// Type of outgoing messages that can be sent through the network service.

View File

@ -0,0 +1,302 @@
use crate::beacon_chain::BeaconChain;
use eth2_libp2p::rpc::methods::*;
use eth2_libp2p::PeerId;
use slog::{debug, error};
use ssz::TreeHash;
use std::sync::Arc;
use std::time::{Duration, Instant};
use types::{BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Hash256, Slot};
/// Provides a queue for fully and partially built `BeaconBlock`s.
///
/// The queue is fundamentally a `Vec<PartialBeaconBlock>` where no two items have the same
/// `item.block_root`. This struct is backed by a `Vec`, not a `HashMap`, for the following two
/// reasons:
///
/// - When we receive a `BeaconBlockBody`, the only way we can find its matching
/// `BeaconBlockHeader` is to find a header such that `header.block_body_root ==
/// hash_tree_root(body)`. Therefore, if we used a `HashMap` we would need to use the root of
/// `BeaconBlockBody` as the key.
/// - It is possible for multiple distinct blocks to have identical `BeaconBlockBodies`. Therefore
/// we cannot use a `HashMap` keyed by the root of `BeaconBlockBody`.
pub struct ImportQueue {
pub chain: Arc<BeaconChain>,
/// Partially imported blocks, keyed by the root of `BeaconBlockBody`.
pub partials: Vec<PartialBeaconBlock>,
/// Time before a queue entry is considered stale.
pub stale_time: Duration,
/// Logging
log: slog::Logger,
}
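// A hedged sketch of the lookup the `Vec` layout above exists to support: a
// `BeaconBlockBody` is matched to a partial by comparing the body's tree hash
// root against each stored header's `block_body_root`. The helper below is
// hypothetical and simply mirrors the logic of `insert_body` further down.
#[allow(dead_code)]
fn find_partial_for_body<'a>(
    partials: &'a [PartialBeaconBlock],
    body: &BeaconBlockBody,
) -> Option<&'a PartialBeaconBlock> {
    let body_root = Hash256::from_slice(&body.hash_tree_root()[..]);
    partials.iter().find(|p| {
        p.header
            .as_ref()
            .map_or(false, |header| header.block_body_root == body_root)
    })
}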
impl ImportQueue {
/// Return a new, empty queue.
pub fn new(chain: Arc<BeaconChain>, stale_time: Duration, log: slog::Logger) -> Self {
Self {
chain,
partials: vec![],
stale_time,
log,
}
}
/// Completes all possible partials into `BeaconBlock` and returns them, sorted by increasing
/// slot number. Does not delete the partials from the queue; this must be done manually.
///
/// Returns `(block_root, block, sender)`:
///
/// - `block_root`: may be used to remove the entry if it is successfully processed.
/// - `block`: the completed block.
/// - `sender`: the `PeerId` that provided the `BeaconBlockBody` which completed the partial.
pub fn complete_blocks(&self) -> Vec<(Hash256, BeaconBlock, PeerId)> {
let mut complete: Vec<(Hash256, BeaconBlock, PeerId)> = self
.partials
.iter()
.filter_map(|partial| partial.clone().complete())
.collect();
// Sort the completable partials to be in ascending slot order.
complete.sort_unstable_by(|a, b| a.1.slot.partial_cmp(&b.1.slot).unwrap());
complete
}
/// Removes the first `PartialBeaconBlock` with a matching `block_root`, returning the partial
/// if it exists.
pub fn remove(&mut self, block_root: Hash256) -> Option<PartialBeaconBlock> {
let position = self
.partials
.iter()
.position(|p| p.block_root == block_root)?;
Some(self.partials.remove(position))
}
/// Flushes all stale entries from the queue.
///
/// An entry is stale if it has an `inserted` time that is more than `self.stale_time` in the
/// past.
pub fn remove_stale(&mut self) {
let stale_indices: Vec<usize> = self
.partials
.iter()
.enumerate()
.filter_map(|(i, partial)| {
if partial.inserted + self.stale_time <= Instant::now() {
Some(i)
} else {
None
}
})
.collect();
if !stale_indices.is_empty() {
debug!(
self.log,
"ImportQueue removing stale entries";
"stale_items" => stale_indices.len(),
"stale_time_seconds" => self.stale_time.as_secs()
);
}
// Remove in reverse order so that removing an earlier index does not shift
// (or invalidate) the later indices still to be removed.
stale_indices.iter().rev().for_each(|&i| {
self.partials.remove(i);
});
}
/// Returns `true` if `self.chain` has not yet processed this block.
pub fn chain_has_not_seen_block(&self, block_root: &Hash256) -> bool {
self.chain
.is_new_block_root(&block_root)
.unwrap_or_else(|_| {
error!(self.log, "Unable to determine if block is new.");
true
})
}
/// Adds the `block_roots` to the partials queue.
///
/// If a `block_root` is not in the queue and has not been processed by the chain it is added
/// to the queue and its block root is included in the output.
pub fn enqueue_block_roots(
&mut self,
block_roots: &[BlockRootSlot],
sender: PeerId,
) -> Vec<BlockRootSlot> {
let new_roots: Vec<BlockRootSlot> = block_roots
.iter()
// Ignore any roots already processed by the chain.
.filter(|brs| self.chain_has_not_seen_block(&brs.block_root))
// Ignore any roots already stored in the queue.
.filter(|brs| !self.partials.iter().any(|p| p.block_root == brs.block_root))
.cloned()
.collect();
new_roots.iter().for_each(|brs| {
self.partials.push(PartialBeaconBlock {
slot: brs.slot,
block_root: brs.block_root,
sender: sender.clone(),
header: None,
body: None,
inserted: Instant::now(),
})
});
new_roots
}
/// Adds the `headers` to the `partials` queue. Returns a list of `Hash256` block roots for
/// which we should use to request `BeaconBlockBodies`.
///
/// If a `header` is not in the queue and has not been processed by the chain it is added to
/// the queue and its block root is included in the output.
///
/// If a `header` is already in the queue, but not yet processed by the chain, the block root is
/// included in the output and the `inserted` time for the partial record is set to
/// `Instant::now()`. Updating the `inserted` time stops the partial from becoming stale.
///
/// Presently the queue enforces that a `BeaconBlockHeader` _must_ be received before its
/// `BeaconBlockBody`. This is not a natural requirement and we could enhance the queue to lift
/// this restraint.
pub fn enqueue_headers(
&mut self,
headers: Vec<BeaconBlockHeader>,
sender: PeerId,
) -> Vec<Hash256> {
let mut required_bodies: Vec<Hash256> = vec![];
for header in headers {
let block_root = Hash256::from_slice(&header.hash_tree_root()[..]);
if self.chain_has_not_seen_block(&block_root) {
self.insert_header(block_root, header, sender.clone());
required_bodies.push(block_root)
}
}
required_bodies
}
/// If there is a matching `header` for this `body`, adds it to the queue.
///
/// If there is no `header` for the `body`, the body is simply discarded.
pub fn enqueue_bodies(&mut self, bodies: Vec<BeaconBlockBody>, sender: PeerId) {
for body in bodies {
self.insert_body(body, sender.clone());
}
}
pub fn enqueue_full_blocks(&mut self, blocks: Vec<BeaconBlock>, sender: PeerId) {
for block in blocks {
self.insert_full_block(block, sender.clone());
}
}
/// Inserts a header to the queue.
///
/// If the header already exists, the `inserted` time is set to `now` and no other
/// modifications are made.
fn insert_header(&mut self, block_root: Hash256, header: BeaconBlockHeader, sender: PeerId) {
if let Some(i) = self
.partials
.iter()
.position(|p| p.block_root == block_root)
{
// Case 1: there already exists a partial with a matching block root.
//
// The `inserted` time is set to now and the header is replaced, regardless of whether
// it existed or not.
self.partials[i].header = Some(header);
self.partials[i].inserted = Instant::now();
} else {
// Case 2: there was no partial with a matching block root.
//
// A new partial is added. This case permits adding a header without already knowing the
// root -- this is not possible in the wire protocol however we support it anyway.
self.partials.push(PartialBeaconBlock {
slot: header.slot,
block_root,
header: Some(header),
body: None,
inserted: Instant::now(),
sender,
})
}
}
/// Updates an existing partial with the `body`.
///
/// If there is no header for the `body`, the body is simply discarded.
///
/// If the body already existed, the `inserted` time is set to `now`.
fn insert_body(&mut self, body: BeaconBlockBody, sender: PeerId) {
let body_root = Hash256::from_slice(&body.hash_tree_root()[..]);
self.partials.iter_mut().for_each(|mut p| {
if let Some(header) = &mut p.header {
if body_root == header.block_body_root {
p.inserted = Instant::now();
if p.body.is_none() {
p.body = Some(body.clone());
p.sender = sender.clone();
}
}
}
});
}
/// Updates an existing `partial` with the completed block, or adds a new (complete) partial.
///
/// If the partial already existed, the `inserted` time is set to `now`.
fn insert_full_block(&mut self, block: BeaconBlock, sender: PeerId) {
let block_root = Hash256::from_slice(&block.hash_tree_root()[..]);
let partial = PartialBeaconBlock {
slot: block.slot,
block_root,
header: Some(block.block_header()),
body: Some(block.body),
inserted: Instant::now(),
sender,
};
if let Some(i) = self
.partials
.iter()
.position(|p| p.block_root == block_root)
{
self.partials[i] = partial;
} else {
self.partials.push(partial)
}
}
}
/// Individual components of a `BeaconBlock`, potentially all that are required to form a full
/// `BeaconBlock`.
#[derive(Clone, Debug)]
pub struct PartialBeaconBlock {
pub slot: Slot,
/// `BeaconBlock` root.
pub block_root: Hash256,
pub header: Option<BeaconBlockHeader>,
pub body: Option<BeaconBlockBody>,
/// The instant at which this record was created or last meaningfully modified. Used to
/// determine if an entry is stale and should be removed.
pub inserted: Instant,
/// The `PeerId` that last meaningfully contributed to this item.
pub sender: PeerId,
}
impl PartialBeaconBlock {
/// Consumes `self` and returns a fully built `BeaconBlock`, its root and the `sender`
/// `PeerId`, if enough information exists to complete the block. Otherwise, returns `None`.
pub fn complete(self) -> Option<(Hash256, BeaconBlock, PeerId)> {
Some((
self.block_root,
self.header?.into_block(self.body?),
self.sender,
))
}
}
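// A hedged usage sketch of the queue above: as RPC responses arrive, roots,
// headers and bodies are enqueued, and completed partials are drained into block
// processing. `process` is a hypothetical callback standing in for whatever the
// caller does with a completed block (see `process_import_queue` in the sync
// module for the real consumer).
#[allow(dead_code)]
fn drain_complete_blocks<F>(queue: &mut ImportQueue, mut process: F)
where
    F: FnMut(Hash256, BeaconBlock, PeerId) -> bool,
{
    // Drop anything that has sat in the queue longer than `stale_time`.
    queue.remove_stale();

    // `complete_blocks` returns completed blocks in ascending slot order but
    // leaves them in the queue; each entry is removed only once processed.
    for (block_root, block, sender) in queue.complete_blocks() {
        if process(block_root, block, sender) {
            queue.remove(block_root);
        }
    }
}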

View File

@ -1,3 +1,4 @@
mod import_queue;
/// Syncing for lighthouse. /// Syncing for lighthouse.
/// ///
/// Stores the various syncing methods for the beacon chain. /// Stores the various syncing methods for the beacon chain.

View File

@ -1,112 +1,849 @@
use crate::beacon_chain::BeaconChain; use super::import_queue::ImportQueue;
use eth2_libp2p::rpc::HelloMessage; use crate::beacon_chain::{BeaconChain, BlockProcessingOutcome, InvalidBlock};
use crate::message_handler::NetworkContext;
use eth2_libp2p::rpc::methods::*;
use eth2_libp2p::rpc::{RPCRequest, RPCResponse, RequestId};
use eth2_libp2p::PeerId; use eth2_libp2p::PeerId;
use slog::{debug, o}; use slog::{debug, error, info, o, warn};
use ssz::TreeHash;
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
use types::{Epoch, Hash256, Slot}; use std::time::Duration;
use types::{Attestation, BeaconBlock, Epoch, Hash256, Slot};
/// The number of slots that we can import blocks ahead of us, before going into full Sync mode. /// The number of slots that we can import blocks ahead of us, before going into full Sync mode.
const SLOT_IMPORT_TOLERANCE: u64 = 100; const SLOT_IMPORT_TOLERANCE: u64 = 100;
/// The amount of seconds a block (or partial block) may exist in the import queue.
const QUEUE_STALE_SECS: u64 = 600;
/// If a block is more than `FUTURE_SLOT_TOLERANCE` slots ahead of our slot clock, we drop it.
/// Otherwise we queue it.
const FUTURE_SLOT_TOLERANCE: u64 = 1;
/// Keeps track of syncing information for known connected peers. /// Keeps track of syncing information for known connected peers.
#[derive(Clone, Copy, Debug)]
pub struct PeerSyncInfo { pub struct PeerSyncInfo {
network_id: u8,
latest_finalized_root: Hash256, latest_finalized_root: Hash256,
latest_finalized_epoch: Epoch, latest_finalized_epoch: Epoch,
best_root: Hash256, best_root: Hash256,
best_slot: Slot, best_slot: Slot,
} }
impl PeerSyncInfo {
/// Returns `true` if the peer has a different network ID to `other`.
fn has_different_network_id_to(&self, other: Self) -> bool {
self.network_id != other.network_id
}
/// Returns `true` if the peer has a higher finalized epoch than `other`.
fn has_higher_finalized_epoch_than(&self, other: Self) -> bool {
self.latest_finalized_epoch > other.latest_finalized_epoch
}
/// Returns `true` if the peer has a higher best slot than `other`.
fn has_higher_best_slot_than(&self, other: Self) -> bool {
self.best_slot > other.best_slot
}
}
/// The status of a peer's view of the chain, relative to some other view of the chain (presumably
/// our view).
#[derive(PartialEq, Clone, Copy, Debug)]
pub enum PeerStatus {
/// The peer is on a completely different chain.
DifferentNetworkId,
/// The peer lists a finalized epoch for which we have a different root.
FinalizedEpochNotInChain,
/// The peer has a higher finalized epoch.
HigherFinalizedEpoch,
/// The peer has a higher best slot.
HigherBestSlot,
/// The peer has the same or lesser view of the chain. We have nothing to request of them.
NotInteresting,
}
impl PeerStatus {
pub fn should_handshake(self) -> bool {
match self {
PeerStatus::DifferentNetworkId => false,
PeerStatus::FinalizedEpochNotInChain => false,
PeerStatus::HigherFinalizedEpoch => true,
PeerStatus::HigherBestSlot => true,
PeerStatus::NotInteresting => true,
}
}
}
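// A minimal sketch restating the decision table above: only a mismatched network
// id, or a finalized epoch whose root we cannot find in our chain, causes the
// handshake to be refused; every other status is still worth tracking.
// Illustrative only; not part of the crate's test suite.
#[cfg(test)]
mod peer_status_sketch {
    use super::PeerStatus;

    #[test]
    fn only_incompatible_peers_are_refused() {
        assert!(!PeerStatus::DifferentNetworkId.should_handshake());
        assert!(!PeerStatus::FinalizedEpochNotInChain.should_handshake());
        assert!(PeerStatus::HigherFinalizedEpoch.should_handshake());
        assert!(PeerStatus::HigherBestSlot.should_handshake());
        assert!(PeerStatus::NotInteresting.should_handshake());
    }
}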
impl From<HelloMessage> for PeerSyncInfo {
fn from(hello: HelloMessage) -> PeerSyncInfo {
PeerSyncInfo {
network_id: hello.network_id,
latest_finalized_root: hello.latest_finalized_root,
latest_finalized_epoch: hello.latest_finalized_epoch,
best_root: hello.best_root,
best_slot: hello.best_slot,
}
}
}
impl From<&Arc<BeaconChain>> for PeerSyncInfo {
fn from(chain: &Arc<BeaconChain>) -> PeerSyncInfo {
Self::from(chain.hello_message())
}
}
/// The current syncing state. /// The current syncing state.
#[derive(PartialEq)] #[derive(PartialEq)]
pub enum SyncState { pub enum SyncState {
Idle, Idle,
Downloading, Downloading,
Stopped, _Stopped,
} }
/// Simple Syncing protocol. /// Simple Syncing protocol.
//TODO: Decide for HELLO messages whether its better to keep current in RAM or build on the fly
//when asked.
pub struct SimpleSync { pub struct SimpleSync {
/// A reference to the underlying beacon chain. /// A reference to the underlying beacon chain.
chain: Arc<BeaconChain>, chain: Arc<BeaconChain>,
/// A mapping of Peers to their respective PeerSyncInfo. /// A mapping of Peers to their respective PeerSyncInfo.
known_peers: HashMap<PeerId, PeerSyncInfo>, known_peers: HashMap<PeerId, PeerSyncInfo>,
/// A queue to allow importing of blocks
import_queue: ImportQueue,
/// The current state of the syncing protocol. /// The current state of the syncing protocol.
state: SyncState, state: SyncState,
/// The network id, for quick HELLO RPC message lookup.
network_id: u8,
/// The latest epoch of the syncing chain.
latest_finalized_epoch: Epoch,
/// The latest block of the syncing chain.
latest_slot: Slot,
/// Sync logger. /// Sync logger.
log: slog::Logger, log: slog::Logger,
} }
impl SimpleSync { impl SimpleSync {
/// Instantiate a `SimpleSync` instance, with no peers and an empty queue.
pub fn new(beacon_chain: Arc<BeaconChain>, log: &slog::Logger) -> Self { pub fn new(beacon_chain: Arc<BeaconChain>, log: &slog::Logger) -> Self {
let state = beacon_chain.get_state();
let sync_logger = log.new(o!("Service"=> "Sync")); let sync_logger = log.new(o!("Service"=> "Sync"));
let queue_item_stale_time = Duration::from_secs(QUEUE_STALE_SECS);
let import_queue =
ImportQueue::new(beacon_chain.clone(), queue_item_stale_time, log.clone());
SimpleSync { SimpleSync {
chain: beacon_chain.clone(), chain: beacon_chain.clone(),
known_peers: HashMap::new(), known_peers: HashMap::new(),
import_queue,
state: SyncState::Idle, state: SyncState::Idle,
network_id: beacon_chain.get_spec().network_id,
latest_finalized_epoch: state.finalized_epoch,
latest_slot: state.slot - 1, //TODO: Build latest block function into Beacon chain and correct this
log: sync_logger, log: sync_logger,
} }
} }
/// Generates our current state in the form of a HELLO RPC message. /// Handle a `Goodbye` message from a peer.
pub fn generate_hello(&self) -> HelloMessage { ///
let state = &self.chain.get_state(); /// Removes the peer from `known_peers`.
//TODO: Paul to verify the logic of these fields. pub fn on_goodbye(&mut self, peer_id: PeerId, reason: GoodbyeReason) {
HelloMessage { info!(
network_id: self.network_id, self.log, "PeerGoodbye";
latest_finalized_root: state.finalized_root, "peer" => format!("{:?}", peer_id),
latest_finalized_epoch: state.finalized_epoch, "reason" => format!("{:?}", reason),
best_root: Hash256::zero(), //TODO: build correct value as a beacon chain function );
best_slot: state.slot - 1,
self.known_peers.remove(&peer_id);
}
/// Handle the connection of a new peer.
///
/// Sends a `Hello` message to the peer.
pub fn on_connect(&self, peer_id: PeerId, network: &mut NetworkContext) {
info!(self.log, "PeerConnect"; "peer" => format!("{:?}", peer_id));
network.send_rpc_request(peer_id, RPCRequest::Hello(self.chain.hello_message()));
}
/// Handle a `Hello` request.
///
/// Processes the `HelloMessage` from the remote peer and sends back our `Hello`.
pub fn on_hello_request(
&mut self,
peer_id: PeerId,
request_id: RequestId,
hello: HelloMessage,
network: &mut NetworkContext,
) {
debug!(self.log, "HelloRequest"; "peer" => format!("{:?}", peer_id));
// Say hello back.
network.send_rpc_response(
peer_id.clone(),
request_id,
RPCResponse::Hello(self.chain.hello_message()),
);
self.process_hello(peer_id, hello, network);
}
/// Process a `Hello` response from a peer.
pub fn on_hello_response(
&mut self,
peer_id: PeerId,
hello: HelloMessage,
network: &mut NetworkContext,
) {
debug!(self.log, "HelloResponse"; "peer" => format!("{:?}", peer_id));
// Process the hello message, without sending back another hello.
self.process_hello(peer_id, hello, network);
}
/// Returns a `PeerStatus` for some peer.
fn peer_status(&self, peer: PeerSyncInfo) -> PeerStatus {
let local = PeerSyncInfo::from(&self.chain);
if peer.has_different_network_id_to(local) {
return PeerStatus::DifferentNetworkId;
}
if local.has_higher_finalized_epoch_than(peer) {
let peer_finalized_slot = peer
.latest_finalized_epoch
.start_slot(self.chain.get_spec().slots_per_epoch);
let local_roots = self.chain.get_block_roots(peer_finalized_slot, 1, 0);
if let Ok(local_roots) = local_roots {
if let Some(local_root) = local_roots.get(0) {
if *local_root != peer.latest_finalized_root {
return PeerStatus::FinalizedEpochNotInChain;
}
} else {
error!(
self.log,
"Cannot get root for peer finalized slot.";
"error" => "empty roots"
);
}
} else {
error!(
self.log,
"Cannot get root for peer finalized slot.";
"error" => format!("{:?}", local_roots)
);
}
}
if peer.has_higher_finalized_epoch_than(local) {
PeerStatus::HigherFinalizedEpoch
} else if peer.has_higher_best_slot_than(local) {
PeerStatus::HigherBestSlot
} else {
PeerStatus::NotInteresting
} }
} }
pub fn validate_peer(&mut self, peer_id: PeerId, hello_message: HelloMessage) -> bool { /// Process a `Hello` message, requesting new blocks if appropriate.
// network id must match ///
if hello_message.network_id != self.network_id { /// Disconnects the peer if required.
return false; fn process_hello(
} &mut self,
// compare latest epoch and finalized root to see if they exist in our chain peer_id: PeerId,
if hello_message.latest_finalized_epoch <= self.latest_finalized_epoch { hello: HelloMessage,
// ensure their finalized root is in our chain network: &mut NetworkContext,
// TODO: Get the finalized root at hello_message.latest_epoch and ensure they match ) {
//if (hello_message.latest_finalized_root == self.chain.get_state() { let spec = self.chain.get_spec();
// return false;
// } let remote = PeerSyncInfo::from(hello);
let local = PeerSyncInfo::from(&self.chain);
let remote_status = self.peer_status(remote);
if remote_status.should_handshake() {
info!(self.log, "HandshakeSuccess"; "peer" => format!("{:?}", peer_id));
self.known_peers.insert(peer_id.clone(), remote);
} else {
info!(
self.log, "HandshakeFailure";
"peer" => format!("{:?}", peer_id),
"reason" => "network_id"
);
network.disconnect(peer_id.clone(), GoodbyeReason::IrreleventNetwork);
} }
// the client is valid, add it to our list of known_peers and request sync if required // If required, send additional requests.
// update peer list if peer already exists match remote_status {
let peer_info = PeerSyncInfo { PeerStatus::HigherFinalizedEpoch => {
latest_finalized_root: hello_message.latest_finalized_root, let start_slot = remote
latest_finalized_epoch: hello_message.latest_finalized_epoch, .latest_finalized_epoch
best_root: hello_message.best_root, .start_slot(spec.slots_per_epoch);
best_slot: hello_message.best_slot, let required_slots = start_slot - local.best_slot;
self.request_block_roots(
peer_id,
BeaconBlockRootsRequest {
start_slot,
count: required_slots.into(),
},
network,
);
}
PeerStatus::HigherBestSlot => {
let required_slots = remote.best_slot - local.best_slot;
self.request_block_roots(
peer_id,
BeaconBlockRootsRequest {
start_slot: local.best_slot + 1,
count: required_slots.into(),
},
network,
);
}
PeerStatus::FinalizedEpochNotInChain => {}
PeerStatus::DifferentNetworkId => {}
PeerStatus::NotInteresting => {}
}
}
/// Handle a `BeaconBlockRoots` request from the peer.
pub fn on_beacon_block_roots_request(
&mut self,
peer_id: PeerId,
request_id: RequestId,
req: BeaconBlockRootsRequest,
network: &mut NetworkContext,
) {
debug!(
self.log,
"BlockRootsRequest";
"peer" => format!("{:?}", peer_id),
"count" => req.count,
);
let roots = match self
.chain
.get_block_roots(req.start_slot, req.count as usize, 0)
{
Ok(roots) => roots,
Err(e) => {
// TODO: return RPC error.
warn!(
self.log,
"RPCRequest"; "peer" => format!("{:?}", peer_id),
"req" => "BeaconBlockRoots",
"error" => format!("{:?}", e)
);
return;
}
}; };
debug!(self.log, "Handshake successful. Peer: {:?}", peer_id); let roots = roots
self.known_peers.insert(peer_id, peer_info); .iter()
.enumerate()
.map(|(i, &block_root)| BlockRootSlot {
slot: req.start_slot + Slot::from(i),
block_root,
})
.collect();
// set state to sync network.send_rpc_response(
if self.state == SyncState::Idle peer_id,
&& hello_message.best_slot > self.latest_slot + SLOT_IMPORT_TOLERANCE request_id,
{ RPCResponse::BeaconBlockRoots(BeaconBlockRootsResponse { roots }),
self.state = SyncState::Downloading; )
//TODO: Start requesting blocks from known peers. Ideally in batches }
/// Handle a `BeaconBlockRoots` response from the peer.
pub fn on_beacon_block_roots_response(
&mut self,
peer_id: PeerId,
res: BeaconBlockRootsResponse,
network: &mut NetworkContext,
) {
debug!(
self.log,
"BlockRootsResponse";
"peer" => format!("{:?}", peer_id),
"count" => res.roots.len(),
);
if res.roots.is_empty() {
warn!(
self.log,
"Peer returned empty block roots response";
"peer_id" => format!("{:?}", peer_id)
);
return;
} }
true // The wire protocol specifies that slots must be in ascending order.
if !res.slots_are_ascending() {
warn!(
self.log,
"Peer returned block roots response with bad slot ordering";
"peer_id" => format!("{:?}", peer_id)
);
return;
}
let new_roots = self
.import_queue
.enqueue_block_roots(&res.roots, peer_id.clone());
// No new roots means nothing to do.
//
// This check protects against future panics.
if new_roots.is_empty() {
return;
}
// Determine the first (earliest) and last (latest) `BlockRootSlot` items.
//
// This logic relies upon slots to be in ascending order, which is enforced earlier.
let first = new_roots.first().expect("Non-empty list must have first");
let last = new_roots.last().expect("Non-empty list must have last");
// Request all headers between the earliest and latest new `BlockRootSlot` items.
self.request_block_headers(
peer_id,
BeaconBlockHeadersRequest {
start_root: first.block_root,
start_slot: first.slot,
max_headers: (last.slot - first.slot + 1).as_u64(),
skip_slots: 0,
},
network,
)
}
/// Handle a `BeaconBlockHeaders` request from the peer.
pub fn on_beacon_block_headers_request(
&mut self,
peer_id: PeerId,
request_id: RequestId,
req: BeaconBlockHeadersRequest,
network: &mut NetworkContext,
) {
debug!(
self.log,
"BlockHeadersRequest";
"peer" => format!("{:?}", peer_id),
"count" => req.max_headers,
);
let headers = match self.chain.get_block_headers(
req.start_slot,
req.max_headers as usize,
req.skip_slots as usize,
) {
Ok(headers) => headers,
Err(e) => {
// TODO: return RPC error.
warn!(
self.log,
"RPCRequest"; "peer" => format!("{:?}", peer_id),
"req" => "BeaconBlockHeaders",
"error" => format!("{:?}", e)
);
return;
}
};
network.send_rpc_response(
peer_id,
request_id,
RPCResponse::BeaconBlockHeaders(BeaconBlockHeadersResponse { headers }),
)
}
/// Handle a `BeaconBlockHeaders` response from the peer.
pub fn on_beacon_block_headers_response(
&mut self,
peer_id: PeerId,
res: BeaconBlockHeadersResponse,
network: &mut NetworkContext,
) {
debug!(
self.log,
"BlockHeadersResponse";
"peer" => format!("{:?}", peer_id),
"count" => res.headers.len(),
);
if res.headers.is_empty() {
warn!(
self.log,
"Peer returned empty block headers response. PeerId: {:?}", peer_id
);
return;
}
// Enqueue the headers, obtaining a list of the roots of the headers which were newly added
// to the queue.
let block_roots = self
.import_queue
.enqueue_headers(res.headers, peer_id.clone());
self.request_block_bodies(peer_id, BeaconBlockBodiesRequest { block_roots }, network);
}
/// Handle a `BeaconBlockBodies` request from the peer.
pub fn on_beacon_block_bodies_request(
&mut self,
peer_id: PeerId,
request_id: RequestId,
req: BeaconBlockBodiesRequest,
network: &mut NetworkContext,
) {
debug!(
self.log,
"BlockBodiesRequest";
"peer" => format!("{:?}", peer_id),
"count" => req.block_roots.len(),
);
let block_bodies = match self.chain.get_block_bodies(&req.block_roots) {
Ok(bodies) => bodies,
Err(e) => {
// TODO: return RPC error.
warn!(
self.log,
"RPCRequest"; "peer" => format!("{:?}", peer_id),
"req" => "BeaconBlockBodies",
"error" => format!("{:?}", e)
);
return;
}
};
network.send_rpc_response(
peer_id,
request_id,
RPCResponse::BeaconBlockBodies(BeaconBlockBodiesResponse { block_bodies }),
)
}
/// Handle a `BeaconBlockBodies` response from the peer.
pub fn on_beacon_block_bodies_response(
&mut self,
peer_id: PeerId,
res: BeaconBlockBodiesResponse,
network: &mut NetworkContext,
) {
debug!(
self.log,
"BlockBodiesResponse";
"peer" => format!("{:?}", peer_id),
"count" => res.block_bodies.len(),
);
self.import_queue
.enqueue_bodies(res.block_bodies, peer_id.clone());
// Clear out old entries
self.import_queue.remove_stale();
// Import blocks, if possible.
self.process_import_queue(network);
}
/// Process a gossip message declaring a new block.
///
/// Returns a `bool` which, if `true`, indicates we should forward the block to our peers.
pub fn on_block_gossip(
&mut self,
peer_id: PeerId,
block: BeaconBlock,
network: &mut NetworkContext,
) -> bool {
info!(
self.log,
"NewGossipBlock";
"peer" => format!("{:?}", peer_id),
);
// Ignore any block from a finalized slot.
if self.slot_is_finalized(block.slot) {
warn!(
self.log, "NewGossipBlock";
"msg" => "new block slot is finalized.",
"block_slot" => block.slot,
);
return false;
}
let block_root = Hash256::from_slice(&block.hash_tree_root());
// Ignore any block that the chain already knows about.
if self.chain_has_seen_block(&block_root) {
// TODO: Age confirm that we shouldn't forward a block if we already know of it.
return false;
}
debug!(
self.log,
"NewGossipBlock";
"peer" => format!("{:?}", peer_id),
"msg" => "processing block",
);
match self.chain.process_block(block.clone()) {
Ok(BlockProcessingOutcome::InvalidBlock(InvalidBlock::ParentUnknown)) => {
// The parent of this block is unknown, so it cannot be processed yet.
debug!(
self.log, "NewGossipBlock";
"msg" => "parent block unknown",
"parent_root" => format!("{}", block.previous_block_root),
"peer" => format!("{:?}", peer_id),
);
// Queue the block for later processing.
self.import_queue
.enqueue_full_blocks(vec![block], peer_id.clone());
// Send a hello to learn of the client's best slot so we can then sync the required
// parent(s).
network.send_rpc_request(
peer_id.clone(),
RPCRequest::Hello(self.chain.hello_message()),
);
// Forward the block onto our peers.
//
// Note: this may need to be changed if we decide to only forward blocks if we have
// all required info.
true
}
Ok(BlockProcessingOutcome::InvalidBlock(InvalidBlock::FutureSlot {
present_slot,
block_slot,
})) => {
if block_slot - present_slot > FUTURE_SLOT_TOLERANCE {
// The block is too far in the future, drop it.
warn!(
self.log, "NewGossipBlock";
"msg" => "future block rejected",
"present_slot" => present_slot,
"block_slot" => block_slot,
"FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE,
"peer" => format!("{:?}", peer_id),
);
// Do not forward the block around to peers.
false
} else {
// The block is in the future, but not too far.
warn!(
self.log, "NewGossipBlock";
"msg" => "queuing future block",
"present_slot" => present_slot,
"block_slot" => block_slot,
"FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE,
"peer" => format!("{:?}", peer_id),
);
// Queue the block for later processing.
self.import_queue.enqueue_full_blocks(vec![block], peer_id);
// Forward the block around to peers.
true
}
}
Ok(outcome) => {
if outcome.is_invalid() {
// The peer has sent a block which is fundamentally invalid.
warn!(
self.log, "NewGossipBlock";
"msg" => "invalid block from peer",
"outcome" => format!("{:?}", outcome),
"peer" => format!("{:?}", peer_id),
);
// Disconnect the peer
network.disconnect(peer_id, GoodbyeReason::Fault);
// Do not forward the block to peers.
false
} else if outcome.sucessfully_processed() {
// The block was valid and we processed it successfully.
info!(
self.log, "NewGossipBlock";
"msg" => "block import successful",
"peer" => format!("{:?}", peer_id),
);
// Forward the block to peers
true
} else {
// The block wasn't necessarily invalid but we didn't process it successfully.
// This condition shouldn't be reached.
error!(
self.log, "NewGossipBlock";
"msg" => "unexpected condition in processing block.",
"outcome" => format!("{:?}", outcome),
);
// Do not forward the block on.
false
}
}
Err(e) => {
// We encountered an error whilst processing the block.
//
// Blocks should not be able to trigger errors, instead they should be flagged as
// invalid.
error!(
self.log, "NewGossipBlock";
"msg" => "internal error in processing block.",
"error" => format!("{:?}", e),
);
// Do not forward the block to peers.
false
}
}
}
/// Process a gossip message declaring a new attestation.
///
/// Not currently implemented.
pub fn on_attestation_gossip(
&mut self,
peer_id: PeerId,
msg: Attestation,
_network: &mut NetworkContext,
) {
info!(
self.log,
"NewAttestationGossip";
"peer" => format!("{:?}", peer_id),
);
match self.chain.process_attestation(msg) {
Ok(()) => info!(self.log, "ImportedAttestation"),
Err(e) => warn!(self.log, "InvalidAttestation"; "error" => format!("{:?}", e)),
}
}
/// Iterate through the `import_queue` and process any complete blocks.
///
/// If a block is successfully processed it is removed from the queue, otherwise it remains in
/// the queue.
pub fn process_import_queue(&mut self, network: &mut NetworkContext) {
let mut successful = 0;
let mut invalid = 0;
let mut errored = 0;
// Loop through all of the complete blocks in the queue.
for (block_root, block, sender) in self.import_queue.complete_blocks() {
match self.chain.process_block(block) {
Ok(outcome) => {
if outcome.is_invalid() {
invalid += 1;
warn!(
self.log,
"InvalidBlock";
"sender_peer_id" => format!("{:?}", sender),
"reason" => format!("{:?}", outcome),
);
network.disconnect(sender, GoodbyeReason::Fault);
break;
}
// If this returns true, the item will be removed from the queue.
if outcome.sucessfully_processed() {
successful += 1;
self.import_queue.remove(block_root);
} else {
debug!(
self.log,
"ProcessImportQueue";
"msg" => "Block not imported",
"outcome" => format!("{:?}", outcome),
"peer" => format!("{:?}", sender),
);
}
}
Err(e) => {
errored += 1;
error!(self.log, "BlockProcessingError"; "error" => format!("{:?}", e));
}
}
}
if successful > 0 {
info!(self.log, "Imported {} blocks", successful)
}
if invalid > 0 {
warn!(self.log, "Rejected {} invalid blocks", invalid)
}
if errored > 0 {
warn!(self.log, "Failed to process {} blocks", errored)
}
}
/// Request some `BeaconBlockRoots` from the remote peer.
fn request_block_roots(
&mut self,
peer_id: PeerId,
req: BeaconBlockRootsRequest,
network: &mut NetworkContext,
) {
// Potentially set state to sync.
if self.state == SyncState::Idle && req.count > SLOT_IMPORT_TOLERANCE {
debug!(self.log, "Entering downloading sync state.");
self.state = SyncState::Downloading;
}
debug!(
self.log,
"RPCRequest(BeaconBlockRoots)";
"count" => req.count,
"peer" => format!("{:?}", peer_id)
);
// TODO: handle count > max count.
network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlockRoots(req));
}
/// Request some `BeaconBlockHeaders` from the remote peer.
fn request_block_headers(
&mut self,
peer_id: PeerId,
req: BeaconBlockHeadersRequest,
network: &mut NetworkContext,
) {
debug!(
self.log,
"RPCRequest(BeaconBlockHeaders)";
"max_headers" => req.max_headers,
"peer" => format!("{:?}", peer_id)
);
network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlockHeaders(req));
}
/// Request some `BeaconBlockBodies` from the remote peer.
fn request_block_bodies(
&mut self,
peer_id: PeerId,
req: BeaconBlockBodiesRequest,
network: &mut NetworkContext,
) {
debug!(
self.log,
"RPCRequest(BeaconBlockBodies)";
"count" => req.block_roots.len(),
"peer" => format!("{:?}", peer_id)
);
network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlockBodies(req));
}
/// Returns `true` if `self.chain` has not yet processed this block.
pub fn chain_has_seen_block(&self, block_root: &Hash256) -> bool {
!self
.chain
.is_new_block_root(&block_root)
.unwrap_or_else(|_| {
error!(self.log, "Unable to determine if block is new.");
false
})
}
/// Returns `true` if the given slot is finalized in our chain.
fn slot_is_finalized(&self, slot: Slot) -> bool {
slot <= self
.chain
.hello_message()
.latest_finalized_epoch
.start_slot(self.chain.get_spec().slots_per_epoch)
}
/// Generates our current state in the form of a HELLO RPC message.
pub fn generate_hello(&self) -> HelloMessage {
self.chain.hello_message()
} }
} }
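// A hedged sketch of the request built in `process_hello` for the common
// `HigherBestSlot` case: we ask for every block root strictly after our own best
// slot, up to and including the peer's best slot. The helper is hypothetical and
// only restates that arithmetic.
#[allow(dead_code)]
fn roots_request_for_higher_best_slot(
    local: PeerSyncInfo,
    remote: PeerSyncInfo,
) -> BeaconBlockRootsRequest {
    let required_slots = remote.best_slot - local.best_slot;
    BeaconBlockRootsRequest {
        start_slot: local.best_slot + 1,
        count: required_slots.into(),
    }
}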

View File

@ -0,0 +1,570 @@
use crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Sender};
use eth2_libp2p::rpc::methods::*;
use eth2_libp2p::rpc::{RPCMethod, RPCRequest, RPCResponse, RequestId};
use eth2_libp2p::{PeerId, RPCEvent};
use network::beacon_chain::BeaconChain as NetworkBeaconChain;
use network::message_handler::{HandlerMessage, MessageHandler};
use network::service::{NetworkMessage, OutgoingMessage};
use sloggers::terminal::{Destination, TerminalLoggerBuilder};
use sloggers::types::Severity;
use sloggers::Build;
use std::time::Duration;
use test_harness::BeaconChainHarness;
use tokio::runtime::TaskExecutor;
use types::{test_utils::TestingBeaconStateBuilder, *};
pub struct SyncNode {
pub id: usize,
sender: Sender<HandlerMessage>,
receiver: Receiver<NetworkMessage>,
peer_id: PeerId,
harness: BeaconChainHarness,
}
impl SyncNode {
fn from_beacon_state_builder(
id: usize,
executor: &TaskExecutor,
state_builder: TestingBeaconStateBuilder,
spec: &ChainSpec,
logger: slog::Logger,
) -> Self {
let harness = BeaconChainHarness::from_beacon_state_builder(state_builder, spec.clone());
let (network_sender, network_receiver) = unbounded();
let message_handler_sender = MessageHandler::spawn(
harness.beacon_chain.clone(),
network_sender,
executor,
logger,
)
.unwrap();
Self {
id,
sender: message_handler_sender,
receiver: network_receiver,
peer_id: PeerId::random(),
harness,
}
}
fn increment_beacon_chain_slot(&mut self) {
self.harness.increment_beacon_chain_slot();
}
fn send(&self, message: HandlerMessage) {
self.sender.send(message).unwrap();
}
fn recv(&self) -> Result<NetworkMessage, RecvTimeoutError> {
self.receiver.recv_timeout(Duration::from_millis(500))
}
fn hello_message(&self) -> HelloMessage {
self.harness.beacon_chain.hello_message()
}
pub fn connect_to(&mut self, node: &SyncNode) {
let message = HandlerMessage::PeerDialed(self.peer_id.clone());
node.send(message);
}
/// Reads the receive queue from one node and passes the message to the other. Also returns a
/// copy of the message.
///
/// self -----> node
///        |
///        us
///
/// Named after the unix `tee` command.
fn tee(&mut self, node: &SyncNode) -> NetworkMessage {
let network_message = self.recv().expect("Timeout on tee");
let handler_message = match network_message.clone() {
NetworkMessage::Send(_to_peer_id, OutgoingMessage::RPC(event)) => {
HandlerMessage::RPC(self.peer_id.clone(), event)
}
_ => panic!("tee cannot parse {:?}", network_message),
};
node.send(handler_message);
network_message
}
fn tee_hello_request(&mut self, node: &SyncNode) -> HelloMessage {
let request = self.tee_rpc_request(node);
match request {
RPCRequest::Hello(message) => message,
_ => panic!("tee_hello_request got: {:?}", request),
}
}
fn tee_hello_response(&mut self, node: &SyncNode) -> HelloMessage {
let response = self.tee_rpc_response(node);
match response {
RPCResponse::Hello(message) => message,
_ => panic!("tee_hello_response got: {:?}", response),
}
}
fn tee_block_root_request(&mut self, node: &SyncNode) -> BeaconBlockRootsRequest {
let msg = self.tee_rpc_request(node);
match msg {
RPCRequest::BeaconBlockRoots(data) => data,
_ => panic!("tee_block_root_request got: {:?}", msg),
}
}
fn tee_block_root_response(&mut self, node: &SyncNode) -> BeaconBlockRootsResponse {
let msg = self.tee_rpc_response(node);
match msg {
RPCResponse::BeaconBlockRoots(data) => data,
_ => panic!("tee_block_root_response got: {:?}", msg),
}
}
fn tee_block_header_request(&mut self, node: &SyncNode) -> BeaconBlockHeadersRequest {
let msg = self.tee_rpc_request(node);
match msg {
RPCRequest::BeaconBlockHeaders(data) => data,
_ => panic!("tee_block_header_request got: {:?}", msg),
}
}
fn tee_block_header_response(&mut self, node: &SyncNode) -> BeaconBlockHeadersResponse {
let msg = self.tee_rpc_response(node);
match msg {
RPCResponse::BeaconBlockHeaders(data) => data,
_ => panic!("tee_block_header_response got: {:?}", msg),
}
}
fn tee_block_body_request(&mut self, node: &SyncNode) -> BeaconBlockBodiesRequest {
let msg = self.tee_rpc_request(node);
match msg {
RPCRequest::BeaconBlockBodies(data) => data,
_ => panic!("tee_block_body_request got: {:?}", msg),
}
}
fn tee_block_body_response(&mut self, node: &SyncNode) -> BeaconBlockBodiesResponse {
let msg = self.tee_rpc_response(node);
match msg {
RPCResponse::BeaconBlockBodies(data) => data,
_ => panic!("tee_block_body_response got: {:?}", msg),
}
}
fn tee_rpc_request(&mut self, node: &SyncNode) -> RPCRequest {
let network_message = self.tee(node);
match network_message {
NetworkMessage::Send(
_peer_id,
OutgoingMessage::RPC(RPCEvent::Request {
id: _,
method_id: _,
body,
}),
) => body,
_ => panic!("tee_rpc_request failed! got {:?}", network_message),
}
}
fn tee_rpc_response(&mut self, node: &SyncNode) -> RPCResponse {
let network_message = self.tee(node);
match network_message {
NetworkMessage::Send(
_peer_id,
OutgoingMessage::RPC(RPCEvent::Response {
id: _,
method_id: _,
result,
}),
) => result,
_ => panic!("tee_rpc_response failed! got {:?}", network_message),
}
}
pub fn get_block_root_request(&self) -> BeaconBlockRootsRequest {
let request = self.recv_rpc_request().expect("No block root request");
match request {
RPCRequest::BeaconBlockRoots(request) => request,
_ => panic!("Did not get block root request"),
}
}
pub fn get_block_headers_request(&self) -> BeaconBlockHeadersRequest {
let request = self.recv_rpc_request().expect("No block headers request");
match request {
RPCRequest::BeaconBlockHeaders(request) => request,
_ => panic!("Did not get block headers request"),
}
}
pub fn get_block_bodies_request(&self) -> BeaconBlockBodiesRequest {
let request = self.recv_rpc_request().expect("No block bodies request");
match request {
RPCRequest::BeaconBlockBodies(request) => request,
_ => panic!("Did not get block bodies request"),
}
}
fn _recv_rpc_response(&self) -> Result<RPCResponse, RecvTimeoutError> {
let network_message = self.recv()?;
Ok(match network_message {
NetworkMessage::Send(
_peer_id,
OutgoingMessage::RPC(RPCEvent::Response {
id: _,
method_id: _,
result,
}),
) => result,
_ => panic!("get_rpc_response failed! got {:?}", network_message),
})
}
fn recv_rpc_request(&self) -> Result<RPCRequest, RecvTimeoutError> {
let network_message = self.recv()?;
Ok(match network_message {
NetworkMessage::Send(
_peer_id,
OutgoingMessage::RPC(RPCEvent::Request {
id: _,
method_id: _,
body,
}),
) => body,
_ => panic!("get_rpc_request failed! got {:?}", network_message),
})
}
}
fn get_logger() -> slog::Logger {
let mut builder = TerminalLoggerBuilder::new();
builder.level(Severity::Debug);
builder.destination(Destination::Stderr);
builder.build().unwrap()
}
pub struct SyncMaster {
harness: BeaconChainHarness,
peer_id: PeerId,
response_ids: Vec<RequestId>,
}
impl SyncMaster {
fn from_beacon_state_builder(
state_builder: TestingBeaconStateBuilder,
node_count: usize,
spec: &ChainSpec,
) -> Self {
let harness = BeaconChainHarness::from_beacon_state_builder(state_builder, spec.clone());
let peer_id = PeerId::random();
let response_ids = vec![RequestId::from(0); node_count];
Self {
harness,
peer_id,
response_ids,
}
}
pub fn response_id(&mut self, node: &SyncNode) -> RequestId {
let id = self.response_ids[node.id].clone();
self.response_ids[node.id].increment();
id
}
pub fn do_hello_with(&mut self, node: &SyncNode) {
let message = HandlerMessage::PeerDialed(self.peer_id.clone());
node.send(message);
let request = node.recv_rpc_request().expect("No hello response");
match request {
RPCRequest::Hello(_hello) => {
let hello = self.harness.beacon_chain.hello_message();
let response = self.rpc_response(node, RPCResponse::Hello(hello));
node.send(response);
}
_ => panic!("Got message other than hello from node."),
}
}
pub fn respond_to_block_roots_request(
&mut self,
node: &SyncNode,
request: BeaconBlockRootsRequest,
) {
let roots = self
.harness
.beacon_chain
.get_block_roots(request.start_slot, request.count as usize, 0)
.expect("Beacon chain did not give block roots")
.iter()
.enumerate()
.map(|(i, root)| BlockRootSlot {
block_root: *root,
slot: Slot::from(i) + request.start_slot,
})
.collect();
let response = RPCResponse::BeaconBlockRoots(BeaconBlockRootsResponse { roots });
self.send_rpc_response(node, response)
}
pub fn respond_to_block_headers_request(
&mut self,
node: &SyncNode,
request: BeaconBlockHeadersRequest,
) {
let roots = self
.harness
.beacon_chain
.get_block_roots(
request.start_slot,
request.max_headers as usize,
request.skip_slots as usize,
)
.expect("Beacon chain did not give blocks");
if roots.is_empty() {
panic!("Roots was empty when trying to get headers.")
}
assert_eq!(
roots[0], request.start_root,
"Got the wrong start root when getting headers"
);
let headers: Vec<BeaconBlockHeader> = roots
.iter()
.map(|root| {
let block = self
.harness
.beacon_chain
.get_block(root)
.expect("Failed to load block")
.expect("Block did not exist");
block.block_header()
})
.collect();
let response = RPCResponse::BeaconBlockHeaders(BeaconBlockHeadersResponse { headers });
self.send_rpc_response(node, response)
}
pub fn respond_to_block_bodies_request(
&mut self,
node: &SyncNode,
request: BeaconBlockBodiesRequest,
) {
let block_bodies: Vec<BeaconBlockBody> = request
.block_roots
.iter()
.map(|root| {
let block = self
.harness
.beacon_chain
.get_block(root)
.expect("Failed to load block")
.expect("Block did not exist");
block.body
})
.collect();
let response = RPCResponse::BeaconBlockBodies(BeaconBlockBodiesResponse { block_bodies });
self.send_rpc_response(node, response)
}
fn send_rpc_response(&mut self, node: &SyncNode, rpc_response: RPCResponse) {
node.send(self.rpc_response(node, rpc_response));
}
fn rpc_response(&mut self, node: &SyncNode, rpc_response: RPCResponse) -> HandlerMessage {
HandlerMessage::RPC(
self.peer_id.clone(),
RPCEvent::Response {
id: self.response_id(node),
method_id: RPCMethod::Hello.into(),
result: rpc_response,
},
)
}
}
fn test_setup(
state_builder: TestingBeaconStateBuilder,
node_count: usize,
spec: &ChainSpec,
logger: slog::Logger,
) -> (tokio::runtime::Runtime, SyncMaster, Vec<SyncNode>) {
let runtime = tokio::runtime::Runtime::new().unwrap();
let mut nodes = Vec::with_capacity(node_count);
for id in 0..node_count {
let node = SyncNode::from_beacon_state_builder(
id,
&runtime.executor(),
state_builder.clone(),
&spec,
logger.clone(),
);
nodes.push(node);
}
let master = SyncMaster::from_beacon_state_builder(state_builder, node_count, &spec);
(runtime, master, nodes)
}
pub fn build_blocks(blocks: usize, master: &mut SyncMaster, nodes: &mut Vec<SyncNode>) {
for _ in 0..blocks {
master.harness.advance_chain_with_block();
for i in 0..nodes.len() {
nodes[i].increment_beacon_chain_slot();
}
}
master.harness.run_fork_choice();
for i in 0..nodes.len() {
nodes[i].harness.run_fork_choice();
}
}
#[test]
#[ignore]
fn sync_node_with_master() {
let logger = get_logger();
let spec = ChainSpec::few_validators();
let validator_count = 8;
let node_count = 1;
let state_builder =
TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec);
let (runtime, mut master, mut nodes) =
test_setup(state_builder, node_count, &spec, logger.clone());
let original_node_slot = nodes[0].hello_message().best_slot;
build_blocks(2, &mut master, &mut nodes);
master.do_hello_with(&nodes[0]);
let roots_request = nodes[0].get_block_root_request();
assert_eq!(roots_request.start_slot, original_node_slot + 1);
assert_eq!(roots_request.count, 2);
master.respond_to_block_roots_request(&nodes[0], roots_request);
let headers_request = nodes[0].get_block_headers_request();
assert_eq!(headers_request.start_slot, original_node_slot + 1);
assert_eq!(headers_request.max_headers, 2);
assert_eq!(headers_request.skip_slots, 0);
master.respond_to_block_headers_request(&nodes[0], headers_request);
let bodies_request = nodes[0].get_block_bodies_request();
assert_eq!(bodies_request.block_roots.len(), 2);
master.respond_to_block_bodies_request(&nodes[0], bodies_request);
std::thread::sleep(Duration::from_millis(10000));
runtime.shutdown_now();
}
#[test]
#[ignore]
fn sync_two_nodes() {
let logger = get_logger();
let spec = ChainSpec::few_validators();
let validator_count = 8;
let node_count = 2;
let state_builder =
TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec);
let (runtime, _master, mut nodes) =
test_setup(state_builder, node_count, &spec, logger.clone());
// let original_node_slot = nodes[0].hello_message().best_slot;
let mut node_a = nodes.remove(0);
let mut node_b = nodes.remove(0);
let blocks = 2;
// Node A builds out a longer, better chain.
for _ in 0..blocks {
// Node A should build a block.
node_a.harness.advance_chain_with_block();
// Node B should just increment its slot without a block.
node_b.harness.increment_beacon_chain_slot();
}
node_a.harness.run_fork_choice();
// A connects to B.
node_a.connect_to(&node_b);
// B says hello to A.
node_b.tee_hello_request(&node_a);
// A says hello back.
node_a.tee_hello_response(&node_b);
// B requests block roots from A.
node_b.tee_block_root_request(&node_a);
// A provides block roots to B.
node_a.tee_block_root_response(&node_b);
// B requests block headers from A.
node_b.tee_block_header_request(&node_a);
// A provides block headers to B.
node_a.tee_block_header_response(&node_b);
// B requests block bodies from A.
node_b.tee_block_body_request(&node_a);
// A provides block bodies to B.
node_a.tee_block_body_response(&node_b);
std::thread::sleep(Duration::from_secs(20));
node_b.harness.run_fork_choice();
let node_a_chain = node_a
.harness
.beacon_chain
.chain_dump()
.expect("Can't dump node a chain");
let node_b_chain = node_b
.harness
.beacon_chain
.chain_dump()
.expect("Can't dump node b chain");
assert_eq!(
node_a_chain.len(),
node_b_chain.len(),
"Chains should be equal length"
);
assert_eq!(node_a_chain, node_b_chain, "Chains should be identical");
runtime.shutdown_now();
}
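The two tests above exercise the same request/response ladder: hello, then block roots, then block headers, then block bodies. As a minimal illustration of that ordering only (not part of the sync implementation), `SyncPhase` and `next_phase` below are names invented here.
#[derive(Debug, Clone, Copy, PartialEq)]
enum SyncPhase {
    Hello,
    BlockRoots,
    BlockHeaders,
    BlockBodies,
    Done,
}
// Each response unlocks the next request, mirroring the tee_* calls above.
fn next_phase(phase: SyncPhase) -> SyncPhase {
    match phase {
        SyncPhase::Hello => SyncPhase::BlockRoots,
        SyncPhase::BlockRoots => SyncPhase::BlockHeaders,
        SyncPhase::BlockHeaders => SyncPhase::BlockBodies,
        SyncPhase::BlockBodies | SyncPhase::Done => SyncPhase::Done,
    }
}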

View File

@ -7,7 +7,12 @@ edition = "2018"
[dependencies] [dependencies]
bls = { path = "../../eth2/utils/bls" } bls = { path = "../../eth2/utils/bls" }
beacon_chain = { path = "../beacon_chain" } beacon_chain = { path = "../beacon_chain" }
network = { path = "../network" }
eth2-libp2p = { path = "../eth2-libp2p" }
version = { path = "../version" }
types = { path = "../../eth2/types" }
ssz = { path = "../../eth2/utils/ssz" }
slot_clock = { path = "../../eth2/utils/slot_clock" }
protos = { path = "../../protos" } protos = { path = "../../protos" }
grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] } grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] }
protobuf = "2.0.2" protobuf = "2.0.2"
@ -16,8 +21,8 @@ db = { path = "../db" }
dirs = "1.0.3" dirs = "1.0.3"
futures = "0.1.23" futures = "0.1.23"
slog = "^2.2.3" slog = "^2.2.3"
slot_clock = { path = "../../eth2/utils/slot_clock" }
slog-term = "^2.4.0" slog-term = "^2.4.0"
slog-async = "^2.3.0" slog-async = "^2.3.0"
types = { path = "../../eth2/types" } tokio = "0.1.17"
ssz = { path = "../../eth2/utils/ssz" } exit-future = "0.1.4"
crossbeam-channel = "0.3.8"

View File

@ -0,0 +1,157 @@
use crate::beacon_chain::BeaconChain;
use futures::Future;
use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink};
use protos::services::{
AttestationData as AttestationDataProto, ProduceAttestationDataRequest,
ProduceAttestationDataResponse, PublishAttestationRequest, PublishAttestationResponse,
};
use protos::services_grpc::AttestationService;
use slog::{error, info, trace, warn};
use ssz::{ssz_encode, Decodable};
use std::sync::Arc;
use types::Attestation;
#[derive(Clone)]
pub struct AttestationServiceInstance {
pub chain: Arc<BeaconChain>,
pub log: slog::Logger,
}
impl AttestationService for AttestationServiceInstance {
/// Produce the `AttestationData` for signing by a validator.
fn produce_attestation_data(
&mut self,
ctx: RpcContext,
req: ProduceAttestationDataRequest,
sink: UnarySink<ProduceAttestationDataResponse>,
) {
trace!(
&self.log,
"Attempting to produce attestation at slot {}",
req.get_slot()
);
// verify the slot, drop lock on state afterwards
{
let slot_requested = req.get_slot();
let state = self.chain.get_state();
// Start by performing some checks
// Check that the AttestationData is for the current slot (otherwise it will not be valid)
if slot_requested > state.slot.as_u64() {
let log_clone = self.log.clone();
let f = sink
.fail(RpcStatus::new(
RpcStatusCode::OutOfRange,
Some(
"AttestationData request for a slot that is in the future.".to_string(),
),
))
.map_err(move |e| {
error!(log_clone, "Failed to reply with failure {:?}: {:?}", req, e)
});
return ctx.spawn(f);
}
// Currently we cannot handle past slots. TODO: handle this case
else if slot_requested < state.slot.as_u64() {
let log_clone = self.log.clone();
let f = sink
.fail(RpcStatus::new(
RpcStatusCode::InvalidArgument,
Some("AttestationData request for a slot that is in the past.".to_string()),
))
.map_err(move |e| {
error!(log_clone, "Failed to reply with failure {:?}: {:?}", req, e)
});
return ctx.spawn(f);
}
}
// Then get the AttestationData from the beacon chain
let shard = req.get_shard();
let attestation_data = match self.chain.produce_attestation_data(shard) {
Ok(v) => v,
Err(e) => {
// Could not produce an attestation
let log_clone = self.log.clone();
let f = sink
.fail(RpcStatus::new(
RpcStatusCode::Unknown,
Some(format!("Could not produce an attestation: {:?}", e)),
))
.map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e));
return ctx.spawn(f);
}
};
let mut attestation_data_proto = AttestationDataProto::new();
attestation_data_proto.set_ssz(ssz_encode(&attestation_data));
let mut resp = ProduceAttestationDataResponse::new();
resp.set_attestation_data(attestation_data_proto);
let error_log = self.log.clone();
let f = sink
.success(resp)
.map_err(move |e| error!(error_log, "Failed to reply with success {:?}: {:?}", req, e));
ctx.spawn(f)
}
/// Accept some fully-formed `FreeAttestation` from the validator,
/// store it, and aggregate it into an `Attestation`.
fn publish_attestation(
&mut self,
ctx: RpcContext,
req: PublishAttestationRequest,
sink: UnarySink<PublishAttestationResponse>,
) {
trace!(self.log, "Publishing attestation");
let mut resp = PublishAttestationResponse::new();
let ssz_serialized_attestation = req.get_attestation().get_ssz();
let attestation = match Attestation::ssz_decode(ssz_serialized_attestation, 0) {
Ok((v, _index)) => v,
Err(_) => {
let log_clone = self.log.clone();
let f = sink
.fail(RpcStatus::new(
RpcStatusCode::InvalidArgument,
Some("Invalid attestation".to_string()),
))
.map_err(move |_| warn!(log_clone, "failed to reply {:?}", req));
return ctx.spawn(f);
}
};
match self.chain.process_attestation(attestation) {
Ok(_) => {
// Attestation was successfully processed.
info!(
self.log,
"PublishAttestation";
"type" => "valid_attestation",
);
resp.set_success(true);
}
Err(e) => {
// Attestation was invalid
warn!(
self.log,
"PublishAttestation";
"type" => "invalid_attestation",
"error" => format!("{:?}", e),
);
resp.set_success(false);
resp.set_msg(format!("InvalidAttestation: {:?}", e).as_bytes().to_vec());
}
};
let error_log = self.log.clone();
let f = sink
.success(resp)
.map_err(move |e| error!(error_log, "failed to reply {:?}: {:?}", req, e));
ctx.spawn(f)
}
}
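Both handlers above repeat the same "fail the sink, log, spawn" pattern. As a minimal sketch only, a helper along these lines could factor it out; `failure_reply` is a name invented here, and it assumes the grpcio and slog imports already present at the top of this file.
// Hypothetical helper: build the "reply with failure" future that the handlers
// above construct inline. The caller passes the result to `ctx.spawn(...)`,
// exactly as the handlers already do.
fn failure_reply<T>(
    sink: UnarySink<T>,
    log: slog::Logger,
    code: RpcStatusCode,
    msg: String,
) -> impl Future<Item = (), Error = ()> {
    sink.fail(RpcStatus::new(code, Some(msg)))
        .map_err(move |e| error!(log, "Failed to reply with failure: {:?}", e))
}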

View File

@ -1,14 +1,24 @@
use crate::beacon_chain::BeaconChain;
use crossbeam_channel;
use eth2_libp2p::PubsubMessage;
use futures::Future; use futures::Future;
use grpcio::{RpcContext, UnarySink}; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink};
use network::NetworkMessage;
use protos::services::{ use protos::services::{
BeaconBlock as BeaconBlockProto, ProduceBeaconBlockRequest, ProduceBeaconBlockResponse, BeaconBlock as BeaconBlockProto, ProduceBeaconBlockRequest, ProduceBeaconBlockResponse,
PublishBeaconBlockRequest, PublishBeaconBlockResponse, PublishBeaconBlockRequest, PublishBeaconBlockResponse,
}; };
use protos::services_grpc::BeaconBlockService; use protos::services_grpc::BeaconBlockService;
use slog::Logger; use slog::Logger;
use slog::{error, info, trace, warn};
use ssz::{ssz_encode, Decodable};
use std::sync::Arc;
use types::{BeaconBlock, Signature, Slot};
#[derive(Clone)] #[derive(Clone)]
pub struct BeaconBlockServiceInstance { pub struct BeaconBlockServiceInstance {
pub chain: Arc<BeaconChain>,
pub network_chan: crossbeam_channel::Sender<NetworkMessage>,
pub log: Logger, pub log: Logger,
} }
@ -20,12 +30,44 @@ impl BeaconBlockService for BeaconBlockServiceInstance {
req: ProduceBeaconBlockRequest, req: ProduceBeaconBlockRequest,
sink: UnarySink<ProduceBeaconBlockResponse>, sink: UnarySink<ProduceBeaconBlockResponse>,
) { ) {
println!("producing at slot {}", req.get_slot()); trace!(self.log, "Generating a beacon block"; "req" => format!("{:?}", req));
// decode the request
// TODO: requested slot currently unused, see: https://github.com/sigp/lighthouse/issues/336
let _requested_slot = Slot::from(req.get_slot());
let randao_reveal = match Signature::ssz_decode(req.get_randao_reveal(), 0) {
Ok((reveal, _index)) => reveal,
Err(_) => {
// decode error, incorrect signature
let log_clone = self.log.clone();
let f = sink
.fail(RpcStatus::new(
RpcStatusCode::InvalidArgument,
Some("Invalid randao reveal signature".to_string()),
))
.map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e));
return ctx.spawn(f);
}
};
let produced_block = match self.chain.produce_block(randao_reveal) {
Ok((block, _state)) => block,
Err(e) => {
// could not produce a block
let log_clone = self.log.clone();
warn!(self.log, "RPC Error"; "Error" => format!("Could not produce a block:{:?}",e));
let f = sink
.fail(RpcStatus::new(
RpcStatusCode::Unknown,
Some(format!("Could not produce a block: {:?}", e)),
))
.map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e));
return ctx.spawn(f);
}
};
// TODO: build a legit block.
let mut block = BeaconBlockProto::new(); let mut block = BeaconBlockProto::new();
block.set_slot(req.get_slot()); block.set_ssz(ssz_encode(&produced_block));
block.set_block_root(b"cats".to_vec());
let mut resp = ProduceBeaconBlockResponse::new(); let mut resp = ProduceBeaconBlockResponse::new();
resp.set_block(block); resp.set_block(block);
@ -43,11 +85,94 @@ impl BeaconBlockService for BeaconBlockServiceInstance {
req: PublishBeaconBlockRequest, req: PublishBeaconBlockRequest,
sink: UnarySink<PublishBeaconBlockResponse>, sink: UnarySink<PublishBeaconBlockResponse>,
) { ) {
println!("publishing {:?}", req.get_block()); trace!(&self.log, "Attempting to publish a block");
// TODO: actually process the block.
let mut resp = PublishBeaconBlockResponse::new(); let mut resp = PublishBeaconBlockResponse::new();
resp.set_success(true);
let ssz_serialized_block = req.get_block().get_ssz();
match BeaconBlock::ssz_decode(ssz_serialized_block, 0) {
Ok((block, _i)) => {
match self.chain.process_block(block.clone()) {
Ok(outcome) => {
if outcome.sucessfully_processed() {
// Block was successfully processed.
info!(
self.log,
"PublishBeaconBlock";
"type" => "valid_block",
"block_slot" => block.slot,
"outcome" => format!("{:?}", outcome)
);
// TODO: Obtain topics from the network service properly.
let topic =
types::TopicBuilder::new("beacon_chain".to_string()).build();
let message = PubsubMessage::Block(block);
// Publish the block to the p2p network via gossipsub.
self.network_chan
.send(NetworkMessage::Publish {
topics: vec![topic],
message: Box::new(message),
})
.unwrap_or_else(|e| {
error!(
self.log,
"PublishBeaconBlock";
"type" => "failed to publish to gossipsub",
"error" => format!("{:?}", e)
);
});
resp.set_success(true);
} else if outcome.is_invalid() {
// Block was invalid.
warn!(
self.log,
"PublishBeaconBlock";
"type" => "invalid_block",
"outcome" => format!("{:?}", outcome)
);
resp.set_success(false);
resp.set_msg(
format!("InvalidBlock: {:?}", outcome).as_bytes().to_vec(),
);
} else {
// Some failure during processing.
warn!(
self.log,
"PublishBeaconBlock";
"type" => "unable_to_import",
"outcome" => format!("{:?}", outcome)
);
resp.set_success(false);
resp.set_msg(format!("other: {:?}", outcome).as_bytes().to_vec());
}
}
Err(e) => {
// Some failure during processing.
error!(
self.log,
"PublishBeaconBlock";
"type" => "failed_to_process",
"error" => format!("{:?}", e)
);
resp.set_success(false);
resp.set_msg(format!("failed_to_process: {:?}", e).as_bytes().to_vec());
}
}
resp.set_success(true);
}
Err(_) => {
resp.set_success(false);
resp.set_msg(b"Invalid SSZ".to_vec());
}
};
let f = sink let f = sink
.success(resp) .success(resp)
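On the success path above, the decoded block is imported via `process_block` and then re-broadcast on gossipsub through the network channel. The sketch below isolates only that forwarding step, reusing the items imported at the top of this file (`NetworkMessage`, `PubsubMessage`, `types::TopicBuilder`); `forward_block_to_gossipsub` is a name invented here.
// Hypothetical helper isolating the gossipsub forwarding step shown above.
fn forward_block_to_gossipsub(
    network_chan: &crossbeam_channel::Sender<NetworkMessage>,
    block: BeaconBlock,
    log: &slog::Logger,
) {
    // TODO (as noted above): obtain topics from the network service properly.
    let topic = types::TopicBuilder::new("beacon_chain".to_string()).build();
    let message = PubsubMessage::Block(block);
    network_chan
        .send(NetworkMessage::Publish {
            topics: vec![topic],
            message: Box::new(message),
        })
        .unwrap_or_else(|e| error!(log, "Failed to publish block to gossipsub: {:?}", e));
}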

View File

@ -0,0 +1,79 @@
use beacon_chain::BeaconChain as RawBeaconChain;
use beacon_chain::{
db::ClientDB,
fork_choice::ForkChoice,
parking_lot::{RwLockReadGuard, RwLockWriteGuard},
slot_clock::SlotClock,
types::{BeaconState, ChainSpec, Signature},
AttestationValidationError, BlockProductionError,
};
pub use beacon_chain::{BeaconChainError, BlockProcessingOutcome};
use types::{Attestation, AttestationData, BeaconBlock};
/// The RPC's API to the beacon chain.
pub trait BeaconChain: Send + Sync {
fn get_spec(&self) -> &ChainSpec;
fn get_state(&self) -> RwLockReadGuard<BeaconState>;
fn get_mut_state(&self) -> RwLockWriteGuard<BeaconState>;
fn process_block(&self, block: BeaconBlock)
-> Result<BlockProcessingOutcome, BeaconChainError>;
fn produce_block(
&self,
randao_reveal: Signature,
) -> Result<(BeaconBlock, BeaconState), BlockProductionError>;
fn produce_attestation_data(&self, shard: u64) -> Result<AttestationData, BeaconChainError>;
fn process_attestation(
&self,
attestation: Attestation,
) -> Result<(), AttestationValidationError>;
}
impl<T, U, F> BeaconChain for RawBeaconChain<T, U, F>
where
T: ClientDB + Sized,
U: SlotClock,
F: ForkChoice,
{
fn get_spec(&self) -> &ChainSpec {
&self.spec
}
fn get_state(&self) -> RwLockReadGuard<BeaconState> {
self.state.read()
}
fn get_mut_state(&self) -> RwLockWriteGuard<BeaconState> {
self.state.write()
}
fn process_block(
&self,
block: BeaconBlock,
) -> Result<BlockProcessingOutcome, BeaconChainError> {
self.process_block(block)
}
fn produce_block(
&self,
randao_reveal: Signature,
) -> Result<(BeaconBlock, BeaconState), BlockProductionError> {
self.produce_block(randao_reveal)
}
fn produce_attestation_data(&self, shard: u64) -> Result<AttestationData, BeaconChainError> {
self.produce_attestation_data(shard)
}
fn process_attestation(
&self,
attestation: Attestation,
) -> Result<(), AttestationValidationError> {
self.process_attestation(attestation)
}
}
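This trait exists so the RPC services can share one `Arc<BeaconChain>` trait object rather than being generic over the database, slot clock and fork-choice parameters of `RawBeaconChain`. A minimal sketch of a consumer, assuming it lives in a module where the trait above is in scope; `log_chain_info` is a name invented here.
use slog::info;
use std::sync::Arc;
// Hypothetical consumer: every RPC service instance stores an `Arc<BeaconChain>`
// like this and reads chain data only through the trait methods above.
fn log_chain_info(chain: &Arc<BeaconChain>, log: &slog::Logger) {
    let spec = chain.get_spec();
    let state = chain.get_state(); // read lock, released at the end of this scope
    info!(
        log,
        "Chain info";
        "chain_id" => spec.chain_id,
        "state_slot" => state.slot.as_u64()
    );
}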

View File

@ -0,0 +1,47 @@
use crate::beacon_chain::BeaconChain;
use futures::Future;
use grpcio::{RpcContext, UnarySink};
use protos::services::{Empty, Fork, NodeInfoResponse};
use protos::services_grpc::BeaconNodeService;
use slog::{trace, warn};
use std::sync::Arc;
#[derive(Clone)]
pub struct BeaconNodeServiceInstance {
pub chain: Arc<BeaconChain>,
pub log: slog::Logger,
}
impl BeaconNodeService for BeaconNodeServiceInstance {
/// Provides basic node information.
fn info(&mut self, ctx: RpcContext, _req: Empty, sink: UnarySink<NodeInfoResponse>) {
trace!(self.log, "Node info requested via RPC");
// build the response
let mut node_info = NodeInfoResponse::new();
node_info.set_version(version::version());
// get the chain state
let state = self.chain.get_state();
let state_fork = state.fork.clone();
let genesis_time = state.genesis_time;
// build the rpc fork struct
let mut fork = Fork::new();
fork.set_previous_version(state_fork.previous_version.to_vec());
fork.set_current_version(state_fork.current_version.to_vec());
fork.set_epoch(state_fork.epoch.into());
node_info.set_fork(fork);
node_info.set_genesis_time(genesis_time);
node_info.set_genesis_slot(self.chain.get_spec().genesis_slot.as_u64());
node_info.set_chain_id(u32::from(self.chain.get_spec().chain_id));
// send the node_info to the requester
let error_log = self.log.clone();
let f = sink
.success(node_info)
.map_err(move |e| warn!(error_log, "failed to reply {:?}", e));
ctx.spawn(f)
}
}
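For context, a sketch of what a caller of this endpoint might look like. It assumes the grpcio-generated client follows the usual naming (`BeaconNodeServiceClient` with an `info` method, produced from the same proto as `create_beacon_node_service`); those names are assumptions, not confirmed by this diff.
use grpcio::{ChannelBuilder, EnvBuilder};
use protos::services::Empty;
use protos::services_grpc::BeaconNodeServiceClient;
use std::sync::Arc;
// Hypothetical client-side call: connect, request node info, print a couple of fields.
fn print_node_info(addr: &str) -> Result<(), grpcio::Error> {
    let env = Arc::new(EnvBuilder::new().build());
    let channel = ChannelBuilder::new(env).connect(addr);
    let client = BeaconNodeServiceClient::new(channel);
    let info = client.info(&Empty::new())?;
    println!(
        "version: {}, genesis_slot: {}",
        info.get_version(),
        info.get_genesis_slot()
    );
    Ok(())
}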

View File

@ -1,38 +1,96 @@
mod attestation;
mod beacon_block; mod beacon_block;
pub mod beacon_chain;
mod beacon_node;
pub mod config; pub mod config;
mod validator; mod validator;
use self::attestation::AttestationServiceInstance;
use self::beacon_block::BeaconBlockServiceInstance; use self::beacon_block::BeaconBlockServiceInstance;
use self::beacon_chain::BeaconChain;
use self::beacon_node::BeaconNodeServiceInstance;
use self::validator::ValidatorServiceInstance; use self::validator::ValidatorServiceInstance;
pub use config::Config as RPCConfig; pub use config::Config as RPCConfig;
use grpcio::{Environment, Server, ServerBuilder}; use futures::Future;
use protos::services_grpc::{create_beacon_block_service, create_validator_service}; use grpcio::{Environment, ServerBuilder};
use network::NetworkMessage;
use protos::services_grpc::{
create_attestation_service, create_beacon_block_service, create_beacon_node_service,
create_validator_service,
};
use slog::{info, o, warn};
use std::sync::Arc; use std::sync::Arc;
use tokio::runtime::TaskExecutor;
use slog::{info, o}; pub fn start_server(
config: &RPCConfig,
pub fn start_server(config: &RPCConfig, log: &slog::Logger) -> Server { executor: &TaskExecutor,
network_chan: crossbeam_channel::Sender<NetworkMessage>,
beacon_chain: Arc<BeaconChain>,
log: &slog::Logger,
) -> exit_future::Signal {
let log = log.new(o!("Service"=>"RPC")); let log = log.new(o!("Service"=>"RPC"));
let env = Arc::new(Environment::new(1)); let env = Arc::new(Environment::new(1));
// build a channel to kill the rpc server
let (rpc_exit_signal, rpc_exit) = exit_future::signal();
// build the individual rpc services
let beacon_node_service = {
let instance = BeaconNodeServiceInstance {
chain: beacon_chain.clone(),
log: log.clone(),
};
create_beacon_node_service(instance)
};
let beacon_block_service = { let beacon_block_service = {
let instance = BeaconBlockServiceInstance { log: log.clone() }; let instance = BeaconBlockServiceInstance {
chain: beacon_chain.clone(),
network_chan,
log: log.clone(),
};
create_beacon_block_service(instance) create_beacon_block_service(instance)
}; };
let validator_service = { let validator_service = {
let instance = ValidatorServiceInstance { log: log.clone() }; let instance = ValidatorServiceInstance {
chain: beacon_chain.clone(),
log: log.clone(),
};
create_validator_service(instance) create_validator_service(instance)
}; };
let attestation_service = {
let instance = AttestationServiceInstance {
chain: beacon_chain.clone(),
log: log.clone(),
};
create_attestation_service(instance)
};
let mut server = ServerBuilder::new(env) let mut server = ServerBuilder::new(env)
.register_service(beacon_block_service) .register_service(beacon_block_service)
.register_service(validator_service) .register_service(validator_service)
.register_service(beacon_node_service)
.register_service(attestation_service)
.bind(config.listen_address.to_string(), config.port) .bind(config.listen_address.to_string(), config.port)
.build() .build()
.unwrap(); .unwrap();
server.start();
for &(ref host, port) in server.bind_addrs() { let spawn_rpc = {
info!(log, "gRPC listening on {}:{}", host, port); server.start();
} for &(ref host, port) in server.bind_addrs() {
server info!(log, "gRPC listening on {}:{}", host, port);
}
rpc_exit.and_then(move |_| {
info!(log, "RPC Server shutting down");
server
.shutdown()
.wait()
.map(|_| ())
.map_err(|e| warn!(log, "RPC server failed to shutdown: {:?}", e))?;
Ok(())
})
};
executor.spawn(spawn_rpc);
rpc_exit_signal
} }
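A sketch of the call site implied by the new signature, with invented variable names, showing only which argument is which. The returned `exit_future::Signal` is the handle that resolves the `rpc_exit` future above, so the caller should keep it for as long as the server is meant to run.
// Hypothetical wiring (names invented here), mirroring the signature above and
// assuming the same imports as this file.
fn spawn_rpc_server(
    config: &RPCConfig,
    executor: &TaskExecutor,                                 // spawns the shutdown watcher
    network_chan: crossbeam_channel::Sender<NetworkMessage>, // lets RPC handlers publish to gossipsub
    beacon_chain: Arc<BeaconChain>,                          // shared chain handle for all services
    log: &slog::Logger,
) -> exit_future::Signal {
    start_server(config, executor, network_chan, beacon_chain, log)
}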

View File

@ -1,60 +1,166 @@
use crate::beacon_chain::BeaconChain;
use bls::PublicKey; use bls::PublicKey;
use futures::Future; use futures::Future;
use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink};
use protos::services::{ use protos::services::{ActiveValidator, GetDutiesRequest, GetDutiesResponse, ValidatorDuty};
IndexResponse, ProposeBlockSlotRequest, ProposeBlockSlotResponse, PublicKey as PublicKeyRequest,
};
use protos::services_grpc::ValidatorService; use protos::services_grpc::ValidatorService;
use slog::{debug, Logger}; use slog::{trace, warn};
use ssz::decode; use ssz::decode;
use std::sync::Arc;
use types::{Epoch, RelativeEpoch};
#[derive(Clone)] #[derive(Clone)]
pub struct ValidatorServiceInstance { pub struct ValidatorServiceInstance {
pub log: Logger, pub chain: Arc<BeaconChain>,
pub log: slog::Logger,
} }
//TODO: Refactor Errors
impl ValidatorService for ValidatorServiceInstance { impl ValidatorService for ValidatorServiceInstance {
fn validator_index( /// For a list of validator public keys, this function returns the slot at which each
/// validator must propose a block, attest to a shard, their shard committee and the shard they
/// need to attest to.
fn get_validator_duties(
&mut self, &mut self,
ctx: RpcContext, ctx: RpcContext,
req: PublicKeyRequest, req: GetDutiesRequest,
sink: UnarySink<IndexResponse>, sink: UnarySink<GetDutiesResponse>,
) { ) {
if let Ok(public_key) = decode::<PublicKey>(req.get_public_key()) { let validators = req.get_validators();
debug!(self.log, "RPC request"; "endpoint" => "ValidatorIndex", "public_key" => public_key.concatenated_hex_id()); trace!(self.log, "RPC request"; "endpoint" => "GetValidatorDuties", "epoch" => req.get_epoch());
let mut resp = IndexResponse::new(); let spec = self.chain.get_spec();
let state = self.chain.get_state();
let epoch = Epoch::from(req.get_epoch());
let mut resp = GetDutiesResponse::new();
let resp_validators = resp.mut_active_validators();
// TODO: return a legit value. let relative_epoch =
resp.set_index(1); match RelativeEpoch::from_epoch(state.slot.epoch(spec.slots_per_epoch), epoch) {
Ok(v) => v,
Err(e) => {
// incorrect epoch
let log_clone = self.log.clone();
let f = sink
.fail(RpcStatus::new(
RpcStatusCode::FailedPrecondition,
Some(format!("Invalid epoch: {:?}", e)),
))
.map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e));
return ctx.spawn(f);
}
};
let f = sink let validator_proposers: Result<Vec<usize>, _> = epoch
.success(resp) .slot_iter(spec.slots_per_epoch)
.map_err(move |e| println!("failed to reply {:?}: {:?}", req, e)); .map(|slot| state.get_beacon_proposer_index(slot, relative_epoch, &spec))
ctx.spawn(f) .collect();
} else { let validator_proposers = match validator_proposers {
let f = sink Ok(v) => v,
.fail(RpcStatus::new( Err(e) => {
RpcStatusCode::InvalidArgument, // could not get the validator proposer index
Some("Invalid public_key".to_string()), let log_clone = self.log.clone();
)) let f = sink
.map_err(move |e| println!("failed to reply {:?}: {:?}", req, e)); .fail(RpcStatus::new(
ctx.spawn(f) RpcStatusCode::FailedPrecondition,
Some(format!("Could not find beacon proposers: {:?}", e)),
))
.map_err(move |e| warn!(log_clone, "failed to reply {:?} : {:?}", req, e));
return ctx.spawn(f);
}
};
// get the duties for each validator
for validator_pk in validators.get_public_keys() {
let mut active_validator = ActiveValidator::new();
let public_key = match decode::<PublicKey>(validator_pk) {
Ok(v) => v,
Err(_) => {
let log_clone = self.log.clone();
let f = sink
.fail(RpcStatus::new(
RpcStatusCode::InvalidArgument,
Some("Invalid public_key".to_string()),
))
.map_err(move |_| warn!(log_clone, "failed to reply {:?}", req));
return ctx.spawn(f);
}
};
// get the validator index
let val_index = match state.get_validator_index(&public_key) {
Ok(Some(index)) => index,
Ok(None) => {
// index not present in registry, set the duties for this key to None
warn!(
self.log,
"RPC requested a public key that is not in the registry: {:?}", public_key
);
active_validator.set_none(false);
resp_validators.push(active_validator);
continue;
}
// the cache is not built, throw an error
Err(e) => {
let log_clone = self.log.clone();
let f = sink
.fail(RpcStatus::new(
RpcStatusCode::FailedPrecondition,
Some(format!("Beacon state error {:?}", e)),
))
.map_err(move |e| warn!(log_clone, "Failed to reply {:?}: {:?}", req, e));
return ctx.spawn(f);
}
};
// get attestation duties and check if validator is active
let attestation_duties = match state.get_attestation_duties(val_index, &spec) {
Ok(Some(v)) => v,
Ok(_) => {
// validator is inactive, go to the next validator
warn!(
self.log,
"RPC requested an inactive validator key: {:?}", public_key
);
active_validator.set_none(false);
resp_validators.push(active_validator);
continue;
}
// the cache is not built, throw an error
Err(e) => {
let log_clone = self.log.clone();
let f = sink
.fail(RpcStatus::new(
RpcStatusCode::FailedPrecondition,
Some(format!("Beacon state error {:?}", e)),
))
.map_err(move |e| warn!(log_clone, "Failed to reply {:?}: {:?}", req, e));
return ctx.spawn(f);
}
};
// we have an active validator, set its duties
let mut duty = ValidatorDuty::new();
// check if the validator needs to propose a block
if let Some(slot) = validator_proposers.iter().position(|&v| val_index == v) {
duty.set_block_production_slot(
epoch.start_slot(spec.slots_per_epoch).as_u64() + slot as u64,
);
} else {
// no blocks to propose this epoch
duty.set_none(false)
}
duty.set_committee_index(attestation_duties.committee_index as u64);
duty.set_attestation_slot(attestation_duties.slot.as_u64());
duty.set_attestation_shard(attestation_duties.shard);
duty.set_committee_len(attestation_duties.committee_len as u64);
active_validator.set_duty(duty);
resp_validators.push(active_validator);
} }
}
fn propose_block_slot(
&mut self,
ctx: RpcContext,
req: ProposeBlockSlotRequest,
sink: UnarySink<ProposeBlockSlotResponse>,
) {
debug!(self.log, "RPC request"; "endpoint" => "ProposeBlockSlot", "epoch" => req.get_epoch(), "validator_index" => req.get_validator_index());
let mut resp = ProposeBlockSlotResponse::new();
// TODO: return a legit value.
resp.set_slot(1);
let f = sink let f = sink
.success(resp) .success(resp)
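The proposer lookup above builds one proposer index per slot of the requested epoch, so `position()` gives an offset into the epoch and adding it to the epoch's start slot yields the absolute proposal slot. A small self-contained illustration with toy numbers (not taken from this diff):
// Illustrative only: maps a validator index to its absolute proposal slot, given the
// per-slot proposer list built via `epoch.slot_iter(...)` as above.
fn proposal_slot(validator_proposers: &[usize], epoch_start_slot: u64, val_index: usize) -> Option<u64> {
    validator_proposers
        .iter()
        .position(|&v| v == val_index)
        .map(|offset| epoch_start_slot + offset as u64)
}
#[test]
fn proposal_slot_example() {
    // One proposer index per slot of an 8-slot epoch starting at slot 64.
    let proposers = vec![3, 7, 1, 0, 5, 2, 6, 4];
    assert_eq!(proposal_slot(&proposers, 64, 5), Some(68)); // validator 5 proposes at slot 68
    assert_eq!(proposal_slot(&proposers, 64, 9), None); // validator 9 proposes no block this epoch
}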

View File

@ -16,6 +16,7 @@ fn main() {
.version(version::version().as_str()) .version(version::version().as_str())
.author("Sigma Prime <contact@sigmaprime.io>") .author("Sigma Prime <contact@sigmaprime.io>")
.about("Eth 2.0 Client") .about("Eth 2.0 Client")
// file system related arguments
.arg( .arg(
Arg::with_name("datadir") Arg::with_name("datadir")
.long("datadir") .long("datadir")
@ -23,8 +24,9 @@ fn main() {
.help("Data directory for keys and databases.") .help("Data directory for keys and databases.")
.takes_value(true), .takes_value(true),
) )
// network related arguments
.arg( .arg(
Arg::with_name("listen_address") Arg::with_name("listen-address")
.long("listen-address") .long("listen-address")
.value_name("Listen Address") .value_name("Listen Address")
.help("The Network address to listen for p2p connections.") .help("The Network address to listen for p2p connections.")
@ -37,6 +39,14 @@ fn main() {
.help("Network listen port for p2p connections.") .help("Network listen port for p2p connections.")
.takes_value(true), .takes_value(true),
) )
.arg(
Arg::with_name("boot-nodes")
.long("boot-nodes")
.value_name("BOOTNODES")
.help("A list of comma separated multi addresses representing bootnodes to connect to.")
.takes_value(true),
)
// rpc related arguments
.arg( .arg(
Arg::with_name("rpc") Arg::with_name("rpc")
.long("rpc") .long("rpc")

View File

@ -6,10 +6,12 @@ use futures::Future;
use slog::info; use slog::info;
use std::cell::RefCell; use std::cell::RefCell;
use tokio::runtime::Builder; use tokio::runtime::Builder;
use tokio_timer::clock::Clock;
pub fn run_beacon_node(config: ClientConfig, log: &slog::Logger) -> error::Result<()> { pub fn run_beacon_node(config: ClientConfig, log: &slog::Logger) -> error::Result<()> {
let mut runtime = Builder::new() let mut runtime = Builder::new()
.name_prefix("main-") .name_prefix("main-")
.clock(Clock::system())
.build() .build()
.map_err(|e| format!("{:?}", e))?; .map_err(|e| format!("{:?}", e))?;

View File

@ -119,8 +119,7 @@ impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> Attester<T, U, V,
validator_index, validator_index,
}; };
self.beacon_node self.beacon_node.publish_attestation(free_attestation)?;
.publish_attestation_data(free_attestation)?;
Ok(PollOutcome::AttestationProduced(slot)) Ok(PollOutcome::AttestationProduced(slot))
} }

View File

@ -34,7 +34,7 @@ impl BeaconNode for SimulatedBeaconNode {
} }
} }
fn publish_attestation_data(&self, free_attestation: FreeAttestation) -> PublishResult { fn publish_attestation(&self, free_attestation: FreeAttestation) -> PublishResult {
*self.publish_input.write().unwrap() = Some(free_attestation.clone()); *self.publish_input.write().unwrap() = Some(free_attestation.clone());
match *self.publish_result.read().unwrap() { match *self.publish_result.read().unwrap() {
Some(ref r) => r.clone(), Some(ref r) => r.clone(),

View File

@ -20,7 +20,7 @@ pub trait BeaconNode: Send + Sync {
shard: u64, shard: u64,
) -> Result<Option<AttestationData>, BeaconNodeError>; ) -> Result<Option<AttestationData>, BeaconNodeError>;
fn publish_attestation_data( fn publish_attestation(
&self, &self,
free_attestation: FreeAttestation, free_attestation: FreeAttestation,
) -> Result<PublishOutcome, BeaconNodeError>; ) -> Result<PublishOutcome, BeaconNodeError>;

View File

@ -0,0 +1,13 @@
[package]
name = "operation_pool"
version = "0.1.0"
authors = ["Michael Sproul <michael@sigmaprime.io>"]
edition = "2018"
[dependencies]
int_to_bytes = { path = "../utils/int_to_bytes" }
itertools = "0.8"
parking_lot = "0.7"
types = { path = "../types" }
state_processing = { path = "../state_processing" }
ssz = { path = "../utils/ssz" }

View File

@ -0,0 +1,987 @@
use int_to_bytes::int_to_bytes8;
use itertools::Itertools;
use parking_lot::RwLock;
use ssz::ssz_encode;
use state_processing::per_block_processing::errors::{
AttestationValidationError, AttesterSlashingValidationError, DepositValidationError,
ExitValidationError, ProposerSlashingValidationError, TransferValidationError,
};
use state_processing::per_block_processing::{
gather_attester_slashing_indices_modular, validate_attestation,
validate_attestation_time_independent_only, verify_attester_slashing, verify_deposit,
verify_exit, verify_exit_time_independent_only, verify_proposer_slashing, verify_transfer,
verify_transfer_time_independent_only,
};
use std::collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet};
use types::chain_spec::Domain;
use types::{
Attestation, AttestationData, AttesterSlashing, BeaconState, ChainSpec, Deposit, Epoch,
ProposerSlashing, Transfer, Validator, VoluntaryExit,
};
#[cfg(test)]
const VERIFY_DEPOSIT_PROOFS: bool = false;
#[cfg(not(test))]
const VERIFY_DEPOSIT_PROOFS: bool = false; // TODO: enable this
#[derive(Default)]
pub struct OperationPool {
/// Map from attestation ID (see below) to vectors of attestations.
attestations: RwLock<HashMap<AttestationId, Vec<Attestation>>>,
/// Map from deposit index to deposit data.
// NOTE: We assume that there is only one deposit per index
// because the Eth1 data is updated (at most) once per epoch,
// and the spec doesn't seem to accommodate re-orgs on a time-frame
// longer than an epoch.
deposits: RwLock<BTreeMap<u64, Deposit>>,
/// Map from two attestation IDs to a slashing for those IDs.
attester_slashings: RwLock<HashMap<(AttestationId, AttestationId), AttesterSlashing>>,
/// Map from proposer index to slashing.
proposer_slashings: RwLock<HashMap<u64, ProposerSlashing>>,
/// Map from exiting validator to their exit data.
voluntary_exits: RwLock<HashMap<u64, VoluntaryExit>>,
/// Set of transfers.
transfers: RwLock<HashSet<Transfer>>,
}
/// Serialized `AttestationData` augmented with a domain to encode the fork info.
#[derive(PartialEq, Eq, Clone, Hash, Debug)]
struct AttestationId(Vec<u8>);
/// Number of domain bytes that the end of an attestation ID is padded with.
const DOMAIN_BYTES_LEN: usize = 8;
impl AttestationId {
fn from_data(attestation: &AttestationData, state: &BeaconState, spec: &ChainSpec) -> Self {
let mut bytes = ssz_encode(attestation);
let epoch = attestation.slot.epoch(spec.slots_per_epoch);
bytes.extend_from_slice(&AttestationId::compute_domain_bytes(epoch, state, spec));
AttestationId(bytes)
}
fn compute_domain_bytes(epoch: Epoch, state: &BeaconState, spec: &ChainSpec) -> Vec<u8> {
int_to_bytes8(spec.get_domain(epoch, Domain::Attestation, &state.fork))
}
fn domain_bytes_match(&self, domain_bytes: &[u8]) -> bool {
&self.0[self.0.len() - DOMAIN_BYTES_LEN..] == domain_bytes
}
}
/// Compute a fitness score for an attestation.
///
/// The score is calculated by determining the number of *new* attestations that
/// the aggregate attestation introduces, and is proportional to the size of the reward we will
/// receive for including it in a block.
// TODO: this could be optimised with a map from validator index to whether that validator has
// attested in each of the current and previous epochs. Currently quadratic in the number of validators.
fn attestation_score(attestation: &Attestation, state: &BeaconState, spec: &ChainSpec) -> usize {
// Bitfield of validators whose attestations are new/fresh.
let mut new_validators = attestation.aggregation_bitfield.clone();
let attestation_epoch = attestation.data.slot.epoch(spec.slots_per_epoch);
let state_attestations = if attestation_epoch == state.current_epoch(spec) {
&state.current_epoch_attestations
} else if attestation_epoch == state.previous_epoch(spec) {
&state.previous_epoch_attestations
} else {
return 0;
};
state_attestations
.iter()
// In a single epoch, an attester should only be attesting for one shard.
// TODO: we avoid including slashable attestations in the state here,
// but maybe we should do something else with them (like construct slashings).
.filter(|current_attestation| current_attestation.data.shard == attestation.data.shard)
.for_each(|current_attestation| {
// Remove the validators who have signed the existing attestation (they are not new)
new_validators.difference_inplace(&current_attestation.aggregation_bitfield);
});
new_validators.num_set_bits()
}
#[derive(Debug, PartialEq, Clone)]
pub enum DepositInsertStatus {
/// The deposit was not already in the pool.
Fresh,
/// The deposit already existed in the pool.
Duplicate,
/// The deposit conflicted with an existing deposit, which was replaced.
Replaced(Box<Deposit>),
}
impl OperationPool {
/// Create a new operation pool.
pub fn new() -> Self {
Self::default()
}
/// Insert an attestation into the pool, aggregating it with existing attestations if possible.
pub fn insert_attestation(
&self,
attestation: Attestation,
state: &BeaconState,
spec: &ChainSpec,
) -> Result<(), AttestationValidationError> {
// Check that attestation signatures are valid.
validate_attestation_time_independent_only(state, &attestation, spec)?;
let id = AttestationId::from_data(&attestation.data, state, spec);
// Take a write lock on the attestations map.
let mut attestations = self.attestations.write();
let existing_attestations = match attestations.entry(id) {
hash_map::Entry::Vacant(entry) => {
entry.insert(vec![attestation]);
return Ok(());
}
hash_map::Entry::Occupied(entry) => entry.into_mut(),
};
let mut aggregated = false;
for existing_attestation in existing_attestations.iter_mut() {
if existing_attestation.signers_disjoint_from(&attestation) {
existing_attestation.aggregate(&attestation);
aggregated = true;
} else if *existing_attestation == attestation {
aggregated = true;
}
}
if !aggregated {
existing_attestations.push(attestation);
}
Ok(())
}
/// Total number of attestations in the pool, including attestations for the same data.
pub fn num_attestations(&self) -> usize {
self.attestations
.read()
.values()
.map(|atts| atts.len())
.sum()
}
/// Get a list of attestations for inclusion in a block.
pub fn get_attestations(&self, state: &BeaconState, spec: &ChainSpec) -> Vec<Attestation> {
// Attestations for the current fork, which may be from the current or previous epoch.
let prev_epoch = state.previous_epoch(spec);
let current_epoch = state.current_epoch(spec);
let prev_domain_bytes = AttestationId::compute_domain_bytes(prev_epoch, state, spec);
let curr_domain_bytes = AttestationId::compute_domain_bytes(current_epoch, state, spec);
self.attestations
.read()
.iter()
.filter(|(key, _)| {
key.domain_bytes_match(&prev_domain_bytes)
|| key.domain_bytes_match(&curr_domain_bytes)
})
.flat_map(|(_, attestations)| attestations)
// That are not superseded by an attestation included in the state...
.filter(|attestation| !superior_attestation_exists_in_state(state, attestation))
// That are valid...
.filter(|attestation| validate_attestation(state, attestation, spec).is_ok())
// Scored by the number of new attestations they introduce (descending)
// TODO: need to consider attestations introduced in THIS block
.map(|att| (att, attestation_score(att, state, spec)))
// Don't include any useless attestations (score 0)
.filter(|&(_, score)| score != 0)
.sorted_by_key(|&(_, score)| std::cmp::Reverse(score))
// Limited to the maximum number of attestations per block
.take(spec.max_attestations as usize)
.map(|(att, _)| att)
.cloned()
.collect()
}
/// Remove attestations which are too old to be included in a block.
// TODO: we could probably prune other attestations here:
// - ones that are completely covered by attestations included in the state
// - maybe ones invalidated by the confirmation of one fork over another
pub fn prune_attestations(&self, finalized_state: &BeaconState, spec: &ChainSpec) {
self.attestations.write().retain(|_, attestations| {
// All the attestations in this bucket have the same data, so we only need to
// check the first one.
attestations.first().map_or(false, |att| {
finalized_state.slot < att.data.slot + spec.slots_per_epoch
})
});
}
/// Add a deposit to the pool.
///
/// No two distinct deposits should be added with the same index.
pub fn insert_deposit(
&self,
deposit: Deposit,
state: &BeaconState,
spec: &ChainSpec,
) -> Result<DepositInsertStatus, DepositValidationError> {
use DepositInsertStatus::*;
match self.deposits.write().entry(deposit.index) {
Entry::Vacant(entry) => {
verify_deposit(state, &deposit, VERIFY_DEPOSIT_PROOFS, spec)?;
entry.insert(deposit);
Ok(Fresh)
}
Entry::Occupied(mut entry) => {
if entry.get() == &deposit {
Ok(Duplicate)
} else {
verify_deposit(state, &deposit, VERIFY_DEPOSIT_PROOFS, spec)?;
Ok(Replaced(Box::new(entry.insert(deposit))))
}
}
}
}
/// Get an ordered list of deposits for inclusion in a block.
///
/// Take at most the maximum number of deposits, beginning from the current deposit index.
pub fn get_deposits(&self, state: &BeaconState, spec: &ChainSpec) -> Vec<Deposit> {
let start_idx = state.deposit_index;
(start_idx..start_idx + spec.max_deposits)
.map(|idx| self.deposits.read().get(&idx).cloned())
.take_while(Option::is_some)
.flatten()
.collect()
}
/// Remove all deposits with index less than the deposit index of the latest finalized block.
pub fn prune_deposits(&self, state: &BeaconState) -> BTreeMap<u64, Deposit> {
let deposits_keep = self.deposits.write().split_off(&state.deposit_index);
std::mem::replace(&mut self.deposits.write(), deposits_keep)
}
/// The number of deposits stored in the pool.
pub fn num_deposits(&self) -> usize {
self.deposits.read().len()
}
/// Insert a proposer slashing into the pool.
pub fn insert_proposer_slashing(
&self,
slashing: ProposerSlashing,
state: &BeaconState,
spec: &ChainSpec,
) -> Result<(), ProposerSlashingValidationError> {
// TODO: should maybe insert anyway if the proposer is unknown in the validator registry,
// because they could *become* known later
verify_proposer_slashing(&slashing, state, spec)?;
self.proposer_slashings
.write()
.insert(slashing.proposer_index, slashing);
Ok(())
}
/// Compute the tuple ID that is used to identify an attester slashing.
///
/// Depends on the fork field of the state, but not on the state's epoch.
fn attester_slashing_id(
slashing: &AttesterSlashing,
state: &BeaconState,
spec: &ChainSpec,
) -> (AttestationId, AttestationId) {
(
AttestationId::from_data(&slashing.slashable_attestation_1.data, state, spec),
AttestationId::from_data(&slashing.slashable_attestation_2.data, state, spec),
)
}
/// Insert an attester slashing into the pool.
pub fn insert_attester_slashing(
&self,
slashing: AttesterSlashing,
state: &BeaconState,
spec: &ChainSpec,
) -> Result<(), AttesterSlashingValidationError> {
verify_attester_slashing(state, &slashing, true, spec)?;
let id = Self::attester_slashing_id(&slashing, state, spec);
self.attester_slashings.write().insert(id, slashing);
Ok(())
}
/// Get proposer and attester slashings for inclusion in a block.
///
/// This function computes both types of slashings together, because
/// attester slashings may be invalidated by proposer slashings included
/// earlier in the block.
pub fn get_slashings(
&self,
state: &BeaconState,
spec: &ChainSpec,
) -> (Vec<ProposerSlashing>, Vec<AttesterSlashing>) {
let proposer_slashings = filter_limit_operations(
self.proposer_slashings.read().values(),
|slashing| {
state
.validator_registry
.get(slashing.proposer_index as usize)
.map_or(false, |validator| !validator.slashed)
},
spec.max_proposer_slashings,
);
// Set of validators to be slashed, so we don't attempt to construct invalid attester
// slashings.
let mut to_be_slashed = proposer_slashings
.iter()
.map(|s| s.proposer_index)
.collect::<HashSet<_>>();
let attester_slashings = self
.attester_slashings
.read()
.iter()
.filter(|(id, slashing)| {
// Check the fork.
Self::attester_slashing_id(slashing, state, spec) == **id
})
.filter(|(_, slashing)| {
// Take all slashings that will slash 1 or more validators.
let slashed_validators = gather_attester_slashing_indices_modular(
state,
slashing,
|index, validator| validator.slashed || to_be_slashed.contains(&index),
spec,
);
// Extend the `to_be_slashed` set so subsequent iterations don't try to include
// useless slashings.
if let Ok(validators) = slashed_validators {
to_be_slashed.extend(validators);
true
} else {
false
}
})
.take(spec.max_attester_slashings as usize)
.map(|(_, slashing)| slashing.clone())
.collect();
(proposer_slashings, attester_slashings)
}
/// Prune proposer slashings for all slashed or withdrawn validators.
pub fn prune_proposer_slashings(&self, finalized_state: &BeaconState, spec: &ChainSpec) {
prune_validator_hash_map(
&mut self.proposer_slashings.write(),
|validator| {
validator.slashed
|| validator.is_withdrawable_at(finalized_state.current_epoch(spec))
},
finalized_state,
);
}
/// Prune attester slashings for all slashed or withdrawn validators, or attestations on another
/// fork.
pub fn prune_attester_slashings(&self, finalized_state: &BeaconState, spec: &ChainSpec) {
self.attester_slashings.write().retain(|id, slashing| {
let fork_ok = &Self::attester_slashing_id(slashing, finalized_state, spec) == id;
let curr_epoch = finalized_state.current_epoch(spec);
let slashing_ok = gather_attester_slashing_indices_modular(
finalized_state,
slashing,
|_, validator| validator.slashed || validator.is_withdrawable_at(curr_epoch),
spec,
)
.is_ok();
fork_ok && slashing_ok
});
}
/// Insert a voluntary exit, validating it almost-entirely (future exits are permitted).
pub fn insert_voluntary_exit(
&self,
exit: VoluntaryExit,
state: &BeaconState,
spec: &ChainSpec,
) -> Result<(), ExitValidationError> {
verify_exit_time_independent_only(state, &exit, spec)?;
self.voluntary_exits
.write()
.insert(exit.validator_index, exit);
Ok(())
}
/// Get a list of voluntary exits for inclusion in a block.
pub fn get_voluntary_exits(&self, state: &BeaconState, spec: &ChainSpec) -> Vec<VoluntaryExit> {
filter_limit_operations(
self.voluntary_exits.read().values(),
|exit| verify_exit(state, exit, spec).is_ok(),
spec.max_voluntary_exits,
)
}
/// Prune if validator has already exited at the last finalized state.
pub fn prune_voluntary_exits(&self, finalized_state: &BeaconState, spec: &ChainSpec) {
prune_validator_hash_map(
&mut self.voluntary_exits.write(),
|validator| validator.is_exited_at(finalized_state.current_epoch(spec)),
finalized_state,
);
}
/// Insert a transfer into the pool, checking it for validity in the process.
pub fn insert_transfer(
&self,
transfer: Transfer,
state: &BeaconState,
spec: &ChainSpec,
) -> Result<(), TransferValidationError> {
// The signature of the transfer isn't hashed, but because we check
// it before we insert into the HashSet, we can't end up with duplicate
// transactions.
verify_transfer_time_independent_only(state, &transfer, spec)?;
self.transfers.write().insert(transfer);
Ok(())
}
/// Get a list of transfers for inclusion in a block.
// TODO: improve the economic optimality of this function by accounting for
// dependencies between transfers in the same block e.g. A pays B, B pays C
pub fn get_transfers(&self, state: &BeaconState, spec: &ChainSpec) -> Vec<Transfer> {
self.transfers
.read()
.iter()
.filter(|transfer| verify_transfer(state, transfer, spec).is_ok())
.sorted_by_key(|transfer| std::cmp::Reverse(transfer.fee))
.take(spec.max_transfers as usize)
.cloned()
.collect()
}
/// Prune the set of transfers by removing all those whose slot has already passed.
pub fn prune_transfers(&self, finalized_state: &BeaconState) {
self.transfers
.write()
.retain(|transfer| transfer.slot > finalized_state.slot)
}
/// Prune all types of transactions given the latest finalized state.
pub fn prune_all(&self, finalized_state: &BeaconState, spec: &ChainSpec) {
self.prune_attestations(finalized_state, spec);
self.prune_deposits(finalized_state);
self.prune_proposer_slashings(finalized_state, spec);
self.prune_attester_slashings(finalized_state, spec);
self.prune_voluntary_exits(finalized_state, spec);
self.prune_transfers(finalized_state);
}
}
/// Returns `true` if the state already contains a `PendingAttestation` that is superior to the
/// given `attestation`.
///
/// A validator has nothing to gain from re-including an attestation and it adds load to the
/// network.
///
/// An existing `PendingAttestation` is superior to the given `attestation` if:
///
/// - Their `AttestationData` is equal.
/// - `attestation` does not contain any signatures that `PendingAttestation` does not have.
fn superior_attestation_exists_in_state(state: &BeaconState, attestation: &Attestation) -> bool {
state
.current_epoch_attestations
.iter()
.chain(state.previous_epoch_attestations.iter())
.any(|existing_attestation| {
let bitfield = &attestation.aggregation_bitfield;
let existing_bitfield = &existing_attestation.aggregation_bitfield;
existing_attestation.data == attestation.data
&& bitfield.intersection(existing_bitfield).num_set_bits()
== bitfield.num_set_bits()
})
}
/// Filter up to a maximum number of operations out of an iterator.
fn filter_limit_operations<'a, T: 'a, I, F>(operations: I, filter: F, limit: u64) -> Vec<T>
where
I: IntoIterator<Item = &'a T>,
F: Fn(&T) -> bool,
T: Clone,
{
operations
.into_iter()
.filter(|x| filter(*x))
.take(limit as usize)
.cloned()
.collect()
}
/// Remove all entries from the given hash map for which `prune_if` returns true.
///
/// The keys in the map should be validator indices, which will be looked up
/// in the state's validator registry and then passed to `prune_if`.
/// Entries for unknown validators will be kept.
fn prune_validator_hash_map<T, F>(
map: &mut HashMap<u64, T>,
prune_if: F,
finalized_state: &BeaconState,
) where
F: Fn(&Validator) -> bool,
{
map.retain(|&validator_index, _| {
finalized_state
.validator_registry
.get(validator_index as usize)
.map_or(true, |validator| !prune_if(validator))
});
}
#[cfg(test)]
mod tests {
use super::DepositInsertStatus::*;
use super::*;
use types::test_utils::*;
use types::*;
#[test]
fn insert_deposit() {
let rng = &mut XorShiftRng::from_seed([42; 16]);
let (ref spec, ref state) = test_state(rng);
let op_pool = OperationPool::new();
let deposit1 = make_deposit(rng, state, spec);
let mut deposit2 = make_deposit(rng, state, spec);
deposit2.index = deposit1.index;
assert_eq!(
op_pool.insert_deposit(deposit1.clone(), state, spec),
Ok(Fresh)
);
assert_eq!(
op_pool.insert_deposit(deposit1.clone(), state, spec),
Ok(Duplicate)
);
assert_eq!(
op_pool.insert_deposit(deposit2, state, spec),
Ok(Replaced(Box::new(deposit1)))
);
}
#[test]
fn get_deposits_max() {
let rng = &mut XorShiftRng::from_seed([42; 16]);
let (spec, mut state) = test_state(rng);
let op_pool = OperationPool::new();
let start = 10000;
let max_deposits = spec.max_deposits;
let extra = 5;
let offset = 1;
assert!(offset <= extra);
let deposits = dummy_deposits(rng, &state, &spec, start, max_deposits + extra);
for deposit in &deposits {
assert_eq!(
op_pool.insert_deposit(deposit.clone(), &state, &spec),
Ok(Fresh)
);
}
state.deposit_index = start + offset;
let deposits_for_block = op_pool.get_deposits(&state, &spec);
assert_eq!(deposits_for_block.len() as u64, max_deposits);
assert_eq!(
deposits_for_block[..],
deposits[offset as usize..(offset + max_deposits) as usize]
);
}
#[test]
fn prune_deposits() {
let rng = &mut XorShiftRng::from_seed([42; 16]);
let (spec, state) = test_state(rng);
let op_pool = OperationPool::new();
let start1 = 100;
// test is super slow in debug mode if this parameter is too high
let count = 5;
let gap = 25;
let start2 = start1 + count + gap;
let deposits1 = dummy_deposits(rng, &state, &spec, start1, count);
let deposits2 = dummy_deposits(rng, &state, &spec, start2, count);
for d in deposits1.into_iter().chain(deposits2) {
assert!(op_pool.insert_deposit(d, &state, &spec).is_ok());
}
assert_eq!(op_pool.num_deposits(), 2 * count as usize);
let mut state = BeaconState::random_for_test(rng);
state.deposit_index = start1;
// Pruning the first bunch of deposits in batches of 5 should work.
let step = 5;
let mut pool_size = step + 2 * count as usize;
for i in (start1..=(start1 + count)).step_by(step) {
state.deposit_index = i;
op_pool.prune_deposits(&state);
pool_size -= step;
assert_eq!(op_pool.num_deposits(), pool_size);
}
assert_eq!(pool_size, count as usize);
// Pruning in the gap should do nothing.
for i in (start1 + count..start2).step_by(step) {
state.deposit_index = i;
op_pool.prune_deposits(&state);
assert_eq!(op_pool.num_deposits(), count as usize);
}
// Same again for the later deposits.
pool_size += step;
for i in (start2..=(start2 + count)).step_by(step) {
state.deposit_index = i;
op_pool.prune_deposits(&state);
pool_size -= step;
assert_eq!(op_pool.num_deposits(), pool_size);
}
assert_eq!(op_pool.num_deposits(), 0);
}
// Create a random deposit (with a valid proof of possession)
fn make_deposit(rng: &mut XorShiftRng, state: &BeaconState, spec: &ChainSpec) -> Deposit {
let keypair = Keypair::random();
let mut deposit = Deposit::random_for_test(rng);
let mut deposit_input = DepositInput {
pubkey: keypair.pk.clone(),
withdrawal_credentials: Hash256::zero(),
proof_of_possession: Signature::empty_signature(),
};
deposit_input.proof_of_possession = deposit_input.create_proof_of_possession(
&keypair.sk,
state.slot.epoch(spec.slots_per_epoch),
&state.fork,
spec,
);
deposit.deposit_data.deposit_input = deposit_input;
deposit
}
// Create `count` dummy deposits with sequential deposit IDs beginning from `start`.
fn dummy_deposits(
rng: &mut XorShiftRng,
state: &BeaconState,
spec: &ChainSpec,
start: u64,
count: u64,
) -> Vec<Deposit> {
let proto_deposit = make_deposit(rng, state, spec);
(start..start + count)
.map(|index| {
let mut deposit = proto_deposit.clone();
deposit.index = index;
deposit
})
.collect()
}
fn test_state(rng: &mut XorShiftRng) -> (ChainSpec, BeaconState) {
let spec = ChainSpec::foundation();
let mut state = BeaconState::random_for_test(rng);
state.fork = Fork::genesis(&spec);
(spec, state)
}
/// Create a signed attestation for use in tests.
/// Signed by all validators in `committee[signing_range]` and `committee[extra_signer]`.
#[cfg(not(debug_assertions))]
fn signed_attestation<R: std::slice::SliceIndex<[usize], Output = [usize]>>(
committee: &CrosslinkCommittee,
keypairs: &[Keypair],
signing_range: R,
slot: Slot,
state: &BeaconState,
spec: &ChainSpec,
extra_signer: Option<usize>,
) -> Attestation {
let mut builder = TestingAttestationBuilder::new(
state,
&committee.committee,
slot,
committee.shard,
spec,
);
let signers = &committee.committee[signing_range];
let committee_keys = signers.iter().map(|&i| &keypairs[i].sk).collect::<Vec<_>>();
builder.sign(signers, &committee_keys, &state.fork, spec);
extra_signer.map(|c_idx| {
let validator_index = committee.committee[c_idx];
builder.sign(
&[validator_index],
&[&keypairs[validator_index].sk],
&state.fork,
spec,
)
});
builder.build()
}
/// Test state for attestation-related tests.
#[cfg(not(debug_assertions))]
fn attestation_test_state(
spec: &ChainSpec,
num_committees: usize,
) -> (BeaconState, Vec<Keypair>) {
let num_validators =
num_committees * (spec.slots_per_epoch * spec.target_committee_size) as usize;
let mut state_builder =
TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(num_validators, spec);
let slot_offset = 1000 * spec.slots_per_epoch + spec.slots_per_epoch / 2;
let slot = spec.genesis_slot + slot_offset;
state_builder.teleport_to_slot(slot, spec);
state_builder.build_caches(spec).unwrap();
state_builder.build()
}
/// Set the latest crosslink in the state to match the attestation.
#[cfg(not(debug_assertions))]
fn fake_latest_crosslink(att: &Attestation, state: &mut BeaconState, spec: &ChainSpec) {
state.latest_crosslinks[att.data.shard as usize] = Crosslink {
crosslink_data_root: att.data.crosslink_data_root,
epoch: att.data.slot.epoch(spec.slots_per_epoch),
};
}
#[test]
#[cfg(not(debug_assertions))]
fn test_attestation_score() {
let spec = &ChainSpec::foundation();
let (ref mut state, ref keypairs) = attestation_test_state(spec, 1);
let slot = state.slot - 1;
let committees = state
.get_crosslink_committees_at_slot(slot, spec)
.unwrap()
.clone();
for committee in committees {
let att1 = signed_attestation(&committee, keypairs, ..2, slot, state, spec, None);
let att2 = signed_attestation(&committee, keypairs, .., slot, state, spec, None);
assert_eq!(
att1.aggregation_bitfield.num_set_bits(),
attestation_score(&att1, state, spec)
);
state
.current_epoch_attestations
.push(PendingAttestation::from_attestation(&att1, state.slot));
assert_eq!(
committee.committee.len() - 2,
attestation_score(&att2, state, spec)
);
}
}
/// End-to-end test of basic attestation handling.
#[test]
#[cfg(not(debug_assertions))]
fn attestation_aggregation_insert_get_prune() {
let spec = &ChainSpec::foundation();
let (ref mut state, ref keypairs) = attestation_test_state(spec, 1);
let op_pool = OperationPool::new();
let slot = state.slot - 1;
let committees = state
.get_crosslink_committees_at_slot(slot, spec)
.unwrap()
.clone();
assert_eq!(
committees.len(),
1,
"we expect just one committee with this many validators"
);
for committee in &committees {
let step_size = 2;
for i in (0..committee.committee.len()).step_by(step_size) {
let att = signed_attestation(
committee,
keypairs,
i..i + step_size,
slot,
state,
spec,
None,
);
fake_latest_crosslink(&att, state, spec);
op_pool.insert_attestation(att, state, spec).unwrap();
}
}
assert_eq!(op_pool.attestations.read().len(), committees.len());
assert_eq!(op_pool.num_attestations(), committees.len());
// Before the min attestation inclusion delay, get_attestations shouldn't return anything.
assert_eq!(op_pool.get_attestations(state, spec).len(), 0);
// Then once the delay has elapsed, we should get a single aggregated attestation.
state.slot += spec.min_attestation_inclusion_delay;
let block_attestations = op_pool.get_attestations(state, spec);
assert_eq!(block_attestations.len(), committees.len());
let agg_att = &block_attestations[0];
assert_eq!(
agg_att.aggregation_bitfield.num_set_bits(),
spec.target_committee_size as usize
);
// Prune attestations shouldn't do anything at this point.
op_pool.prune_attestations(state, spec);
assert_eq!(op_pool.num_attestations(), committees.len());
// But once we advance to an epoch after the attestation, it should prune it out of
// existence.
state.slot = slot + spec.slots_per_epoch;
op_pool.prune_attestations(state, spec);
assert_eq!(op_pool.num_attestations(), 0);
}
/// Adding an attestation already in the pool should not increase the size of the pool.
#[test]
#[cfg(not(debug_assertions))]
fn attestation_duplicate() {
let spec = &ChainSpec::foundation();
let (ref mut state, ref keypairs) = attestation_test_state(spec, 1);
let op_pool = OperationPool::new();
let slot = state.slot - 1;
let committees = state
.get_crosslink_committees_at_slot(slot, spec)
.unwrap()
.clone();
for committee in &committees {
let att = signed_attestation(committee, keypairs, .., slot, state, spec, None);
fake_latest_crosslink(&att, state, spec);
op_pool
.insert_attestation(att.clone(), state, spec)
.unwrap();
op_pool.insert_attestation(att, state, spec).unwrap();
}
assert_eq!(op_pool.num_attestations(), committees.len());
}
/// Adding lots of attestations that only intersect pairwise should lead to two aggregate
/// attestations.
#[test]
#[cfg(not(debug_assertions))]
fn attestation_pairwise_overlapping() {
let spec = &ChainSpec::foundation();
let (ref mut state, ref keypairs) = attestation_test_state(spec, 1);
let op_pool = OperationPool::new();
let slot = state.slot - 1;
let committees = state
.get_crosslink_committees_at_slot(slot, spec)
.unwrap()
.clone();
let step_size = 2;
for committee in &committees {
// Create attestations that overlap on `step_size` validators, like:
// {0,1,2,3}, {2,3,4,5}, {4,5,6,7}, ...
for i in (0..committee.committee.len() - step_size).step_by(step_size) {
let att = signed_attestation(
committee,
keypairs,
i..i + 2 * step_size,
slot,
state,
spec,
None,
);
fake_latest_crosslink(&att, state, spec);
op_pool.insert_attestation(att, state, spec).unwrap();
}
}
// The attestations should get aggregated into two attestations that comprise all
// validators.
assert_eq!(op_pool.attestations.read().len(), committees.len());
assert_eq!(op_pool.num_attestations(), 2 * committees.len());
}
/// Create a bunch of attestations signed by a small number of validators, and another
/// bunch signed by a larger number, such that there are at least `max_attestations`
/// signed by the larger number. Then, check that `get_attestations` only returns the
/// high-quality attestations. To ensure that no aggregation occurs, ALL attestations
/// are also signed by the 0th member of the committee.
#[test]
#[cfg(not(debug_assertions))]
fn attestation_get_max() {
let spec = &ChainSpec::foundation();
let small_step_size = 2;
let big_step_size = 4;
let (ref mut state, ref keypairs) = attestation_test_state(spec, big_step_size);
let op_pool = OperationPool::new();
let slot = state.slot - 1;
let committees = state
.get_crosslink_committees_at_slot(slot, spec)
.unwrap()
.clone();
let max_attestations = spec.max_attestations as usize;
let target_committee_size = spec.target_committee_size as usize;
let mut insert_attestations = |committee, step_size| {
for i in (0..target_committee_size).step_by(step_size) {
let att = signed_attestation(
committee,
keypairs,
i..i + step_size,
slot,
state,
spec,
if i == 0 { None } else { Some(0) },
);
fake_latest_crosslink(&att, state, spec);
op_pool.insert_attestation(att, state, spec).unwrap();
}
};
for committee in &committees {
assert_eq!(committee.committee.len(), target_committee_size);
// Attestations signed by only 2-3 validators
insert_attestations(committee, small_step_size);
// Attestations signed by 4+ validators
insert_attestations(committee, big_step_size);
}
let num_small = target_committee_size / small_step_size;
let num_big = target_committee_size / big_step_size;
assert_eq!(op_pool.attestations.read().len(), committees.len());
assert_eq!(
op_pool.num_attestations(),
(num_small + num_big) * committees.len()
);
assert!(op_pool.num_attestations() > max_attestations);
state.slot += spec.min_attestation_inclusion_delay;
let best_attestations = op_pool.get_attestations(state, spec);
assert_eq!(best_attestations.len(), max_attestations);
// All the best attestations should be signed by at least `big_step_size` (4) validators.
for att in &best_attestations {
assert!(att.aggregation_bitfield.num_set_bits() >= big_step_size);
}
}
// TODO: more tests
}
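Taken together, these tests sketch the intended flow through the `OperationPool`. As a rough summary (not part of the diff; `received` is a hypothetical batch of attestations gossiped to the node), a block producer might drive the pool like this:

fn attestations_for_block(
    pool: &OperationPool,
    received: Vec<Attestation>,
    state: &BeaconState,
    spec: &ChainSpec,
) -> Vec<Attestation> {
    // Insert each attestation; attestations with equal data and disjoint
    // signers are aggregated inside the pool, invalid ones are dropped here.
    for att in received {
        let _ = pool.insert_attestation(att, state, spec);
    }
    // Return the aggregates (at most `spec.max_attestations`) that are valid
    // for inclusion in a block built on `state`.
    pool.get_attestations(state, spec)
}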

View File

@ -1,4 +1,3 @@
use self::verify_proposer_slashing::verify_proposer_slashing;
use crate::common::slash_validator; use crate::common::slash_validator;
use errors::{BlockInvalid as Invalid, BlockProcessingError as Error, IntoWithIndex}; use errors::{BlockInvalid as Invalid, BlockProcessingError as Error, IntoWithIndex};
use rayon::prelude::*; use rayon::prelude::*;
@ -6,13 +5,20 @@ use ssz::{SignedRoot, TreeHash};
use types::*; use types::*;
pub use self::verify_attester_slashing::{ pub use self::verify_attester_slashing::{
gather_attester_slashing_indices, verify_attester_slashing, gather_attester_slashing_indices, gather_attester_slashing_indices_modular,
verify_attester_slashing,
};
pub use self::verify_proposer_slashing::verify_proposer_slashing;
pub use validate_attestation::{
validate_attestation, validate_attestation_time_independent_only,
validate_attestation_without_signature,
}; };
pub use validate_attestation::{validate_attestation, validate_attestation_without_signature};
pub use verify_deposit::{get_existing_validator_index, verify_deposit, verify_deposit_index}; pub use verify_deposit::{get_existing_validator_index, verify_deposit, verify_deposit_index};
pub use verify_exit::verify_exit; pub use verify_exit::{verify_exit, verify_exit_time_independent_only};
pub use verify_slashable_attestation::verify_slashable_attestation; pub use verify_slashable_attestation::verify_slashable_attestation;
pub use verify_transfer::{execute_transfer, verify_transfer}; pub use verify_transfer::{
execute_transfer, verify_transfer, verify_transfer_time_independent_only,
};
pub mod errors; pub mod errors;
mod validate_attestation; mod validate_attestation;
@ -316,13 +322,7 @@ pub fn process_attestations(
// Update the state in series. // Update the state in series.
for attestation in attestations { for attestation in attestations {
let pending_attestation = PendingAttestation { let pending_attestation = PendingAttestation::from_attestation(attestation, state.slot);
data: attestation.data.clone(),
aggregation_bitfield: attestation.aggregation_bitfield.clone(),
custody_bitfield: attestation.custody_bitfield.clone(),
inclusion_slot: state.slot,
};
let attestation_epoch = attestation.data.slot.epoch(spec.slots_per_epoch); let attestation_epoch = attestation.data.slot.epoch(spec.slots_per_epoch);
if attestation_epoch == state.current_epoch(spec) { if attestation_epoch == state.current_epoch(spec) {

View File

@ -390,6 +390,11 @@ pub enum TransferInvalid {
/// ///
/// (state_slot, transfer_slot) /// (state_slot, transfer_slot)
StateSlotMismatch(Slot, Slot), StateSlotMismatch(Slot, Slot),
/// The `transfer.slot` is in the past relative to the state slot.
///
/// (state_slot, transfer_slot)
TransferSlotInPast(Slot, Slot),
/// The `transfer.from` validator has been activated and is not withdrawable. /// The `transfer.from` validator has been activated and is not withdrawable.
/// ///
/// (from_validator) /// (from_validator)

View File

@ -14,7 +14,16 @@ pub fn validate_attestation(
attestation: &Attestation, attestation: &Attestation,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<(), Error> { ) -> Result<(), Error> {
validate_attestation_signature_optional(state, attestation, spec, true) validate_attestation_parametric(state, attestation, spec, true, false)
}
/// Like `validate_attestation` but doesn't run checks which may become true in future states.
pub fn validate_attestation_time_independent_only(
state: &BeaconState,
attestation: &Attestation,
spec: &ChainSpec,
) -> Result<(), Error> {
validate_attestation_parametric(state, attestation, spec, true, true)
} }
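The `*_time_independent_only` variants exist so an operation pool can pre-validate attestations when they arrive, even though some checks (the inclusion delay, the justified epoch and root) only pass against a later state. A minimal sketch of that split, assuming `pool_state` is the state at receipt time, `block_state` is the state a block is being built on, and `Error` is this module's attestation error type:

fn check_on_receipt_then_on_inclusion(
    pool_state: &BeaconState,
    block_state: &BeaconState,
    attestation: &Attestation,
    spec: &ChainSpec,
) -> Result<(), Error> {
    // On receipt: skip the slot-dependent checks so the attestation can be
    // admitted to the pool before it becomes includable.
    validate_attestation_time_independent_only(pool_state, attestation, spec)?;
    // At block production: the full check, including the inclusion delay and
    // the justified epoch/root comparison.
    validate_attestation(block_state, attestation, spec)
}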
/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the /// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the
@ -28,7 +37,7 @@ pub fn validate_attestation_without_signature(
attestation: &Attestation, attestation: &Attestation,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<(), Error> { ) -> Result<(), Error> {
validate_attestation_signature_optional(state, attestation, spec, false) validate_attestation_parametric(state, attestation, spec, false, false)
} }
/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the /// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the
@ -36,15 +45,13 @@ pub fn validate_attestation_without_signature(
/// ///
/// ///
/// Spec v0.5.0 /// Spec v0.5.0
fn validate_attestation_signature_optional( fn validate_attestation_parametric(
state: &BeaconState, state: &BeaconState,
attestation: &Attestation, attestation: &Attestation,
spec: &ChainSpec, spec: &ChainSpec,
verify_signature: bool, verify_signature: bool,
time_independent_only: bool,
) -> Result<(), Error> { ) -> Result<(), Error> {
let state_epoch = state.slot.epoch(spec.slots_per_epoch);
let attestation_epoch = attestation.data.slot.epoch(spec.slots_per_epoch);
// Can't submit pre-historic attestations. // Can't submit pre-historic attestations.
verify!( verify!(
attestation.data.slot >= spec.genesis_slot, attestation.data.slot >= spec.genesis_slot,
@ -65,7 +72,8 @@ fn validate_attestation_signature_optional(
// Can't submit attestation too quickly. // Can't submit attestation too quickly.
verify!( verify!(
attestation.data.slot + spec.min_attestation_inclusion_delay <= state.slot, time_independent_only
|| attestation.data.slot + spec.min_attestation_inclusion_delay <= state.slot,
Invalid::IncludedTooEarly { Invalid::IncludedTooEarly {
state: state.slot, state: state.slot,
delay: spec.min_attestation_inclusion_delay, delay: spec.min_attestation_inclusion_delay,
@ -74,40 +82,8 @@ fn validate_attestation_signature_optional(
); );
// Verify the justified epoch and root is correct. // Verify the justified epoch and root is correct.
if attestation_epoch >= state_epoch { if !time_independent_only {
verify!( verify_justified_epoch_and_root(attestation, state, spec)?;
attestation.data.source_epoch == state.current_justified_epoch,
Invalid::WrongJustifiedEpoch {
state: state.current_justified_epoch,
attestation: attestation.data.source_epoch,
is_current: true,
}
);
verify!(
attestation.data.source_root == state.current_justified_root,
Invalid::WrongJustifiedRoot {
state: state.current_justified_root,
attestation: attestation.data.source_root,
is_current: true,
}
);
} else {
verify!(
attestation.data.source_epoch == state.previous_justified_epoch,
Invalid::WrongJustifiedEpoch {
state: state.previous_justified_epoch,
attestation: attestation.data.source_epoch,
is_current: false,
}
);
verify!(
attestation.data.source_root == state.previous_justified_root,
Invalid::WrongJustifiedRoot {
state: state.previous_justified_root,
attestation: attestation.data.source_root,
is_current: true,
}
);
} }
// Check that the crosslink data is valid. // Check that the crosslink data is valid.
@ -188,6 +164,56 @@ fn validate_attestation_signature_optional(
Ok(()) Ok(())
} }
/// Verify that the `source_epoch` and `source_root` of an `Attestation` correctly
/// match the current (or previous) justified epoch and root from the state.
///
/// Spec v0.5.0
fn verify_justified_epoch_and_root(
attestation: &Attestation,
state: &BeaconState,
spec: &ChainSpec,
) -> Result<(), Error> {
let state_epoch = state.slot.epoch(spec.slots_per_epoch);
let attestation_epoch = attestation.data.slot.epoch(spec.slots_per_epoch);
if attestation_epoch >= state_epoch {
verify!(
attestation.data.source_epoch == state.current_justified_epoch,
Invalid::WrongJustifiedEpoch {
state: state.current_justified_epoch,
attestation: attestation.data.source_epoch,
is_current: true,
}
);
verify!(
attestation.data.source_root == state.current_justified_root,
Invalid::WrongJustifiedRoot {
state: state.current_justified_root,
attestation: attestation.data.source_root,
is_current: true,
}
);
} else {
verify!(
attestation.data.source_epoch == state.previous_justified_epoch,
Invalid::WrongJustifiedEpoch {
state: state.previous_justified_epoch,
attestation: attestation.data.source_epoch,
is_current: false,
}
);
verify!(
attestation.data.source_root == state.previous_justified_root,
Invalid::WrongJustifiedRoot {
state: state.previous_justified_root,
attestation: attestation.data.source_root,
is_current: false,
}
);
}
Ok(())
}
/// Verifies an aggregate signature for some given `AttestationData`, returning `true` if the /// Verifies an aggregate signature for some given `AttestationData`, returning `true` if the
/// `aggregate_signature` is valid. /// `aggregate_signature` is valid.
/// ///

View File

@ -47,6 +47,25 @@ pub fn gather_attester_slashing_indices(
attester_slashing: &AttesterSlashing, attester_slashing: &AttesterSlashing,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<Vec<u64>, Error> { ) -> Result<Vec<u64>, Error> {
gather_attester_slashing_indices_modular(
state,
attester_slashing,
|_, validator| validator.slashed,
spec,
)
}
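The `_modular` variant defined just below lets the caller replace the slashed-check. A hedged example of why that is useful (not part of the diff; `already_slashed` is a hypothetical set of indices slashed earlier in the same block, and `Error` is this module's error type): treat those validators as slashed even though `validator.slashed` has not been written back to the state yet.

use std::collections::HashSet;

fn slashable_indices_excluding(
    state: &BeaconState,
    attester_slashing: &AttesterSlashing,
    already_slashed: &HashSet<u64>,
    spec: &ChainSpec,
) -> Result<Vec<u64>, Error> {
    gather_attester_slashing_indices_modular(
        state,
        attester_slashing,
        // A validator counts as slashed if the state says so, or if an earlier
        // slashing in this block already covered it.
        |index, validator| validator.slashed || already_slashed.contains(&index),
        spec,
    )
}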
/// Same as `gather_attester_slashing_indices` but allows the caller to specify the criteria
/// for determining whether a given validator should be considered slashed.
pub fn gather_attester_slashing_indices_modular<F>(
state: &BeaconState,
attester_slashing: &AttesterSlashing,
is_slashed: F,
spec: &ChainSpec,
) -> Result<Vec<u64>, Error>
where
F: Fn(u64, &Validator) -> bool,
{
let slashable_attestation_1 = &attester_slashing.slashable_attestation_1; let slashable_attestation_1 = &attester_slashing.slashable_attestation_1;
let slashable_attestation_2 = &attester_slashing.slashable_attestation_2; let slashable_attestation_2 = &attester_slashing.slashable_attestation_2;
@ -57,7 +76,7 @@ pub fn gather_attester_slashing_indices(
.get(*i as usize) .get(*i as usize)
.ok_or_else(|| Error::Invalid(Invalid::UnknownValidator(*i)))?; .ok_or_else(|| Error::Invalid(Invalid::UnknownValidator(*i)))?;
if slashable_attestation_2.validator_indices.contains(&i) & !validator.slashed { if slashable_attestation_2.validator_indices.contains(&i) & !is_slashed(*i, validator) {
// TODO: verify that we should reject any slashable attestation which includes a // TODO: verify that we should reject any slashable attestation which includes a
// withdrawn validator. PH has asked the question on gitter, awaiting response. // withdrawn validator. PH has asked the question on gitter, awaiting response.
verify!( verify!(

View File

@ -12,6 +12,25 @@ pub fn verify_exit(
state: &BeaconState, state: &BeaconState,
exit: &VoluntaryExit, exit: &VoluntaryExit,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<(), Error> {
verify_exit_parametric(state, exit, spec, false)
}
/// Like `verify_exit` but doesn't run checks which may become true in future states.
pub fn verify_exit_time_independent_only(
state: &BeaconState,
exit: &VoluntaryExit,
spec: &ChainSpec,
) -> Result<(), Error> {
verify_exit_parametric(state, exit, spec, true)
}
/// Parametric version of `verify_exit` that skips some checks if `time_independent_only` is true.
fn verify_exit_parametric(
state: &BeaconState,
exit: &VoluntaryExit,
spec: &ChainSpec,
time_independent_only: bool,
) -> Result<(), Error> { ) -> Result<(), Error> {
let validator = state let validator = state
.validator_registry .validator_registry
@ -32,7 +51,7 @@ pub fn verify_exit(
// Exits must specify an epoch when they become valid; they are not valid before then. // Exits must specify an epoch when they become valid; they are not valid before then.
verify!( verify!(
state.current_epoch(spec) >= exit.epoch, time_independent_only || state.current_epoch(spec) >= exit.epoch,
Invalid::FutureEpoch { Invalid::FutureEpoch {
state: state.current_epoch(spec), state: state.current_epoch(spec),
exit: exit.epoch exit: exit.epoch

View File

@ -15,6 +15,25 @@ pub fn verify_transfer(
state: &BeaconState, state: &BeaconState,
transfer: &Transfer, transfer: &Transfer,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<(), Error> {
verify_transfer_parametric(state, transfer, spec, false)
}
/// Like `verify_transfer` but doesn't run checks which may become true in future states.
pub fn verify_transfer_time_independent_only(
state: &BeaconState,
transfer: &Transfer,
spec: &ChainSpec,
) -> Result<(), Error> {
verify_transfer_parametric(state, transfer, spec, true)
}
/// Parametric version of `verify_transfer` that allows some checks to be skipped.
fn verify_transfer_parametric(
state: &BeaconState,
transfer: &Transfer,
spec: &ChainSpec,
time_independent_only: bool,
) -> Result<(), Error> { ) -> Result<(), Error> {
let sender_balance = *state let sender_balance = *state
.validator_balances .validator_balances
@ -27,17 +46,18 @@ pub fn verify_transfer(
.ok_or_else(|| Error::Invalid(Invalid::FeeOverflow(transfer.amount, transfer.fee)))?; .ok_or_else(|| Error::Invalid(Invalid::FeeOverflow(transfer.amount, transfer.fee)))?;
verify!( verify!(
sender_balance >= transfer.amount, time_independent_only || sender_balance >= transfer.amount,
Invalid::FromBalanceInsufficient(transfer.amount, sender_balance) Invalid::FromBalanceInsufficient(transfer.amount, sender_balance)
); );
verify!( verify!(
sender_balance >= transfer.fee, time_independent_only || sender_balance >= transfer.fee,
Invalid::FromBalanceInsufficient(transfer.fee, sender_balance) Invalid::FromBalanceInsufficient(transfer.fee, sender_balance)
); );
verify!( verify!(
(sender_balance == total_amount) time_independent_only
|| (sender_balance == total_amount)
|| (sender_balance >= (total_amount + spec.min_deposit_amount)), || (sender_balance >= (total_amount + spec.min_deposit_amount)),
Invalid::InvalidResultingFromBalance( Invalid::InvalidResultingFromBalance(
sender_balance - total_amount, sender_balance - total_amount,
@ -45,10 +65,17 @@ pub fn verify_transfer(
) )
); );
verify!( if time_independent_only {
state.slot == transfer.slot, verify!(
Invalid::StateSlotMismatch(state.slot, transfer.slot) state.slot <= transfer.slot,
); Invalid::TransferSlotInPast(state.slot, transfer.slot)
);
} else {
verify!(
state.slot == transfer.slot,
Invalid::StateSlotMismatch(state.slot, transfer.slot)
);
}
let sender_validator = state let sender_validator = state
.validator_registry .validator_registry
@ -57,7 +84,8 @@ pub fn verify_transfer(
let epoch = state.slot.epoch(spec.slots_per_epoch); let epoch = state.slot.epoch(spec.slots_per_epoch);
verify!( verify!(
sender_validator.is_withdrawable_at(epoch) time_independent_only
|| sender_validator.is_withdrawable_at(epoch)
|| sender_validator.activation_epoch == spec.far_future_epoch, || sender_validator.activation_epoch == spec.far_future_epoch,
Invalid::FromValidatorIneligableForTransfer(transfer.sender) Invalid::FromValidatorIneligableForTransfer(transfer.sender)
); );

View File

@ -227,7 +227,7 @@ impl ValidatorStatuses {
status.is_previous_epoch_attester = true; status.is_previous_epoch_attester = true;
// The inclusion slot and distance are only required for previous epoch attesters. // The inclusion slot and distance are only required for previous epoch attesters.
let relative_epoch = RelativeEpoch::from_slot(state.slot, a.data.slot, spec)?; let relative_epoch = RelativeEpoch::from_slot(state.slot, a.inclusion_slot, spec)?;
status.inclusion_info = Some(InclusionInfo { status.inclusion_info = Some(InclusionInfo {
slot: a.inclusion_slot, slot: a.inclusion_slot,
distance: inclusion_distance(a), distance: inclusion_distance(a),

View File

@ -20,7 +20,6 @@ pub fn per_slot_processing(
if (state.slot + 1) % spec.slots_per_epoch == 0 { if (state.slot + 1) % spec.slots_per_epoch == 0 {
per_epoch_processing(state, spec)?; per_epoch_processing(state, spec)?;
state.advance_caches();
} }
state.slot += 1; state.slot += 1;

View File

@ -1,4 +1,11 @@
use serde_derive::Deserialize; use serde_derive::Deserialize;
use serde_yaml;
#[cfg(not(debug_assertions))]
use state_processing::{
per_block_processing, per_block_processing_without_verifying_block_signature,
per_slot_processing,
};
use std::{fs::File, io::prelude::*, path::PathBuf};
use types::*; use types::*;
#[allow(unused_imports)] #[allow(unused_imports)]
use yaml_utils; use yaml_utils;
@ -21,10 +28,7 @@ pub struct TestDoc {
} }
#[test] #[test]
fn yaml() { fn test_read_yaml() {
use serde_yaml;
use std::{fs::File, io::prelude::*, path::PathBuf};
// Test sanity-check_small-config_32-vals.yaml // Test sanity-check_small-config_32-vals.yaml
let mut file = { let mut file = {
let mut file_path_buf = PathBuf::from(env!("CARGO_MANIFEST_DIR")); let mut file_path_buf = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
@ -57,3 +61,48 @@ fn yaml() {
let _doc: TestDoc = serde_yaml::from_str(&yaml_str.as_str()).unwrap(); let _doc: TestDoc = serde_yaml::from_str(&yaml_str.as_str()).unwrap();
} }
#[test]
#[cfg(not(debug_assertions))]
fn run_state_transition_tests_small() {
// Test sanity-check_small-config_32-vals.yaml
let mut file = {
let mut file_path_buf = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
file_path_buf.push("yaml_utils/specs/sanity-check_small-config_32-vals.yaml");
File::open(file_path_buf).unwrap()
};
let mut yaml_str = String::new();
file.read_to_string(&mut yaml_str).unwrap();
yaml_str = yaml_str.to_lowercase();
let doc: TestDoc = serde_yaml::from_str(&yaml_str.as_str()).unwrap();
// Run Tests
for (i, test_case) in doc.test_cases.iter().enumerate() {
let mut state = test_case.initial_state.clone();
for block in test_case.blocks.iter() {
while block.slot > state.slot {
let latest_block_header = state.latest_block_header.clone();
per_slot_processing(&mut state, &latest_block_header, &test_case.config).unwrap();
}
if test_case.verify_signatures {
let res = per_block_processing(&mut state, &block, &test_case.config);
if res.is_err() {
println!("{:?}", i);
println!("{:?}", res);
};
} else {
let res = per_block_processing_without_verifying_block_signature(
&mut state,
&block,
&test_case.config,
);
if res.is_err() {
println!("{:?}", i);
println!("{:?}", res);
}
}
}
}
}

View File

@ -8,6 +8,7 @@ edition = "2018"
bls = { path = "../utils/bls" } bls = { path = "../utils/bls" }
boolean-bitfield = { path = "../utils/boolean-bitfield" } boolean-bitfield = { path = "../utils/boolean-bitfield" }
dirs = "1.0" dirs = "1.0"
derivative = "1.0"
ethereum-types = "0.5" ethereum-types = "0.5"
hashing = { path = "../utils/hashing" } hashing = { path = "../utils/hashing" }
hex = "0.3" hex = "0.3"

View File

@ -28,6 +28,29 @@ pub struct Attestation {
pub aggregate_signature: AggregateSignature, pub aggregate_signature: AggregateSignature,
} }
impl Attestation {
/// Are the aggregation bitfields of these attestations disjoint?
pub fn signers_disjoint_from(&self, other: &Attestation) -> bool {
self.aggregation_bitfield
.intersection(&other.aggregation_bitfield)
.is_zero()
}
/// Aggregate another Attestation into this one.
///
/// The aggregation bitfields must be disjoint, and the data must be the same.
pub fn aggregate(&mut self, other: &Attestation) {
debug_assert_eq!(self.data, other.data);
debug_assert!(self.signers_disjoint_from(other));
self.aggregation_bitfield
.union_inplace(&other.aggregation_bitfield);
self.custody_bitfield.union_inplace(&other.custody_bitfield);
self.aggregate_signature
.add_aggregate(&other.aggregate_signature);
}
}
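A short sketch (assumed helper, not part of the diff) of how these two methods combine when merging attestations that carry identical `AttestationData`, which is what the operation pool does on insert:

fn try_aggregate(existing: &mut Attestation, incoming: &Attestation) -> bool {
    // Only attestations for the same data with disjoint signers may be merged;
    // `aggregate` debug-asserts both conditions.
    if existing.data == incoming.data && existing.signers_disjoint_from(incoming) {
        existing.aggregate(incoming);
        true
    } else {
        false
    }
}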
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;

View File

@ -1,9 +1,10 @@
use crate::*; use crate::*;
use serde_derive::{Deserialize, Serialize}; use serde_derive::{Deserialize, Serialize};
#[derive(Debug, PartialEq, Clone, Default, Serialize, Deserialize)] #[derive(Debug, PartialEq, Clone, Copy, Default, Serialize, Deserialize)]
pub struct AttestationDuty { pub struct AttestationDuty {
pub slot: Slot, pub slot: Slot,
pub shard: Shard, pub shard: Shard,
pub committee_index: usize, pub committee_index: usize,
pub committee_len: usize,
} }

View File

@ -37,6 +37,19 @@ impl BeaconBlockHeader {
pub fn canonical_root(&self) -> Hash256 { pub fn canonical_root(&self) -> Hash256 {
Hash256::from_slice(&self.hash_tree_root()[..]) Hash256::from_slice(&self.hash_tree_root()[..])
} }
/// Given a `body`, consumes `self` and returns a complete `BeaconBlock`.
///
/// Spec v0.5.0
pub fn into_block(self, body: BeaconBlockBody) -> BeaconBlock {
BeaconBlock {
slot: self.slot,
previous_block_root: self.previous_block_root,
state_root: self.state_root,
body,
signature: self.signature,
}
}
} }
#[cfg(test)] #[cfg(test)]

View File

@ -661,6 +661,17 @@ impl BeaconState {
}) })
} }
/// Build all the caches, if they need to be built.
pub fn build_all_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> {
self.build_epoch_cache(RelativeEpoch::Previous, spec)?;
self.build_epoch_cache(RelativeEpoch::Current, spec)?;
self.build_epoch_cache(RelativeEpoch::NextWithoutRegistryChange, spec)?;
self.build_epoch_cache(RelativeEpoch::NextWithRegistryChange, spec)?;
self.update_pubkey_cache()?;
Ok(())
}
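As a sketch of the intended use (assumed helper, not part of the diff; `Error` is the `BeaconState` error type): build every cache up front before doing committee or pubkey lookups that depend on them.

fn committee_count_at(
    state: &mut BeaconState,
    slot: Slot,
    spec: &ChainSpec,
) -> Result<usize, Error> {
    // Builds the previous/current/next epoch caches plus the pubkey cache,
    // skipping any that already exist.
    state.build_all_caches(spec)?;
    // Committee lookups require the relevant epoch cache to be present.
    Ok(state.get_crosslink_committees_at_slot(slot, spec)?.len())
}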
/// Build an epoch cache, unless it has already been built.
pub fn build_epoch_cache( pub fn build_epoch_cache(
&mut self, &mut self,

View File

@ -92,6 +92,7 @@ impl EpochCache {
slot, slot,
shard, shard,
committee_index: k, committee_index: k,
committee_len: crosslink_committee.committee.len(),
}; };
attestation_duties[*validator_index] = Some(attestation_duty) attestation_duties[*validator_index] = Some(attestation_duty)
} }

View File

@ -120,7 +120,7 @@ pub struct ChainSpec {
* *
*/ */
pub boot_nodes: Vec<Multiaddr>, pub boot_nodes: Vec<Multiaddr>,
pub network_id: u8, pub chain_id: u8,
} }
impl ChainSpec { impl ChainSpec {
@ -257,7 +257,7 @@ impl ChainSpec {
* Boot nodes * Boot nodes
*/ */
boot_nodes: vec![], boot_nodes: vec![],
network_id: 1, // foundation network id chain_id: 1, // foundation chain id
} }
} }
@ -274,7 +274,7 @@ impl ChainSpec {
Self { Self {
boot_nodes, boot_nodes,
network_id: 2, // lighthouse testnet network id chain_id: 2, // lighthouse testnet chain id
..ChainSpec::few_validators() ..ChainSpec::few_validators()
} }
} }

View File

@ -85,6 +85,6 @@ pub type AttesterMap = HashMap<(u64, u64), Vec<usize>>;
pub type ProposerMap = HashMap<u64, usize>; pub type ProposerMap = HashMap<u64, usize>;
pub use bls::{AggregatePublicKey, AggregateSignature, Keypair, PublicKey, SecretKey, Signature}; pub use bls::{AggregatePublicKey, AggregateSignature, Keypair, PublicKey, SecretKey, Signature};
pub use libp2p::floodsub::{Topic, TopicBuilder}; pub use libp2p::floodsub::{Topic, TopicBuilder, TopicHash};
pub use libp2p::multiaddr; pub use libp2p::multiaddr;
pub use libp2p::Multiaddr; pub use libp2p::Multiaddr;

View File

@ -1,5 +1,5 @@
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use crate::{AttestationData, Bitfield, Slot}; use crate::{Attestation, AttestationData, Bitfield, Slot};
use rand::RngCore; use rand::RngCore;
use serde_derive::{Deserialize, Serialize}; use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode, TreeHash}; use ssz_derive::{Decode, Encode, TreeHash};
@ -16,6 +16,18 @@ pub struct PendingAttestation {
pub inclusion_slot: Slot, pub inclusion_slot: Slot,
} }
impl PendingAttestation {
/// Create a `PendingAttestation` from an `Attestation`, at the given `inclusion_slot`.
pub fn from_attestation(attestation: &Attestation, inclusion_slot: Slot) -> Self {
PendingAttestation {
data: attestation.data.clone(),
aggregation_bitfield: attestation.aggregation_bitfield.clone(),
custody_bitfield: attestation.custody_bitfield.clone(),
inclusion_slot,
}
}
}
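A minimal sketch of the conversion in use (assumed helper; this ignores the current-vs-previous-epoch branching that `process_attestations` performs):

fn record_attestation(state: &mut BeaconState, attestation: &Attestation) {
    // Store the attestation on-chain, stamped with the slot it was included at.
    let pending = PendingAttestation::from_attestation(attestation, state.slot);
    state.current_epoch_attestations.push(pending);
}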
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;

View File

@ -113,6 +113,16 @@ mod epoch_tests {
all_tests!(Epoch); all_tests!(Epoch);
#[test]
fn epoch_start_end() {
let slots_per_epoch = 8;
let epoch = Epoch::new(0);
assert_eq!(epoch.start_slot(slots_per_epoch), Slot::new(0));
assert_eq!(epoch.end_slot(slots_per_epoch), Slot::new(7));
}
#[test] #[test]
fn slot_iter() { fn slot_iter() {
let slots_per_epoch = 8; let slots_per_epoch = 8;

View File

@ -19,7 +19,7 @@ pub fn generate_deterministic_keypairs(validator_count: usize) -> Vec<Keypair> {
.collect::<Vec<usize>>() .collect::<Vec<usize>>()
.par_iter() .par_iter()
.map(|&i| { .map(|&i| {
let secret = int_to_bytes48(i as u64 + 1); let secret = int_to_bytes48(i as u64 + 1000);
let sk = SecretKey::from_bytes(&secret).unwrap(); let sk = SecretKey::from_bytes(&secret).unwrap();
let pk = PublicKey::from_secret_key(&sk); let pk = PublicKey::from_secret_key(&sk);
Keypair { sk, pk } Keypair { sk, pk }

View File

@ -5,13 +5,13 @@ macro_rules! ssz_tests {
#[test] #[test]
pub fn test_ssz_round_trip() { pub fn test_ssz_round_trip() {
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{decode, ssz_encode}; use ssz::{ssz_encode, Decodable};
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let original = $type::random_for_test(&mut rng); let original = $type::random_for_test(&mut rng);
let bytes = ssz_encode(&original); let bytes = ssz_encode(&original);
let decoded: $type = decode(&bytes).unwrap(); let (decoded, _): ($type, usize) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded); assert_eq!(original, decoded);
} }

View File

@ -6,6 +6,7 @@ use dirs;
use log::debug; use log::debug;
use rayon::prelude::*; use rayon::prelude::*;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::time::SystemTime;
pub const KEYPAIRS_FILE: &str = "keypairs.raw_keypairs"; pub const KEYPAIRS_FILE: &str = "keypairs.raw_keypairs";
@ -23,6 +24,7 @@ pub fn keypairs_path() -> PathBuf {
/// Builds a beacon state to be used for testing purposes. /// Builds a beacon state to be used for testing purposes.
/// ///
/// This struct should **never be used for production purposes.** /// This struct should **never be used for production purposes.**
#[derive(Clone)]
pub struct TestingBeaconStateBuilder { pub struct TestingBeaconStateBuilder {
state: BeaconState, state: BeaconState,
keypairs: Vec<Keypair>, keypairs: Vec<Keypair>,
@ -119,8 +121,20 @@ impl TestingBeaconStateBuilder {
}) })
.collect(); .collect();
// TODO: Testing only. Burn with fire later.
// Set genesis to the start of the most recent 30-minute window.
// This is used for testing only: it allows multiple nodes that connect within
// the same 30-minute window to agree on a genesis time.
let now = SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap()
.as_secs();
let secs_after_last_period = now.checked_rem(30 * 60).unwrap_or(0);
// Genesis is now the start of the most recent 30-minute window.
let genesis_time = now - secs_after_last_period;
let mut state = BeaconState::genesis( let mut state = BeaconState::genesis(
0, genesis_time,
Eth1Data { Eth1Data {
deposit_root: Hash256::zero(), deposit_root: Hash256::zero(),
block_hash: Hash256::zero(), block_hash: Hash256::zero(),

View File

@ -1,6 +1,7 @@
use super::Slot; use super::Slot;
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use bls::{PublicKey, Signature}; use bls::{PublicKey, Signature};
use derivative::Derivative;
use rand::RngCore; use rand::RngCore;
use serde_derive::{Deserialize, Serialize}; use serde_derive::{Deserialize, Serialize};
use ssz::TreeHash; use ssz::TreeHash;
@ -12,7 +13,6 @@ use test_random_derive::TestRandom;
/// Spec v0.5.0 /// Spec v0.5.0
#[derive( #[derive(
Debug, Debug,
PartialEq,
Clone, Clone,
Serialize, Serialize,
Deserialize, Deserialize,
@ -21,7 +21,9 @@ use test_random_derive::TestRandom;
TreeHash, TreeHash,
TestRandom, TestRandom,
SignedRoot, SignedRoot,
Derivative,
)] )]
#[derivative(PartialEq, Eq, Hash)]
pub struct Transfer { pub struct Transfer {
pub sender: u64, pub sender: u64,
pub recipient: u64, pub recipient: u64,
@ -29,6 +31,7 @@ pub struct Transfer {
pub fee: u64, pub fee: u64,
pub slot: Slot, pub slot: Slot,
pub pubkey: PublicKey, pub pubkey: PublicKey,
#[derivative(Hash = "ignore")]
pub signature: Signature, pub signature: Signature,
} }

View File

@ -1,7 +1,7 @@
use super::PublicKey; use super::PublicKey;
use bls_aggregates::AggregatePublicKey as RawAggregatePublicKey; use bls_aggregates::AggregatePublicKey as RawAggregatePublicKey;
/// A single BLS signature. /// A BLS aggregate public key.
/// ///
/// This struct is a wrapper upon a base type and provides helper functions (e.g., SSZ /// This struct is a wrapper upon a base type and provides helper functions (e.g., SSZ
/// serialization). /// serialization).
@ -17,7 +17,7 @@ impl AggregatePublicKey {
self.0.add(public_key.as_raw()) self.0.add(public_key.as_raw())
} }
/// Returns the underlying signature. /// Returns the underlying public key.
pub fn as_raw(&self) -> &RawAggregatePublicKey { pub fn as_raw(&self) -> &RawAggregatePublicKey {
&self.0 &self.0
} }

View File

@ -36,6 +36,12 @@ impl AggregateSignature {
} }
} }
/// Add (aggregate) another `AggregateSignature`.
pub fn add_aggregate(&mut self, agg_signature: &AggregateSignature) {
self.aggregate_signature
.add_aggregate(&agg_signature.aggregate_signature)
}
/// Verify the `AggregateSignature` against an `AggregatePublicKey`. /// Verify the `AggregateSignature` against an `AggregatePublicKey`.
/// ///
/// Only returns `true` if the set of keys in the `AggregatePublicKey` match the set of keys /// Only returns `true` if the set of keys in the `AggregatePublicKey` match the set of keys

View File

@ -0,0 +1,125 @@
use super::{fake_signature::FakeSignature, AggregatePublicKey, BLS_AGG_SIG_BYTE_SIZE};
use serde::de::{Deserialize, Deserializer};
use serde::ser::{Serialize, Serializer};
use serde_hex::{encode as hex_encode, PrefixedHexVisitor};
use ssz::{hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash};
/// A BLS aggregate signature.
///
/// This struct is a wrapper upon a base type and provides helper functions (e.g., SSZ
/// serialization).
#[derive(Debug, PartialEq, Clone, Default, Eq)]
pub struct FakeAggregateSignature {
bytes: Vec<u8>,
}
impl FakeAggregateSignature {
/// Creates a new all-zeros signature
pub fn new() -> Self {
Self::zero()
}
/// Creates a new all-zeros signature
pub fn zero() -> Self {
Self {
bytes: vec![0; BLS_AGG_SIG_BYTE_SIZE],
}
}
/// Does glorious nothing.
pub fn add(&mut self, _signature: &FakeSignature) {
// Do nothing.
}
/// Does glorious nothing.
pub fn add_aggregate(&mut self, _agg_sig: &FakeAggregateSignature) {
// Do nothing.
}
/// _Always_ returns `true`.
pub fn verify(
&self,
_msg: &[u8],
_domain: u64,
_aggregate_public_key: &AggregatePublicKey,
) -> bool {
true
}
/// _Always_ returns `true`.
pub fn verify_multiple(
&self,
_messages: &[&[u8]],
_domain: u64,
_aggregate_public_keys: &[&AggregatePublicKey],
) -> bool {
true
}
}
impl Encodable for FakeAggregateSignature {
fn ssz_append(&self, s: &mut SszStream) {
s.append_encoded_raw(&self.bytes);
}
}
impl Decodable for FakeAggregateSignature {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
if bytes.len() - i < BLS_AGG_SIG_BYTE_SIZE {
return Err(DecodeError::TooShort);
}
Ok((
FakeAggregateSignature {
bytes: bytes[i..(i + BLS_AGG_SIG_BYTE_SIZE)].to_vec(),
},
i + BLS_AGG_SIG_BYTE_SIZE,
))
}
}
impl Serialize for FakeAggregateSignature {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&hex_encode(ssz_encode(self)))
}
}
impl<'de> Deserialize<'de> for FakeAggregateSignature {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?;
let (obj, _) = <_>::ssz_decode(&bytes[..], 0)
.map_err(|e| serde::de::Error::custom(format!("invalid ssz ({:?})", e)))?;
Ok(obj)
}
}
impl TreeHash for FakeAggregateSignature {
fn hash_tree_root(&self) -> Vec<u8> {
hash(&self.bytes)
}
}
#[cfg(test)]
mod tests {
use super::super::{Keypair, Signature};
use super::*;
use ssz::ssz_encode;
#[test]
pub fn test_ssz_round_trip() {
let keypair = Keypair::random();
let mut original = FakeAggregateSignature::new();
original.add(&Signature::new(&[42, 42], 0, &keypair.sk));
let bytes = ssz_encode(&original);
let (decoded, _) = FakeAggregateSignature::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
}

View File

@ -0,0 +1,120 @@
use super::{PublicKey, SecretKey, BLS_SIG_BYTE_SIZE};
use hex::encode as hex_encode;
use serde::de::{Deserialize, Deserializer};
use serde::ser::{Serialize, Serializer};
use serde_hex::HexVisitor;
use ssz::{hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash};
/// A single BLS signature.
///
/// This struct is a wrapper upon a base type and provides helper functions (e.g., SSZ
/// serialization).
#[derive(Debug, PartialEq, Clone, Eq)]
pub struct FakeSignature {
bytes: Vec<u8>,
}
impl FakeSignature {
/// Creates a new all-zeros signature
pub fn new(_msg: &[u8], _domain: u64, _sk: &SecretKey) -> Self {
FakeSignature::zero()
}
/// Creates a new all-zeros signature
pub fn zero() -> Self {
Self {
bytes: vec![0; BLS_SIG_BYTE_SIZE],
}
}
/// Creates a new all-zeros signature
pub fn new_hashed(_x_real_hashed: &[u8], _x_imaginary_hashed: &[u8], _sk: &SecretKey) -> Self {
FakeSignature::zero()
}
/// _Always_ returns `true`.
pub fn verify(&self, _msg: &[u8], _domain: u64, _pk: &PublicKey) -> bool {
true
}
/// _Always_ returns true.
pub fn verify_hashed(
&self,
_x_real_hashed: &[u8],
_x_imaginary_hashed: &[u8],
_pk: &PublicKey,
) -> bool {
true
}
/// Returns a new empty signature.
pub fn empty_signature() -> Self {
FakeSignature::zero()
}
}
impl Encodable for FakeSignature {
fn ssz_append(&self, s: &mut SszStream) {
s.append_encoded_raw(&self.bytes);
}
}
impl Decodable for FakeSignature {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
if bytes.len() - i < BLS_SIG_BYTE_SIZE {
return Err(DecodeError::TooShort);
}
Ok((
FakeSignature {
bytes: bytes[i..(i + BLS_SIG_BYTE_SIZE)].to_vec(),
},
i + BLS_SIG_BYTE_SIZE,
))
}
}
impl TreeHash for FakeSignature {
fn hash_tree_root(&self) -> Vec<u8> {
hash(&self.bytes)
}
}
impl Serialize for FakeSignature {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&hex_encode(ssz_encode(self)))
}
}
impl<'de> Deserialize<'de> for FakeSignature {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let bytes = deserializer.deserialize_str(HexVisitor)?;
let (pubkey, _) = <_>::ssz_decode(&bytes[..], 0)
.map_err(|e| serde::de::Error::custom(format!("invalid ssz ({:?})", e)))?;
Ok(pubkey)
}
}
#[cfg(test)]
mod tests {
use super::super::Keypair;
use super::*;
use ssz::ssz_encode;
#[test]
pub fn test_ssz_round_trip() {
let keypair = Keypair::random();
let original = FakeSignature::new(&[42, 42], 0, &keypair.sk);
let bytes = ssz_encode(&original);
let (decoded, _) = FakeSignature::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
}

View File

@ -1,7 +1,9 @@
use super::{PublicKey, SecretKey}; use super::{PublicKey, SecretKey};
use serde_derive::{Deserialize, Serialize}; use serde_derive::{Deserialize, Serialize};
use std::fmt;
use std::hash::{Hash, Hasher};
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[derive(Debug, Clone, Eq, Serialize, Deserialize)]
pub struct Keypair { pub struct Keypair {
pub sk: SecretKey, pub sk: SecretKey,
pub pk: PublicKey, pub pk: PublicKey,
@ -19,3 +21,27 @@ impl Keypair {
self.pk.concatenated_hex_id() self.pk.concatenated_hex_id()
} }
} }
impl PartialEq for Keypair {
fn eq(&self, other: &Keypair) -> bool {
// Compare the underlying keys; writing `self == other` here would recurse infinitely.
self.pk == other.pk && self.sk == other.sk
}
}
impl Hash for Keypair {
/// Note: this is distinct from consensus serialization; it will produce a different hash.
///
/// This method uses the uncompressed bytes, which are much faster to obtain than the
/// compressed bytes required for consensus serialization.
///
/// Use `ssz::Encode` to obtain the bytes required for consensus hashing.
fn hash<H: Hasher>(&self, state: &mut H) {
self.pk.as_uncompressed_bytes().hash(state)
}
}
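Since `Keypair` is now `Eq + Hash` (hashing only the uncompressed public-key bytes), it can key the standard collections. A small sketch, not part of the diff:

use std::collections::HashSet;

fn unique_keypairs(keypairs: Vec<Keypair>) -> Vec<Keypair> {
    // Duplicate keypairs hash and compare equal, so they collapse to one entry.
    let unique: HashSet<Keypair> = keypairs.into_iter().collect();
    unique.into_iter().collect()
}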
impl fmt::Display for Keypair {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.pk)
}
}

View File

@ -2,18 +2,32 @@ extern crate bls_aggregates;
extern crate ssz; extern crate ssz;
mod aggregate_public_key; mod aggregate_public_key;
mod aggregate_signature;
mod keypair; mod keypair;
mod public_key; mod public_key;
mod secret_key; mod secret_key;
#[cfg(not(debug_assertions))]
mod aggregate_signature;
#[cfg(not(debug_assertions))]
mod signature; mod signature;
#[cfg(not(debug_assertions))]
pub use crate::aggregate_signature::AggregateSignature;
#[cfg(not(debug_assertions))]
pub use crate::signature::Signature;
#[cfg(debug_assertions)]
mod fake_aggregate_signature;
#[cfg(debug_assertions)]
mod fake_signature;
#[cfg(debug_assertions)]
pub use crate::fake_aggregate_signature::FakeAggregateSignature as AggregateSignature;
#[cfg(debug_assertions)]
pub use crate::fake_signature::FakeSignature as Signature;
pub use crate::aggregate_public_key::AggregatePublicKey; pub use crate::aggregate_public_key::AggregatePublicKey;
pub use crate::aggregate_signature::AggregateSignature;
pub use crate::keypair::Keypair; pub use crate::keypair::Keypair;
pub use crate::public_key::PublicKey; pub use crate::public_key::PublicKey;
pub use crate::secret_key::SecretKey; pub use crate::secret_key::SecretKey;
pub use crate::signature::Signature;
pub const BLS_AGG_SIG_BYTE_SIZE: usize = 96; pub const BLS_AGG_SIG_BYTE_SIZE: usize = 96;
pub const BLS_SIG_BYTE_SIZE: usize = 96; pub const BLS_SIG_BYTE_SIZE: usize = 96;

View File

@ -5,6 +5,7 @@ use serde::ser::{Serialize, Serializer};
use serde_hex::{encode as hex_encode, HexVisitor}; use serde_hex::{encode as hex_encode, HexVisitor};
use ssz::{decode, hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{decode, hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash};
use std::default; use std::default;
use std::fmt;
use std::hash::{Hash, Hasher}; use std::hash::{Hash, Hasher};
/// A single BLS signature. /// A single BLS signature.
@ -52,6 +53,12 @@ impl PublicKey {
} }
} }
impl fmt::Display for PublicKey {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.concatenated_hex_id())
}
}
impl default::Default for PublicKey { impl default::Default for PublicKey {
fn default() -> Self { fn default() -> Self {
let secret_key = SecretKey::random(); let secret_key = SecretKey::random();

View File

@ -33,10 +33,21 @@ impl BooleanBitfield {
} }
/// Create a new bitfield with the given length `initial_len` and all values set to `bit`. /// Create a new bitfield with the given length `initial_len` and all values set to `bit`.
pub fn from_elem(inital_len: usize, bit: bool) -> Self { ///
Self { /// Note: if `initial_len` is not a multiple of 8, the remaining bits will be set to `false`
0: BitVec::from_elem(inital_len, bit), /// regardless of `bit`.
pub fn from_elem(initial_len: usize, bit: bool) -> Self {
// BitVec can panic if we don't set the len to be a multiple of 8.
let full_len = ((initial_len + 7) / 8) * 8;
let mut bitfield = BitVec::from_elem(full_len, false);
if bit {
for i in 0..initial_len {
bitfield.set(i, true);
}
} }
Self { 0: bitfield }
} }
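A small illustration of the padding behaviour described in the doc comment (a hedged sketch that only uses existing `BooleanBitfield` methods):

#[test]
fn from_elem_pads_to_a_whole_byte() {
    let bits = BooleanBitfield::from_elem(5, true);
    // Bits 0..5 are set; the padding bits 5..8 remain false regardless of `bit`.
    assert_eq!(bits.num_set_bits(), 5);
    assert_eq!(bits.num_bytes(), 1);
}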
/// Create a new bitfield using the supplied `bytes` as input /// Create a new bitfield using the supplied `bytes` as input
@ -89,6 +100,11 @@ impl BooleanBitfield {
self.len() == 0 self.len() == 0
} }
/// Returns true if all bits are set to 0.
pub fn is_zero(&self) -> bool {
self.0.none()
}
/// Returns the number of bytes required to represent this bitfield. /// Returns the number of bytes required to represent this bitfield.
pub fn num_bytes(&self) -> usize { pub fn num_bytes(&self) -> usize {
self.to_bytes().len() self.to_bytes().len()
@ -104,6 +120,44 @@ impl BooleanBitfield {
pub fn to_bytes(&self) -> Vec<u8> { pub fn to_bytes(&self) -> Vec<u8> {
self.0.to_bytes() self.0.to_bytes()
} }
/// Compute the intersection (binary-and) of this bitfield with another. Lengths must match.
pub fn intersection(&self, other: &Self) -> Self {
let mut res = self.clone();
res.intersection_inplace(other);
res
}
/// Like `intersection` but in-place (updates `self`).
pub fn intersection_inplace(&mut self, other: &Self) {
self.0.intersect(&other.0);
}
/// Compute the union (binary-or) of this bitfield with another. Lengths must match.
pub fn union(&self, other: &Self) -> Self {
let mut res = self.clone();
res.union_inplace(other);
res
}
/// Like `union` but in-place (updates `self`).
pub fn union_inplace(&mut self, other: &Self) {
self.0.union(&other.0);
}
/// Compute the difference (binary-minus) of this bitfield with another. Lengths must match.
///
/// Computes `self - other`.
pub fn difference(&self, other: &Self) -> Self {
let mut res = self.clone();
res.difference_inplace(other);
res
}
/// Like `difference` but in-place (updates `self`).
pub fn difference_inplace(&mut self, other: &Self) {
self.0.difference(&other.0);
}
} }
impl default::Default for BooleanBitfield { impl default::Default for BooleanBitfield {
@ -125,10 +179,11 @@ impl cmp::PartialEq for BooleanBitfield {
/// Create a new bitfield that is a union of two other bitfields. /// Create a new bitfield that is a union of two other bitfields.
/// ///
/// For example `union(0101, 1000) == 1101` /// For example `union(0101, 1000) == 1101`
impl std::ops::BitAnd for BooleanBitfield { // TODO: length-independent intersection for BitAnd
impl std::ops::BitOr for BooleanBitfield {
type Output = Self; type Output = Self;
fn bitand(self, other: Self) -> Self { fn bitor(self, other: Self) -> Self {
let (biggest, smallest) = if self.len() > other.len() { let (biggest, smallest) = if self.len() > other.len() {
(&self, &other) (&self, &other)
} else { } else {
@ -419,10 +474,59 @@ mod tests {
} }
#[test] #[test]
fn test_bitand() { fn test_bitor() {
let a = BooleanBitfield::from_bytes(&vec![2, 8, 1][..]); let a = BooleanBitfield::from_bytes(&vec![2, 8, 1][..]);
let b = BooleanBitfield::from_bytes(&vec![4, 8, 16][..]); let b = BooleanBitfield::from_bytes(&vec![4, 8, 16][..]);
let c = BooleanBitfield::from_bytes(&vec![6, 8, 17][..]); let c = BooleanBitfield::from_bytes(&vec![6, 8, 17][..]);
assert_eq!(c, a & b); assert_eq!(c, a | b);
}
#[test]
fn test_is_zero() {
let yes_data: &[&[u8]] = &[&[], &[0], &[0, 0], &[0, 0, 0]];
for bytes in yes_data {
assert!(BooleanBitfield::from_bytes(bytes).is_zero());
}
let no_data: &[&[u8]] = &[&[1], &[6], &[0, 1], &[0, 0, 1], &[0, 0, 255]];
for bytes in no_data {
assert!(!BooleanBitfield::from_bytes(bytes).is_zero());
}
}
#[test]
fn test_intersection() {
let a = BooleanBitfield::from_bytes(&[0b1100, 0b0001]);
let b = BooleanBitfield::from_bytes(&[0b1011, 0b1001]);
let c = BooleanBitfield::from_bytes(&[0b1000, 0b0001]);
assert_eq!(a.intersection(&b), c);
assert_eq!(b.intersection(&a), c);
assert_eq!(a.intersection(&c), c);
assert_eq!(b.intersection(&c), c);
assert_eq!(a.intersection(&a), a);
assert_eq!(b.intersection(&b), b);
assert_eq!(c.intersection(&c), c);
}
#[test]
fn test_union() {
let a = BooleanBitfield::from_bytes(&[0b1100, 0b0001]);
let b = BooleanBitfield::from_bytes(&[0b1011, 0b1001]);
let c = BooleanBitfield::from_bytes(&[0b1111, 0b1001]);
assert_eq!(a.union(&b), c);
assert_eq!(b.union(&a), c);
assert_eq!(a.union(&a), a);
assert_eq!(b.union(&b), b);
assert_eq!(c.union(&c), c);
}
#[test]
fn test_difference() {
let a = BooleanBitfield::from_bytes(&[0b1100, 0b0001]);
let b = BooleanBitfield::from_bytes(&[0b1011, 0b1001]);
let a_b = BooleanBitfield::from_bytes(&[0b0100, 0b0000]);
let b_a = BooleanBitfield::from_bytes(&[0b0011, 0b1000]);
assert_eq!(a.difference(&b), a_b);
assert_eq!(b.difference(&a), b_a);
assert!(a.difference(&a).is_zero());
} }
} }

View File

@ -3,10 +3,13 @@ mod testing_slot_clock;
pub use crate::system_time_slot_clock::{Error as SystemTimeSlotClockError, SystemTimeSlotClock}; pub use crate::system_time_slot_clock::{Error as SystemTimeSlotClockError, SystemTimeSlotClock};
pub use crate::testing_slot_clock::{Error as TestingSlotClockError, TestingSlotClock}; pub use crate::testing_slot_clock::{Error as TestingSlotClockError, TestingSlotClock};
use std::time::Duration;
pub use types::Slot; pub use types::Slot;
pub trait SlotClock: Send + Sync { pub trait SlotClock: Send + Sync {
type Error; type Error;
fn present_slot(&self) -> Result<Option<Slot>, Self::Error>; fn present_slot(&self) -> Result<Option<Slot>, Self::Error>;
fn duration_to_next_slot(&self) -> Result<Option<Duration>, Self::Error>;
} }

View File

@ -13,6 +13,7 @@ pub enum Error {
/// Determines the present slot based upon the present system time. /// Determines the present slot based upon the present system time.
#[derive(Clone)] #[derive(Clone)]
pub struct SystemTimeSlotClock { pub struct SystemTimeSlotClock {
genesis_slot: Slot,
genesis_seconds: u64, genesis_seconds: u64,
slot_duration_seconds: u64, slot_duration_seconds: u64,
} }
@ -22,6 +23,7 @@ impl SystemTimeSlotClock {
/// ///
/// Returns an Error if `slot_duration_seconds == 0`. /// Returns an Error if `slot_duration_seconds == 0`.
pub fn new( pub fn new(
genesis_slot: Slot,
genesis_seconds: u64, genesis_seconds: u64,
slot_duration_seconds: u64, slot_duration_seconds: u64,
) -> Result<SystemTimeSlotClock, Error> { ) -> Result<SystemTimeSlotClock, Error> {
@ -29,6 +31,7 @@ impl SystemTimeSlotClock {
Err(Error::SlotDurationIsZero) Err(Error::SlotDurationIsZero)
} else { } else {
Ok(Self { Ok(Self {
genesis_slot,
genesis_seconds, genesis_seconds,
slot_duration_seconds, slot_duration_seconds,
}) })
@ -44,11 +47,17 @@ impl SlotClock for SystemTimeSlotClock {
let duration_since_epoch = syslot_time.duration_since(SystemTime::UNIX_EPOCH)?; let duration_since_epoch = syslot_time.duration_since(SystemTime::UNIX_EPOCH)?;
let duration_since_genesis = let duration_since_genesis =
duration_since_epoch.checked_sub(Duration::from_secs(self.genesis_seconds)); duration_since_epoch.checked_sub(Duration::from_secs(self.genesis_seconds));
match duration_since_genesis { match duration_since_genesis {
None => Ok(None), None => Ok(None),
Some(d) => Ok(slot_from_duration(self.slot_duration_seconds, d)), Some(d) => Ok(slot_from_duration(self.slot_duration_seconds, d)
.and_then(|s| Some(s + self.genesis_slot))),
} }
} }
fn duration_to_next_slot(&self) -> Result<Option<Duration>, Error> {
duration_to_next_slot(self.genesis_seconds, self.slot_duration_seconds)
}
} }
impl From<SystemTimeError> for Error { impl From<SystemTimeError> for Error {
@ -62,6 +71,30 @@ fn slot_from_duration(slot_duration_seconds: u64, duration: Duration) -> Option<
duration.as_secs().checked_div(slot_duration_seconds)?, duration.as_secs().checked_div(slot_duration_seconds)?,
)) ))
} }
// Calculate the duration until the start of the next slot.
fn duration_to_next_slot(
genesis_time: u64,
seconds_per_slot: u64,
) -> Result<Option<Duration>, Error> {
let now = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?;
let genesis_time = Duration::from_secs(genesis_time);
if now < genesis_time {
return Ok(None);
}
let since_genesis = now - genesis_time;
let elapsed_slots = since_genesis.as_secs() / seconds_per_slot;
let next_slot_start_seconds = (elapsed_slots + 1)
.checked_mul(seconds_per_slot)
.expect("Next slot time should not overflow u64");
let time_to_next_slot = Duration::from_secs(next_slot_start_seconds) - since_genesis;
Ok(Some(time_to_next_slot))
}
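A quick worked example of the arithmetic above (hypothetical numbers, restating the formula rather than calling the clock):

#[test]
fn next_slot_arithmetic_example() {
    // 6-second slots, genesis 15 seconds ago: two full slots have elapsed and
    // the next slot starts at 18 seconds, i.e. 3 seconds from now.
    let seconds_per_slot = 6u64;
    let since_genesis = 15u64;
    let elapsed_slots = since_genesis / seconds_per_slot;
    let to_next_slot = (elapsed_slots + 1) * seconds_per_slot - since_genesis;
    assert_eq!(to_next_slot, 3);
}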
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
@ -74,6 +107,7 @@ mod tests {
#[test] #[test]
fn test_slot_now() { fn test_slot_now() {
let slot_time = 100; let slot_time = 100;
let genesis_slot = Slot::new(0);
let now = SystemTime::now(); let now = SystemTime::now();
let since_epoch = now.duration_since(SystemTime::UNIX_EPOCH).unwrap(); let since_epoch = now.duration_since(SystemTime::UNIX_EPOCH).unwrap();
@ -81,18 +115,21 @@ mod tests {
let genesis = since_epoch.as_secs() - slot_time * 89; let genesis = since_epoch.as_secs() - slot_time * 89;
let clock = SystemTimeSlotClock { let clock = SystemTimeSlotClock {
genesis_slot,
genesis_seconds: genesis, genesis_seconds: genesis,
slot_duration_seconds: slot_time, slot_duration_seconds: slot_time,
}; };
assert_eq!(clock.present_slot().unwrap(), Some(Slot::new(89))); assert_eq!(clock.present_slot().unwrap(), Some(Slot::new(89)));
let clock = SystemTimeSlotClock { let clock = SystemTimeSlotClock {
genesis_slot,
genesis_seconds: since_epoch.as_secs(), genesis_seconds: since_epoch.as_secs(),
slot_duration_seconds: slot_time, slot_duration_seconds: slot_time,
}; };
assert_eq!(clock.present_slot().unwrap(), Some(Slot::new(0))); assert_eq!(clock.present_slot().unwrap(), Some(Slot::new(0)));
let clock = SystemTimeSlotClock { let clock = SystemTimeSlotClock {
genesis_slot,
genesis_seconds: since_epoch.as_secs() - slot_time * 42 - 5, genesis_seconds: since_epoch.as_secs() - slot_time * 42 - 5,
slot_duration_seconds: slot_time, slot_duration_seconds: slot_time,
}; };

View File

@ -1,5 +1,6 @@
use super::SlotClock; use super::SlotClock;
use std::sync::RwLock; use std::sync::RwLock;
use std::time::Duration;
use types::Slot; use types::Slot;
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
@ -32,6 +33,11 @@ impl SlotClock for TestingSlotClock {
let slot = *self.slot.read().expect("TestingSlotClock poisoned."); let slot = *self.slot.read().expect("TestingSlotClock poisoned.");
Ok(Some(Slot::new(slot))) Ok(Some(Slot::new(slot)))
} }
/// Always returns a duration of 1 second.
fn duration_to_next_slot(&self) -> Result<Option<Duration>, Error> {
Ok(Some(Duration::from_secs(1)))
}
} }
#[cfg(test)] #[cfg(test)]

View File

@ -12,27 +12,61 @@ syntax = "proto3";
package ethereum.beacon.rpc.v1; package ethereum.beacon.rpc.v1;
// Service that currently identifies a beacon node
service BeaconNodeService {
rpc Info(Empty) returns (NodeInfoResponse);
}
/// Service that handles block production
service BeaconBlockService { service BeaconBlockService {
// Requests a block to be signed from the beacon node.
rpc ProduceBeaconBlock(ProduceBeaconBlockRequest) returns (ProduceBeaconBlockResponse); rpc ProduceBeaconBlock(ProduceBeaconBlockRequest) returns (ProduceBeaconBlockResponse);
// Responds to the node the signed block to be published.
rpc PublishBeaconBlock(PublishBeaconBlockRequest) returns (PublishBeaconBlockResponse); rpc PublishBeaconBlock(PublishBeaconBlockRequest) returns (PublishBeaconBlockResponse);
} }
/// Service that provides the validator client with requisite knowledge about
// its public keys
service ValidatorService { service ValidatorService {
// rpc ValidatorAssignment(ValidatorAssignmentRequest) returns (ValidatorAssignmentResponse); // Gets the block proposer slot and committee slot that a validator needs to
rpc ProposeBlockSlot(ProposeBlockSlotRequest) returns (ProposeBlockSlotResponse); // perform work on.
rpc ValidatorIndex(PublicKey) returns (IndexResponse); rpc GetValidatorDuties(GetDutiesRequest) returns (GetDutiesResponse);
} }
message BeaconBlock { /// Service that handles validator attestations
uint64 slot = 1; service AttestationService {
bytes block_root = 2; rpc ProduceAttestationData(ProduceAttestationDataRequest) returns (ProduceAttestationDataResponse);
bytes randao_reveal = 3; rpc PublishAttestation(PublishAttestationRequest) returns (PublishAttestationResponse);
bytes signature = 4;
} }
/*
* Beacon Node Service Message
*/
message NodeInfoResponse {
string version = 1;
Fork fork = 2;
uint32 chain_id = 3;
uint64 genesis_time = 4;
uint64 genesis_slot = 5;
}
message Fork {
bytes previous_version = 1;
bytes current_version = 2;
uint64 epoch = 3;
}
message Empty {}
/*
* Block Production Service Messages
*/
// Validator requests an unsigned proposal. // Validator requests an unsigned proposal.
message ProduceBeaconBlockRequest { message ProduceBeaconBlockRequest {
uint64 slot = 1; uint64 slot = 1;
bytes randao_reveal = 2;
} }
// Beacon node returns an unsigned proposal. // Beacon node returns an unsigned proposal.
@ -51,44 +85,75 @@ message PublishBeaconBlockResponse {
bytes msg = 2; bytes msg = 2;
} }
// A validators duties for some epoch. message BeaconBlock {
// TODO: add shard duties. bytes ssz = 1;
message ValidatorAssignment {
oneof block_production_slot_oneof {
bool block_production_slot_none = 1;
uint64 block_production_slot = 2;
}
}
message ValidatorAssignmentRequest {
uint64 epoch = 1;
bytes validator_index = 2;
} }
/* /*
* Propose slot * Validator Service Messages
*/ */
message ProposeBlockSlotRequest { // Validator Assignment
uint64 epoch = 1;
uint64 validator_index = 2; // the public keys of the validators
message Validators {
repeated bytes public_keys = 1;
} }
message ProposeBlockSlotResponse { // Propose slot
oneof slot_oneof { message GetDutiesRequest {
uint64 epoch = 1;
Validators validators = 2;
}
message GetDutiesResponse {
repeated ActiveValidator active_validators = 1;
}
message ActiveValidator {
oneof duty_oneof {
bool none = 1; bool none = 1;
uint64 slot = 2; ValidatorDuty duty = 2;
} }
} }
message ValidatorDuty {
oneof block_oneof {
bool none = 1;
uint64 block_production_slot = 2;
}
uint64 attestation_slot = 3;
uint64 attestation_shard = 4;
uint64 committee_index = 5;
uint64 committee_len = 6;
}
/* /*
* Validator Assignment * Attestation Service Messages
*/ */
message PublicKey { message ProduceAttestationDataRequest {
bytes public_key = 1; uint64 slot = 1;
uint64 shard = 2;
} }
message IndexResponse { message ProduceAttestationDataResponse {
uint64 index = 1; AttestationData attestation_data = 1;
}
message PublishAttestationRequest {
Attestation attestation = 1;
}
message Attestation {
bytes ssz = 1;
}
message PublishAttestationResponse {
bool success = 1;
bytes msg = 2;
}
message AttestationData {
bytes ssz = 1;
} }


@ -1,7 +1,7 @@
[package] [package]
name = "validator_client" name = "validator_client"
version = "0.1.0" version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"] authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com>", "Luke Anderson <luke@lukeanderson.com.au>"]
edition = "2018" edition = "2018"
[[bin]] [[bin]]
@ -12,10 +12,11 @@ path = "src/main.rs"
name = "validator_client" name = "validator_client"
path = "src/lib.rs" path = "src/lib.rs"
[dependencies] [dependencies]
block_proposer = { path = "../eth2/block_proposer" } block_proposer = { path = "../eth2/block_proposer" }
attester = { path = "../eth2/attester" }
bls = { path = "../eth2/utils/bls" } bls = { path = "../eth2/utils/bls" }
ssz = { path = "../eth2/utils/ssz" }
clap = "2.32.0" clap = "2.32.0"
dirs = "1.0.3" dirs = "1.0.3"
grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] } grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] }
@ -26,5 +27,8 @@ types = { path = "../eth2/types" }
slog = "^2.2.3" slog = "^2.2.3"
slog-term = "^2.4.0" slog-term = "^2.4.0"
slog-async = "^2.3.0" slog-async = "^2.3.0"
ssz = { path = "../eth2/utils/ssz" } tokio = "0.1.18"
tokio-timer = "0.2.10"
error-chain = "0.12.0"
bincode = "^1.1.2" bincode = "^1.1.2"
futures = "0.1.25"


@ -0,0 +1,23 @@
//TODO: generalise these enums to the crate
use crate::block_producer::{BeaconNodeError, PublishOutcome};
use types::{Attestation, AttestationData, Slot};
/// Defines the methods required to produce and publish attestations on a Beacon Node. Abstracts the
/// actual beacon node.
pub trait BeaconNodeAttestation: Send + Sync {
/// Request that the node produces the required attestation data.
///
fn produce_attestation_data(
&self,
slot: Slot,
shard: u64,
) -> Result<AttestationData, BeaconNodeError>;
/// Request that the node publishes an attestation.
///
/// Returns the outcome of the publish attempt.
fn publish_attestation(
&self,
attestation: Attestation,
) -> Result<PublishOutcome, BeaconNodeError>;
}
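As a hedged illustration of how this trait can be satisfied outside of gRPC (for example in unit tests), here is a minimal in-memory double. The import paths mirror those used by the gRPC implementation below; `DummyAttestationNode` and its fields are hypothetical names, not part of this diff.

```rust
use std::sync::RwLock;

use crate::block_producer::{BeaconNodeError, PublishOutcome};
use types::{Attestation, AttestationData, Slot};

use super::beacon_node_attestation::BeaconNodeAttestation;

/// Hands back a canned `AttestationData` and records whatever it is asked to publish.
pub struct DummyAttestationNode {
    pub canned_data: AttestationData,
    pub published: RwLock<Vec<Attestation>>,
}

impl BeaconNodeAttestation for DummyAttestationNode {
    fn produce_attestation_data(
        &self,
        _slot: Slot,
        _shard: u64,
    ) -> Result<AttestationData, BeaconNodeError> {
        Ok(self.canned_data.clone())
    }

    fn publish_attestation(
        &self,
        attestation: Attestation,
    ) -> Result<PublishOutcome, BeaconNodeError> {
        // Record the attestation so a test can assert on it later.
        self.published.write().unwrap().push(attestation);
        Ok(PublishOutcome::Valid)
    }
}
```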


@ -0,0 +1,57 @@
use super::beacon_node_attestation::BeaconNodeAttestation;
use crate::block_producer::{BeaconNodeError, PublishOutcome};
use protos::services_grpc::AttestationServiceClient;
use ssz::{ssz_encode, Decodable};
use protos::services::{
Attestation as GrpcAttestation, ProduceAttestationDataRequest, PublishAttestationRequest,
};
use types::{Attestation, AttestationData, Slot};
impl BeaconNodeAttestation for AttestationServiceClient {
fn produce_attestation_data(
&self,
slot: Slot,
shard: u64,
) -> Result<AttestationData, BeaconNodeError> {
let mut req = ProduceAttestationDataRequest::new();
req.set_slot(slot.as_u64());
req.set_shard(shard);
let reply = self
.produce_attestation_data(&req)
.map_err(|err| BeaconNodeError::RemoteFailure(format!("{:?}", err)))?;
let (attestation_data, _index) =
AttestationData::ssz_decode(reply.get_attestation_data().get_ssz(), 0)
.map_err(|_| BeaconNodeError::DecodeFailure)?;
Ok(attestation_data)
}
fn publish_attestation(
&self,
attestation: Attestation,
) -> Result<PublishOutcome, BeaconNodeError> {
let mut req = PublishAttestationRequest::new();
let ssz = ssz_encode(&attestation);
let mut grpc_attestation = GrpcAttestation::new();
grpc_attestation.set_ssz(ssz);
req.set_attestation(grpc_attestation);
let reply = self
.publish_attestation(&req)
.map_err(|err| BeaconNodeError::RemoteFailure(format!("{:?}", err)))?;
if reply.get_success() {
Ok(PublishOutcome::Valid)
} else {
// TODO: distinguish between different errors
Ok(PublishOutcome::InvalidAttestation(
"Publish failed".to_string(),
))
}
}
}


@ -0,0 +1,165 @@
mod beacon_node_attestation;
mod grpc;
use std::sync::Arc;
use types::{ChainSpec, Domain, Fork};
//TODO: Move these higher up in the crate
use super::block_producer::{BeaconNodeError, PublishOutcome, ValidatorEvent};
use crate::signer::Signer;
use beacon_node_attestation::BeaconNodeAttestation;
use slog::{error, info, warn};
use ssz::TreeHash;
use types::{
AggregateSignature, Attestation, AttestationData, AttestationDataAndCustodyBit,
AttestationDuty, Bitfield,
};
//TODO: Group these errors at a crate level
#[derive(Debug, PartialEq)]
pub enum Error {
BeaconNodeError(BeaconNodeError),
}
impl From<BeaconNodeError> for Error {
fn from(e: BeaconNodeError) -> Error {
Error::BeaconNodeError(e)
}
}
/// This struct contains the logic for requesting and signing beacon attestations for a validator. The
/// validator can abstractly sign via the Signer trait object.
pub struct AttestationProducer<'a, B: BeaconNodeAttestation, S: Signer> {
/// The current fork.
pub fork: Fork,
/// The attestation duty to perform.
pub duty: AttestationDuty,
/// The chain specification.
pub spec: Arc<ChainSpec>,
/// The beacon node to connect to.
pub beacon_node: Arc<B>,
/// The signer to sign the block.
pub signer: &'a S,
}
impl<'a, B: BeaconNodeAttestation, S: Signer> AttestationProducer<'a, B, S> {
/// Handle outputs and results from attestation production.
pub fn handle_produce_attestation(&mut self, log: slog::Logger) {
match self.produce_attestation() {
Ok(ValidatorEvent::AttestationProduced(_slot)) => {
info!(log, "Attestation produced"; "Validator" => format!("{}", self.signer))
}
Err(e) => error!(log, "Attestation production error"; "Error" => format!("{:?}", e)),
Ok(ValidatorEvent::SignerRejection(_slot)) => {
error!(log, "Attestation production error"; "Error" => "Signer could not sign the attestation".to_string())
}
Ok(ValidatorEvent::SlashableAttestationNotProduced(_slot)) => {
error!(log, "Attestation production error"; "Error" => "Rejected the attestation as it could have been slashed".to_string())
}
Ok(ValidatorEvent::PublishAttestationFailed) => {
error!(log, "Attestation production error"; "Error" => "Beacon node was unable to publish an attestation".to_string())
}
Ok(ValidatorEvent::InvalidAttestation) => {
error!(log, "Attestation production error"; "Error" => "The signed attestation was invalid".to_string())
}
Ok(v) => {
warn!(log, "Unknown result for attestation production"; "Error" => format!("{:?}",v))
}
}
}
/// Produce an attestation, sign it and send it back
///
/// Assumes that an attestation is required at this slot (does not check the duties).
///
/// Ensures the message is not slashable.
///
/// !!! UNSAFE !!!
///
/// The slash-protection code is not yet implemented. There is zero protection against
/// slashing.
pub fn produce_attestation(&mut self) -> Result<ValidatorEvent, Error> {
let epoch = self.duty.slot.epoch(self.spec.slots_per_epoch);
let attestation = self
.beacon_node
.produce_attestation_data(self.duty.slot, self.duty.shard)?;
if self.safe_to_produce(&attestation) {
let domain = self.spec.get_domain(epoch, Domain::Attestation, &self.fork);
if let Some(attestation) = self.sign_attestation(attestation, self.duty, domain) {
match self.beacon_node.publish_attestation(attestation) {
Ok(PublishOutcome::InvalidAttestation(_string)) => {
Ok(ValidatorEvent::InvalidAttestation)
}
Ok(PublishOutcome::Valid) => {
Ok(ValidatorEvent::AttestationProduced(self.duty.slot))
}
Err(_) | Ok(_) => Ok(ValidatorEvent::PublishAttestationFailed),
}
} else {
Ok(ValidatorEvent::SignerRejection(self.duty.slot))
}
} else {
Ok(ValidatorEvent::SlashableAttestationNotProduced(
self.duty.slot,
))
}
}
/// Consumes an attestation, returning the attestation signed by the validator's private key.
///
/// Important: this function will not check to ensure the attestation is not slashable. This must be
/// done upstream.
fn sign_attestation(
&mut self,
attestation: AttestationData,
duties: AttestationDuty,
domain: u64,
) -> Option<Attestation> {
self.store_produce(&attestation);
// build the aggregate signature
let aggregate_signature = {
let message = AttestationDataAndCustodyBit {
data: attestation.clone(),
custody_bit: false,
}
.hash_tree_root();
let sig = self.signer.sign_message(&message, domain)?;
let mut agg_sig = AggregateSignature::new();
agg_sig.add(&sig);
agg_sig
};
let mut aggregation_bitfield = Bitfield::with_capacity(duties.committee_len);
let custody_bitfield = Bitfield::with_capacity(duties.committee_len);
aggregation_bitfield.set(duties.committee_index, true);
Some(Attestation {
aggregation_bitfield,
data: attestation,
custody_bitfield,
aggregate_signature,
})
}
/// Returns `true` if signing an attestation is safe (non-slashable).
///
/// !!! UNSAFE !!!
///
/// Important: this function is presently stubbed-out. It provides ZERO SAFETY.
fn safe_to_produce(&self, _attestation: &AttestationData) -> bool {
//TODO: Implement slash protection
true
}
/// Record that an attestation was produced so that slashable votes may not be made in the future.
///
/// !!! UNSAFE !!!
///
/// Important: this function is presently stubbed-out. It provides ZERO SAFETY.
fn store_produce(&mut self, _attestation: &AttestationData) {
// TODO: Implement slash protection
}
}
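The producer above is generic over its node and signer, so wiring up a single attestation round is short. The sketch below assumes a `BeaconNodeAttestation` implementation (such as the gRPC client in this diff) and a `Signer` are already in hand, and that it sits alongside the module above so `AttestationProducer`, `BeaconNodeAttestation` and `Signer` are in scope; `attest_once` is an illustrative helper, not a function from this changeset.

```rust
use std::sync::Arc;

use types::{AttestationDuty, ChainSpec, Fork};

/// Run one attestation round for a validator and log the outcome.
fn attest_once<B: BeaconNodeAttestation, S: Signer>(
    fork: Fork,
    duty: AttestationDuty,
    spec: Arc<ChainSpec>,
    beacon_node: Arc<B>,
    signer: &S,
    log: slog::Logger,
) {
    let mut producer = AttestationProducer {
        fork,
        duty,
        spec,
        beacon_node,
        signer,
    };
    producer.handle_produce_attestation(log);
}
```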


@ -0,0 +1,31 @@
use types::{BeaconBlock, Signature, Slot};
#[derive(Debug, PartialEq, Clone)]
pub enum BeaconNodeError {
RemoteFailure(String),
DecodeFailure,
}
#[derive(Debug, PartialEq, Clone)]
pub enum PublishOutcome {
Valid,
InvalidBlock(String),
InvalidAttestation(String),
}
/// Defines the methods required to produce and publish blocks on a Beacon Node. Abstracts the
/// actual beacon node.
pub trait BeaconNodeBlock: Send + Sync {
/// Request that the node produces a block.
///
/// Returns Ok(None) if the Beacon Node is unable to produce at the given slot.
fn produce_beacon_block(
&self,
slot: Slot,
randao_reveal: &Signature,
) -> Result<Option<BeaconBlock>, BeaconNodeError>;
/// Request that the node publishes a block.
///
/// Returns the outcome of the publish attempt.
fn publish_beacon_block(&self, block: BeaconBlock) -> Result<PublishOutcome, BeaconNodeError>;
}


@ -1,12 +1,13 @@
use block_proposer::{BeaconNode, BeaconNodeError, PublishOutcome}; use super::beacon_node_block::*;
use protos::services::{ use protos::services::{
BeaconBlock as GrpcBeaconBlock, ProduceBeaconBlockRequest, PublishBeaconBlockRequest, BeaconBlock as GrpcBeaconBlock, ProduceBeaconBlockRequest, PublishBeaconBlockRequest,
}; };
use protos::services_grpc::BeaconBlockServiceClient; use protos::services_grpc::BeaconBlockServiceClient;
use ssz::{decode, ssz_encode}; use ssz::{decode, ssz_encode};
use std::sync::Arc; use std::sync::Arc;
use types::{BeaconBlock, BeaconBlockBody, Eth1Data, Hash256, Signature, Slot}; use types::{BeaconBlock, Signature, Slot};
//TODO: Remove this new type. Do not need to wrap
/// A newtype designed to wrap the gRPC-generated service so the `BeaconNode` trait may be /// A newtype designed to wrap the gRPC-generated service so the `BeaconNode` trait may be
/// implemented upon it. /// implemented upon it.
pub struct BeaconBlockGrpcClient { pub struct BeaconBlockGrpcClient {
@ -19,7 +20,7 @@ impl BeaconBlockGrpcClient {
} }
} }
impl BeaconNode for BeaconBlockGrpcClient { impl BeaconNodeBlock for BeaconBlockGrpcClient {
/// Request a Beacon Node (BN) to produce a new block at the supplied slot. /// Request a Beacon Node (BN) to produce a new block at the supplied slot.
/// ///
/// Returns `None` if it is not possible to produce at the supplied slot. For example, if the /// Returns `None` if it is not possible to produce at the supplied slot. For example, if the
@ -27,46 +28,27 @@ impl BeaconNode for BeaconBlockGrpcClient {
fn produce_beacon_block( fn produce_beacon_block(
&self, &self,
slot: Slot, slot: Slot,
// TODO: use randao_reveal, when proto APIs have been updated. randao_reveal: &Signature,
_randao_reveal: &Signature,
) -> Result<Option<BeaconBlock>, BeaconNodeError> { ) -> Result<Option<BeaconBlock>, BeaconNodeError> {
// request a beacon block from the node
let mut req = ProduceBeaconBlockRequest::new(); let mut req = ProduceBeaconBlockRequest::new();
req.set_slot(slot.as_u64()); req.set_slot(slot.as_u64());
req.set_randao_reveal(ssz_encode(randao_reveal));
//TODO: Determine if we want an explicit timeout
let reply = self let reply = self
.client .client
.produce_beacon_block(&req) .produce_beacon_block(&req)
.map_err(|err| BeaconNodeError::RemoteFailure(format!("{:?}", err)))?; .map_err(|err| BeaconNodeError::RemoteFailure(format!("{:?}", err)))?;
// format the reply
if reply.has_block() { if reply.has_block() {
let block = reply.get_block(); let block = reply.get_block();
let ssz = block.get_ssz();
let signature = decode::<Signature>(block.get_signature()) let block = decode::<BeaconBlock>(&ssz).map_err(|_| BeaconNodeError::DecodeFailure)?;
.map_err(|_| BeaconNodeError::DecodeFailure)?;
let randao_reveal = decode::<Signature>(block.get_randao_reveal()) Ok(Some(block))
.map_err(|_| BeaconNodeError::DecodeFailure)?;
// TODO: this conversion is incomplete; fix it.
Ok(Some(BeaconBlock {
slot: Slot::new(block.get_slot()),
previous_block_root: Hash256::zero(),
state_root: Hash256::zero(),
signature,
body: BeaconBlockBody {
randao_reveal,
eth1_data: Eth1Data {
deposit_root: Hash256::zero(),
block_hash: Hash256::zero(),
},
proposer_slashings: vec![],
attester_slashings: vec![],
attestations: vec![],
deposits: vec![],
voluntary_exits: vec![],
transfers: vec![],
},
}))
} else { } else {
Ok(None) Ok(None)
} }
@ -79,12 +61,10 @@ impl BeaconNode for BeaconBlockGrpcClient {
fn publish_beacon_block(&self, block: BeaconBlock) -> Result<PublishOutcome, BeaconNodeError> { fn publish_beacon_block(&self, block: BeaconBlock) -> Result<PublishOutcome, BeaconNodeError> {
let mut req = PublishBeaconBlockRequest::new(); let mut req = PublishBeaconBlockRequest::new();
// TODO: this conversion is incomplete; fix it. let ssz = ssz_encode(&block);
let mut grpc_block = GrpcBeaconBlock::new(); let mut grpc_block = GrpcBeaconBlock::new();
grpc_block.set_slot(block.slot.as_u64()); grpc_block.set_ssz(ssz);
grpc_block.set_block_root(vec![0]);
grpc_block.set_randao_reveal(ssz_encode(&block.body.randao_reveal));
grpc_block.set_signature(ssz_encode(&block.signature));
req.set_block(grpc_block); req.set_block(grpc_block);
@ -94,7 +74,7 @@ impl BeaconNode for BeaconBlockGrpcClient {
.map_err(|err| BeaconNodeError::RemoteFailure(format!("{:?}", err)))?; .map_err(|err| BeaconNodeError::RemoteFailure(format!("{:?}", err)))?;
if reply.get_success() { if reply.get_success() {
Ok(PublishOutcome::ValidBlock) Ok(PublishOutcome::Valid)
} else { } else {
// TODO: distinguish between different errors // TODO: distinguish between different errors
Ok(PublishOutcome::InvalidBlock("Publish failed".to_string())) Ok(PublishOutcome::InvalidBlock("Publish failed".to_string()))


@ -0,0 +1,242 @@
mod beacon_node_block;
mod grpc;
use self::beacon_node_block::BeaconNodeBlock;
pub use self::beacon_node_block::{BeaconNodeError, PublishOutcome};
pub use self::grpc::BeaconBlockGrpcClient;
use crate::signer::Signer;
use slog::{error, info, warn};
use ssz::{SignedRoot, TreeHash};
use std::sync::Arc;
use types::{BeaconBlock, ChainSpec, Domain, Fork, Slot};
#[derive(Debug, PartialEq)]
pub enum Error {
BeaconNodeError(BeaconNodeError),
}
#[derive(Debug, PartialEq)]
pub enum ValidatorEvent {
/// A new block was produced.
BlockProduced(Slot),
/// A new attestation was produced.
AttestationProduced(Slot),
/// A block was not produced as it would have been slashable.
SlashableBlockNotProduced(Slot),
/// An attestation was not produced as it would have been slashable.
SlashableAttestationNotProduced(Slot),
/// The Beacon Node was unable to produce a block at that slot.
BeaconNodeUnableToProduceBlock(Slot),
/// The signer failed to sign the message.
SignerRejection(Slot),
/// Publishing an attestation failed.
PublishAttestationFailed,
/// Beacon node rejected the attestation.
InvalidAttestation,
}
/// This struct contains the logic for requesting and signing beacon blocks for a validator. The
/// validator can abstractly sign via the Signer trait object.
pub struct BlockProducer<'a, B: BeaconNodeBlock, S: Signer> {
/// The current fork.
pub fork: Fork,
/// The current slot to produce a block for.
pub slot: Slot,
/// The chain specification.
pub spec: Arc<ChainSpec>,
/// The beacon node to connect to.
pub beacon_node: Arc<B>,
/// The signer to sign the block.
pub signer: &'a S,
}
impl<'a, B: BeaconNodeBlock, S: Signer> BlockProducer<'a, B, S> {
/// Handle outputs and results from block production.
pub fn handle_produce_block(&mut self, log: slog::Logger) {
match self.produce_block() {
Ok(ValidatorEvent::BlockProduced(_slot)) => {
info!(log, "Block produced"; "Validator" => format!("{}", self.signer))
}
Err(e) => error!(log, "Block production error"; "Error" => format!("{:?}", e)),
Ok(ValidatorEvent::SignerRejection(_slot)) => {
error!(log, "Block production error"; "Error" => "Signer Could not sign the block".to_string())
}
Ok(ValidatorEvent::SlashableBlockNotProduced(_slot)) => {
error!(log, "Block production error"; "Error" => "Rejected the block as it could have been slashed".to_string())
}
Ok(ValidatorEvent::BeaconNodeUnableToProduceBlock(_slot)) => {
error!(log, "Block production error"; "Error" => "Beacon node was unable to produce a block".to_string())
}
Ok(v) => {
warn!(log, "Unknown result for block production"; "Error" => format!("{:?}",v))
}
}
}
/// Produce a block at some slot.
///
/// Assumes that a block is required at this slot (does not check the duties).
///
/// Ensures the message is not slashable.
///
/// !!! UNSAFE !!!
///
/// The slash-protection code is not yet implemented. There is zero protection against
/// slashing.
pub fn produce_block(&mut self) -> Result<ValidatorEvent, Error> {
let epoch = self.slot.epoch(self.spec.slots_per_epoch);
let message = epoch.hash_tree_root();
let randao_reveal = match self.signer.sign_message(
&message,
self.spec.get_domain(epoch, Domain::Randao, &self.fork),
) {
None => return Ok(ValidatorEvent::SignerRejection(self.slot)),
Some(signature) => signature,
};
if let Some(block) = self
.beacon_node
.produce_beacon_block(self.slot, &randao_reveal)?
{
if self.safe_to_produce(&block) {
let domain = self.spec.get_domain(epoch, Domain::BeaconBlock, &self.fork);
if let Some(block) = self.sign_block(block, domain) {
self.beacon_node.publish_beacon_block(block)?;
Ok(ValidatorEvent::BlockProduced(self.slot))
} else {
Ok(ValidatorEvent::SignerRejection(self.slot))
}
} else {
Ok(ValidatorEvent::SlashableBlockNotProduced(self.slot))
}
} else {
Ok(ValidatorEvent::BeaconNodeUnableToProduceBlock(self.slot))
}
}
/// Consumes a block, returning that block signed by the validator's private key.
///
/// Important: this function will not check to ensure the block is not slashable. This must be
/// done upstream.
fn sign_block(&mut self, mut block: BeaconBlock, domain: u64) -> Option<BeaconBlock> {
self.store_produce(&block);
match self.signer.sign_message(&block.signed_root()[..], domain) {
None => None,
Some(signature) => {
block.signature = signature;
Some(block)
}
}
}
/// Returns `true` if signing a block is safe (non-slashable).
///
/// !!! UNSAFE !!!
///
/// Important: this function is presently stubbed-out. It provides ZERO SAFETY.
fn safe_to_produce(&self, _block: &BeaconBlock) -> bool {
// TODO: ensure the producer doesn't produce slashable blocks.
// https://github.com/sigp/lighthouse/issues/160
true
}
/// Record that a block was produced so that slashable votes may not be made in the future.
///
/// !!! UNSAFE !!!
///
/// Important: this function is presently stubbed-out. It provides ZERO SAFETY.
fn store_produce(&mut self, _block: &BeaconBlock) {
// TODO: record this block production to prevent future slashings.
// https://github.com/sigp/lighthouse/issues/160
}
}
impl From<BeaconNodeError> for Error {
fn from(e: BeaconNodeError) -> Error {
Error::BeaconNodeError(e)
}
}
/* Old tests - Re-work for new logic
#[cfg(test)]
mod tests {
use super::test_utils::{EpochMap, LocalSigner, SimulatedBeaconNode};
use super::*;
use slot_clock::TestingSlotClock;
use types::{
test_utils::{SeedableRng, TestRandom, XorShiftRng},
Keypair,
};
// TODO: implement more thorough testing.
// https://github.com/sigp/lighthouse/issues/160
//
// These tests should serve as a good example for future tests.
#[test]
pub fn polling() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let spec = Arc::new(ChainSpec::foundation());
let slot_clock = Arc::new(TestingSlotClock::new(0));
let beacon_node = Arc::new(SimulatedBeaconNode::default());
let signer = Arc::new(LocalSigner::new(Keypair::random()));
let mut epoch_map = EpochMap::new(spec.slots_per_epoch);
let produce_slot = Slot::new(100);
let produce_epoch = produce_slot.epoch(spec.slots_per_epoch);
epoch_map.map.insert(produce_epoch, produce_slot);
let epoch_map = Arc::new(epoch_map);
let mut block_proposer = BlockProducer::new(
spec.clone(),
epoch_map.clone(),
slot_clock.clone(),
beacon_node.clone(),
signer.clone(),
);
// Configure responses from the BeaconNode.
beacon_node.set_next_produce_result(Ok(Some(BeaconBlock::random_for_test(&mut rng))));
beacon_node.set_next_publish_result(Ok(PublishOutcome::ValidBlock));
// One slot before production slot...
slot_clock.set_slot(produce_slot.as_u64() - 1);
assert_eq!(
block_proposer.poll(),
Ok(PollOutcome::BlockProductionNotRequired(produce_slot - 1))
);
// On the produce slot...
slot_clock.set_slot(produce_slot.as_u64());
assert_eq!(
block_proposer.poll(),
Ok(PollOutcome::BlockProduced(produce_slot.into()))
);
// Trying the same produce slot again...
slot_clock.set_slot(produce_slot.as_u64());
assert_eq!(
block_proposer.poll(),
Ok(PollOutcome::SlotAlreadyProcessed(produce_slot))
);
// One slot after the produce slot...
slot_clock.set_slot(produce_slot.as_u64() + 1);
assert_eq!(
block_proposer.poll(),
Ok(PollOutcome::BlockProductionNotRequired(produce_slot + 1))
);
// In an epoch without known duties...
let slot = (produce_epoch.as_u64() + 1) * spec.slots_per_epoch;
slot_clock.set_slot(slot);
assert_eq!(
block_proposer.poll(),
Ok(PollOutcome::ProducerDutiesUnknown(Slot::new(slot)))
);
}
}
*/
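Both producers sign through the crate's `Signer` trait, which is not shown in this diff; from the call sites above it appears to expose `sign_message(&self, message: &[u8], domain: u64) -> Option<Signature>` and to require `Display`. Under that assumption, a keypair-backed signer might look roughly like the sketch below. The old tests reference a `LocalSigner` test utility, but this version is only illustrative, and the `Signature::new(message, domain, secret_key)` constructor is assumed from the `bls` utility crate.

```rust
use std::fmt;

use types::{Keypair, Signature};

/// Signs locally with an in-memory keypair; offers no slash protection.
pub struct LocalSigner {
    keypair: Keypair,
}

impl LocalSigner {
    pub fn new(keypair: Keypair) -> Self {
        Self { keypair }
    }
}

impl fmt::Display for LocalSigner {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.keypair.identifier())
    }
}

impl Signer for LocalSigner {
    fn sign_message(&self, message: &[u8], domain: u64) -> Option<Signature> {
        // Assumed bls constructor: sign `message` under `domain` with the secret key.
        Some(Signature::new(message, domain, &self.keypair.sk))
    }
}
```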


@ -1,61 +0,0 @@
mod beacon_block_grpc_client;
// mod block_producer_service;
use block_proposer::{
BeaconNode, BlockProducer, DutiesReader, PollOutcome as BlockProducerPollOutcome, Signer,
};
use slog::{error, info, warn, Logger};
use slot_clock::SlotClock;
use std::time::Duration;
pub use self::beacon_block_grpc_client::BeaconBlockGrpcClient;
pub struct BlockProducerService<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> {
pub block_producer: BlockProducer<T, U, V, W>,
pub poll_interval_millis: u64,
pub log: Logger,
}
impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> BlockProducerService<T, U, V, W> {
/// Run a loop which polls the block producer each `poll_interval_millis` millseconds.
///
/// Logs the results of the polls.
pub fn run(&mut self) {
loop {
match self.block_producer.poll() {
Err(error) => {
error!(self.log, "Block producer poll error"; "error" => format!("{:?}", error))
}
Ok(BlockProducerPollOutcome::BlockProduced(slot)) => {
info!(self.log, "Produced block"; "slot" => slot)
}
Ok(BlockProducerPollOutcome::SlashableBlockNotProduced(slot)) => {
warn!(self.log, "Slashable block was not signed"; "slot" => slot)
}
Ok(BlockProducerPollOutcome::BlockProductionNotRequired(slot)) => {
info!(self.log, "Block production not required"; "slot" => slot)
}
Ok(BlockProducerPollOutcome::ProducerDutiesUnknown(slot)) => {
error!(self.log, "Block production duties unknown"; "slot" => slot)
}
Ok(BlockProducerPollOutcome::SlotAlreadyProcessed(slot)) => {
warn!(self.log, "Attempted to re-process slot"; "slot" => slot)
}
Ok(BlockProducerPollOutcome::BeaconNodeUnableToProduceBlock(slot)) => {
error!(self.log, "Beacon node unable to produce block"; "slot" => slot)
}
Ok(BlockProducerPollOutcome::SignerRejection(slot)) => {
error!(self.log, "The cryptographic signer refused to sign the block"; "slot" => slot)
}
Ok(BlockProducerPollOutcome::ValidatorIsUnknown(slot)) => {
error!(self.log, "The Beacon Node does not recognise the validator"; "slot" => slot)
}
Ok(BlockProducerPollOutcome::UnableToGetFork(slot)) => {
error!(self.log, "Unable to get a `Fork` struct to generate signature domains"; "slot" => slot)
}
};
std::thread::sleep(Duration::from_millis(self.poll_interval_millis));
}
}
}


@ -22,15 +22,14 @@ pub struct Config {
const DEFAULT_PRIVATE_KEY_FILENAME: &str = "private.key"; const DEFAULT_PRIVATE_KEY_FILENAME: &str = "private.key";
impl Default for Config { impl Default for Config {
/// Build a new configuration from defaults.
fn default() -> Self { fn default() -> Self {
let data_dir = { let data_dir = {
let home = dirs::home_dir().expect("Unable to determine home directory."); let home = dirs::home_dir().expect("Unable to determine home directory.");
home.join(".lighthouse-validator") home.join(".lighthouse-validator")
}; };
fs::create_dir_all(&data_dir)
.unwrap_or_else(|_| panic!("Unable to create {:?}", &data_dir));
let server = "localhost:50051".to_string(); let server = "localhost:5051".to_string();
let spec = ChainSpec::foundation(); let spec = ChainSpec::foundation();
@ -50,13 +49,14 @@ impl Config {
// Use the specified datadir, or default in the home directory // Use the specified datadir, or default in the home directory
if let Some(datadir) = args.value_of("datadir") { if let Some(datadir) = args.value_of("datadir") {
config.data_dir = PathBuf::from(datadir); config.data_dir = PathBuf::from(datadir);
fs::create_dir_all(&config.data_dir)
.unwrap_or_else(|_| panic!("Unable to create {:?}", &config.data_dir));
info!(log, "Using custom data dir: {:?}", &config.data_dir); info!(log, "Using custom data dir: {:?}", &config.data_dir);
}; };
fs::create_dir_all(&config.data_dir)
.unwrap_or_else(|_| panic!("Unable to create {:?}", &config.data_dir));
if let Some(srv) = args.value_of("server") { if let Some(srv) = args.value_of("server") {
//TODO: I don't think this parses correctly a server & port combo //TODO: Validate the server value, to ensure it makes sense.
config.server = srv.to_string(); config.server = srv.to_string();
info!(log, "Using custom server: {:?}", &config.server); info!(log, "Using custom server: {:?}", &config.server);
}; };
@ -67,15 +67,21 @@ impl Config {
config.spec = match spec_str { config.spec = match spec_str {
"foundation" => ChainSpec::foundation(), "foundation" => ChainSpec::foundation(),
"few_validators" => ChainSpec::few_validators(), "few_validators" => ChainSpec::few_validators(),
"lighthouse_testnet" => ChainSpec::lighthouse_testnet(),
// Should be impossible due to clap's `possible_values(..)` function. // Should be impossible due to clap's `possible_values(..)` function.
_ => unreachable!(), _ => unreachable!(),
}; };
}; };
// Log configuration
info!(log, "";
"data_dir" => &config.data_dir.to_str(),
"server" => &config.server);
Ok(config) Ok(config)
} }
/// Try to load keys from validator_dir, returning None if none are found or an error. /// Try to load keys from validator_dir, returning None if none are found or an error.
#[allow(dead_code)]
pub fn fetch_keys(&self, log: &slog::Logger) -> Option<Vec<Keypair>> { pub fn fetch_keys(&self, log: &slog::Logger) -> Option<Vec<Keypair>> {
let key_pairs: Vec<Keypair> = fs::read_dir(&self.data_dir) let key_pairs: Vec<Keypair> = fs::read_dir(&self.data_dir)
.unwrap() .unwrap()
@ -139,6 +145,7 @@ impl Config {
} }
/// Saves a keypair to a file inside the appropriate validator directory. Returns the saved path filename. /// Saves a keypair to a file inside the appropriate validator directory. Returns the saved path filename.
#[allow(dead_code)]
pub fn save_key(&self, key: &Keypair) -> Result<PathBuf, Error> { pub fn save_key(&self, key: &Keypair) -> Result<PathBuf, Error> {
let validator_config_path = self.data_dir.join(key.identifier()); let validator_config_path = self.data_dir.join(key.identifier());
let key_path = validator_config_path.join(DEFAULT_PRIVATE_KEY_FILENAME); let key_path = validator_config_path.join(DEFAULT_PRIVATE_KEY_FILENAME);


@ -0,0 +1,20 @@
use super::EpochDuties;
use types::{Epoch, PublicKey};
#[derive(Debug, PartialEq, Clone)]
pub enum BeaconNodeDutiesError {
RemoteFailure(String),
}
/// Defines the methods required to obtain a validator's shuffling from a Beacon Node.
pub trait BeaconNodeDuties: Send + Sync {
/// Gets the duties for all validators.
///
/// Returns an `EpochDuties` map keyed by validator public key. The entry will be `None` for
/// validators that are not activated.
fn request_duties(
&self,
epoch: Epoch,
pub_keys: &[PublicKey],
) -> Result<EpochDuties, BeaconNodeDutiesError>;
}
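A hedged sketch of how this trait might be consumed once per epoch, feeding the `EpochDutiesMap` defined later in this diff; the `update_duties` helper and its exact signature are illustrative, not part of the changeset, and the crate-local types are assumed to be in scope.

```rust
use types::{Epoch, PublicKey};

/// Fetch duties for the tracked public keys and cache them for `epoch`.
fn update_duties<B: BeaconNodeDuties>(
    node: &B,
    duties_map: &mut EpochDutiesMap,
    epoch: Epoch,
    pub_keys: &[PublicKey],
) -> Result<(), BeaconNodeDutiesError> {
    let duties = node.request_duties(epoch, pub_keys)?;
    // `EpochDutiesMap` derefs to `HashMap<Epoch, EpochDuties>`, so the plain
    // `HashMap::insert` is available here.
    duties_map.insert(epoch, duties);
    Ok(())
}
```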


@ -1,90 +1,132 @@
use block_proposer::{DutiesReader, DutiesReaderError};
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::RwLock; use std::fmt;
use types::{Epoch, Fork, Slot}; use std::ops::{Deref, DerefMut};
use types::{AttestationDuty, Epoch, PublicKey, Slot};
/// When work needs to be performed by a validator, this type is given back to the main service
/// which indicates all the information required to process the work.
///
/// Note: This is calculated per slot, so a validator knows which slot is related to this struct.
#[derive(Debug, Clone)]
pub struct WorkInfo {
/// Validator needs to produce a block.
pub produce_block: bool,
/// Validator needs to produce an attestation. This supplies the required attestation data.
pub attestation_duty: Option<AttestationDuty>,
}
/// The information required for a validator to propose and attest during some epoch. /// The information required for a validator to propose and attest during some epoch.
/// ///
/// Generally obtained from a Beacon Node, this information contains the validators canonical index /// Generally obtained from a Beacon Node, this information contains the validators canonical index
/// (thier sequence in the global validator induction process) and the "shuffling" for that index /// (their sequence in the global validator induction process) and the "shuffling" for that index
/// for some epoch. /// for some epoch.
#[derive(Debug, PartialEq, Clone, Copy, Default)] #[derive(Debug, PartialEq, Clone, Copy, Default)]
pub struct EpochDuties { pub struct EpochDuty {
pub validator_index: u64,
pub block_production_slot: Option<Slot>, pub block_production_slot: Option<Slot>,
// Future shard info pub attestation_duty: AttestationDuty,
} }
impl EpochDuties { impl EpochDuty {
/// Returns `true` if the supplied `slot` is a slot in which the validator should produce a /// Returns `WorkInfo` if work needs to be done in the supplied `slot`
/// block. pub fn is_work_slot(&self, slot: Slot) -> Option<WorkInfo> {
pub fn is_block_production_slot(&self, slot: Slot) -> bool { // if validator is required to produce a slot return true
match self.block_production_slot { let produce_block = match self.block_production_slot {
Some(s) if s == slot => true, Some(s) if s == slot => true,
_ => false, _ => false,
};
// if the validator is required to attest to a shard, create the data
let mut attestation_duty = None;
if self.attestation_duty.slot == slot {
attestation_duty = Some(self.attestation_duty)
} }
if produce_block | attestation_duty.is_some() {
return Some(WorkInfo {
produce_block,
attestation_duty,
});
}
None
} }
} }
impl fmt::Display for EpochDuty {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut display_block = String::from("None");
if let Some(block_slot) = self.block_production_slot {
display_block = block_slot.to_string();
}
write!(
f,
"produce block slot: {}, attestation slot: {}, attestation shard: {}",
display_block, self.attestation_duty.slot, self.attestation_duty.shard
)
}
}
/// Maps validator public keys (many validators) to an `EpochDuty`.
pub type EpochDuties = HashMap<PublicKey, Option<EpochDuty>>;
pub enum EpochDutiesMapError { pub enum EpochDutiesMapError {
Poisoned, UnknownEpoch,
UnknownValidator,
} }
/// Maps an `epoch` to some `EpochDuties` for a single validator. /// Maps an `epoch` to some `EpochDuties` for a single validator.
pub struct EpochDutiesMap { pub struct EpochDutiesMap {
pub slots_per_epoch: u64, pub slots_per_epoch: u64,
pub map: RwLock<HashMap<Epoch, EpochDuties>>, pub map: HashMap<Epoch, EpochDuties>,
} }
impl EpochDutiesMap { impl EpochDutiesMap {
pub fn new(slots_per_epoch: u64) -> Self { pub fn new(slots_per_epoch: u64) -> Self {
Self { Self {
slots_per_epoch, slots_per_epoch,
map: RwLock::new(HashMap::new()), map: HashMap::new(),
} }
} }
pub fn get(&self, epoch: Epoch) -> Result<Option<EpochDuties>, EpochDutiesMapError> {
let map = self.map.read().map_err(|_| EpochDutiesMapError::Poisoned)?;
match map.get(&epoch) {
Some(duties) => Ok(Some(*duties)),
None => Ok(None),
}
}
pub fn insert(
&self,
epoch: Epoch,
epoch_duties: EpochDuties,
) -> Result<Option<EpochDuties>, EpochDutiesMapError> {
let mut map = self
.map
.write()
.map_err(|_| EpochDutiesMapError::Poisoned)?;
Ok(map.insert(epoch, epoch_duties))
}
} }
impl DutiesReader for EpochDutiesMap { // Expose the hashmap methods
fn is_block_production_slot(&self, slot: Slot) -> Result<bool, DutiesReaderError> { impl Deref for EpochDutiesMap {
type Target = HashMap<Epoch, EpochDuties>;
fn deref(&self) -> &Self::Target {
&self.map
}
}
impl DerefMut for EpochDutiesMap {
fn deref_mut(&mut self) -> &mut HashMap<Epoch, EpochDuties> {
&mut self.map
}
}
impl EpochDutiesMap {
/// Checks if the validator has work to do.
pub fn is_work_slot(
&self,
slot: Slot,
signer: &PublicKey,
) -> Result<Option<WorkInfo>, EpochDutiesMapError> {
let epoch = slot.epoch(self.slots_per_epoch); let epoch = slot.epoch(self.slots_per_epoch);
let map = self.map.read().map_err(|_| DutiesReaderError::Poisoned)?; let epoch_duties = self
let duties = map .map
.get(&epoch) .get(&epoch)
.ok_or_else(|| DutiesReaderError::UnknownEpoch)?; .ok_or_else(|| EpochDutiesMapError::UnknownEpoch)?;
Ok(duties.is_block_production_slot(slot)) if let Some(epoch_duty) = epoch_duties.get(signer) {
} if let Some(duty) = epoch_duty {
// Retrieves the duty for a validator at a given slot
fn fork(&self) -> Result<Fork, DutiesReaderError> { return Ok(duty.is_work_slot(slot));
// TODO: this is garbage data. } else {
// // the validator isn't active
// It will almost certainly cause signatures to fail verification. return Ok(None);
Ok(Fork { }
previous_version: [0; 4], } else {
current_version: [0; 4], // validator isn't known
epoch: Epoch::new(0), return Err(EpochDutiesMapError::UnknownValidator);
}) }
} }
} }
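Putting the pieces together, the per-slot dispatch that a duties service might perform over this map could look something like the sketch below. The two commented branches stand in for handing work to the `BlockProducer` and `AttestationProducer` earlier in this diff; `dispatch_work` is illustrative, and `EpochDutiesMap` / `EpochDutiesMapError` are assumed to be in scope from the module above.

```rust
use types::{PublicKey, Slot};

/// Decide what, if anything, a validator must do at `slot`.
fn dispatch_work(duties_map: &EpochDutiesMap, slot: Slot, signer: &PublicKey) {
    match duties_map.is_work_slot(slot, signer) {
        Ok(Some(work)) => {
            if work.produce_block {
                // Hand `slot` to a BlockProducer.
            }
            if let Some(_duty) = work.attestation_duty {
                // Hand `_duty` to an AttestationProducer.
            }
        }
        Ok(None) => { /* known validator, but idle at this slot */ }
        Err(EpochDutiesMapError::UnknownEpoch) => { /* duties not fetched yet */ }
        Err(EpochDutiesMapError::UnknownValidator) => { /* key not tracked by the node */ }
    }
}
```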
