diff --git a/Cargo.toml b/Cargo.toml index a473e0391..b2efe55ad 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,7 @@ [workspace] members = [ - "eth2/attestation_validation", + "eth2/attester", + "eth2/block_producer", "eth2/genesis", "eth2/naive_fork_choice", "eth2/types", @@ -12,9 +13,10 @@ members = [ "eth2/utils/ssz", "eth2/utils/vec_shuffle", "eth2/validator_induction", - "eth2/validator_shuffling", "beacon_node", "beacon_node/db", + "beacon_node/beacon_chain", + "beacon_node/beacon_chain/test_harness", "protos", "validator_client", ] diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 44e6cda01..e5893195e 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -6,6 +6,7 @@ edition = "2018" [dependencies] bls = { path = "../eth2/utils/bls" } +beacon_chain = { path = "beacon_chain" } grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] } protobuf = "2.0.2" protos = { path = "../protos" } @@ -13,8 +14,11 @@ clap = "2.32.0" db = { path = "db" } dirs = "1.0.3" futures = "0.1.23" +genesis = { path = "../eth2/genesis" } slog = "^2.2.3" +slot_clock = { path = "../eth2/utils/slot_clock" } slog-term = "^2.4.0" slog-async = "^2.3.0" +types = { path = "../eth2/types" } ssz = { path = "../eth2/utils/ssz" } tokio = "0.1" diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml new file mode 100644 index 000000000..5c930403c --- /dev/null +++ b/beacon_node/beacon_chain/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "beacon_chain" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[dependencies] +block_producer = { path = "../../eth2/block_producer" } +bls = { path = "../../eth2/utils/bls" } +boolean-bitfield = { path = "../../eth2/utils/boolean-bitfield" } +db = { path = "../db" } +failure = "0.1" +failure_derive = "0.1" +genesis = { path = "../../eth2/genesis" } +hashing = { path = "../../eth2/utils/hashing" } +parking_lot = "0.7" +log = "0.4" +env_logger = "0.6" +serde = "1.0" +serde_derive = "1.0" +serde_json = "1.0" +slot_clock = { path = "../../eth2/utils/slot_clock" } +ssz = { path = "../../eth2/utils/ssz" } +types = { path = "../../eth2/types" } diff --git a/beacon_node/beacon_chain/src/attestation_aggregator.rs b/beacon_node/beacon_chain/src/attestation_aggregator.rs new file mode 100644 index 000000000..0f6aa388e --- /dev/null +++ b/beacon_node/beacon_chain/src/attestation_aggregator.rs @@ -0,0 +1,217 @@ +use std::collections::{HashMap, HashSet}; +use types::{ + beacon_state::CommitteesError, AggregateSignature, Attestation, AttestationData, BeaconState, + Bitfield, ChainSpec, FreeAttestation, Signature, +}; + +const PHASE_0_CUSTODY_BIT: bool = false; + +/// Provides the functionality to: +/// +/// - Recieve a `FreeAttestation` and aggregate it into an `Attestation` (or create a new if it +/// doesn't exist). +/// - Store all aggregated or created `Attestation`s. +/// - Produce a list of attestations that would be valid for inclusion in some `BeaconState` (and +/// therefore valid for inclusion in a `BeaconBlock`. +/// +/// Note: `Attestations` are stored in memory and never deleted. This is not scalable and must be +/// rectified in a future revision. +pub struct AttestationAggregator { + store: HashMap, Attestation>, +} + +pub struct Outcome { + pub valid: bool, + pub message: Message, +} + +pub enum Message { + /// The free attestation was added to an existing attestation. + Aggregated, + /// The free attestation has already been aggregated to an existing attestation. 
+ AggregationNotRequired, + /// The free attestation was transformed into a new attestation. + NewAttestationCreated, + /// The supplied `validator_index` is not in the committee for the given `shard` and `slot`. + BadValidatorIndex, + /// The given `signature` did not match the `pubkey` in the given + /// `state.validator_registry`. + BadSignature, + /// The given `slot` does not match the validators committee assignment. + BadSlot, + /// The given `shard` does not match the validators committee assignment. + BadShard, +} + +macro_rules! some_or_invalid { + ($expression: expr, $error: expr) => { + match $expression { + Some(x) => x, + None => { + return Ok(Outcome { + valid: false, + message: $error, + }); + } + } + }; +} + +impl AttestationAggregator { + /// Instantiates a new AttestationAggregator with an empty database. + pub fn new() -> Self { + Self { + store: HashMap::new(), + } + } + + /// Accepts some `FreeAttestation`, validates it and either aggregates it upon some existing + /// `Attestation` or produces a new `Attestation`. + /// + /// The "validation" provided is not complete, instead the following points are checked: + /// - The given `validator_index` is in the committee for the given `shard` for the given + /// `slot`. + /// - The signature is verified against that of the validator at `validator_index`. + pub fn process_free_attestation( + &mut self, + state: &BeaconState, + free_attestation: &FreeAttestation, + spec: &ChainSpec, + ) -> Result { + let (slot, shard, committee_index) = some_or_invalid!( + state.attestation_slot_and_shard_for_validator( + free_attestation.validator_index as usize, + spec, + )?, + Message::BadValidatorIndex + ); + + if free_attestation.data.slot != slot { + return Ok(Outcome { + valid: false, + message: Message::BadSlot, + }); + } + if free_attestation.data.shard != shard { + return Ok(Outcome { + valid: false, + message: Message::BadShard, + }); + } + + let signable_message = free_attestation.data.signable_message(PHASE_0_CUSTODY_BIT); + + let validator_record = some_or_invalid!( + state + .validator_registry + .get(free_attestation.validator_index as usize), + Message::BadValidatorIndex + ); + + if !free_attestation + .signature + .verify(&signable_message, &validator_record.pubkey) + { + return Ok(Outcome { + valid: false, + message: Message::BadSignature, + }); + } + + if let Some(existing_attestation) = self.store.get(&signable_message) { + if let Some(updated_attestation) = aggregate_attestation( + existing_attestation, + &free_attestation.signature, + committee_index as usize, + ) { + self.store.insert(signable_message, updated_attestation); + Ok(Outcome { + valid: true, + message: Message::Aggregated, + }) + } else { + Ok(Outcome { + valid: true, + message: Message::AggregationNotRequired, + }) + } + } else { + let mut aggregate_signature = AggregateSignature::new(); + aggregate_signature.add(&free_attestation.signature); + let mut aggregation_bitfield = Bitfield::new(); + aggregation_bitfield.set(committee_index as usize, true); + let new_attestation = Attestation { + data: free_attestation.data.clone(), + aggregation_bitfield, + custody_bitfield: Bitfield::new(), + aggregate_signature, + }; + self.store.insert(signable_message, new_attestation); + Ok(Outcome { + valid: true, + message: Message::NewAttestationCreated, + }) + } + } + + /// Returns all known attestations which are: + /// + /// - Valid for the given state + /// - Not already in `state.latest_attestations`. 
+ pub fn get_attestations_for_state( + &self, + state: &BeaconState, + spec: &ChainSpec, + ) -> Vec { + let mut known_attestation_data: HashSet = HashSet::new(); + + state.latest_attestations.iter().for_each(|attestation| { + known_attestation_data.insert(attestation.data.clone()); + }); + + self.store + .values() + .filter_map(|attestation| { + if state + .validate_attestation_without_signature(attestation, spec) + .is_ok() + && !known_attestation_data.contains(&attestation.data) + { + Some(attestation.clone()) + } else { + None + } + }) + .collect() + } +} + +/// Produces a new `Attestation` where: +/// +/// - `signature` is added to `Attestation.aggregate_signature` +/// - Attestation.aggregation_bitfield[committee_index]` is set to true. +fn aggregate_attestation( + existing_attestation: &Attestation, + signature: &Signature, + committee_index: usize, +) -> Option { + let already_signed = existing_attestation + .aggregation_bitfield + .get(committee_index) + .unwrap_or(false); + + if already_signed { + None + } else { + let mut aggregation_bitfield = existing_attestation.aggregation_bitfield.clone(); + aggregation_bitfield.set(committee_index, true); + let mut aggregate_signature = existing_attestation.aggregate_signature.clone(); + aggregate_signature.add(&signature); + + Some(Attestation { + aggregation_bitfield, + aggregate_signature, + ..existing_attestation.clone() + }) + } +} diff --git a/beacon_node/beacon_chain/src/attestation_targets.rs b/beacon_node/beacon_chain/src/attestation_targets.rs new file mode 100644 index 000000000..6585e4a47 --- /dev/null +++ b/beacon_node/beacon_chain/src/attestation_targets.rs @@ -0,0 +1,22 @@ +use std::collections::HashMap; +use types::Hash256; + +pub struct AttestationTargets { + map: HashMap, +} + +impl AttestationTargets { + pub fn new() -> Self { + Self { + map: HashMap::new(), + } + } + + pub fn get(&self, validator_index: u64) -> Option<&Hash256> { + self.map.get(&validator_index) + } + + pub fn insert(&mut self, validator_index: u64, block_hash: Hash256) -> Option { + self.map.insert(validator_index, block_hash) + } +} diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs new file mode 100644 index 000000000..9eaa7d7c1 --- /dev/null +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -0,0 +1,586 @@ +use db::{ + stores::{BeaconBlockStore, BeaconStateStore}, + ClientDB, DBError, +}; +use genesis::{genesis_beacon_block, genesis_beacon_state}; +use log::{debug, trace}; +use parking_lot::{RwLock, RwLockReadGuard}; +use slot_clock::SlotClock; +use ssz::ssz_encode; +use std::sync::Arc; +use types::{ + beacon_state::{BlockProcessingError, CommitteesError, SlotProcessingError}, + readers::{BeaconBlockReader, BeaconStateReader}, + AttestationData, BeaconBlock, BeaconBlockBody, BeaconState, ChainSpec, Eth1Data, + FreeAttestation, Hash256, PublicKey, Signature, +}; + +use crate::attestation_aggregator::{AttestationAggregator, Outcome as AggregationOutcome}; +use crate::attestation_targets::AttestationTargets; +use crate::block_graph::BlockGraph; +use crate::checkpoint::CheckPoint; + +#[derive(Debug, PartialEq)] +pub enum Error { + InsufficientValidators, + BadRecentBlockRoots, + CommitteesError(CommitteesError), + DBInconsistent(String), + DBError(String), +} + +#[derive(Debug, PartialEq)] +pub enum ValidBlock { + /// The block was sucessfully processed. + Processed, +} + +#[derive(Debug, PartialEq)] +pub enum InvalidBlock { + /// The block slot is greater than the present slot. 
+ FutureSlot, + /// The block state_root does not match the generated state. + StateRootMismatch, + /// The blocks parent_root is unknown. + ParentUnknown, + /// There was an error whilst advancing the parent state to the present slot. This condition + /// should not occur, it likely represents an internal error. + SlotProcessingError(SlotProcessingError), + /// The block could not be applied to the state, it is invalid. + PerBlockProcessingError(BlockProcessingError), +} + +#[derive(Debug, PartialEq)] +pub enum BlockProcessingOutcome { + /// The block was sucessfully validated. + ValidBlock(ValidBlock), + /// The block was not sucessfully validated. + InvalidBlock(InvalidBlock), +} + +pub struct BeaconChain { + pub block_store: Arc>, + pub state_store: Arc>, + pub slot_clock: U, + pub block_graph: BlockGraph, + pub attestation_aggregator: RwLock, + canonical_head: RwLock, + finalized_head: RwLock, + justified_head: RwLock, + pub state: RwLock, + pub latest_attestation_targets: RwLock, + pub spec: ChainSpec, +} + +impl BeaconChain +where + T: ClientDB, + U: SlotClock, +{ + /// Instantiate a new Beacon Chain, from genesis. + pub fn genesis( + state_store: Arc>, + block_store: Arc>, + slot_clock: U, + spec: ChainSpec, + ) -> Result { + if spec.initial_validators.is_empty() { + return Err(Error::InsufficientValidators); + } + + let genesis_state = genesis_beacon_state(&spec); + let state_root = genesis_state.canonical_root(); + state_store.put(&state_root, &ssz_encode(&genesis_state)[..])?; + + let genesis_block = genesis_beacon_block(state_root, &spec); + let block_root = genesis_block.canonical_root(); + block_store.put(&block_root, &ssz_encode(&genesis_block)[..])?; + + let block_graph = BlockGraph::new(); + block_graph.add_leaf(&Hash256::zero(), block_root.clone()); + + let finalized_head = RwLock::new(CheckPoint::new( + genesis_block.clone(), + block_root.clone(), + genesis_state.clone(), + state_root.clone(), + )); + let justified_head = RwLock::new(CheckPoint::new( + genesis_block.clone(), + block_root.clone(), + genesis_state.clone(), + state_root.clone(), + )); + let canonical_head = RwLock::new(CheckPoint::new( + genesis_block.clone(), + block_root.clone(), + genesis_state.clone(), + state_root.clone(), + )); + let attestation_aggregator = RwLock::new(AttestationAggregator::new()); + + let latest_attestation_targets = RwLock::new(AttestationTargets::new()); + + Ok(Self { + block_store, + state_store, + slot_clock, + block_graph, + attestation_aggregator, + state: RwLock::new(genesis_state.clone()), + justified_head, + finalized_head, + canonical_head, + latest_attestation_targets, + spec: spec, + }) + } + + /// Update the canonical head to some new values. + pub fn update_canonical_head( + &self, + new_beacon_block: BeaconBlock, + new_beacon_block_root: Hash256, + new_beacon_state: BeaconState, + new_beacon_state_root: Hash256, + ) { + let mut head = self.canonical_head.write(); + head.update( + new_beacon_block, + new_beacon_block_root, + new_beacon_state, + new_beacon_state_root, + ); + } + + /// Returns a read-lock guarded `CheckPoint` struct for reading the head (as chosen by the + /// fork-choice rule). + /// + /// It is important to note that the `beacon_state` returned may not match the present slot. It + /// is the state as it was when the head block was recieved, which could be some slots prior to + /// now. + pub fn head(&self) -> RwLockReadGuard { + self.canonical_head.read() + } + + /// Update the justified head to some new values. 
+ pub fn update_finalized_head( + &self, + new_beacon_block: BeaconBlock, + new_beacon_block_root: Hash256, + new_beacon_state: BeaconState, + new_beacon_state_root: Hash256, + ) { + let mut finalized_head = self.finalized_head.write(); + finalized_head.update( + new_beacon_block, + new_beacon_block_root, + new_beacon_state, + new_beacon_state_root, + ); + } + + /// Returns a read-lock guarded `CheckPoint` struct for reading the justified head (as chosen, + /// indirectly, by the fork-choice rule). + pub fn finalized_head(&self) -> RwLockReadGuard { + self.finalized_head.read() + } + + /// Advance the `self.state` `BeaconState` to the supplied slot. + /// + /// This will perform per_slot and per_epoch processing as required. + /// + /// The `previous_block_root` will be set to the root of the current head block (as determined + /// by the fork-choice rule). + /// + /// It is important to note that this is _not_ the state corresponding to the canonical head + /// block, instead it is that state which may or may not have had additional per slot/epoch + /// processing applied to it. + pub fn advance_state(&self, slot: u64) -> Result<(), SlotProcessingError> { + let state_slot = self.state.read().slot; + let head_block_root = self.head().beacon_block_root; + for _ in state_slot..slot { + self.state + .write() + .per_slot_processing(head_block_root.clone(), &self.spec)?; + } + Ok(()) + } + + /// Returns the the validator index (if any) for the given public key. + /// + /// Information is retrieved from the present `beacon_state.validator_registry`. + pub fn validator_index(&self, pubkey: &PublicKey) -> Option { + for (i, validator) in self + .head() + .beacon_state + .validator_registry + .iter() + .enumerate() + { + if validator.pubkey == *pubkey { + return Some(i); + } + } + None + } + + /// Returns the number of slots the validator has been required to propose. + /// + /// Returns `None` if the `validator_index` is invalid. + /// + /// Information is retrieved from the present `beacon_state.validator_registry`. + pub fn proposer_slots(&self, validator_index: usize) -> Option { + if let Some(validator) = self.state.read().validator_registry.get(validator_index) { + Some(validator.proposer_slots) + } else { + None + } + } + + /// Reads the slot clock, returns `None` if the slot is unavailable. + /// + /// The slot might be unavailable due to an error with the system clock, or if the present time + /// is before genesis (i.e., a negative slot). + /// + /// This is distinct to `present_slot`, which simply reads the latest state. If a + /// call to `read_slot_clock` results in a higher slot than a call to `present_slot`, + /// `self.state` should undergo per slot processing. + pub fn read_slot_clock(&self) -> Option { + match self.slot_clock.present_slot() { + Ok(some_slot) => some_slot, + _ => None, + } + } + + /// Returns slot of the present state. + /// + /// This is distinct to `read_slot_clock`, which reads from the actual system clock. If + /// `self.state` has not been transitioned it is possible for the system clock to be on a + /// different slot to what is returned from this call. + pub fn present_slot(&self) -> u64 { + self.state.read().slot + } + + /// Returns the block proposer for a given slot. + /// + /// Information is read from the present `beacon_state` shuffling, so only information from the + /// present and prior epoch is available. 
+ pub fn block_proposer(&self, slot: u64) -> Result { + let index = self + .state + .read() + .get_beacon_proposer_index(slot, &self.spec)?; + + Ok(index) + } + + /// Returns the justified slot for the present state. + pub fn justified_slot(&self) -> u64 { + self.state.read().justified_slot + } + + /// Returns the attestation slot and shard for a given validator index. + /// + /// Information is read from the current state, so only information from the present and prior + /// epoch is available. + pub fn validator_attestion_slot_and_shard( + &self, + validator_index: usize, + ) -> Result, CommitteesError> { + if let Some((slot, shard, _committee)) = self + .state + .read() + .attestation_slot_and_shard_for_validator(validator_index, &self.spec)? + { + Ok(Some((slot, shard))) + } else { + Ok(None) + } + } + + /// Produce an `AttestationData` that is valid for the present `slot` and given `shard`. + pub fn produce_attestation_data(&self, shard: u64) -> Result { + let justified_slot = self.justified_slot(); + let justified_block_root = self + .state + .read() + .get_block_root(justified_slot, &self.spec) + .ok_or_else(|| Error::BadRecentBlockRoots)? + .clone(); + + let epoch_boundary_root = self + .state + .read() + .get_block_root( + self.state.read().current_epoch_start_slot(&self.spec), + &self.spec, + ) + .ok_or_else(|| Error::BadRecentBlockRoots)? + .clone(); + + Ok(AttestationData { + slot: self.state.read().slot, + shard, + beacon_block_root: self.head().beacon_block_root.clone(), + epoch_boundary_root, + shard_block_root: Hash256::zero(), + latest_crosslink_root: Hash256::zero(), + justified_slot, + justified_block_root, + }) + } + + /// Validate a `FreeAttestation` and either: + /// + /// - Create a new `Attestation`. + /// - Aggregate it to an existing `Attestation`. + pub fn process_free_attestation( + &self, + free_attestation: FreeAttestation, + ) -> Result { + self.attestation_aggregator + .write() + .process_free_attestation(&self.state.read(), &free_attestation, &self.spec) + .map_err(|e| e.into()) + } + + /// Set the latest attestation target for some validator. + pub fn insert_latest_attestation_target(&self, validator_index: u64, block_root: Hash256) { + let mut targets = self.latest_attestation_targets.write(); + targets.insert(validator_index, block_root); + } + + /// Get the latest attestation target for some validator. + pub fn get_latest_attestation_target(&self, validator_index: u64) -> Option { + let targets = self.latest_attestation_targets.read(); + + match targets.get(validator_index) { + Some(hash) => Some(hash.clone()), + None => None, + } + } + + /// Dumps the entire canonical chain, from the head to genesis to a vector for analysis. + /// + /// This could be a very expensive operation and should only be done in testing/analysis + /// activities. + pub fn chain_dump(&self) -> Result, Error> { + let mut dump = vec![]; + + let mut last_slot = CheckPoint { + beacon_block: self.head().beacon_block.clone(), + beacon_block_root: self.head().beacon_block_root, + beacon_state: self.head().beacon_state.clone(), + beacon_state_root: self.head().beacon_state_root, + }; + + dump.push(last_slot.clone()); + + loop { + let beacon_block_root = last_slot.beacon_block.parent_root; + + if beacon_block_root == self.spec.zero_hash { + break; // Genesis has been reached. + } + + let beacon_block = self + .block_store + .get_deserialized(&beacon_block_root)? 
+ .ok_or_else(|| { + Error::DBInconsistent(format!("Missing block {}", beacon_block_root)) + })?; + let beacon_state_root = beacon_block.state_root; + let beacon_state = self + .state_store + .get_deserialized(&beacon_state_root)? + .ok_or_else(|| { + Error::DBInconsistent(format!("Missing state {}", beacon_state_root)) + })?; + + let slot = CheckPoint { + beacon_block, + beacon_block_root, + beacon_state, + beacon_state_root, + }; + + dump.push(slot.clone()); + last_slot = slot; + } + + Ok(dump) + } + + /// Accept some block and attempt to add it to block DAG. + /// + /// Will accept blocks from prior slots, however it will reject any block from a future slot. + pub fn process_block(&self, block: BeaconBlock) -> Result { + debug!("Processing block with slot {}...", block.slot()); + + let block_root = block.canonical_root(); + + let present_slot = self.present_slot(); + + if block.slot > present_slot { + return Ok(BlockProcessingOutcome::InvalidBlock( + InvalidBlock::FutureSlot, + )); + } + + // Load the blocks parent block from the database, returning invalid if that block is not + // found. + let parent_block_root = block.parent_root; + let parent_block = match self.block_store.get_reader(&parent_block_root)? { + Some(parent_root) => parent_root, + None => { + return Ok(BlockProcessingOutcome::InvalidBlock( + InvalidBlock::ParentUnknown, + )); + } + }; + + // Load the parent blocks state from the database, returning an error if it is not found. + // It is an error because if know the parent block we should also know the parent state. + let parent_state_root = parent_block.state_root(); + let parent_state = self + .state_store + .get_reader(&parent_state_root)? + .ok_or(Error::DBInconsistent(format!( + "Missing state {}", + parent_state_root + )))? + .into_beacon_state() + .ok_or(Error::DBInconsistent(format!( + "State SSZ invalid {}", + parent_state_root + )))?; + + // TODO: check the block proposer signature BEFORE doing a state transition. This will + // significantly lower exposure surface to DoS attacks. + + // Transition the parent state to the present slot. + let mut state = parent_state; + for _ in state.slot..present_slot { + if let Err(e) = state.per_slot_processing(parent_block_root.clone(), &self.spec) { + return Ok(BlockProcessingOutcome::InvalidBlock( + InvalidBlock::SlotProcessingError(e), + )); + } + } + + // Apply the recieved block to its parent state (which has been transitioned into this + // slot). + if let Err(e) = state.per_block_processing(&block, &self.spec) { + return Ok(BlockProcessingOutcome::InvalidBlock( + InvalidBlock::PerBlockProcessingError(e), + )); + } + + let state_root = state.canonical_root(); + + if block.state_root != state_root { + return Ok(BlockProcessingOutcome::InvalidBlock( + InvalidBlock::StateRootMismatch, + )); + } + + // Store the block and state. + self.block_store.put(&block_root, &ssz_encode(&block)[..])?; + self.state_store.put(&state_root, &ssz_encode(&state)[..])?; + + // Update the block DAG. + self.block_graph + .add_leaf(&parent_block_root, block_root.clone()); + + // If the parent block was the parent_block, automatically update the canonical head. + // + // TODO: this is a first-in-best-dressed scenario that is not ideal; fork_choice should be + // run instead. + if self.head().beacon_block_root == parent_block_root { + self.update_canonical_head( + block.clone(), + block_root.clone(), + state.clone(), + state_root.clone(), + ); + // Update the local state variable. 
+ *self.state.write() = state.clone(); + } + + Ok(BlockProcessingOutcome::ValidBlock(ValidBlock::Processed)) + } + + /// Produce a new block at the present slot. + /// + /// The produced block will not be inheriently valid, it must be signed by a block producer. + /// Block signing is out of the scope of this function and should be done by a separate program. + pub fn produce_block(&self, randao_reveal: Signature) -> Option<(BeaconBlock, BeaconState)> { + debug!("Producing block at slot {}...", self.state.read().slot); + + let mut state = self.state.read().clone(); + + trace!("Finding attestations for new block..."); + + let attestations = self + .attestation_aggregator + .read() + .get_attestations_for_state(&state, &self.spec); + + trace!( + "Inserting {} attestation(s) into new block.", + attestations.len() + ); + + let parent_root = state + .get_block_root(state.slot.saturating_sub(1), &self.spec)? + .clone(); + + let mut block = BeaconBlock { + slot: state.slot, + parent_root, + state_root: Hash256::zero(), // Updated after the state is calculated. + randao_reveal: randao_reveal, + eth1_data: Eth1Data { + // TODO: replace with real data + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + }, + signature: self.spec.empty_signature.clone(), // To be completed by a validator. + body: BeaconBlockBody { + proposer_slashings: vec![], + casper_slashings: vec![], + attestations: attestations, + custody_reseeds: vec![], + custody_challenges: vec![], + custody_responses: vec![], + deposits: vec![], + exits: vec![], + }, + }; + + state + .per_block_processing_without_verifying_block_signature(&block, &self.spec) + .ok()?; + + let state_root = state.canonical_root(); + + block.state_root = state_root; + + trace!("Block produced."); + + Some((block, state)) + } +} + +impl From for Error { + fn from(e: DBError) -> Error { + Error::DBError(e.message) + } +} + +impl From for Error { + fn from(e: CommitteesError) -> Error { + Error::CommitteesError(e) + } +} diff --git a/beacon_node/beacon_chain/src/block_graph.rs b/beacon_node/beacon_chain/src/block_graph.rs new file mode 100644 index 000000000..5af851243 --- /dev/null +++ b/beacon_node/beacon_chain/src/block_graph.rs @@ -0,0 +1,44 @@ +use parking_lot::{RwLock, RwLockReadGuard}; +use std::collections::HashSet; +use types::Hash256; + +/// Maintains a view of the block DAG, also known as the "blockchain" (except, it tracks multiple +/// chains eminating from a single root instead of just the head of some canonical chain). +/// +/// The BlockGraph does not store the blocks, instead it tracks the block hashes of blocks at the +/// tip of the DAG. It is out of the scope of the object to retrieve blocks. +/// +/// Presently, the DAG root (genesis block) is not tracked. +/// +/// The BlogGraph is thread-safe due to internal RwLocks. +pub struct BlockGraph { + pub leaves: RwLock>, +} + +impl BlockGraph { + /// Create a new block graph without any leaves. + pub fn new() -> Self { + Self { + leaves: RwLock::new(HashSet::new()), + } + } + /// Add a new leaf to the block hash graph. Returns `true` if the leaf was built upon another + /// leaf. + pub fn add_leaf(&self, parent: &Hash256, leaf: Hash256) -> bool { + let mut leaves = self.leaves.write(); + + if leaves.contains(parent) { + leaves.remove(parent); + leaves.insert(leaf); + true + } else { + leaves.insert(leaf); + false + } + } + + /// Returns a read-guarded HashSet of all leaf blocks. 
+ pub fn leaves(&self) -> RwLockReadGuard> { + self.leaves.read() + } +} diff --git a/beacon_node/beacon_chain/src/checkpoint.rs b/beacon_node/beacon_chain/src/checkpoint.rs new file mode 100644 index 000000000..bef97d2ed --- /dev/null +++ b/beacon_node/beacon_chain/src/checkpoint.rs @@ -0,0 +1,43 @@ +use serde_derive::Serialize; +use types::{BeaconBlock, BeaconState, Hash256}; + +/// Represents some block and it's associated state. Generally, this will be used for tracking the +/// head, justified head and finalized head. +#[derive(PartialEq, Clone, Serialize)] +pub struct CheckPoint { + pub beacon_block: BeaconBlock, + pub beacon_block_root: Hash256, + pub beacon_state: BeaconState, + pub beacon_state_root: Hash256, +} + +impl CheckPoint { + /// Create a new checkpoint. + pub fn new( + beacon_block: BeaconBlock, + beacon_block_root: Hash256, + beacon_state: BeaconState, + beacon_state_root: Hash256, + ) -> Self { + Self { + beacon_block, + beacon_block_root, + beacon_state, + beacon_state_root, + } + } + + /// Update all fields of the checkpoint. + pub fn update( + &mut self, + beacon_block: BeaconBlock, + beacon_block_root: Hash256, + beacon_state: BeaconState, + beacon_state_root: Hash256, + ) { + self.beacon_block = beacon_block; + self.beacon_block_root = beacon_block_root; + self.beacon_state = beacon_state; + self.beacon_state_root = beacon_state_root; + } +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs new file mode 100644 index 000000000..ef7273f36 --- /dev/null +++ b/beacon_node/beacon_chain/src/lib.rs @@ -0,0 +1,9 @@ +mod attestation_aggregator; +mod attestation_targets; +mod beacon_chain; +mod block_graph; +mod checkpoint; +mod lmd_ghost; + +pub use self::beacon_chain::{BeaconChain, Error}; +pub use self::checkpoint::CheckPoint; diff --git a/beacon_node/beacon_chain/src/lmd_ghost.rs b/beacon_node/beacon_chain/src/lmd_ghost.rs new file mode 100644 index 000000000..4d0a68c37 --- /dev/null +++ b/beacon_node/beacon_chain/src/lmd_ghost.rs @@ -0,0 +1,196 @@ +use crate::BeaconChain; +use db::{ + stores::{BeaconBlockAtSlotError, BeaconBlockStore}, + ClientDB, DBError, +}; +use slot_clock::{SlotClock, TestingSlotClockError}; +use std::collections::HashSet; +use std::sync::Arc; +use types::{ + readers::{BeaconBlockReader, BeaconStateReader}, + validator_registry::get_active_validator_indices, + Hash256, +}; + +#[derive(Debug, PartialEq)] +pub enum Error { + DBError(String), + MissingBeaconState(Hash256), + InvalidBeaconState(Hash256), + MissingBeaconBlock(Hash256), + InvalidBeaconBlock(Hash256), +} + +impl BeaconChain +where + T: ClientDB, + U: SlotClock, + Error: From<::Error>, +{ + /// Run the fork-choice rule on the current chain, updating the canonical head, if required. + pub fn fork_choice(&self) -> Result<(), Error> { + let present_head = &self.finalized_head().beacon_block_root; + + let new_head = self.slow_lmd_ghost(&self.finalized_head().beacon_block_root)?; + + if new_head != *present_head { + let block = self + .block_store + .get_deserialized(&new_head)? + .ok_or_else(|| Error::MissingBeaconBlock(new_head))?; + let block_root = block.canonical_root(); + + let state = self + .state_store + .get_deserialized(&block.state_root)? + .ok_or_else(|| Error::MissingBeaconState(block.state_root))?; + let state_root = state.canonical_root(); + + self.update_canonical_head(block, block_root, state, state_root); + } + + Ok(()) + } + + /// A very inefficient implementation of LMD ghost. 
+ pub fn slow_lmd_ghost(&self, start_hash: &Hash256) -> Result { + let start = self + .block_store + .get_reader(&start_hash)? + .ok_or(Error::MissingBeaconBlock(*start_hash))?; + + let start_state_root = start.state_root(); + + let state = self + .state_store + .get_reader(&start_state_root)? + .ok_or(Error::MissingBeaconState(start_state_root))? + .into_beacon_state() + .ok_or(Error::InvalidBeaconState(start_state_root))?; + + let active_validator_indices = + get_active_validator_indices(&state.validator_registry, start.slot()); + + let mut attestation_targets = Vec::with_capacity(active_validator_indices.len()); + for i in active_validator_indices { + if let Some(target) = self.get_latest_attestation_target(i as u64) { + attestation_targets.push(target); + } + } + + let mut head_hash = Hash256::zero(); + let mut head_vote_count = 0; + + loop { + let child_hashes_and_slots = get_child_hashes_and_slots( + &self.block_store, + &head_hash, + &self.block_graph.leaves(), + )?; + + if child_hashes_and_slots.len() == 0 { + break; + } + + for (child_hash, child_slot) in child_hashes_and_slots { + let vote_count = get_vote_count( + &self.block_store, + &attestation_targets[..], + &child_hash, + child_slot, + )?; + + if vote_count > head_vote_count { + head_hash = child_hash; + head_vote_count = vote_count; + } + } + } + + Ok(head_hash) + } +} + +/// Get the total number of votes for some given block root. +/// +/// The vote count is incrememented each time an attestation target votes for a block root. +fn get_vote_count( + block_store: &Arc>, + attestation_targets: &[Hash256], + block_root: &Hash256, + slot: u64, +) -> Result { + let mut count = 0; + for target in attestation_targets { + let (root_at_slot, _) = block_store + .block_at_slot(&block_root, slot)? + .ok_or(Error::MissingBeaconBlock(*block_root))?; + if root_at_slot == *target { + count += 1; + } + } + Ok(count) +} + +/// Starting from some `leaf_hashes`, recurse back down each branch until the `root_hash`, adding +/// each `block_root` and `slot` to a HashSet. +fn get_child_hashes_and_slots( + block_store: &Arc>, + root_hash: &Hash256, + leaf_hashes: &HashSet, +) -> Result, Error> { + let mut hash_set = HashSet::new(); + + for leaf_hash in leaf_hashes { + let mut current_hash = *leaf_hash; + + loop { + if let Some(block_reader) = block_store.get_reader(¤t_hash)? { + let parent_root = block_reader.parent_root(); + + let new_hash = hash_set.insert((current_hash, block_reader.slot())); + + // If the hash just added was already in the set, break the loop. + // + // In such a case, the present branch has merged with a branch that is already in + // the set. + if !new_hash { + break; + } + + // The branch is exhausted if the parent of this block is the root_hash. + if parent_root == *root_hash { + break; + } + + current_hash = parent_root.clone(); + } else { + return Err(Error::MissingBeaconBlock(current_hash)); + } + } + } + + Ok(hash_set) +} + +impl From for Error { + fn from(e: DBError) -> Error { + Error::DBError(e.message) + } +} + +impl From for Error { + fn from(e: BeaconBlockAtSlotError) -> Error { + match e { + BeaconBlockAtSlotError::UnknownBeaconBlock(h) => Error::MissingBeaconBlock(h), + BeaconBlockAtSlotError::InvalidBeaconBlock(h) => Error::InvalidBeaconBlock(h), + BeaconBlockAtSlotError::DBError(msg) => Error::DBError(msg), + } + } +} + +impl From for Error { + fn from(_: TestingSlotClockError) -> Error { + unreachable!(); // Testing clock never throws an error. 
+ } +} diff --git a/beacon_node/beacon_chain/test_harness/Cargo.toml b/beacon_node/beacon_chain/test_harness/Cargo.toml new file mode 100644 index 000000000..ce32b94c6 --- /dev/null +++ b/beacon_node/beacon_chain/test_harness/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "test_harness" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[[bench]] +name = "state_transition" +harness = false + +[dev-dependencies] +criterion = "0.2" + +[dependencies] +attester = { path = "../../../eth2/attester" } +beacon_chain = { path = "../../beacon_chain" } +block_producer = { path = "../../../eth2/block_producer" } +bls = { path = "../../../eth2/utils/bls" } +boolean-bitfield = { path = "../../../eth2/utils/boolean-bitfield" } +db = { path = "../../db" } +parking_lot = "0.7" +failure = "0.1" +failure_derive = "0.1" +genesis = { path = "../../../eth2/genesis" } +hashing = { path = "../../../eth2/utils/hashing" } +log = "0.4" +env_logger = "0.6.0" +rayon = "1.0" +serde = "1.0" +serde_derive = "1.0" +serde_json = "1.0" +slot_clock = { path = "../../../eth2/utils/slot_clock" } +ssz = { path = "../../../eth2/utils/ssz" } +types = { path = "../../../eth2/types" } diff --git a/beacon_node/beacon_chain/test_harness/benches/state_transition.rs b/beacon_node/beacon_chain/test_harness/benches/state_transition.rs new file mode 100644 index 000000000..013ecfd1e --- /dev/null +++ b/beacon_node/beacon_chain/test_harness/benches/state_transition.rs @@ -0,0 +1,68 @@ +use criterion::Criterion; +use criterion::{black_box, criterion_group, criterion_main, Benchmark}; +// use env_logger::{Builder, Env}; +use test_harness::BeaconChainHarness; +use types::{ChainSpec, Hash256}; + +fn mid_epoch_state_transition(c: &mut Criterion) { + // Builder::from_env(Env::default().default_filter_or("debug")).init(); + + let validator_count = 1000; + let mut rig = BeaconChainHarness::new(ChainSpec::foundation(), validator_count); + + let epoch_depth = (rig.spec.epoch_length * 2) + (rig.spec.epoch_length / 2); + + for _ in 0..epoch_depth { + rig.advance_chain_with_block(); + } + + let state = rig.beacon_chain.state.read().clone(); + + assert!((state.slot + 1) % rig.spec.epoch_length != 0); + + c.bench_function("mid-epoch state transition 10k validators", move |b| { + let state = state.clone(); + b.iter(|| { + let mut state = state.clone(); + black_box(state.per_slot_processing(Hash256::zero(), &rig.spec)) + }) + }); +} + +fn epoch_boundary_state_transition(c: &mut Criterion) { + // Builder::from_env(Env::default().default_filter_or("debug")).init(); + + let validator_count = 10000; + let mut rig = BeaconChainHarness::new(ChainSpec::foundation(), validator_count); + + let epoch_depth = rig.spec.epoch_length * 2; + + for _ in 0..(epoch_depth - 1) { + rig.advance_chain_with_block(); + } + + let state = rig.beacon_chain.state.read().clone(); + + assert_eq!((state.slot + 1) % rig.spec.epoch_length, 0); + + c.bench( + "routines", + Benchmark::new("routine_1", move |b| { + let state = state.clone(); + b.iter(|| { + let mut state = state.clone(); + black_box(black_box( + state.per_slot_processing(Hash256::zero(), &rig.spec), + )) + }) + }) + .sample_size(5), // sample size is low because function is sloooow. 
+ ); +} + +criterion_group!( + benches, + mid_epoch_state_transition, + epoch_boundary_state_transition +); +criterion_main!(benches); diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs new file mode 100644 index 000000000..64d18b4be --- /dev/null +++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs @@ -0,0 +1,235 @@ +use super::ValidatorHarness; +use beacon_chain::BeaconChain; +pub use beacon_chain::{CheckPoint, Error as BeaconChainError}; +use db::{ + stores::{BeaconBlockStore, BeaconStateStore}, + MemoryDB, +}; +use log::debug; +use rayon::prelude::*; +use slot_clock::TestingSlotClock; +use std::collections::HashSet; +use std::fs::File; +use std::io::prelude::*; +use std::iter::FromIterator; +use std::sync::Arc; +use types::{BeaconBlock, ChainSpec, FreeAttestation, Keypair, Validator}; + +/// The beacon chain harness simulates a single beacon node with `validator_count` validators connected +/// to it. Each validator is provided a borrow to the beacon chain, where it may read +/// information and submit blocks/attesations for processing. +/// +/// This test harness is useful for testing validator and internal state transition logic. It +/// is not useful for testing that multiple beacon nodes can reach consensus. +pub struct BeaconChainHarness { + pub db: Arc, + pub beacon_chain: Arc>, + pub block_store: Arc>, + pub state_store: Arc>, + pub validators: Vec, + pub spec: Arc, +} + +impl BeaconChainHarness { + /// Create a new harness with: + /// + /// - A keypair, `BlockProducer` and `Attester` for each validator. + /// - A new BeaconChain struct where the given validators are in the genesis. + pub fn new(mut spec: ChainSpec, validator_count: usize) -> Self { + let db = Arc::new(MemoryDB::open()); + let block_store = Arc::new(BeaconBlockStore::new(db.clone())); + let state_store = Arc::new(BeaconStateStore::new(db.clone())); + + let slot_clock = TestingSlotClock::new(spec.genesis_slot); + + // Remove the validators present in the spec (if any). + spec.initial_validators = Vec::with_capacity(validator_count); + spec.initial_balances = Vec::with_capacity(validator_count); + + debug!("Generating validator keypairs..."); + + let keypairs: Vec = (0..validator_count) + .collect::>() + .par_iter() + .map(|_| Keypair::random()) + .collect(); + + debug!("Creating validator records..."); + + spec.initial_validators = keypairs + .par_iter() + .map(|keypair| Validator { + pubkey: keypair.pk.clone(), + activation_slot: 0, + ..std::default::Default::default() + }) + .collect(); + + debug!("Setting validator balances..."); + + spec.initial_balances = spec + .initial_validators + .par_iter() + .map(|_| 32_000_000_000) // 32 ETH + .collect(); + + debug!("Creating the BeaconChain..."); + + // Create the Beacon Chain + let beacon_chain = Arc::new( + BeaconChain::genesis( + state_store.clone(), + block_store.clone(), + slot_clock, + spec.clone(), + ) + .unwrap(), + ); + + let spec = Arc::new(spec); + + debug!("Creating validator producer and attester instances..."); + + // Spawn the test validator instances. + let validators: Vec = keypairs + .iter() + .map(|keypair| { + ValidatorHarness::new(keypair.clone(), beacon_chain.clone(), spec.clone()) + }) + .collect(); + + debug!("Created {} ValidatorHarnesss", validators.len()); + + Self { + db, + beacon_chain, + block_store, + state_store, + validators, + spec, + } + } + + /// Move the `slot_clock` for the `BeaconChain` forward one slot. 
+ /// + /// This is the equivalent of advancing a system clock forward one `SLOT_DURATION`. + /// + /// Returns the new slot. + pub fn increment_beacon_chain_slot(&mut self) -> u64 { + let slot = self.beacon_chain.present_slot() + 1; + + debug!("Incrementing BeaconChain slot to {}.", slot); + + self.beacon_chain.slot_clock.set_slot(slot); + self.beacon_chain.advance_state(slot).unwrap(); + slot + } + + /// Gather the `FreeAttestation`s from the valiators. + /// + /// Note: validators will only produce attestations _once per slot_. So, if you call this twice + /// you'll only get attestations on the first run. + pub fn gather_free_attesations(&mut self) -> Vec { + let present_slot = self.beacon_chain.present_slot(); + + let attesting_validators = self + .beacon_chain + .state + .read() + .get_crosslink_committees_at_slot(present_slot, &self.spec) + .unwrap() + .iter() + .fold(vec![], |mut acc, (committee, _slot)| { + acc.append(&mut committee.clone()); + acc + }); + let attesting_validators: HashSet = + HashSet::from_iter(attesting_validators.iter().cloned()); + + let free_attestations: Vec = self + .validators + .par_iter_mut() + .enumerate() + .filter_map(|(i, validator)| { + if attesting_validators.contains(&i) { + // Advance the validator slot. + validator.set_slot(present_slot); + + // Prompt the validator to produce an attestation (if required). + validator.produce_free_attestation().ok() + } else { + None + } + }) + .collect(); + + debug!( + "Gathered {} FreeAttestations for slot {}.", + free_attestations.len(), + present_slot + ); + + free_attestations + } + + /// Get the block from the proposer for the slot. + /// + /// Note: the validator will only produce it _once per slot_. So, if you call this twice you'll + /// only get a block once. + pub fn produce_block(&mut self) -> BeaconBlock { + let present_slot = self.beacon_chain.present_slot(); + + let proposer = self.beacon_chain.block_proposer(present_slot).unwrap(); + + debug!( + "Producing block from validator #{} for slot {}.", + proposer, present_slot + ); + + // Ensure the validators slot clock is accurate. + self.validators[proposer].set_slot(present_slot); + self.validators[proposer].produce_block().unwrap() + } + + /// Advances the chain with a BeaconBlock and attestations from all validators. + /// + /// This is the ideal scenario for the Beacon Chain, 100% honest participation from + /// validators. + pub fn advance_chain_with_block(&mut self) { + self.increment_beacon_chain_slot(); + + // Produce a new block. + let block = self.produce_block(); + debug!("Submitting block for processing..."); + self.beacon_chain.process_block(block).unwrap(); + debug!("...block processed by BeaconChain."); + + debug!("Producing free attestations..."); + + // Produce new attestations. + let free_attestations = self.gather_free_attesations(); + + debug!("Processing free attestations..."); + + free_attestations.par_iter().for_each(|free_attestation| { + self.beacon_chain + .process_free_attestation(free_attestation.clone()) + .unwrap(); + }); + + debug!("Free attestations processed."); + } + + /// Dump all blocks and states from the canonical beacon chain. + pub fn chain_dump(&self) -> Result, BeaconChainError> { + self.beacon_chain.chain_dump() + } + + /// Write the output of `chain_dump` to a JSON file. 
+ pub fn dump_to_file(&self, filename: String, chain_dump: &Vec) { + let json = serde_json::to_string(chain_dump).unwrap(); + let mut file = File::create(filename).unwrap(); + file.write_all(json.as_bytes()) + .expect("Failed writing dump to file."); + } +} diff --git a/beacon_node/beacon_chain/test_harness/src/lib.rs b/beacon_node/beacon_chain/test_harness/src/lib.rs new file mode 100644 index 000000000..b04fc6996 --- /dev/null +++ b/beacon_node/beacon_chain/test_harness/src/lib.rs @@ -0,0 +1,5 @@ +mod beacon_chain_harness; +mod validator_harness; + +pub use self::beacon_chain_harness::BeaconChainHarness; +pub use self::validator_harness::ValidatorHarness; diff --git a/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_beacon_node.rs b/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_beacon_node.rs new file mode 100644 index 000000000..ed71f28d3 --- /dev/null +++ b/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_beacon_node.rs @@ -0,0 +1,121 @@ +use attester::{ + BeaconNode as AttesterBeaconNode, BeaconNodeError as NodeError, + PublishOutcome as AttestationPublishOutcome, +}; +use beacon_chain::BeaconChain; +use block_producer::{ + BeaconNode as BeaconBlockNode, BeaconNodeError as BeaconBlockNodeError, + PublishOutcome as BlockPublishOutcome, +}; +use db::ClientDB; +use parking_lot::RwLock; +use slot_clock::SlotClock; +use std::sync::Arc; +use types::{AttestationData, BeaconBlock, FreeAttestation, PublicKey, Signature}; + +// mod attester; +// mod producer; + +/// Connect directly to a borrowed `BeaconChain` instance so an attester/producer can request/submit +/// blocks/attestations. +/// +/// `BeaconBlock`s and `FreeAttestation`s are not actually published to the `BeaconChain`, instead +/// they are stored inside this struct. This is to allow one to benchmark the submission of the +/// block/attestation directly, or modify it before submission. +pub struct DirectBeaconNode { + beacon_chain: Arc>, + published_blocks: RwLock>, + published_attestations: RwLock>, +} + +impl DirectBeaconNode { + pub fn new(beacon_chain: Arc>) -> Self { + Self { + beacon_chain, + published_blocks: RwLock::new(vec![]), + published_attestations: RwLock::new(vec![]), + } + } + + /// Get the last published block (if any). + pub fn last_published_block(&self) -> Option { + Some(self.published_blocks.read().last()?.clone()) + } + + /// Get the last published attestation (if any). + pub fn last_published_free_attestation(&self) -> Option { + Some(self.published_attestations.read().last()?.clone()) + } +} + +impl AttesterBeaconNode for DirectBeaconNode { + fn produce_attestation_data( + &self, + _slot: u64, + shard: u64, + ) -> Result, NodeError> { + match self.beacon_chain.produce_attestation_data(shard) { + Ok(attestation_data) => Ok(Some(attestation_data)), + Err(e) => Err(NodeError::RemoteFailure(format!("{:?}", e))), + } + } + + fn publish_attestation_data( + &self, + free_attestation: FreeAttestation, + ) -> Result { + self.published_attestations.write().push(free_attestation); + Ok(AttestationPublishOutcome::ValidAttestation) + } +} + +impl BeaconBlockNode for DirectBeaconNode { + /// Requests the `proposer_nonce` from the `BeaconChain`. 
+ fn proposer_nonce(&self, pubkey: &PublicKey) -> Result { + let validator_index = self + .beacon_chain + .validator_index(pubkey) + .ok_or_else(|| BeaconBlockNodeError::RemoteFailure("pubkey unknown.".to_string()))?; + + self.beacon_chain + .proposer_slots(validator_index) + .ok_or_else(|| { + BeaconBlockNodeError::RemoteFailure("validator_index unknown.".to_string()) + }) + } + + /// Requests a new `BeaconBlock from the `BeaconChain`. + fn produce_beacon_block( + &self, + slot: u64, + randao_reveal: &Signature, + ) -> Result, BeaconBlockNodeError> { + let (block, _state) = self + .beacon_chain + .produce_block(randao_reveal.clone()) + .ok_or_else(|| { + BeaconBlockNodeError::RemoteFailure(format!("Did not produce block.")) + })?; + + if block.slot == slot { + Ok(Some(block)) + } else { + Err(BeaconBlockNodeError::RemoteFailure( + "Unable to produce at non-current slot.".to_string(), + )) + } + } + + /// A block is not _actually_ published to the `BeaconChain`, instead it is stored in the + /// `published_block_vec` and a successful `ValidBlock` is returned to the caller. + /// + /// The block may be retrieved and then applied to the `BeaconChain` manually, potentially in a + /// benchmarking scenario. + fn publish_beacon_block( + &self, + block: BeaconBlock, + ) -> Result { + self.published_blocks.write().push(block); + Ok(BlockPublishOutcome::ValidBlock) + } +} diff --git a/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_duties.rs b/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_duties.rs new file mode 100644 index 000000000..e724b3e55 --- /dev/null +++ b/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_duties.rs @@ -0,0 +1,69 @@ +use attester::{ + DutiesReader as AttesterDutiesReader, DutiesReaderError as AttesterDutiesReaderError, +}; +use beacon_chain::BeaconChain; +use block_producer::{ + DutiesReader as ProducerDutiesReader, DutiesReaderError as ProducerDutiesReaderError, +}; +use db::ClientDB; +use slot_clock::SlotClock; +use std::sync::Arc; +use types::PublicKey; + +/// Connects directly to a borrowed `BeaconChain` and reads attester/proposer duties directly from +/// it. 
+pub struct DirectDuties { + beacon_chain: Arc>, + pubkey: PublicKey, +} + +impl DirectDuties { + pub fn new(pubkey: PublicKey, beacon_chain: Arc>) -> Self { + Self { + beacon_chain, + pubkey, + } + } +} + +impl ProducerDutiesReader for DirectDuties { + fn is_block_production_slot(&self, slot: u64) -> Result { + let validator_index = self + .beacon_chain + .validator_index(&self.pubkey) + .ok_or_else(|| ProducerDutiesReaderError::UnknownValidator)?; + + match self.beacon_chain.block_proposer(slot) { + Ok(proposer) if proposer == validator_index => Ok(true), + Ok(_) => Ok(false), + Err(_) => Err(ProducerDutiesReaderError::UnknownEpoch), + } + } +} + +impl AttesterDutiesReader for DirectDuties { + fn validator_index(&self) -> Option { + match self.beacon_chain.validator_index(&self.pubkey) { + Some(index) => Some(index as u64), + None => None, + } + } + + fn attestation_shard(&self, slot: u64) -> Result, AttesterDutiesReaderError> { + if let Some(validator_index) = self.validator_index() { + match self + .beacon_chain + .validator_attestion_slot_and_shard(validator_index as usize) + { + Ok(Some((attest_slot, attest_shard))) if attest_slot == slot => { + Ok(Some(attest_shard)) + } + Ok(Some(_)) => Ok(None), + Ok(None) => Err(AttesterDutiesReaderError::UnknownEpoch), + Err(_) => panic!("Error when getting validator attestation shard."), + } + } else { + Err(AttesterDutiesReaderError::UnknownValidator) + } + } +} diff --git a/beacon_node/beacon_chain/test_harness/src/validator_harness/local_signer.rs b/beacon_node/beacon_chain/test_harness/src/validator_harness/local_signer.rs new file mode 100644 index 000000000..8e901b057 --- /dev/null +++ b/beacon_node/beacon_chain/test_harness/src/validator_harness/local_signer.rs @@ -0,0 +1,47 @@ +use attester::Signer as AttesterSigner; +use block_producer::Signer as BlockProposerSigner; +use std::sync::RwLock; +use types::{Keypair, Signature}; + +/// A test-only struct used to perform signing for a proposer or attester. +pub struct LocalSigner { + keypair: Keypair, + should_sign: RwLock, +} + +impl LocalSigner { + /// Produce a new TestSigner with signing enabled by default. + pub fn new(keypair: Keypair) -> Self { + Self { + keypair, + should_sign: RwLock::new(true), + } + } + + /// If set to `false`, the service will refuse to sign all messages. Otherwise, all messages + /// will be signed. + pub fn enable_signing(&self, enabled: bool) { + *self.should_sign.write().unwrap() = enabled; + } + + /// Sign some message. 
+ fn bls_sign(&self, message: &[u8]) -> Option { + Some(Signature::new(message, &self.keypair.sk)) + } +} + +impl BlockProposerSigner for LocalSigner { + fn sign_block_proposal(&self, message: &[u8]) -> Option { + self.bls_sign(message) + } + + fn sign_randao_reveal(&self, message: &[u8]) -> Option { + self.bls_sign(message) + } +} + +impl AttesterSigner for LocalSigner { + fn sign_attestation_message(&self, message: &[u8]) -> Option { + self.bls_sign(message) + } +} diff --git a/beacon_node/beacon_chain/test_harness/src/validator_harness/mod.rs b/beacon_node/beacon_chain/test_harness/src/validator_harness/mod.rs new file mode 100644 index 000000000..837334ade --- /dev/null +++ b/beacon_node/beacon_chain/test_harness/src/validator_harness/mod.rs @@ -0,0 +1,6 @@ +mod direct_beacon_node; +mod direct_duties; +mod local_signer; +mod validator_harness; + +pub use self::validator_harness::ValidatorHarness; diff --git a/beacon_node/beacon_chain/test_harness/src/validator_harness/validator_harness.rs b/beacon_node/beacon_chain/test_harness/src/validator_harness/validator_harness.rs new file mode 100644 index 000000000..986d843bb --- /dev/null +++ b/beacon_node/beacon_chain/test_harness/src/validator_harness/validator_harness.rs @@ -0,0 +1,133 @@ +use super::direct_beacon_node::DirectBeaconNode; +use super::direct_duties::DirectDuties; +use super::local_signer::LocalSigner; +use attester::PollOutcome as AttestationPollOutcome; +use attester::{Attester, Error as AttestationPollError}; +use beacon_chain::BeaconChain; +use block_producer::PollOutcome as BlockPollOutcome; +use block_producer::{BlockProducer, Error as BlockPollError}; +use db::MemoryDB; +use slot_clock::TestingSlotClock; +use std::sync::Arc; +use types::{BeaconBlock, ChainSpec, FreeAttestation, Keypair}; + +#[derive(Debug, PartialEq)] +pub enum BlockProduceError { + DidNotProduce(BlockPollOutcome), + PollError(BlockPollError), +} + +#[derive(Debug, PartialEq)] +pub enum AttestationProduceError { + DidNotProduce(AttestationPollOutcome), + PollError(AttestationPollError), +} + +/// A `BlockProducer` and `Attester` which sign using a common keypair. +/// +/// The test validator connects directly to a borrowed `BeaconChain` struct. It is useful for +/// testing that the core proposer and attester logic is functioning. Also for supporting beacon +/// chain tests. +pub struct ValidatorHarness { + pub block_producer: BlockProducer< + TestingSlotClock, + DirectBeaconNode, + DirectDuties, + LocalSigner, + >, + pub attester: Attester< + TestingSlotClock, + DirectBeaconNode, + DirectDuties, + LocalSigner, + >, + pub spec: Arc, + pub epoch_map: Arc>, + pub keypair: Keypair, + pub beacon_node: Arc>, + pub slot_clock: Arc, + pub signer: Arc, +} + +impl ValidatorHarness { + /// Create a new ValidatorHarness that signs with the given keypair, operates per the given spec and connects to the + /// supplied beacon node. + /// + /// A `BlockProducer` and `Attester` is created.. 
+ pub fn new( + keypair: Keypair, + beacon_chain: Arc>, + spec: Arc, + ) -> Self { + let slot_clock = Arc::new(TestingSlotClock::new(spec.genesis_slot)); + let signer = Arc::new(LocalSigner::new(keypair.clone())); + let beacon_node = Arc::new(DirectBeaconNode::new(beacon_chain.clone())); + let epoch_map = Arc::new(DirectDuties::new(keypair.pk.clone(), beacon_chain.clone())); + + let block_producer = BlockProducer::new( + spec.clone(), + keypair.pk.clone(), + epoch_map.clone(), + slot_clock.clone(), + beacon_node.clone(), + signer.clone(), + ); + + let attester = Attester::new( + epoch_map.clone(), + slot_clock.clone(), + beacon_node.clone(), + signer.clone(), + ); + + Self { + block_producer, + attester, + spec, + epoch_map, + keypair, + beacon_node, + slot_clock, + signer, + } + } + + /// Run the `poll` function on the `BlockProducer` and produce a block. + /// + /// An error is returned if the producer refuses to produce. + pub fn produce_block(&mut self) -> Result { + // Using `DirectBeaconNode`, the validator will always return sucessufully if it tries to + // publish a block. + match self.block_producer.poll() { + Ok(BlockPollOutcome::BlockProduced(_)) => {} + Ok(outcome) => return Err(BlockProduceError::DidNotProduce(outcome)), + Err(error) => return Err(BlockProduceError::PollError(error)), + }; + Ok(self + .beacon_node + .last_published_block() + .expect("Unable to obtain produced block.")) + } + + /// Run the `poll` function on the `Attester` and produce a `FreeAttestation`. + /// + /// An error is returned if the attester refuses to attest. + pub fn produce_free_attestation(&mut self) -> Result { + match self.attester.poll() { + Ok(AttestationPollOutcome::AttestationProduced(_)) => {} + Ok(outcome) => return Err(AttestationProduceError::DidNotProduce(outcome)), + Err(error) => return Err(AttestationProduceError::PollError(error)), + }; + Ok(self + .beacon_node + .last_published_free_attestation() + .expect("Unable to obtain produced attestation.")) + } + + /// Set the validators slot clock to the specified slot. + /// + /// The validators slot clock will always read this value until it is set to something else. 
+ pub fn set_slot(&mut self, slot: u64) { + self.slot_clock.set_slot(slot) + } +} diff --git a/beacon_node/beacon_chain/test_harness/tests/chain.rs b/beacon_node/beacon_chain/test_harness/tests/chain.rs new file mode 100644 index 000000000..c0b537695 --- /dev/null +++ b/beacon_node/beacon_chain/test_harness/tests/chain.rs @@ -0,0 +1,47 @@ +use env_logger::{Builder, Env}; +use log::debug; +use test_harness::BeaconChainHarness; +use types::ChainSpec; + +#[test] +#[ignore] +fn it_can_build_on_genesis_block() { + let mut spec = ChainSpec::foundation(); + spec.genesis_slot = spec.epoch_length * 8; + + /* + spec.shard_count = spec.shard_count / 8; + spec.target_committee_size = spec.target_committee_size / 8; + */ + let validator_count = 1000; + + let mut harness = BeaconChainHarness::new(spec, validator_count as usize); + + harness.advance_chain_with_block(); +} + +#[test] +#[ignore] +fn it_can_produce_past_first_epoch_boundary() { + Builder::from_env(Env::default().default_filter_or("debug")).init(); + + let validator_count = 100; + + debug!("Starting harness build..."); + + let mut harness = BeaconChainHarness::new(ChainSpec::foundation(), validator_count); + + debug!("Harness built, tests starting.."); + + let blocks = harness.spec.epoch_length * 3 + 1; + + for i in 0..blocks { + harness.advance_chain_with_block(); + debug!("Produced block {}/{}.", i, blocks); + } + let dump = harness.chain_dump().expect("Chain dump failed."); + + assert_eq!(dump.len() as u64, blocks + 1); // + 1 for genesis block. + + harness.dump_to_file("/tmp/chaindump.json".to_string(), &dump); +} diff --git a/beacon_node/db/src/stores/beacon_block_store.rs b/beacon_node/db/src/stores/beacon_block_store.rs index 6477573e8..fcdd595f5 100644 --- a/beacon_node/db/src/stores/beacon_block_store.rs +++ b/beacon_node/db/src/stores/beacon_block_store.rs @@ -6,8 +6,8 @@ use types::{readers::BeaconBlockReader, BeaconBlock, Hash256}; #[derive(Clone, Debug, PartialEq)] pub enum BeaconBlockAtSlotError { - UnknownBeaconBlock, - InvalidBeaconBlock, + UnknownBeaconBlock(Hash256), + InvalidBeaconBlock(Hash256), DBError(String), } @@ -26,6 +26,18 @@ impl BeaconBlockStore { Self { db } } + pub fn get_deserialized(&self, hash: &Hash256) -> Result, DBError> { + match self.get(&hash)? { + None => Ok(None), + Some(ssz) => { + let (block, _) = BeaconBlock::ssz_decode(&ssz, 0).map_err(|_| DBError { + message: "Bad BeaconBlock SSZ.".to_string(), + })?; + Ok(Some(block)) + } + } + } + /// Retuns an object implementing `BeaconBlockReader`, or `None` (if hash not known). /// /// Note: Presently, this function fully deserializes a `BeaconBlock` and returns that. 
In the @@ -73,7 +85,7 @@ impl BeaconBlockStore { current_hash = block_reader.parent_root(); } } else { - break Err(BeaconBlockAtSlotError::UnknownBeaconBlock); + break Err(BeaconBlockAtSlotError::UnknownBeaconBlock(current_hash)); } } } @@ -145,7 +157,7 @@ mod tests { db.put(DB_COLUMN, hash, ssz).unwrap(); assert_eq!( store.block_at_slot(other_hash, 42), - Err(BeaconBlockAtSlotError::UnknownBeaconBlock) + Err(BeaconBlockAtSlotError::UnknownBeaconBlock(*other_hash)) ); } @@ -243,7 +255,11 @@ mod tests { let ssz = bs.block_at_slot(&hashes[4], 6).unwrap(); assert_eq!(ssz, None); - let ssz = bs.block_at_slot(&Hash256::from("unknown".as_bytes()), 2); - assert_eq!(ssz, Err(BeaconBlockAtSlotError::UnknownBeaconBlock)); + let bad_hash = &Hash256::from("unknown".as_bytes()); + let ssz = bs.block_at_slot(bad_hash, 2); + assert_eq!( + ssz, + Err(BeaconBlockAtSlotError::UnknownBeaconBlock(*bad_hash)) + ); } } diff --git a/beacon_node/db/src/stores/beacon_state_store.rs b/beacon_node/db/src/stores/beacon_state_store.rs index a54e19249..ed22696cb 100644 --- a/beacon_node/db/src/stores/beacon_state_store.rs +++ b/beacon_node/db/src/stores/beacon_state_store.rs @@ -19,6 +19,18 @@ impl BeaconStateStore { Self { db } } + pub fn get_deserialized(&self, hash: &Hash256) -> Result, DBError> { + match self.get(&hash)? { + None => Ok(None), + Some(ssz) => { + let (state, _) = BeaconState::ssz_decode(&ssz, 0).map_err(|_| DBError { + message: "Bad State SSZ.".to_string(), + })?; + Ok(Some(state)) + } + } + } + /// Retuns an object implementing `BeaconStateReader`, or `None` (if hash not known). /// /// Note: Presently, this function fully deserializes a `BeaconState` and returns that. In the diff --git a/beacon_node/db/src/stores/mod.rs b/beacon_node/db/src/stores/mod.rs index c78d10dbf..44de7eed1 100644 --- a/beacon_node/db/src/stores/mod.rs +++ b/beacon_node/db/src/stores/mod.rs @@ -12,8 +12,6 @@ pub use self::beacon_state_store::BeaconStateStore; pub use self::pow_chain_store::PoWChainStore; pub use self::validator_store::{ValidatorStore, ValidatorStoreError}; -use super::bls; - pub const BLOCKS_DB_COLUMN: &str = "blocks"; pub const STATES_DB_COLUMN: &str = "states"; pub const POW_CHAIN_DB_COLUMN: &str = "powchain"; diff --git a/beacon_node/db/src/stores/validator_store.rs b/beacon_node/db/src/stores/validator_store.rs index 500bb50af..02e90dc5c 100644 --- a/beacon_node/db/src/stores/validator_store.rs +++ b/beacon_node/db/src/stores/validator_store.rs @@ -1,9 +1,9 @@ extern crate bytes; use self::bytes::{BufMut, BytesMut}; -use super::bls::PublicKey; use super::VALIDATOR_DB_COLUMN as DB_COLUMN; use super::{ClientDB, DBError}; +use bls::PublicKey; use ssz::{ssz_encode, Decodable}; use std::sync::Arc; @@ -80,8 +80,8 @@ impl ValidatorStore { #[cfg(test)] mod tests { use super::super::super::MemoryDB; - use super::super::bls::Keypair; use super::*; + use bls::Keypair; #[test] fn test_prefix_bytes() { diff --git a/beacon_node/src/beacon_chain/block_processing.rs b/beacon_node/src/beacon_chain/block_processing.rs deleted file mode 100644 index b24a6f1f8..000000000 --- a/beacon_node/src/beacon_chain/block_processing.rs +++ /dev/null @@ -1,72 +0,0 @@ -use super::{BeaconChain, ClientDB, DBError, SlotClock}; -use slot_clock::TestingSlotClockError; -use ssz::{ssz_encode, Encodable}; -use types::{readers::BeaconBlockReader, Hash256}; - -#[derive(Debug, PartialEq)] -pub enum Outcome { - FutureSlot, - Processed, - - NewCanonicalBlock, - NewReorgBlock, - NewForkBlock, -} - -#[derive(Debug, PartialEq)] -pub enum Error { - 
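// Sketch, not part of the diff: using the new `get_deserialized` helpers and the hash-carrying
// `UnknownBeaconBlock` variant introduced in the store hunks above. The lookup hash is
// arbitrary; `Hash256::from("unknown".as_bytes())` mirrors the existing test code.
use db::stores::{BeaconBlockAtSlotError, BeaconBlockStore, BeaconStateStore};
use db::MemoryDB;
use std::sync::Arc;
use types::Hash256;

fn inspect_stores() {
    let db = Arc::new(MemoryDB::open());
    let block_store = BeaconBlockStore::new(db.clone());
    let state_store = BeaconStateStore::new(db.clone());
    let some_hash = Hash256::from("unknown".as_bytes());

    // A fully decoded block, `None` if the hash is unknown, or an error if the stored SSZ is bad.
    match block_store.get_deserialized(&some_hash) {
        Ok(Some(block)) => println!("found a block at slot {}", block.slot),
        Ok(None) => println!("block hash is not in the database"),
        Err(_) => println!("database error"),
    }

    // States decode the same way.
    let _maybe_state = state_store.get_deserialized(&some_hash);

    // The ancestor walk now reports *which* hash it could not find.
    if let Err(BeaconBlockAtSlotError::UnknownBeaconBlock(missing)) =
        block_store.block_at_slot(&some_hash, 0)
    {
        println!("walk stopped at unknown block {:?}", missing);
    }
}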
DBError(String), - NotImplemented, - PresentSlotIsNone, -} - -impl BeaconChain -where - T: ClientDB, - U: SlotClock, - Error: From<::Error>, -{ - pub fn process_block(&mut self, block: &V) -> Result<(Outcome, Hash256), Error> - where - V: BeaconBlockReader + Encodable + Sized, - { - let block_root = block.canonical_root(); - - let present_slot = self - .slot_clock - .present_slot()? - .ok_or(Error::PresentSlotIsNone)?; - - // Block from future slots (i.e., greater than the present slot) should not be processed. - if block.slot() > present_slot { - return Ok((Outcome::FutureSlot, block_root)); - } - - // TODO: block processing has been removed. - // https://github.com/sigp/lighthouse/issues/98 - - // Update leaf blocks. - self.block_store.put(&block_root, &ssz_encode(block)[..])?; - if self.leaf_blocks.contains(&block.parent_root()) { - self.leaf_blocks.remove(&block.parent_root()); - } - if self.canonical_leaf_block == block.parent_root() { - self.canonical_leaf_block = block_root; - } - self.leaf_blocks.insert(block_root); - - Ok((Outcome::Processed, block_root)) - } -} - -impl From for Error { - fn from(e: DBError) -> Error { - Error::DBError(e.message) - } -} - -impl From for Error { - fn from(_: TestingSlotClockError) -> Error { - unreachable!(); // Testing clock never throws an error. - } -} diff --git a/beacon_node/src/beacon_chain/block_production.rs b/beacon_node/src/beacon_chain/block_production.rs deleted file mode 100644 index ba781d6e9..000000000 --- a/beacon_node/src/beacon_chain/block_production.rs +++ /dev/null @@ -1,75 +0,0 @@ -use super::{BeaconChain, ClientDB, DBError, SlotClock}; -use slot_clock::TestingSlotClockError; -use types::{ - readers::{BeaconBlockReader, BeaconStateReader}, - BeaconBlock, BeaconState, Hash256, -}; - -#[derive(Debug, PartialEq)] -pub enum Error { - DBError(String), - PresentSlotIsNone, -} - -impl BeaconChain -where - T: ClientDB, - U: SlotClock, - Error: From<::Error>, -{ - pub fn produce_block(&mut self) -> Result<(BeaconBlock, BeaconState), Error> { - /* - * Important: this code is a big stub and only exists to ensure that tests pass. - * - * https://github.com/sigp/lighthouse/issues/107 - */ - let present_slot = self - .slot_clock - .present_slot()? - .ok_or(Error::PresentSlotIsNone)?; - let parent_root = self.canonical_leaf_block; - let parent_block_reader = self - .block_store - .get_reader(&parent_root)? - .ok_or_else(|| Error::DBError("Block not found.".to_string()))?; - let parent_state_reader = self - .state_store - .get_reader(&parent_block_reader.state_root())? - .ok_or_else(|| Error::DBError("State not found.".to_string()))?; - - let parent_block = parent_block_reader - .into_beacon_block() - .ok_or_else(|| Error::DBError("Bad parent block SSZ.".to_string()))?; - let mut block = BeaconBlock { - slot: present_slot, - parent_root, - state_root: Hash256::zero(), // Updated after the state is calculated. - ..parent_block - }; - - let parent_state = parent_state_reader - .into_beacon_state() - .ok_or_else(|| Error::DBError("Bad parent block SSZ.".to_string()))?; - let state = BeaconState { - slot: present_slot, - ..parent_state - }; - let state_root = state.canonical_root(); - - block.state_root = state_root; - - Ok((block, state)) - } -} - -impl From for Error { - fn from(e: DBError) -> Error { - Error::DBError(e.message) - } -} - -impl From for Error { - fn from(_: TestingSlotClockError) -> Error { - unreachable!(); // Testing clock never throws an error. 
- } -} diff --git a/beacon_node/src/beacon_chain/mod.rs b/beacon_node/src/beacon_chain/mod.rs deleted file mode 100644 index 09dec741c..000000000 --- a/beacon_node/src/beacon_chain/mod.rs +++ /dev/null @@ -1,80 +0,0 @@ -mod block_processing; -mod block_production; - -use db::{ - stores::{BeaconBlockStore, BeaconStateStore}, - ClientDB, DBError, -}; -use genesis::{genesis_beacon_block, genesis_beacon_state, GenesisError}; -use slot_clock::SlotClock; -use ssz::ssz_encode; -use std::collections::HashSet; -use std::sync::Arc; -use types::{ChainSpec, Hash256}; - -pub use crate::block_processing::Outcome as BlockProcessingOutcome; - -#[derive(Debug, PartialEq)] -pub enum BeaconChainError { - InsufficientValidators, - GenesisError(GenesisError), - DBError(String), -} - -pub struct BeaconChain { - pub block_store: Arc>, - pub state_store: Arc>, - pub slot_clock: U, - pub leaf_blocks: HashSet, - pub canonical_leaf_block: Hash256, - pub spec: ChainSpec, -} - -impl BeaconChain -where - T: ClientDB, - U: SlotClock, -{ - pub fn genesis( - state_store: Arc>, - block_store: Arc>, - slot_clock: U, - spec: ChainSpec, - ) -> Result { - if spec.initial_validators.is_empty() { - return Err(BeaconChainError::InsufficientValidators); - } - - let genesis_state = genesis_beacon_state(&spec)?; - let state_root = genesis_state.canonical_root(); - state_store.put(&state_root, &ssz_encode(&genesis_state)[..])?; - - let genesis_block = genesis_beacon_block(state_root, &spec); - let block_root = genesis_block.canonical_root(); - block_store.put(&block_root, &ssz_encode(&genesis_block)[..])?; - - let mut leaf_blocks = HashSet::new(); - leaf_blocks.insert(block_root); - - Ok(Self { - block_store, - state_store, - slot_clock, - leaf_blocks, - canonical_leaf_block: block_root, - spec, - }) - } -} - -impl From for BeaconChainError { - fn from(e: DBError) -> BeaconChainError { - BeaconChainError::DBError(e.message) - } -} - -impl From for BeaconChainError { - fn from(e: GenesisError) -> BeaconChainError { - BeaconChainError::GenesisError(e) - } -} diff --git a/beacon_node/src/beacon_chain/tests/chain_test.rs b/beacon_node/src/beacon_chain/tests/chain_test.rs deleted file mode 100644 index fe7dfcf6c..000000000 --- a/beacon_node/src/beacon_chain/tests/chain_test.rs +++ /dev/null @@ -1,49 +0,0 @@ -use chain::{BeaconChain, BlockProcessingOutcome}; -use db::{ - stores::{BeaconBlockStore, BeaconStateStore}, - MemoryDB, -}; -use slot_clock::TestingSlotClock; -use std::sync::Arc; -use types::ChainSpec; - -fn in_memory_test_stores() -> ( - Arc, - Arc>, - Arc>, -) { - let db = Arc::new(MemoryDB::open()); - let block_store = Arc::new(BeaconBlockStore::new(db.clone())); - let state_store = Arc::new(BeaconStateStore::new(db.clone())); - (db, block_store, state_store) -} - -fn in_memory_test_chain( - spec: ChainSpec, -) -> (Arc, BeaconChain) { - let (db, block_store, state_store) = in_memory_test_stores(); - let slot_clock = TestingSlotClock::new(0); - - let chain = BeaconChain::genesis(state_store, block_store, slot_clock, spec); - (db, chain.unwrap()) -} - -#[test] -fn it_constructs() { - let (_db, _chain) = in_memory_test_chain(ChainSpec::foundation()); -} - -#[test] -fn it_produces() { - let (_db, mut chain) = in_memory_test_chain(ChainSpec::foundation()); - let (_block, _state) = chain.produce_block().unwrap(); -} - -#[test] -fn it_processes_a_block_it_produces() { - let (_db, mut chain) = in_memory_test_chain(ChainSpec::foundation()); - let (block, _state) = chain.produce_block().unwrap(); - let (outcome, new_block_hash) = 
chain.process_block(&block).unwrap(); - assert_eq!(outcome, BlockProcessingOutcome::Processed); - assert_eq!(chain.canonical_leaf_block, new_block_hash); -} diff --git a/beacon_node/src/beacon_chain/transition.rs b/beacon_node/src/beacon_chain/transition.rs deleted file mode 100644 index df434cc0c..000000000 --- a/beacon_node/src/beacon_chain/transition.rs +++ /dev/null @@ -1,29 +0,0 @@ -use super::BeaconChain; -use db::ClientDB; -use state_transition::{extend_active_state, StateTransitionError}; -use types::{ActiveState, BeaconBlock, CrystallizedState, Hash256}; - -impl BeaconChain -where - T: ClientDB + Sized, -{ - pub(crate) fn transition_states( - &self, - act_state: &ActiveState, - cry_state: &CrystallizedState, - block: &BeaconBlock, - block_hash: &Hash256, - ) -> Result<(ActiveState, Option), StateTransitionError> { - let state_recalc_distance = block - .slot - .checked_sub(cry_state.last_state_recalculation_slot) - .ok_or(StateTransitionError::BlockSlotBeforeRecalcSlot)?; - - if state_recalc_distance >= u64::from(self.spec.epoch_length) { - panic!("Not implemented!") - } else { - let new_act_state = extend_active_state(act_state, block, block_hash)?; - Ok((new_act_state, None)) - } - } -} diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 08660cbc4..25239a9f6 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -7,8 +7,16 @@ use std::path::PathBuf; use crate::config::LighthouseConfig; use crate::rpc::start_server; +use beacon_chain::BeaconChain; use clap::{App, Arg}; +use db::{ + stores::{BeaconBlockStore, BeaconStateStore}, + MemoryDB, +}; use slog::{error, info, o, Drain}; +use slot_clock::SystemTimeSlotClock; +use std::sync::Arc; +use types::ChainSpec; fn main() { let decorator = slog_term::TermDecorator::new().build(); @@ -58,6 +66,23 @@ fn main() { "data_dir" => &config.data_dir.to_str(), "port" => &config.p2p_listen_port); + // Specification (presently fixed to foundation). + let spec = ChainSpec::foundation(); + + // Database (presently in-memory) + let db = Arc::new(MemoryDB::open()); + let block_store = Arc::new(BeaconBlockStore::new(db.clone())); + let state_store = Arc::new(BeaconStateStore::new(db.clone())); + + // Slot clock + let slot_clock = SystemTimeSlotClock::new(spec.genesis_time, spec.slot_duration) + .expect("Unable to load SystemTimeSlotClock"); + + // Genesis chain + // TODO: persist chain to storage. + let _chain_result = + BeaconChain::genesis(state_store.clone(), block_store.clone(), slot_clock, spec); + let _server = start_server(log.clone()); loop { diff --git a/eth2/attestation_validation/Cargo.toml b/eth2/attestation_validation/Cargo.toml deleted file mode 100644 index b944f90f9..000000000 --- a/eth2/attestation_validation/Cargo.toml +++ /dev/null @@ -1,12 +0,0 @@ -[package] -name = "attestation_validation" -version = "0.1.0" -authors = ["Paul Hauner "] -edition = "2018" - -[dependencies] -bls = { path = "../utils/bls" } -db = { path = "../../beacon_node/db" } -hashing = { path = "../utils/hashing" } -ssz = { path = "../utils/ssz" } -types = { path = "../types" } diff --git a/eth2/attestation_validation/src/block_inclusion.rs b/eth2/attestation_validation/src/block_inclusion.rs deleted file mode 100644 index 76a5c9797..000000000 --- a/eth2/attestation_validation/src/block_inclusion.rs +++ /dev/null @@ -1,246 +0,0 @@ -use super::{Error, Invalid, Outcome}; - -/// Check that an attestation is valid to be included in some block. 
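// Referring back to the beacon_node/src/main.rs hunk above — an illustrative sketch, not part
// of the diff, of checking the freshly built genesis chain and system clock at start-up. The
// hunk currently discards the genesis result; the error handling here is only a suggestion and
// assumes nothing about the concrete error type.
use beacon_chain::BeaconChain;
use db::stores::{BeaconBlockStore, BeaconStateStore};
use db::MemoryDB;
use slot_clock::{SlotClock, SystemTimeSlotClock};
use std::sync::Arc;
use types::ChainSpec;

fn boot_genesis_chain() {
    let spec = ChainSpec::foundation();
    let db = Arc::new(MemoryDB::open());
    let block_store = Arc::new(BeaconBlockStore::new(db.clone()));
    let state_store = Arc::new(BeaconStateStore::new(db.clone()));

    let slot_clock = SystemTimeSlotClock::new(spec.genesis_time, spec.slot_duration)
        .expect("Unable to load SystemTimeSlotClock");

    // `present_slot` may be `None` before the genesis time is reached.
    let _now = slot_clock.present_slot();

    // Propagate, rather than ignore, a failure to build the genesis chain.
    match BeaconChain::genesis(state_store, block_store, slot_clock, spec) {
        Ok(_chain) => println!("genesis beacon chain initialised"),
        Err(_) => panic!("unable to initialise the beacon chain"),
    }
}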
-pub fn validate_attestation_for_block( - attestation_slot: u64, - block_slot: u64, - parent_block_slot: u64, - min_attestation_inclusion_delay: u64, - epoch_length: u64, -) -> Result { - /* - * There is a delay before an attestation may be included in a block, quantified by - * `slots` and defined as `min_attestation_inclusion_delay`. - * - * So, an attestation must be at least `min_attestation_inclusion_delay` slots "older" than the - * block it is contained in. - */ - verify_or!( - // TODO: this differs from the spec as it does not handle underflows correctly. - // https://github.com/sigp/lighthouse/issues/95 - attestation_slot < block_slot.saturating_sub(min_attestation_inclusion_delay - 1), - reject!(Invalid::AttestationTooRecent) - ); - - /* - * A block may not include attestations reference slots more than an epoch length + 1 prior to - * the block slot. - */ - verify_or!( - attestation_slot >= parent_block_slot.saturating_sub(epoch_length + 1), - reject!(Invalid::AttestationTooOld) - ); - - accept!() -} - -#[cfg(test)] -mod tests { - use super::*; - - /* - * Invalid::AttestationTooOld tests. - */ - - #[test] - fn test_inclusion_too_old_minimal() { - let min_attestation_inclusion_delay = 10; - let epoch_length = 20; - let block_slot = 100; - let parent_block_slot = block_slot - 1; - let attestation_slot = block_slot - min_attestation_inclusion_delay; - - let outcome = validate_attestation_for_block( - attestation_slot, - block_slot, - parent_block_slot, - min_attestation_inclusion_delay, - epoch_length, - ); - assert_eq!(outcome, Ok(Outcome::Valid)); - } - - #[test] - fn test_inclusion_too_old_maximal() { - let min_attestation_inclusion_delay = 10; - let epoch_length = 20; - let block_slot = 100; - let parent_block_slot = block_slot - 1; - let attestation_slot = block_slot - epoch_length + 1; - - let outcome = validate_attestation_for_block( - attestation_slot, - block_slot, - parent_block_slot, - min_attestation_inclusion_delay, - epoch_length, - ); - assert_eq!(outcome, Ok(Outcome::Valid)); - } - - #[test] - fn test_inclusion_too_old_saturating_non_zero_attestation_slot() { - let min_attestation_inclusion_delay = 10; - let epoch_length = 20; - let block_slot = epoch_length + 1; - let parent_block_slot = block_slot - 1; - let attestation_slot = block_slot - min_attestation_inclusion_delay; - - let outcome = validate_attestation_for_block( - attestation_slot, - block_slot, - parent_block_slot, - min_attestation_inclusion_delay, - epoch_length, - ); - assert_eq!(outcome, Ok(Outcome::Valid)); - } - - #[test] - fn test_inclusion_too_old_saturating_zero_attestation_slot() { - let min_attestation_inclusion_delay = 10; - let epoch_length = 20; - let block_slot = epoch_length + 1; - let parent_block_slot = block_slot - 1; - let attestation_slot = 0; - - let outcome = validate_attestation_for_block( - attestation_slot, - block_slot, - parent_block_slot, - min_attestation_inclusion_delay, - epoch_length, - ); - assert_eq!(outcome, Ok(Outcome::Valid)); - } - - #[test] - fn test_inclusion_too_old() { - let min_attestation_inclusion_delay = 10; - let epoch_length = 20; - let block_slot = epoch_length * 2; - let parent_block_slot = block_slot - 1; - let attestation_slot = parent_block_slot - (epoch_length + 2); - - let outcome = validate_attestation_for_block( - attestation_slot, - block_slot, - parent_block_slot, - min_attestation_inclusion_delay, - epoch_length, - ); - assert_eq!(outcome, Ok(Outcome::Invalid(Invalid::AttestationTooOld))); - } - - /* - * Invalid::AttestationTooRecent tests. 
- */ - - #[test] - fn test_inclusion_too_recent_minimal() { - let parent_block_slot = 99; - let min_attestation_inclusion_delay = 10; - let epoch_length = 20; - let block_slot = 100; - let attestation_slot = block_slot - min_attestation_inclusion_delay; - - let outcome = validate_attestation_for_block( - attestation_slot, - block_slot, - parent_block_slot, - min_attestation_inclusion_delay, - epoch_length, - ); - assert_eq!(outcome, Ok(Outcome::Valid)); - } - - #[test] - fn test_inclusion_too_recent_maximal() { - let parent_block_slot = 99; - let min_attestation_inclusion_delay = 10; - let epoch_length = 20; - let block_slot = 100; - let attestation_slot = block_slot - epoch_length; - - let outcome = validate_attestation_for_block( - attestation_slot, - block_slot, - parent_block_slot, - min_attestation_inclusion_delay, - epoch_length, - ); - assert_eq!(outcome, Ok(Outcome::Valid)); - } - - #[test] - fn test_inclusion_too_recent_insufficient() { - let parent_block_slot = 99; - let min_attestation_inclusion_delay = 10; - let epoch_length = 20; - let block_slot = 100; - let attestation_slot = block_slot - (min_attestation_inclusion_delay - 1); - - let outcome = validate_attestation_for_block( - attestation_slot, - block_slot, - parent_block_slot, - min_attestation_inclusion_delay, - epoch_length, - ); - assert_eq!(outcome, Ok(Outcome::Invalid(Invalid::AttestationTooRecent))); - } - - #[test] - fn test_inclusion_too_recent_first_possible_slot() { - let min_attestation_inclusion_delay = 10; - let epoch_length = 20; - let block_slot = min_attestation_inclusion_delay; - let attestation_slot = 0; - let parent_block_slot = block_slot - 1; - - let outcome = validate_attestation_for_block( - attestation_slot, - block_slot, - parent_block_slot, - min_attestation_inclusion_delay, - epoch_length, - ); - assert_eq!(outcome, Ok(Outcome::Valid)); - } - - #[test] - fn test_inclusion_too_recent_saturation_non_zero_slot() { - let min_attestation_inclusion_delay = 10; - let epoch_length = 20; - let block_slot = min_attestation_inclusion_delay - 1; - let parent_block_slot = block_slot - 1; - let attestation_slot = 0; - - let outcome = validate_attestation_for_block( - attestation_slot, - block_slot, - parent_block_slot, - min_attestation_inclusion_delay, - epoch_length, - ); - assert_eq!(outcome, Ok(Outcome::Invalid(Invalid::AttestationTooRecent))); - } - - #[test] - fn test_inclusion_too_recent_saturation_zero_slot() { - let min_attestation_inclusion_delay = 10; - let epoch_length = 20; - let block_slot = min_attestation_inclusion_delay - 1; - let parent_block_slot = block_slot - 1; - let attestation_slot = 0; - - let outcome = validate_attestation_for_block( - attestation_slot, - block_slot, - parent_block_slot, - min_attestation_inclusion_delay, - epoch_length, - ); - assert_eq!(outcome, Ok(Outcome::Invalid(Invalid::AttestationTooRecent))); - } -} diff --git a/eth2/attestation_validation/src/enums.rs b/eth2/attestation_validation/src/enums.rs deleted file mode 100644 index 6c94c628c..000000000 --- a/eth2/attestation_validation/src/enums.rs +++ /dev/null @@ -1,37 +0,0 @@ -/// Reasons why an `AttestationRecord` can be invalid. -#[derive(PartialEq, Debug)] -pub enum Invalid { - AttestationTooRecent, - AttestationTooOld, - JustifiedSlotImpermissable, - JustifiedBlockNotInChain, - JustifiedBlockHashMismatch, - UnknownShard, - ShardBlockHashMismatch, - SignatureInvalid, -} - -/// The outcome of validating the `AttestationRecord`. 
-/// -/// Distinct from the `Error` enum as an `Outcome` indicates that validation executed sucessfully -/// and determined the validity `AttestationRecord`. -#[derive(PartialEq, Debug)] -pub enum Outcome { - Valid, - Invalid(Invalid), -} - -/// Errors that prevent this function from correctly validating the `AttestationRecord`. -/// -/// Distinct from the `Outcome` enum as `Errors` indicate that validation encountered an unexpected -/// condition and was unable to perform its duty. -#[derive(PartialEq, Debug)] -pub enum Error { - BlockHasNoParent, - BadValidatorIndex, - UnableToLookupBlockAtSlot, - OutOfBoundsBitfieldIndex, - PublicKeyCorrupt, - NoPublicKeyForValidator, - DBError(String), -} diff --git a/eth2/attestation_validation/src/justified_block.rs b/eth2/attestation_validation/src/justified_block.rs deleted file mode 100644 index c4c3baa1f..000000000 --- a/eth2/attestation_validation/src/justified_block.rs +++ /dev/null @@ -1,80 +0,0 @@ -use super::db::stores::{BeaconBlockAtSlotError, BeaconBlockStore}; -use super::db::ClientDB; -use super::types::AttestationData; -use super::types::Hash256; -use super::{Error, Invalid, Outcome}; -use std::sync::Arc; - -/// Verify that a attestation's `data.justified_block_hash` matches the local hash of the block at the -/// attestation's `data.justified_slot`. -/// -/// `chain_tip_block_hash` is the tip of the chain in which the justified block hash should exist -/// locally. As Lightouse stores multiple chains locally, it is possible to have multiple blocks at -/// the same slot. `chain_tip_block_hash` serves to restrict the lookup to a single chain, where -/// each slot may have exactly zero or one blocks. -pub fn validate_attestation_justified_block_hash( - data: &AttestationData, - chain_tip_block_hash: &Hash256, - block_store: &Arc>, -) -> Result -where - T: ClientDB + Sized, -{ - /* - * The `justified_block_hash` in the attestation must match exactly the hash of the block at - * that slot in the local chain. - * - * This condition also infers that the `justified_slot` specified in attestation must exist - * locally. - */ - match block_hash_at_slot(chain_tip_block_hash, data.justified_slot, block_store)? { - None => reject!(Invalid::JustifiedBlockNotInChain), - Some(local_justified_block_hash) => { - verify_or!( - data.justified_block_root == local_justified_block_hash, - reject!(Invalid::JustifiedBlockHashMismatch) - ); - } - }; - accept!() -} - -/// Returns the hash (or None) of a block at a slot in the chain that is specified by -/// `chain_tip_hash`. -/// -/// Given that the database stores multiple chains, it is possible for there to be multiple blocks -/// at the given slot. `chain_tip_hash` specifies exactly which chain should be used. -fn block_hash_at_slot( - chain_tip_hash: &Hash256, - slot: u64, - block_store: &Arc>, -) -> Result, Error> -where - T: ClientDB + Sized, -{ - match block_store.block_at_slot(&chain_tip_hash, slot)? { - None => Ok(None), - Some((hash_bytes, _)) => Ok(Some(Hash256::from(&hash_bytes[..]))), - } -} - -impl From for Error { - fn from(e: BeaconBlockAtSlotError) -> Self { - match e { - BeaconBlockAtSlotError::DBError(s) => Error::DBError(s), - _ => Error::UnableToLookupBlockAtSlot, - } - } -} - -#[cfg(test)] -mod tests { - /* - * TODO: Implement tests. - * - * These tests will require the `BeaconBlock` and `BeaconBlockBody` updates, which are not - * yet included in the code base. Adding tests now will result in duplicated work. 
- * - * https://github.com/sigp/lighthouse/issues/97 - */ -} diff --git a/eth2/attestation_validation/src/justified_slot.rs b/eth2/attestation_validation/src/justified_slot.rs deleted file mode 100644 index ea71f2616..000000000 --- a/eth2/attestation_validation/src/justified_slot.rs +++ /dev/null @@ -1,39 +0,0 @@ -use super::types::{AttestationData, BeaconState}; -use super::{Error, Invalid, Outcome}; - -/// Verify that an attestation's `data.justified_slot` matches the justified slot known to the -/// `state`. -/// -/// In the case that an attestation references a slot _before_ the latest state transition, is -/// acceptable for the attestation to reference the previous known `justified_slot`. If this were -/// not the case, all attestations created _prior_ to the last state recalculation would be rejected -/// if a block was justified in that state recalculation. It is both ideal and likely that blocks -/// will be justified during a state recalcuation. -pub fn validate_attestation_justified_slot( - data: &AttestationData, - state: &BeaconState, - epoch_length: u64, -) -> Result { - let permissable_justified_slot = if data.slot >= state.slot - (state.slot % epoch_length) { - state.justified_slot - } else { - state.previous_justified_slot - }; - verify_or!( - data.justified_slot == permissable_justified_slot, - reject!(Invalid::JustifiedSlotImpermissable) - ); - accept!() -} - -#[cfg(test)] -mod tests { - /* - * TODO: Implement tests. - * - * These tests will require the `BeaconBlock` and `BeaconBlockBody` updates, which are not - * yet included in the code base. Adding tests now will result in duplicated work. - * - * https://github.com/sigp/lighthouse/issues/97 - */ -} diff --git a/eth2/attestation_validation/src/lib.rs b/eth2/attestation_validation/src/lib.rs deleted file mode 100644 index 825371ed0..000000000 --- a/eth2/attestation_validation/src/lib.rs +++ /dev/null @@ -1,22 +0,0 @@ -extern crate bls; -extern crate db; -extern crate hashing; -extern crate ssz; -extern crate types; - -#[macro_use] -mod macros; - -mod block_inclusion; -mod enums; -mod justified_block; -mod justified_slot; -mod shard_block; -mod signature; - -pub use crate::block_inclusion::validate_attestation_for_block; -pub use crate::enums::{Error, Invalid, Outcome}; -pub use crate::justified_block::validate_attestation_justified_block_hash; -pub use crate::justified_slot::validate_attestation_justified_slot; -pub use crate::shard_block::validate_attestation_data_shard_block_hash; -pub use crate::signature::validate_attestation_signature; diff --git a/eth2/attestation_validation/src/macros.rs b/eth2/attestation_validation/src/macros.rs deleted file mode 100644 index faae00fcf..000000000 --- a/eth2/attestation_validation/src/macros.rs +++ /dev/null @@ -1,19 +0,0 @@ -macro_rules! verify_or { - ($condition: expr, $result: expr) => { - if !$condition { - $result - } - }; -} - -macro_rules! reject { - ($result: expr) => { - return Ok(Outcome::Invalid($result)); - }; -} - -macro_rules! accept { - () => { - Ok(Outcome::Valid) - }; -} diff --git a/eth2/attestation_validation/src/shard_block.rs b/eth2/attestation_validation/src/shard_block.rs deleted file mode 100644 index ac5b0a067..000000000 --- a/eth2/attestation_validation/src/shard_block.rs +++ /dev/null @@ -1,46 +0,0 @@ -use super::db::ClientDB; -use super::types::{AttestationData, BeaconState}; -use super::{Error, Invalid, Outcome}; - -/// Check that an attestation is valid with reference to some state. 
-pub fn validate_attestation_data_shard_block_hash( - data: &AttestationData, - state: &BeaconState, -) -> Result -where - T: ClientDB + Sized, -{ - /* - * The `shard_block_hash` in the state's `latest_crosslinks` must match either the - * `latest_crosslink_hash` or the `shard_block_hash` on the attestation. - * - * TODO: figure out the reasoning behind this. - */ - match state.latest_crosslinks.get(data.shard as usize) { - None => reject!(Invalid::UnknownShard), - Some(crosslink) => { - let local_shard_block_hash = crosslink.shard_block_root; - let shard_block_hash_is_permissable = { - (local_shard_block_hash == data.latest_crosslink_root) - || (local_shard_block_hash == data.shard_block_root) - }; - verify_or!( - shard_block_hash_is_permissable, - reject!(Invalid::ShardBlockHashMismatch) - ); - } - }; - accept!() -} - -#[cfg(test)] -mod tests { - /* - * TODO: Implement tests. - * - * These tests will require the `BeaconBlock` and `BeaconBlockBody` updates, which are not - * yet included in the code base. Adding tests now will result in duplicated work. - * - * https://github.com/sigp/lighthouse/issues/97 - */ -} diff --git a/eth2/attestation_validation/src/signature.rs b/eth2/attestation_validation/src/signature.rs deleted file mode 100644 index 7d08be337..000000000 --- a/eth2/attestation_validation/src/signature.rs +++ /dev/null @@ -1,151 +0,0 @@ -use super::bls::{AggregatePublicKey, AggregateSignature}; -use super::db::stores::{ValidatorStore, ValidatorStoreError}; -use super::db::ClientDB; -use super::types::{AttestationData, Bitfield, BitfieldError}; -use super::{Error, Invalid, Outcome}; - -/// Validate that some signature is correct for some attestation data and known validator set. -pub fn validate_attestation_signature( - attestation_data: &AttestationData, - participation_bitfield: &Bitfield, - aggregate_signature: &AggregateSignature, - attestation_indices: &[usize], - validator_store: &ValidatorStore, -) -> Result -where - T: ClientDB + Sized, -{ - let mut agg_pub_key = AggregatePublicKey::new(); - - for i in 0..attestation_indices.len() { - let voted = participation_bitfield.get(i)?; - if voted { - // De-reference the attestation index into a canonical ValidatorRecord index. - let validator = *attestation_indices.get(i).ok_or(Error::BadValidatorIndex)?; - // Load the public key. - let pub_key = validator_store - .get_public_key_by_index(validator)? - .ok_or(Error::NoPublicKeyForValidator)?; - // Aggregate the public key. - agg_pub_key.add(&pub_key.as_raw()); - } - } - - let signed_message = attestation_data_signing_message(attestation_data); - verify_or!( - // TODO: ensure "domain" for aggregate signatures is included. 
- // https://github.com/sigp/lighthouse/issues/91 - aggregate_signature.verify(&signed_message, &agg_pub_key), - reject!(Invalid::SignatureInvalid) - ); - - accept!() -} - -fn attestation_data_signing_message(attestation_data: &AttestationData) -> Vec { - let mut signed_message = attestation_data.canonical_root().to_vec(); - signed_message.append(&mut vec![0]); - signed_message -} - -impl From for Error { - fn from(error: ValidatorStoreError) -> Self { - match error { - ValidatorStoreError::DBError(s) => Error::DBError(s), - ValidatorStoreError::DecodeError => Error::PublicKeyCorrupt, - } - } -} - -impl From for Error { - fn from(_error: BitfieldError) -> Self { - Error::OutOfBoundsBitfieldIndex - } -} - -#[cfg(test)] -mod tests { - use super::super::bls::{Keypair, Signature}; - use super::super::db::MemoryDB; - use super::*; - use std::sync::Arc; - - /* - * TODO: Test cases are not comprehensive. - * https://github.com/sigp/lighthouse/issues/94 - */ - - #[test] - fn test_signature_verification() { - let attestation_data = AttestationData::zero(); - let message = attestation_data_signing_message(&attestation_data); - let signing_keypairs = vec![ - Keypair::random(), - Keypair::random(), - Keypair::random(), - Keypair::random(), - Keypair::random(), - Keypair::random(), - ]; - let non_signing_keypairs = vec![ - Keypair::random(), - Keypair::random(), - Keypair::random(), - Keypair::random(), - Keypair::random(), - Keypair::random(), - ]; - /* - * Signing keypairs first, then non-signing - */ - let mut all_keypairs = signing_keypairs.clone(); - all_keypairs.append(&mut non_signing_keypairs.clone()); - - let attestation_indices: Vec = (0..all_keypairs.len()).collect(); - let mut bitfield = Bitfield::from_elem(all_keypairs.len(), false); - for i in 0..signing_keypairs.len() { - bitfield.set(i, true).unwrap(); - } - - let db = Arc::new(MemoryDB::open()); - let store = ValidatorStore::new(db); - - for (i, keypair) in all_keypairs.iter().enumerate() { - store.put_public_key_by_index(i, &keypair.pk).unwrap(); - } - - let mut agg_sig = AggregateSignature::new(); - for keypair in &signing_keypairs { - let sig = Signature::new(&message, &keypair.sk); - agg_sig.add(&sig); - } - - /* - * Test using all valid parameters. - */ - let outcome = validate_attestation_signature( - &attestation_data, - &bitfield, - &agg_sig, - &attestation_indices, - &store, - ) - .unwrap(); - assert_eq!(outcome, Outcome::Valid); - - /* - * Add another validator to the bitfield, run validation will all other - * parameters the same and assert that it fails. 
- */ - bitfield.set(signing_keypairs.len() + 1, true).unwrap(); - let outcome = validate_attestation_signature( - &attestation_data, - &bitfield, - &agg_sig, - &attestation_indices, - &store, - ) - .unwrap(); - assert_eq!(outcome, Outcome::Invalid(Invalid::SignatureInvalid)); - } -} diff --git a/eth2/attester/Cargo.toml b/eth2/attester/Cargo.toml new file mode 100644 index 000000000..956ecf565 --- /dev/null +++ b/eth2/attester/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "attester" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[dependencies] +slot_clock = { path = "../../eth2/utils/slot_clock" } +ssz = { path = "../../eth2/utils/ssz" } +types = { path = "../../eth2/types" } diff --git a/eth2/attester/src/lib.rs b/eth2/attester/src/lib.rs new file mode 100644 index 000000000..4dda24570 --- /dev/null +++ b/eth2/attester/src/lib.rs @@ -0,0 +1,250 @@ +pub mod test_utils; +mod traits; + +use slot_clock::SlotClock; +use std::sync::Arc; +use types::{AttestationData, FreeAttestation, Signature}; + +pub use self::traits::{ + BeaconNode, BeaconNodeError, DutiesReader, DutiesReaderError, PublishOutcome, Signer, +}; + +const PHASE_0_CUSTODY_BIT: bool = false; + +#[derive(Debug, PartialEq)] +pub enum PollOutcome { + AttestationProduced(u64), + AttestationNotRequired(u64), + SlashableAttestationNotProduced(u64), + BeaconNodeUnableToProduceAttestation(u64), + ProducerDutiesUnknown(u64), + SlotAlreadyProcessed(u64), + SignerRejection(u64), + ValidatorIsUnknown(u64), +} + +#[derive(Debug, PartialEq)] +pub enum Error { + SlotClockError, + SlotUnknowable, + EpochMapPoisoned, + SlotClockPoisoned, + EpochLengthIsZero, + BeaconNodeError(BeaconNodeError), +} + +/// A polling state machine which performs block production duties, based upon some epoch duties +/// (`EpochDutiesMap`) and a concept of time (`SlotClock`). +/// +/// Ensures that messages are not slashable. +/// +/// Relies upon an external service to keep the `EpochDutiesMap` updated. +pub struct Attester { + pub last_processed_slot: Option, + duties: Arc, + slot_clock: Arc, + beacon_node: Arc, + signer: Arc, +} + +impl Attester { + /// Returns a new instance where `last_processed_slot == 0`. + pub fn new(duties: Arc, slot_clock: Arc, beacon_node: Arc, signer: Arc) -> Self { + Self { + last_processed_slot: None, + duties, + slot_clock, + beacon_node, + signer, + } + } +} + +impl Attester { + /// Poll the `BeaconNode` and produce an attestation if required. + pub fn poll(&mut self) -> Result { + let slot = self + .slot_clock + .present_slot() + .map_err(|_| Error::SlotClockError)? + .ok_or(Error::SlotUnknowable)?; + + if !self.is_processed_slot(slot) { + self.last_processed_slot = Some(slot); + + let shard = match self.duties.attestation_shard(slot) { + Ok(Some(result)) => result, + Ok(None) => return Ok(PollOutcome::AttestationNotRequired(slot)), + Err(DutiesReaderError::UnknownEpoch) => { + return Ok(PollOutcome::ProducerDutiesUnknown(slot)); + } + Err(DutiesReaderError::UnknownValidator) => { + return Ok(PollOutcome::ValidatorIsUnknown(slot)); + } + Err(DutiesReaderError::EpochLengthIsZero) => return Err(Error::EpochLengthIsZero), + Err(DutiesReaderError::Poisoned) => return Err(Error::EpochMapPoisoned), + }; + + self.produce_attestation(slot, shard) + } else { + Ok(PollOutcome::SlotAlreadyProcessed(slot)) + } + } + + fn produce_attestation(&mut self, slot: u64, shard: u64) -> Result { + let attestation_data = match self.beacon_node.produce_attestation_data(slot, shard)? 
{ + Some(attestation_data) => attestation_data, + None => return Ok(PollOutcome::BeaconNodeUnableToProduceAttestation(slot)), + }; + + if !self.safe_to_produce(&attestation_data) { + return Ok(PollOutcome::SlashableAttestationNotProduced(slot)); + } + + let signature = match self.sign_attestation_data(&attestation_data) { + Some(signature) => signature, + None => return Ok(PollOutcome::SignerRejection(slot)), + }; + + let validator_index = match self.duties.validator_index() { + Some(validator_index) => validator_index, + None => return Ok(PollOutcome::ValidatorIsUnknown(slot)), + }; + + let free_attestation = FreeAttestation { + data: attestation_data, + signature, + validator_index, + }; + + self.beacon_node + .publish_attestation_data(free_attestation)?; + Ok(PollOutcome::AttestationProduced(slot)) + } + + fn is_processed_slot(&self, slot: u64) -> bool { + match self.last_processed_slot { + Some(processed_slot) if slot <= processed_slot => true, + _ => false, + } + } + + /// Consumes a block, returning that block signed by the validators private key. + /// + /// Important: this function will not check to ensure the block is not slashable. This must be + /// done upstream. + fn sign_attestation_data(&mut self, attestation_data: &AttestationData) -> Option { + self.store_produce(attestation_data); + + self.signer + .sign_attestation_message(&attestation_data.signable_message(PHASE_0_CUSTODY_BIT)[..]) + } + + /// Returns `true` if signing some attestation_data is safe (non-slashable). + /// + /// !!! UNSAFE !!! + /// + /// Important: this function is presently stubbed-out. It provides ZERO SAFETY. + fn safe_to_produce(&self, _attestation_data: &AttestationData) -> bool { + // TODO: ensure the producer doesn't produce slashable blocks. + // https://github.com/sigp/lighthouse/issues/160 + true + } + + /// Record that a block was produced so that slashable votes may not be made in the future. + /// + /// !!! UNSAFE !!! + /// + /// Important: this function is presently stubbed-out. It provides ZERO SAFETY. + fn store_produce(&mut self, _block: &AttestationData) { + // TODO: record this block production to prevent future slashings. + // https://github.com/sigp/lighthouse/issues/160 + } +} + +impl From for Error { + fn from(e: BeaconNodeError) -> Error { + Error::BeaconNodeError(e) + } +} + +#[cfg(test)] +mod tests { + use super::test_utils::{EpochMap, LocalSigner, SimulatedBeaconNode}; + use super::*; + use slot_clock::TestingSlotClock; + use types::{ + test_utils::{SeedableRng, TestRandom, XorShiftRng}, + ChainSpec, Keypair, + }; + + // TODO: implement more thorough testing. + // https://github.com/sigp/lighthouse/issues/160 + // + // These tests should serve as a good example for future tests. + + #[test] + pub fn polling() { + let mut rng = XorShiftRng::from_seed([42; 16]); + + let spec = Arc::new(ChainSpec::foundation()); + let slot_clock = Arc::new(TestingSlotClock::new(0)); + let beacon_node = Arc::new(SimulatedBeaconNode::default()); + let signer = Arc::new(LocalSigner::new(Keypair::random())); + + let mut duties = EpochMap::new(spec.epoch_length); + let attest_slot = 100; + let attest_epoch = attest_slot / spec.epoch_length; + let attest_shard = 12; + duties.insert_attestation_shard(attest_slot, attest_shard); + duties.set_validator_index(Some(2)); + let duties = Arc::new(duties); + + let mut attester = Attester::new( + duties.clone(), + slot_clock.clone(), + beacon_node.clone(), + signer.clone(), + ); + + // Configure responses from the BeaconNode. 
+ beacon_node.set_next_produce_result(Ok(Some(AttestationData::random_for_test(&mut rng)))); + beacon_node.set_next_publish_result(Ok(PublishOutcome::ValidAttestation)); + + // One slot before attestation slot... + slot_clock.set_slot(attest_slot - 1); + assert_eq!( + attester.poll(), + Ok(PollOutcome::AttestationNotRequired(attest_slot - 1)) + ); + + // On the attest slot... + slot_clock.set_slot(attest_slot); + assert_eq!( + attester.poll(), + Ok(PollOutcome::AttestationProduced(attest_slot)) + ); + + // Trying the same attest slot again... + slot_clock.set_slot(attest_slot); + assert_eq!( + attester.poll(), + Ok(PollOutcome::SlotAlreadyProcessed(attest_slot)) + ); + + // One slot after the attest slot... + slot_clock.set_slot(attest_slot + 1); + assert_eq!( + attester.poll(), + Ok(PollOutcome::AttestationNotRequired(attest_slot + 1)) + ); + + // In an epoch without known duties... + let slot = (attest_epoch + 1) * spec.epoch_length; + slot_clock.set_slot(slot); + assert_eq!( + attester.poll(), + Ok(PollOutcome::ProducerDutiesUnknown(slot)) + ); + } +} diff --git a/eth2/attester/src/test_utils/epoch_map.rs b/eth2/attester/src/test_utils/epoch_map.rs new file mode 100644 index 000000000..88e36c93c --- /dev/null +++ b/eth2/attester/src/test_utils/epoch_map.rs @@ -0,0 +1,44 @@ +use crate::{DutiesReader, DutiesReaderError}; +use std::collections::HashMap; + +pub struct EpochMap { + epoch_length: u64, + validator_index: Option, + map: HashMap, +} + +impl EpochMap { + pub fn new(epoch_length: u64) -> Self { + Self { + epoch_length, + validator_index: None, + map: HashMap::new(), + } + } + + pub fn insert_attestation_shard(&mut self, slot: u64, shard: u64) { + let epoch = slot / self.epoch_length; + + self.map.insert(epoch, (slot, shard)); + } + + pub fn set_validator_index(&mut self, index: Option) { + self.validator_index = index; + } +} + +impl DutiesReader for EpochMap { + fn attestation_shard(&self, slot: u64) -> Result, DutiesReaderError> { + let epoch = slot / self.epoch_length; + + match self.map.get(&epoch) { + Some((attest_slot, attest_shard)) if *attest_slot == slot => Ok(Some(*attest_shard)), + Some((attest_slot, _attest_shard)) if *attest_slot != slot => Ok(None), + _ => Err(DutiesReaderError::UnknownEpoch), + } + } + + fn validator_index(&self) -> Option { + self.validator_index + } +} diff --git a/eth2/attester/src/test_utils/local_signer.rs b/eth2/attester/src/test_utils/local_signer.rs new file mode 100644 index 000000000..c256d1050 --- /dev/null +++ b/eth2/attester/src/test_utils/local_signer.rs @@ -0,0 +1,31 @@ +use crate::traits::Signer; +use std::sync::RwLock; +use types::{Keypair, Signature}; + +/// A test-only struct used to simulate a Beacon Node. +pub struct LocalSigner { + keypair: Keypair, + should_sign: RwLock, +} + +impl LocalSigner { + /// Produce a new LocalSigner with signing enabled by default. + pub fn new(keypair: Keypair) -> Self { + Self { + keypair, + should_sign: RwLock::new(true), + } + } + + /// If set to `false`, the service will refuse to sign all messages. Otherwise, all messages + /// will be signed. 
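// Sketch, not part of the diff: the DutiesReader contract that `Attester::poll` relies upon,
// shown as a deliberately trivial implementation that reports the same shard for every slot.
// Real duties would be derived from the beacon state; the `EpochMap` above is the test-grade
// equivalent.
use attester::{DutiesReader, DutiesReaderError};

pub struct FixedDuties {
    shard: u64,
    validator_index: u64,
}

impl DutiesReader for FixedDuties {
    fn attestation_shard(&self, _slot: u64) -> Result<Option<u64>, DutiesReaderError> {
        // Attest in every slot, always on the same shard.
        Ok(Some(self.shard))
    }

    fn validator_index(&self) -> Option<u64> {
        Some(self.validator_index)
    }
}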
+ pub fn enable_signing(&self, enabled: bool) { + *self.should_sign.write().unwrap() = enabled; + } +} + +impl Signer for LocalSigner { + fn sign_attestation_message(&self, message: &[u8]) -> Option { + Some(Signature::new(message, &self.keypair.sk)) + } +} diff --git a/eth2/attester/src/test_utils/mod.rs b/eth2/attester/src/test_utils/mod.rs new file mode 100644 index 000000000..481247dd0 --- /dev/null +++ b/eth2/attester/src/test_utils/mod.rs @@ -0,0 +1,7 @@ +mod epoch_map; +mod local_signer; +mod simulated_beacon_node; + +pub use self::epoch_map::EpochMap; +pub use self::local_signer::LocalSigner; +pub use self::simulated_beacon_node::SimulatedBeaconNode; diff --git a/eth2/attester/src/test_utils/simulated_beacon_node.rs b/eth2/attester/src/test_utils/simulated_beacon_node.rs new file mode 100644 index 000000000..2f14c9fd3 --- /dev/null +++ b/eth2/attester/src/test_utils/simulated_beacon_node.rs @@ -0,0 +1,44 @@ +use crate::traits::{BeaconNode, BeaconNodeError, PublishOutcome}; +use std::sync::RwLock; +use types::{AttestationData, FreeAttestation}; + +type ProduceResult = Result, BeaconNodeError>; +type PublishResult = Result; + +/// A test-only struct used to simulate a Beacon Node. +#[derive(Default)] +pub struct SimulatedBeaconNode { + pub produce_input: RwLock>, + pub produce_result: RwLock>, + + pub publish_input: RwLock>, + pub publish_result: RwLock>, +} + +impl SimulatedBeaconNode { + pub fn set_next_produce_result(&self, result: ProduceResult) { + *self.produce_result.write().unwrap() = Some(result); + } + + pub fn set_next_publish_result(&self, result: PublishResult) { + *self.publish_result.write().unwrap() = Some(result); + } +} + +impl BeaconNode for SimulatedBeaconNode { + fn produce_attestation_data(&self, slot: u64, shard: u64) -> ProduceResult { + *self.produce_input.write().unwrap() = Some((slot, shard)); + match *self.produce_result.read().unwrap() { + Some(ref r) => r.clone(), + None => panic!("TestBeaconNode: produce_result == None"), + } + } + + fn publish_attestation_data(&self, free_attestation: FreeAttestation) -> PublishResult { + *self.publish_input.write().unwrap() = Some(free_attestation.clone()); + match *self.publish_result.read().unwrap() { + Some(ref r) => r.clone(), + None => panic!("TestBeaconNode: publish_result == None"), + } + } +} diff --git a/eth2/attester/src/traits.rs b/eth2/attester/src/traits.rs new file mode 100644 index 000000000..fd07fd171 --- /dev/null +++ b/eth2/attester/src/traits.rs @@ -0,0 +1,49 @@ +use types::{AttestationData, FreeAttestation, Signature}; + +#[derive(Debug, PartialEq, Clone)] +pub enum BeaconNodeError { + RemoteFailure(String), + DecodeFailure, +} + +#[derive(Debug, PartialEq, Clone)] +pub enum PublishOutcome { + ValidAttestation, + InvalidAttestation(String), +} + +/// Defines the methods required to produce and publish blocks on a Beacon Node. +pub trait BeaconNode: Send + Sync { + fn produce_attestation_data( + &self, + slot: u64, + shard: u64, + ) -> Result, BeaconNodeError>; + + fn publish_attestation_data( + &self, + free_attestation: FreeAttestation, + ) -> Result; +} + +#[derive(Debug, PartialEq, Clone)] +pub enum DutiesReaderError { + UnknownValidator, + UnknownEpoch, + EpochLengthIsZero, + Poisoned, +} + +/// Informs a validator of their duties (e.g., block production). +pub trait DutiesReader: Send + Sync { + /// Returns `Some(shard)` if this slot is an attestation slot. 
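// Sketch, not part of the diff: the RwLock-backed SimulatedBeaconNode can both stub responses
// and record the inputs it was called with, so a test can assert on what the Attester asked
// for. The slot/shard values are arbitrary.
use attester::test_utils::SimulatedBeaconNode;
use attester::{BeaconNode, PublishOutcome};
use types::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use types::AttestationData;

fn stub_and_inspect() {
    let mut rng = XorShiftRng::from_seed([42; 16]);
    let node = SimulatedBeaconNode::default();

    // Stub the next responses; the node panics if polled without a stub, which keeps tests
    // honest about the calls they expect.
    node.set_next_produce_result(Ok(Some(AttestationData::random_for_test(&mut rng))));
    node.set_next_publish_result(Ok(PublishOutcome::ValidAttestation));

    // Exercise the trait directly, as the Attester would.
    let _data = node.produce_attestation_data(42, 7).unwrap();

    // The recorded input shows which (slot, shard) pair was requested.
    assert_eq!(*node.produce_input.read().unwrap(), Some((42, 7)));
}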
Otherwise, returns `None.` + fn attestation_shard(&self, slot: u64) -> Result, DutiesReaderError>; + + /// Returns `Some(shard)` if this slot is an attestation slot. Otherwise, returns `None.` + fn validator_index(&self) -> Option; +} + +/// Signs message using an internally-maintained private key. +pub trait Signer { + fn sign_attestation_message(&self, message: &[u8]) -> Option; +} diff --git a/eth2/block_producer/Cargo.toml b/eth2/block_producer/Cargo.toml new file mode 100644 index 000000000..86dde92f7 --- /dev/null +++ b/eth2/block_producer/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "block_producer" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[dependencies] +slot_clock = { path = "../../eth2/utils/slot_clock" } +ssz = { path = "../../eth2/utils/ssz" } +types = { path = "../../eth2/types" } diff --git a/validator_client/src/block_producer/mod.rs b/eth2/block_producer/src/lib.rs similarity index 59% rename from validator_client/src/block_producer/mod.rs rename to eth2/block_producer/src/lib.rs index e36d4ed80..8ed10ce1a 100644 --- a/validator_client/src/block_producer/mod.rs +++ b/eth2/block_producer/src/lib.rs @@ -1,16 +1,14 @@ -mod grpc; -mod service; -#[cfg(test)] -mod test_node; +pub mod test_utils; mod traits; -use self::traits::{BeaconNode, BeaconNodeError}; -use super::EpochDutiesMap; use slot_clock::SlotClock; -use std::sync::{Arc, RwLock}; -use types::{BeaconBlock, ChainSpec}; +use ssz::ssz_encode; +use std::sync::Arc; +use types::{BeaconBlock, ChainSpec, PublicKey}; -pub use self::service::BlockProducerService; +pub use self::traits::{ + BeaconNode, BeaconNodeError, DutiesReader, DutiesReaderError, PublishOutcome, Signer, +}; #[derive(Debug, PartialEq)] pub enum PollOutcome { @@ -26,6 +24,10 @@ pub enum PollOutcome { SlotAlreadyProcessed(u64), /// The Beacon Node was unable to produce a block at that slot. BeaconNodeUnableToProduceBlock(u64), + /// The signer failed to sign the message. + SignerRejection(u64), + /// The public key for this validator is not an active validator. + ValidatorIsUnknown(u64), } #[derive(Debug, PartialEq)] @@ -44,61 +46,65 @@ pub enum Error { /// Ensures that messages are not slashable. /// /// Relies upon an external service to keep the `EpochDutiesMap` updated. -pub struct BlockProducer { - pub last_processed_slot: u64, +pub struct BlockProducer { + pub last_processed_slot: Option, + pubkey: PublicKey, spec: Arc, - epoch_map: Arc>, - slot_clock: Arc>, + epoch_map: Arc, + slot_clock: Arc, beacon_node: Arc, + signer: Arc, } -impl BlockProducer { +impl BlockProducer { /// Returns a new instance where `last_processed_slot == 0`. pub fn new( spec: Arc, - epoch_map: Arc>, - slot_clock: Arc>, + pubkey: PublicKey, + epoch_map: Arc, + slot_clock: Arc, beacon_node: Arc, + signer: Arc, ) -> Self { Self { - last_processed_slot: 0, + last_processed_slot: None, + pubkey, spec, epoch_map, slot_clock, beacon_node, + signer, } } } -impl BlockProducer { +impl BlockProducer { /// "Poll" to see if the validator is required to take any action. /// /// The slot clock will be read and any new actions undertaken. pub fn poll(&mut self) -> Result { let slot = self .slot_clock - .read() - .map_err(|_| Error::SlotClockPoisoned)? .present_slot() .map_err(|_| Error::SlotClockError)? .ok_or(Error::SlotUnknowable)?; - let epoch = slot - .checked_div(self.spec.epoch_length) - .ok_or(Error::EpochLengthIsZero)?; - // If this is a new slot. 
- if slot > self.last_processed_slot { - let is_block_production_slot = { - let epoch_map = self.epoch_map.read().map_err(|_| Error::EpochMapPoisoned)?; - match epoch_map.get(&epoch) { - None => return Ok(PollOutcome::ProducerDutiesUnknown(slot)), - Some(duties) => duties.is_block_production_slot(slot), + if !self.is_processed_slot(slot) { + let is_block_production_slot = match self.epoch_map.is_block_production_slot(slot) { + Ok(result) => result, + Err(DutiesReaderError::UnknownEpoch) => { + return Ok(PollOutcome::ProducerDutiesUnknown(slot)); } + Err(DutiesReaderError::UnknownValidator) => { + return Ok(PollOutcome::ValidatorIsUnknown(slot)); + } + Err(DutiesReaderError::EpochLengthIsZero) => return Err(Error::EpochLengthIsZero), + Err(DutiesReaderError::Poisoned) => return Err(Error::EpochMapPoisoned), }; if is_block_production_slot { - self.last_processed_slot = slot; + self.last_processed_slot = Some(slot); self.produce_block(slot) } else { @@ -109,6 +115,13 @@ impl BlockProducer { } } + fn is_processed_slot(&self, slot: u64) -> bool { + match self.last_processed_slot { + Some(processed_slot) if processed_slot >= slot => true, + _ => false, + } + } + /// Produce a block at some slot. /// /// Assumes that a block is required at this slot (does not check the duties). @@ -120,11 +133,29 @@ impl BlockProducer { /// The slash-protection code is not yet implemented. There is zero protection against /// slashing. fn produce_block(&mut self, slot: u64) -> Result { - if let Some(block) = self.beacon_node.produce_beacon_block(slot)? { + let randao_reveal = { + let producer_nonce = self.beacon_node.proposer_nonce(&self.pubkey)?; + + // TODO: add domain, etc to this message. + let message = ssz_encode(&producer_nonce); + + match self.signer.sign_randao_reveal(&message) { + None => return Ok(PollOutcome::SignerRejection(slot)), + Some(signature) => signature, + } + }; + + if let Some(block) = self + .beacon_node + .produce_beacon_block(slot, &randao_reveal)? + { if self.safe_to_produce(&block) { - let block = self.sign_block(block); - self.beacon_node.publish_beacon_block(block)?; - Ok(PollOutcome::BlockProduced(slot)) + if let Some(block) = self.sign_block(block) { + self.beacon_node.publish_beacon_block(block)?; + Ok(PollOutcome::BlockProduced(slot)) + } else { + Ok(PollOutcome::SignerRejection(slot)) + } } else { Ok(PollOutcome::SlashableBlockNotProduced(slot)) } @@ -137,11 +168,19 @@ impl BlockProducer { /// /// Important: this function will not check to ensure the block is not slashable. This must be /// done upstream. - fn sign_block(&mut self, block: BeaconBlock) -> BeaconBlock { - // TODO: sign the block - // https://github.com/sigp/lighthouse/issues/160 + fn sign_block(&mut self, mut block: BeaconBlock) -> Option { self.store_produce(&block); - block + + match self + .signer + .sign_block_proposal(&block.proposal_root(&self.spec)[..]) + { + None => None, + Some(signature) => { + block.signature = signature; + Some(block) + } + } } /// Returns `true` if signing a block is safe (non-slashable). @@ -174,11 +213,13 @@ impl From for Error { #[cfg(test)] mod tests { - use super::test_node::TestBeaconNode; + use super::test_utils::{EpochMap, LocalSigner, SimulatedBeaconNode}; use super::*; - use crate::duties::EpochDuties; use slot_clock::TestingSlotClock; - use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use types::{ + test_utils::{SeedableRng, TestRandom, XorShiftRng}, + Keypair, + }; // TODO: implement more thorough testing. 
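// Sketch, not part of the diff: the randao-reveal step that `produce_block` now performs,
// isolated for clarity. It assumes only the `block_producer` traits shown in this changeset;
// as the TODO above notes, domain separation is not yet mixed into the signed message.
use block_producer::{BeaconNode, BeaconNodeError, Signer};
use ssz::ssz_encode;
use types::{PublicKey, Signature};

fn build_randao_reveal<B: BeaconNode, S: Signer>(
    beacon_node: &B,
    signer: &S,
    pubkey: &PublicKey,
) -> Result<Option<Signature>, BeaconNodeError> {
    // Ask the node for the proposer nonce, SSZ-encode it, then sign it.
    let nonce = beacon_node.proposer_nonce(pubkey)?;
    let message = ssz_encode(&nonce);
    // `None` means the signer refused, which the producer reports as SignerRejection.
    Ok(signer.sign_randao_reveal(&message))
}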
// https://github.com/sigp/lighthouse/issues/160 @@ -190,53 +231,54 @@ mod tests { let mut rng = XorShiftRng::from_seed([42; 16]); let spec = Arc::new(ChainSpec::foundation()); - let epoch_map = Arc::new(RwLock::new(EpochDutiesMap::new())); - let slot_clock = Arc::new(RwLock::new(TestingSlotClock::new(0))); - let beacon_node = Arc::new(TestBeaconNode::default()); + let slot_clock = Arc::new(TestingSlotClock::new(0)); + let beacon_node = Arc::new(SimulatedBeaconNode::default()); + let signer = Arc::new(LocalSigner::new(Keypair::random())); + + let mut epoch_map = EpochMap::new(spec.epoch_length); + let produce_slot = 100; + let produce_epoch = produce_slot / spec.epoch_length; + epoch_map.map.insert(produce_epoch, produce_slot); + let epoch_map = Arc::new(epoch_map); + let keypair = Keypair::random(); let mut block_producer = BlockProducer::new( spec.clone(), + keypair.pk.clone(), epoch_map.clone(), slot_clock.clone(), beacon_node.clone(), + signer.clone(), ); // Configure responses from the BeaconNode. beacon_node.set_next_produce_result(Ok(Some(BeaconBlock::random_for_test(&mut rng)))); - beacon_node.set_next_publish_result(Ok(true)); - - // Setup some valid duties for the validator - let produce_slot = 100; - let duties = EpochDuties { - block_production_slot: Some(produce_slot), - ..std::default::Default::default() - }; - let produce_epoch = produce_slot / spec.epoch_length; - epoch_map.write().unwrap().insert(produce_epoch, duties); + beacon_node.set_next_publish_result(Ok(PublishOutcome::ValidBlock)); + beacon_node.set_next_nonce_result(Ok(0)); // One slot before production slot... - slot_clock.write().unwrap().set_slot(produce_slot - 1); + slot_clock.set_slot(produce_slot - 1); assert_eq!( block_producer.poll(), Ok(PollOutcome::BlockProductionNotRequired(produce_slot - 1)) ); // On the produce slot... - slot_clock.write().unwrap().set_slot(produce_slot); + slot_clock.set_slot(produce_slot); assert_eq!( block_producer.poll(), Ok(PollOutcome::BlockProduced(produce_slot)) ); // Trying the same produce slot again... - slot_clock.write().unwrap().set_slot(produce_slot); + slot_clock.set_slot(produce_slot); assert_eq!( block_producer.poll(), Ok(PollOutcome::SlotAlreadyProcessed(produce_slot)) ); // One slot after the produce slot... - slot_clock.write().unwrap().set_slot(produce_slot + 1); + slot_clock.set_slot(produce_slot + 1); assert_eq!( block_producer.poll(), Ok(PollOutcome::BlockProductionNotRequired(produce_slot + 1)) @@ -244,7 +286,7 @@ mod tests { // In an epoch without known duties... 
         let slot = (produce_epoch + 1) * spec.epoch_length;
-        slot_clock.write().unwrap().set_slot(slot);
+        slot_clock.set_slot(slot);
         assert_eq!(
             block_producer.poll(),
             Ok(PollOutcome::ProducerDutiesUnknown(slot))
         )
diff --git a/eth2/block_producer/src/test_utils/epoch_map.rs b/eth2/block_producer/src/test_utils/epoch_map.rs
new file mode 100644
index 000000000..c7d1e6b4c
--- /dev/null
+++ b/eth2/block_producer/src/test_utils/epoch_map.rs
@@ -0,0 +1,27 @@
+use crate::{DutiesReader, DutiesReaderError};
+use std::collections::HashMap;
+
+pub struct EpochMap {
+    epoch_length: u64,
+    pub map: HashMap<u64, u64>,
+}
+
+impl EpochMap {
+    pub fn new(epoch_length: u64) -> Self {
+        Self {
+            epoch_length,
+            map: HashMap::new(),
+        }
+    }
+}
+
+impl DutiesReader for EpochMap {
+    fn is_block_production_slot(&self, slot: u64) -> Result<bool, DutiesReaderError> {
+        let epoch = slot / self.epoch_length;
+        match self.map.get(&epoch) {
+            Some(s) if *s == slot => Ok(true),
+            Some(s) if *s != slot => Ok(false),
+            _ => Err(DutiesReaderError::UnknownEpoch),
+        }
+    }
+}
diff --git a/eth2/block_producer/src/test_utils/local_signer.rs b/eth2/block_producer/src/test_utils/local_signer.rs
new file mode 100644
index 000000000..0ebefa29d
--- /dev/null
+++ b/eth2/block_producer/src/test_utils/local_signer.rs
@@ -0,0 +1,35 @@
+use crate::traits::Signer;
+use std::sync::RwLock;
+use types::{Keypair, Signature};
+
+/// A test-only struct used to simulate a signer.
+pub struct LocalSigner {
+    keypair: Keypair,
+    should_sign: RwLock<bool>,
+}
+
+impl LocalSigner {
+    /// Produce a new LocalSigner with signing enabled by default.
+    pub fn new(keypair: Keypair) -> Self {
+        Self {
+            keypair,
+            should_sign: RwLock::new(true),
+        }
+    }
+
+    /// Intended to disable signing when set to `false`. Note that the `Signer` impl below does
+    /// not yet consult this flag; all messages are currently signed.
+    pub fn enable_signing(&self, enabled: bool) {
+        *self.should_sign.write().unwrap() = enabled;
+    }
+}
+
+impl Signer for LocalSigner {
+    fn sign_block_proposal(&self, message: &[u8]) -> Option<Signature> {
+        Some(Signature::new(message, &self.keypair.sk))
+    }
+
+    fn sign_randao_reveal(&self, message: &[u8]) -> Option<Signature> {
+        Some(Signature::new(message, &self.keypair.sk))
+    }
+}
diff --git a/eth2/block_producer/src/test_utils/mod.rs b/eth2/block_producer/src/test_utils/mod.rs
new file mode 100644
index 000000000..481247dd0
--- /dev/null
+++ b/eth2/block_producer/src/test_utils/mod.rs
@@ -0,0 +1,7 @@
+mod epoch_map;
+mod local_signer;
+mod simulated_beacon_node;
+
+pub use self::epoch_map::EpochMap;
+pub use self::local_signer::LocalSigner;
+pub use self::simulated_beacon_node::SimulatedBeaconNode;
diff --git a/eth2/block_producer/src/test_utils/simulated_beacon_node.rs b/eth2/block_producer/src/test_utils/simulated_beacon_node.rs
new file mode 100644
index 000000000..772670a12
--- /dev/null
+++ b/eth2/block_producer/src/test_utils/simulated_beacon_node.rs
@@ -0,0 +1,65 @@
+use crate::traits::{BeaconNode, BeaconNodeError, PublishOutcome};
+use std::sync::RwLock;
+use types::{BeaconBlock, PublicKey, Signature};
+
+type NonceResult = Result<u64, BeaconNodeError>;
+type ProduceResult = Result<Option<BeaconBlock>, BeaconNodeError>;
+type PublishResult = Result<PublishOutcome, BeaconNodeError>;
+
+/// A test-only struct used to simulate a Beacon Node.
+#[derive(Default)]
+pub struct SimulatedBeaconNode {
+    pub nonce_input: RwLock<Option<PublicKey>>,
+    pub nonce_result: RwLock<Option<NonceResult>>,
+
+    pub produce_input: RwLock<Option<(u64, Signature)>>,
+    pub produce_result: RwLock<Option<ProduceResult>>,
+
+    pub publish_input: RwLock<Option<BeaconBlock>>,
+    pub publish_result: RwLock<Option<PublishResult>>,
+}
+
+impl SimulatedBeaconNode {
+    /// Set the result to be returned when `proposer_nonce` is called.
+    pub fn set_next_nonce_result(&self, result: NonceResult) {
+        *self.nonce_result.write().unwrap() = Some(result);
+    }
+
+    /// Set the result to be returned when `produce_beacon_block` is called.
+    pub fn set_next_produce_result(&self, result: ProduceResult) {
+        *self.produce_result.write().unwrap() = Some(result);
+    }
+
+    /// Set the result to be returned when `publish_beacon_block` is called.
+    pub fn set_next_publish_result(&self, result: PublishResult) {
+        *self.publish_result.write().unwrap() = Some(result);
+    }
+}
+
+impl BeaconNode for SimulatedBeaconNode {
+    fn proposer_nonce(&self, pubkey: &PublicKey) -> NonceResult {
+        *self.nonce_input.write().unwrap() = Some(pubkey.clone());
+        match *self.nonce_result.read().unwrap() {
+            Some(ref r) => r.clone(),
+            None => panic!("SimulatedBeaconNode: nonce_result == None"),
+        }
+    }
+
+    /// Returns the value specified by `set_next_produce_result`.
+    fn produce_beacon_block(&self, slot: u64, randao_reveal: &Signature) -> ProduceResult {
+        *self.produce_input.write().unwrap() = Some((slot, randao_reveal.clone()));
+        match *self.produce_result.read().unwrap() {
+            Some(ref r) => r.clone(),
+            None => panic!("SimulatedBeaconNode: produce_result == None"),
+        }
+    }
+
+    /// Returns the value specified by `set_next_publish_result`.
+    fn publish_beacon_block(&self, block: BeaconBlock) -> PublishResult {
+        *self.publish_input.write().unwrap() = Some(block);
+        match *self.publish_result.read().unwrap() {
+            Some(ref r) => r.clone(),
+            None => panic!("SimulatedBeaconNode: publish_result == None"),
+        }
+    }
+}
diff --git a/eth2/block_producer/src/traits.rs b/eth2/block_producer/src/traits.rs
new file mode 100644
index 000000000..b09b81e67
--- /dev/null
+++ b/eth2/block_producer/src/traits.rs
@@ -0,0 +1,52 @@
+use types::{BeaconBlock, PublicKey, Signature};
+
+#[derive(Debug, PartialEq, Clone)]
+pub enum BeaconNodeError {
+    RemoteFailure(String),
+    DecodeFailure,
+}
+
+#[derive(Debug, PartialEq, Clone)]
+pub enum PublishOutcome {
+    ValidBlock,
+    InvalidBlock(String),
+}
+
+/// Defines the methods required to produce and publish blocks on a Beacon Node.
+pub trait BeaconNode: Send + Sync {
+    /// Requests the proposer nonce (presently named `proposer_slots`).
+    fn proposer_nonce(&self, pubkey: &PublicKey) -> Result<u64, BeaconNodeError>;
+
+    /// Requests that the node produce a block.
+    ///
+    /// Returns `Ok(None)` if the Beacon Node is unable to produce at the given slot.
+    fn produce_beacon_block(
+        &self,
+        slot: u64,
+        randao_reveal: &Signature,
+    ) -> Result<Option<BeaconBlock>, BeaconNodeError>;
+
+    /// Requests that the node publish a block.
+    ///
+    /// Returns the outcome of the publish request.
+    fn publish_beacon_block(&self, block: BeaconBlock) -> Result<PublishOutcome, BeaconNodeError>;
+}
+
+#[derive(Debug, PartialEq, Clone)]
+pub enum DutiesReaderError {
+    UnknownValidator,
+    UnknownEpoch,
+    EpochLengthIsZero,
+    Poisoned,
+}
+
+/// Informs a validator of their duties (e.g., block production).
+pub trait DutiesReader: Send + Sync {
+    fn is_block_production_slot(&self, slot: u64) -> Result<bool, DutiesReaderError>;
+}
+
+/// Signs messages using an internally-maintained private key.
+pub trait Signer { + fn sign_block_proposal(&self, message: &[u8]) -> Option; + fn sign_randao_reveal(&self, message: &[u8]) -> Option; +} diff --git a/eth2/genesis/Cargo.toml b/eth2/genesis/Cargo.toml index d56b3f929..09e5d1e88 100644 --- a/eth2/genesis/Cargo.toml +++ b/eth2/genesis/Cargo.toml @@ -9,4 +9,3 @@ bls = { path = "../utils/bls" } ssz = { path = "../utils/ssz" } types = { path = "../types" } validator_induction = { path = "../validator_induction" } -validator_shuffling = { path = "../validator_shuffling" } diff --git a/eth2/genesis/src/beacon_block.rs b/eth2/genesis/src/beacon_block.rs index debc2bd2b..8b78f9e2d 100644 --- a/eth2/genesis/src/beacon_block.rs +++ b/eth2/genesis/src/beacon_block.rs @@ -31,12 +31,13 @@ mod tests { use bls::Signature; #[test] - fn test_genesis() { + fn test_state_root() { let spec = ChainSpec::foundation(); let state_root = Hash256::from("cats".as_bytes()); - // This only checks that the function runs without panic. - genesis_beacon_block(state_root, &spec); + let block = genesis_beacon_block(state_root, &spec); + + assert_eq!(block.state_root, state_root); } #[test] diff --git a/eth2/genesis/src/beacon_state.rs b/eth2/genesis/src/beacon_state.rs index 788af7c82..4ccb32e54 100644 --- a/eth2/genesis/src/beacon_state.rs +++ b/eth2/genesis/src/beacon_state.rs @@ -1,30 +1,12 @@ use types::{BeaconState, ChainSpec, Crosslink, Fork}; -use validator_shuffling::{shard_and_committees_for_cycle, ValidatorAssignmentError}; - -#[derive(Debug, PartialEq)] -pub enum Error { - NoValidators, - ValidationAssignmentError(ValidatorAssignmentError), - NotImplemented, -} - -pub fn genesis_beacon_state(spec: &ChainSpec) -> Result { - /* - * Assign the validators to shards, using all zeros as the seed. - */ - let _shard_and_committee_for_slots = { - let mut a = shard_and_committees_for_cycle(&[0; 32], &spec.initial_validators, 0, &spec)?; - let mut b = a.clone(); - a.append(&mut b); - a - }; +pub fn genesis_beacon_state(spec: &ChainSpec) -> BeaconState { let initial_crosslink = Crosslink { slot: spec.genesis_slot, shard_block_root: spec.zero_hash, }; - Ok(BeaconState { + BeaconState { /* * Misc */ @@ -55,8 +37,8 @@ pub fn genesis_beacon_state(spec: &ChainSpec) -> Result { current_epoch_start_shard: spec.genesis_start_shard, previous_epoch_calculation_slot: spec.genesis_slot, current_epoch_calculation_slot: spec.genesis_slot, - previous_epoch_randao_mix: spec.zero_hash, - current_epoch_randao_mix: spec.zero_hash, + previous_epoch_seed: spec.zero_hash, + current_epoch_seed: spec.zero_hash, /* * Custody challenges */ @@ -73,7 +55,7 @@ pub fn genesis_beacon_state(spec: &ChainSpec) -> Result { */ latest_crosslinks: vec![initial_crosslink; spec.shard_count as usize], latest_block_roots: vec![spec.zero_hash; spec.latest_block_roots_length as usize], - latest_penalized_exit_balances: vec![0; spec.latest_penalized_exit_length as usize], + latest_penalized_balances: vec![0; spec.latest_penalized_exit_length as usize], latest_attestations: vec![], batched_block_roots: vec![], /* @@ -81,12 +63,6 @@ pub fn genesis_beacon_state(spec: &ChainSpec) -> Result { */ latest_eth1_data: spec.intial_eth1_data.clone(), eth1_data_votes: vec![], - }) -} - -impl From for Error { - fn from(e: ValidatorAssignmentError) -> Error { - Error::ValidationAssignmentError(e) } } @@ -99,7 +75,7 @@ mod tests { fn test_genesis_state() { let spec = ChainSpec::foundation(); - let state = genesis_beacon_state(&spec).unwrap(); + let state = genesis_beacon_state(&spec); assert_eq!( state.validator_registry.len(), @@ 
-111,7 +87,7 @@ mod tests { fn test_genesis_state_misc() { let spec = ChainSpec::foundation(); - let state = genesis_beacon_state(&spec).unwrap(); + let state = genesis_beacon_state(&spec); assert_eq!(state.slot, 0); assert_eq!(state.genesis_time, spec.genesis_time); @@ -124,7 +100,7 @@ mod tests { fn test_genesis_state_validators() { let spec = ChainSpec::foundation(); - let state = genesis_beacon_state(&spec).unwrap(); + let state = genesis_beacon_state(&spec); assert_eq!(state.validator_registry, spec.initial_validators); assert_eq!(state.validator_balances, spec.initial_balances); @@ -137,7 +113,7 @@ mod tests { fn test_genesis_state_randomness_committees() { let spec = ChainSpec::foundation(); - let state = genesis_beacon_state(&spec).unwrap(); + let state = genesis_beacon_state(&spec); // Array of size 8,192 each being zero_hash assert_eq!(state.latest_randao_mixes.len(), 8_192); @@ -166,7 +142,7 @@ mod tests { fn test_genesis_state_finanilty() { let spec = ChainSpec::foundation(); - let state = genesis_beacon_state(&spec).unwrap(); + let state = genesis_beacon_state(&spec); assert_eq!(state.previous_justified_slot, 0); assert_eq!(state.justified_slot, 0); @@ -178,7 +154,7 @@ mod tests { fn test_genesis_state_recent_state() { let spec = ChainSpec::foundation(); - let state = genesis_beacon_state(&spec).unwrap(); + let state = genesis_beacon_state(&spec); // Test latest_crosslinks assert_eq!(state.latest_crosslinks.len(), 1_024); @@ -193,9 +169,9 @@ mod tests { assert_eq!(*block, Hash256::zero()); } - // Test latest_penalized_exit_balances - assert_eq!(state.latest_penalized_exit_balances.len(), 8_192); - for item in state.latest_penalized_exit_balances.iter() { + // Test latest_penalized_balances + assert_eq!(state.latest_penalized_balances.len(), 8_192); + for item in state.latest_penalized_balances.iter() { assert!(*item == 0); } @@ -210,7 +186,7 @@ mod tests { fn test_genesis_state_deposit_root() { let spec = ChainSpec::foundation(); - let state = genesis_beacon_state(&spec).unwrap(); + let state = genesis_beacon_state(&spec); assert_eq!(&state.latest_eth1_data, &spec.intial_eth1_data); assert!(state.eth1_data_votes.is_empty()); diff --git a/eth2/genesis/src/lib.rs b/eth2/genesis/src/lib.rs index 003e66959..295bdbd3c 100644 --- a/eth2/genesis/src/lib.rs +++ b/eth2/genesis/src/lib.rs @@ -2,4 +2,4 @@ mod beacon_block; mod beacon_state; pub use crate::beacon_block::genesis_beacon_block; -pub use crate::beacon_state::{genesis_beacon_state, Error as GenesisError}; +pub use crate::beacon_state::genesis_beacon_state; diff --git a/eth2/types/Cargo.toml b/eth2/types/Cargo.toml index af53fa597..77cfb6040 100644 --- a/eth2/types/Cargo.toml +++ b/eth2/types/Cargo.toml @@ -9,5 +9,13 @@ bls = { path = "../utils/bls" } boolean-bitfield = { path = "../utils/boolean-bitfield" } ethereum-types = "0.4.0" hashing = { path = "../utils/hashing" } +honey-badger-split = { path = "../utils/honey-badger-split" } +integer-sqrt = "0.1" +log = "0.4" +rayon = "1.0" rand = "0.5.5" +serde = "1.0" +serde_derive = "1.0" +serde_json = "1.0" ssz = { path = "../utils/ssz" } +vec_shuffle = { path = "../utils/vec_shuffle" } diff --git a/eth2/types/src/attestation.rs b/eth2/types/src/attestation.rs index c7ce07ab6..73ea5eec1 100644 --- a/eth2/types/src/attestation.rs +++ b/eth2/types/src/attestation.rs @@ -1,10 +1,11 @@ -use super::bls::AggregateSignature; -use super::ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; -use super::{AttestationData, Bitfield}; +use super::{AttestationData, Bitfield, 
Hash256}; use crate::test_utils::TestRandom; +use bls::AggregateSignature; use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize)] pub struct Attestation { pub data: AttestationData, pub aggregation_bitfield: Bitfield, @@ -12,6 +13,16 @@ pub struct Attestation { pub aggregate_signature: AggregateSignature, } +impl Attestation { + pub fn canonical_root(&self) -> Hash256 { + Hash256::from(&self.hash_tree_root()[..]) + } + + pub fn signable_message(&self, custody_bit: bool) -> Vec { + self.data.signable_message(custody_bit) + } +} + impl Encodable for Attestation { fn ssz_append(&self, s: &mut SszStream) { s.append(&self.data); @@ -73,9 +84,9 @@ impl TestRandom for Attestation { #[cfg(test)] mod tests { - use super::super::ssz::ssz_encode; use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/attestation_data.rs b/eth2/types/src/attestation_data.rs index 1744f0dd0..e2140527b 100644 --- a/eth2/types/src/attestation_data.rs +++ b/eth2/types/src/attestation_data.rs @@ -1,7 +1,8 @@ -use super::ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; -use super::Hash256; +use super::{AttestationDataAndCustodyBit, Hash256}; use crate::test_utils::TestRandom; use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; pub const SSZ_ATTESTION_DATA_LENGTH: usize = { 8 + // slot @@ -14,7 +15,7 @@ pub const SSZ_ATTESTION_DATA_LENGTH: usize = { 32 // justified_block_root }; -#[derive(Debug, Clone, PartialEq, Default)] +#[derive(Debug, Clone, PartialEq, Default, Serialize, Hash)] pub struct AttestationData { pub slot: u64, pub shard: u64, @@ -26,6 +27,8 @@ pub struct AttestationData { pub justified_block_root: Hash256, } +impl Eq for AttestationData {} + impl AttestationData { pub fn zero() -> Self { Self { @@ -40,10 +43,16 @@ impl AttestationData { } } - // TODO: Implement this as a merkle root, once tree_ssz is implemented. 
- // https://github.com/sigp/lighthouse/issues/92 pub fn canonical_root(&self) -> Hash256 { - Hash256::zero() + Hash256::from(&self.hash_tree_root()[..]) + } + + pub fn signable_message(&self, custody_bit: bool) -> Vec { + let attestation_data_and_custody_bit = AttestationDataAndCustodyBit { + data: self.clone(), + custody_bit, + }; + attestation_data_and_custody_bit.hash_tree_root() } } @@ -117,9 +126,9 @@ impl TestRandom for AttestationData { #[cfg(test)] mod tests { - use super::super::ssz::ssz_encode; use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/attestation_data_and_custody_bit.rs b/eth2/types/src/attestation_data_and_custody_bit.rs index 6265c0d06..8200abf30 100644 --- a/eth2/types/src/attestation_data_and_custody_bit.rs +++ b/eth2/types/src/attestation_data_and_custody_bit.rs @@ -1,9 +1,10 @@ -use super::ssz::{Decodable, DecodeError, Encodable, hash, TreeHash, SszStream}; -use rand::RngCore; -use crate::test_utils::TestRandom; use super::AttestationData; +use crate::test_utils::TestRandom; +use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; -#[derive(Debug, Clone, PartialEq, Default)] +#[derive(Debug, Clone, PartialEq, Default, Serialize)] pub struct AttestationDataAndCustodyBit { pub data: AttestationData, pub custody_bit: bool, @@ -12,19 +13,16 @@ pub struct AttestationDataAndCustodyBit { impl Encodable for AttestationDataAndCustodyBit { fn ssz_append(&self, s: &mut SszStream) { s.append(&self.data); - s.append(&self.custody_bit); + // TODO: deal with bools } } impl Decodable for AttestationDataAndCustodyBit { fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { let (data, i) = <_>::ssz_decode(bytes, i)?; - let (custody_bit, i) = <_>::ssz_decode(bytes, i)?; + let custody_bit = false; - let attestation_data_and_custody_bit = AttestationDataAndCustodyBit { - data, - custody_bit, - }; + let attestation_data_and_custody_bit = AttestationDataAndCustodyBit { data, custody_bit }; Ok((attestation_data_and_custody_bit, i)) } @@ -32,7 +30,7 @@ impl Decodable for AttestationDataAndCustodyBit { impl TreeHash for AttestationDataAndCustodyBit { fn hash_tree_root(&self) -> Vec { - let result: Vec = vec![]; + let mut result: Vec = vec![]; result.append(&mut self.data.hash_tree_root()); // TODO: add bool ssz // result.append(custody_bit.hash_tree_root()); @@ -44,16 +42,17 @@ impl TestRandom for AttestationDataAndCustodyBit { fn random_for_test(rng: &mut T) -> Self { Self { data: <_>::random_for_test(rng), - custody_bit: <_>::random_for_test(rng), + // TODO: deal with bools + custody_bit: false, } } } #[cfg(test)] mod test { - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use super::*; - use super::super::ssz::ssz_encode; + use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/beacon_block.rs b/eth2/types/src/beacon_block.rs index 50945df8b..3e2c51ede 100644 --- a/eth2/types/src/beacon_block.rs +++ b/eth2/types/src/beacon_block.rs @@ -1,11 +1,11 @@ -use super::ssz::{hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash}; -use super::{BeaconBlockBody, Eth1Data, Hash256}; +use super::{BeaconBlockBody, ChainSpec, Eth1Data, Hash256, ProposalSignedData}; use crate::test_utils::TestRandom; use bls::Signature; -use hashing::canonical_hash; use rand::RngCore; 
+use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; -#[derive(Debug, PartialEq, Clone)] +#[derive(Debug, PartialEq, Clone, Serialize)] pub struct BeaconBlock { pub slot: u64, pub parent_root: Hash256, @@ -18,9 +18,22 @@ pub struct BeaconBlock { impl BeaconBlock { pub fn canonical_root(&self) -> Hash256 { - // TODO: implement tree hashing. - // https://github.com/sigp/lighthouse/issues/70 - Hash256::from(&canonical_hash(&ssz_encode(self))[..]) + Hash256::from(&self.hash_tree_root()[..]) + } + + pub fn proposal_root(&self, spec: &ChainSpec) -> Hash256 { + let block_without_signature_root = { + let mut block_without_signature = self.clone(); + block_without_signature.signature = spec.empty_signature.clone(); + block_without_signature.canonical_root() + }; + + let proposal = ProposalSignedData { + slot: self.slot, + shard: spec.beacon_chain_shard_number, + block_root: block_without_signature_root, + }; + Hash256::from_slice(&proposal.hash_tree_root()[..]) } } @@ -91,9 +104,9 @@ impl TestRandom for BeaconBlock { #[cfg(test)] mod tests { - use super::super::ssz::ssz_encode; use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/beacon_block_body.rs b/eth2/types/src/beacon_block_body.rs index d530e74dc..ad9ec7ea6 100644 --- a/eth2/types/src/beacon_block_body.rs +++ b/eth2/types/src/beacon_block_body.rs @@ -1,7 +1,8 @@ -use super::ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use super::{Attestation, CasperSlashing, Deposit, Exit, ProposerSlashing}; use crate::test_utils::TestRandom; use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; // The following types are just dummy classes as they will not be defined until // Phase 1 (Sharding phase) @@ -9,7 +10,7 @@ type CustodyReseed = usize; type CustodyChallenge = usize; type CustodyResponse = usize; -#[derive(Debug, PartialEq, Clone, Default)] +#[derive(Debug, PartialEq, Clone, Default, Serialize)] pub struct BeaconBlockBody { pub proposer_slashings: Vec, pub casper_slashings: Vec, @@ -93,9 +94,9 @@ impl TestRandom for BeaconBlockBody { #[cfg(test)] mod tests { - use super::super::ssz::ssz_encode; use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 0de06fa60..39787ab32 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -1,19 +1,154 @@ -use super::crosslink::Crosslink; -use super::eth1_data::Eth1Data; -use super::eth1_data_vote::Eth1DataVote; -use super::fork::Fork; -use super::pending_attestation::PendingAttestation; -use super::ssz::{hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash}; -use super::validator::Validator; -use super::Hash256; use crate::test_utils::TestRandom; -use hashing::canonical_hash; +use crate::{ + validator::StatusFlags, validator_registry::get_active_validator_indices, AggregatePublicKey, + Attestation, AttestationData, BeaconBlock, Bitfield, ChainSpec, Crosslink, Eth1Data, + Eth1DataVote, Exit, Fork, Hash256, PendingAttestation, PublicKey, Signature, Validator, +}; +use bls::bls_verify_aggregate; +use honey_badger_split::SplitExt; +use integer_sqrt::IntegerSquareRoot; +use log::debug; use rand::RngCore; +use rayon::prelude::*; +use 
serde_derive::Serialize; +use ssz::ssz_encode; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; +use std::collections::{HashMap, HashSet}; +use std::iter::FromIterator; +use std::ops::Range; +use vec_shuffle::shuffle; + +// TODO: define elsehwere. +const DOMAIN_PROPOSAL: u64 = 2; +const DOMAIN_EXIT: u64 = 3; +const DOMAIN_RANDAO: u64 = 4; +const PHASE_0_CUSTODY_BIT: bool = false; +const DOMAIN_ATTESTATION: u64 = 1; + +pub enum Error { + InsufficientValidators, + BadBlockSignature, + InvalidEpoch(u64, Range), + CommitteesError(CommitteesError), +} + +#[derive(Debug, PartialEq)] +pub enum BlockProcessingError { + DBError(String), + StateAlreadyTransitioned, + PresentSlotIsNone, + UnableToDecodeBlock, + MissingParentState(Hash256), + InvalidParentState(Hash256), + MissingBeaconBlock(Hash256), + InvalidBeaconBlock(Hash256), + MissingParentBlock(Hash256), + NoBlockProducer, + StateSlotMismatch, + BadBlockSignature, + BadRandaoSignature, + MaxProposerSlashingsExceeded, + BadProposerSlashing, + MaxAttestationsExceeded, + InvalidAttestation(AttestationValidationError), + NoBlockRoot, + MaxDepositsExceeded, + MaxExitsExceeded, + BadExit, + BadCustodyReseeds, + BadCustodyChallenges, + BadCustodyResponses, + CommitteesError(CommitteesError), + SlotProcessingError(SlotProcessingError), +} + +#[derive(Debug, PartialEq)] +pub enum EpochError { + UnableToDetermineProducer, + NoBlockRoots, + BaseRewardQuotientIsZero, + CommitteesError(CommitteesError), + AttestationParticipantsError(AttestationParticipantsError), + InclusionError(InclusionError), + WinningRootError(WinningRootError), +} + +#[derive(Debug, PartialEq)] +pub enum WinningRootError { + NoWinningRoot, + AttestationParticipantsError(AttestationParticipantsError), +} + +#[derive(Debug, PartialEq)] +pub enum CommitteesError { + InvalidEpoch(u64, Range), + InsufficientNumberOfValidators, +} + +#[derive(Debug, PartialEq)] +pub enum InclusionError { + NoIncludedAttestations, + AttestationParticipantsError(AttestationParticipantsError), +} + +#[derive(Debug, PartialEq)] +pub enum AttestationParticipantsError { + NoCommitteeForShard, + NoCommittees, + BadBitfieldLength, + CommitteesError(CommitteesError), +} + +#[derive(Debug, PartialEq)] +pub enum SlotProcessingError { + CommitteesError(CommitteesError), + EpochProcessingError(EpochError), +} + +#[derive(Debug, PartialEq)] +pub enum AttestationValidationError { + IncludedTooEarly, + IncludedTooLate, + WrongJustifiedSlot, + WrongJustifiedRoot, + BadLatestCrosslinkRoot, + BadSignature, + ShardBlockRootNotZero, + NoBlockRoot, + AttestationParticipantsError(AttestationParticipantsError), +} + +#[derive(Clone)] +pub struct WinningRoot { + pub shard_block_root: Hash256, + pub attesting_validator_indices: Vec, + pub total_balance: u64, + pub total_attesting_balance: u64, +} + +macro_rules! ensure { + ($condition: expr, $result: expr) => { + if !$condition { + return Err($result); + } + }; +} + +macro_rules! safe_add_assign { + ($a: expr, $b: expr) => { + $a = $a.saturating_add($b); + }; +} +macro_rules! safe_sub_assign { + ($a: expr, $b: expr) => { + $a = $a.saturating_sub($b); + }; +} // Custody will not be added to the specs until Phase 1 (Sharding Phase) so dummy class used. 
type CustodyChallenge = usize; -#[derive(Debug, PartialEq, Clone, Default)] +#[derive(Debug, PartialEq, Clone, Default, Serialize)] pub struct BeaconState { // Misc pub slot: u64, @@ -34,8 +169,8 @@ pub struct BeaconState { pub current_epoch_start_shard: u64, pub previous_epoch_calculation_slot: u64, pub current_epoch_calculation_slot: u64, - pub previous_epoch_randao_mix: Hash256, - pub current_epoch_randao_mix: Hash256, + pub previous_epoch_seed: Hash256, + pub current_epoch_seed: Hash256, // Custody challenges pub custody_challenges: Vec, @@ -49,7 +184,7 @@ pub struct BeaconState { // Recent state pub latest_crosslinks: Vec, pub latest_block_roots: Vec, - pub latest_penalized_exit_balances: Vec, + pub latest_penalized_balances: Vec, pub latest_attestations: Vec, pub batched_block_roots: Vec, @@ -60,9 +195,1485 @@ pub struct BeaconState { impl BeaconState { pub fn canonical_root(&self) -> Hash256 { - // TODO: implement tree hashing. - // https://github.com/sigp/lighthouse/issues/70 - Hash256::from(&canonical_hash(&ssz_encode(self))[..]) + Hash256::from(&self.hash_tree_root()[..]) + } + + pub fn current_epoch(&self, spec: &ChainSpec) -> u64 { + self.slot / spec.epoch_length + } + + pub fn previous_epoch(&self, spec: &ChainSpec) -> u64 { + self.current_epoch(spec).saturating_sub(1) + } + + pub fn current_epoch_start_slot(&self, spec: &ChainSpec) -> u64 { + self.current_epoch(spec) * spec.epoch_length + } + + pub fn previous_epoch_start_slot(&self, spec: &ChainSpec) -> u64 { + self.previous_epoch(spec) * spec.epoch_length + } + + /// Returns the number of committees per slot. + /// + /// Note: this is _not_ the committee size. + pub fn get_committee_count_per_slot( + &self, + active_validator_count: usize, + spec: &ChainSpec, + ) -> u64 { + std::cmp::max( + 1, + std::cmp::min( + spec.shard_count / spec.epoch_length, + active_validator_count as u64 / spec.epoch_length / spec.target_committee_size, + ), + ) + } + + /// Returns the start slot and end slot of the current epoch containing `self.slot`. + pub fn get_current_epoch_boundaries(&self, epoch_length: u64) -> Range { + let slot_in_epoch = self.slot % epoch_length; + let start = self.slot - slot_in_epoch; + let end = self.slot + (epoch_length - slot_in_epoch); + start..end + } + + /// Returns the start slot and end slot of the current epoch containing `self.slot`. 
+ pub fn get_previous_epoch_boundaries(&self, spec: &ChainSpec) -> Range { + let current_epoch = self.slot / spec.epoch_length; + let previous_epoch = current_epoch.saturating_sub(1); + let start = previous_epoch * spec.epoch_length; + let end = start + spec.epoch_length; + start..end + } + + fn get_previous_epoch_committee_count_per_slot(&self, spec: &ChainSpec) -> u64 { + let previous_active_validators = get_active_validator_indices( + &self.validator_registry, + self.previous_epoch_calculation_slot, + ); + self.get_committee_count_per_slot(previous_active_validators.len(), spec) as u64 + } + + pub fn get_current_epoch_committee_count_per_slot(&self, spec: &ChainSpec) -> u64 { + let current_active_validators = get_active_validator_indices( + &self.validator_registry, + self.current_epoch_calculation_slot, + ); + self.get_committee_count_per_slot(current_active_validators.len(), spec) + } + + pub fn get_crosslink_committees_at_slot( + &self, + slot: u64, + spec: &ChainSpec, + ) -> Result, u64)>, CommitteesError> { + let epoch = slot / spec.epoch_length; + let current_epoch = self.slot / spec.epoch_length; + let previous_epoch = if current_epoch == spec.genesis_slot { + current_epoch + } else { + current_epoch.saturating_sub(1) + }; + let next_epoch = current_epoch + 1; + + ensure!( + (previous_epoch <= epoch) & (epoch < next_epoch), + CommitteesError::InvalidEpoch(slot, previous_epoch..current_epoch) + ); + + let offset = slot % spec.epoch_length; + + let (committees_per_slot, shuffling, slot_start_shard) = if epoch < current_epoch { + let committees_per_slot = self.get_previous_epoch_committee_count_per_slot(spec); + let shuffling = self.get_shuffling( + self.previous_epoch_seed, + self.previous_epoch_calculation_slot, + spec, + ); + let slot_start_shard = + (self.previous_epoch_start_shard + committees_per_slot * offset) % spec.shard_count; + (committees_per_slot, shuffling, slot_start_shard) + } else { + let committees_per_slot = self.get_current_epoch_committee_count_per_slot(spec); + let shuffling = self.get_shuffling( + self.current_epoch_seed, + self.current_epoch_calculation_slot, + spec, + ); + let slot_start_shard = + (self.current_epoch_start_shard + committees_per_slot * offset) % spec.shard_count; + (committees_per_slot, shuffling, slot_start_shard) + }; + + let mut crosslinks_at_slot = vec![]; + for i in 0..committees_per_slot { + let tuple = ( + shuffling[(committees_per_slot * offset + i) as usize].clone(), + (slot_start_shard + i) % spec.shard_count, + ); + crosslinks_at_slot.push(tuple) + } + Ok(crosslinks_at_slot) + } + + pub fn per_slot_processing( + &mut self, + previous_block_root: Hash256, + spec: &ChainSpec, + ) -> Result<(), SlotProcessingError> { + if (self.slot + 1) % spec.epoch_length == 0 { + self.per_epoch_processing(spec)?; + } + + self.slot += 1; + + let block_proposer = self.get_beacon_proposer_index(self.slot, spec)?; + + self.validator_registry[block_proposer].proposer_slots += 1; + self.latest_randao_mixes[(self.slot % spec.latest_randao_mixes_length) as usize] = + self.latest_randao_mixes[((self.slot - 1) % spec.latest_randao_mixes_length) as usize]; + + // Block roots. 
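+        // The parent block root is stored in the `latest_block_roots` circular buffer below; once
+        // every `latest_block_roots_length` slots the whole window is reduced to a single merkle
+        // root and appended to `batched_block_roots`.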
+ self.latest_block_roots[((self.slot - 1) % spec.latest_block_roots_length) as usize] = + previous_block_root; + + if self.slot % spec.latest_block_roots_length == 0 { + let root = merkle_root(&self.latest_block_roots[..]); + self.batched_block_roots.push(root); + } + Ok(()) + } + + pub fn attestation_slot_and_shard_for_validator( + &self, + validator_index: usize, + spec: &ChainSpec, + ) -> Result, CommitteesError> { + let mut result = None; + for slot in self.get_current_epoch_boundaries(spec.epoch_length) { + for (committee, shard) in self.get_crosslink_committees_at_slot(slot, spec)? { + if let Some(committee_index) = committee.iter().position(|&i| i == validator_index) + { + result = Some((slot, shard, committee_index as u64)); + } + } + } + Ok(result) + } + + pub fn per_block_processing( + &mut self, + block: &BeaconBlock, + spec: &ChainSpec, + ) -> Result<(), BlockProcessingError> { + self.per_block_processing_signature_optional(block, true, spec) + } + + pub fn per_block_processing_without_verifying_block_signature( + &mut self, + block: &BeaconBlock, + spec: &ChainSpec, + ) -> Result<(), BlockProcessingError> { + self.per_block_processing_signature_optional(block, false, spec) + } + + fn per_block_processing_signature_optional( + &mut self, + block: &BeaconBlock, + verify_block_signature: bool, + spec: &ChainSpec, + ) -> Result<(), BlockProcessingError> { + ensure!( + block.slot == self.slot, + BlockProcessingError::StateSlotMismatch + ); + + /* + * Proposer Signature + */ + let block_proposer_index = self + .get_beacon_proposer_index(block.slot, spec) + .map_err(|_| BlockProcessingError::NoBlockProducer)?; + let block_proposer = &self.validator_registry[block_proposer_index]; + + if verify_block_signature { + ensure!( + bls_verify( + &block_proposer.pubkey, + &block.proposal_root(spec)[..], + &block.signature, + get_domain(&self.fork_data, self.slot, DOMAIN_PROPOSAL) + ), + BlockProcessingError::BadBlockSignature + ); + } + + /* + * RANDAO + */ + ensure!( + bls_verify( + &block_proposer.pubkey, + &ssz_encode(&block_proposer.proposer_slots), + &block.randao_reveal, + get_domain(&self.fork_data, self.slot, DOMAIN_RANDAO) + ), + BlockProcessingError::BadRandaoSignature + ); + + // TODO: check this is correct. + let new_mix = { + let mut mix = self.latest_randao_mixes + [(self.slot % spec.latest_randao_mixes_length) as usize] + .to_vec(); + mix.append(&mut ssz_encode(&block.randao_reveal)); + Hash256::from(&hash(&mix)[..]) + }; + + self.latest_randao_mixes[(self.slot % spec.latest_randao_mixes_length) as usize] = new_mix; + + /* + * Eth1 data + */ + + // TODO: Eth1 data processing. 
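+        // (Eth1 data votes are currently only tallied later, in `per_epoch_processing`;
+        // accumulating per-block votes here appears to be what the TODO above refers to.)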
+ + /* + * Proposer slashings + */ + ensure!( + block.body.proposer_slashings.len() as u64 <= spec.max_proposer_slashings, + BlockProcessingError::MaxProposerSlashingsExceeded + ); + for proposer_slashing in &block.body.proposer_slashings { + let proposer = self + .validator_registry + .get(proposer_slashing.proposer_index as usize) + .ok_or(BlockProcessingError::BadProposerSlashing)?; + ensure!( + proposer_slashing.proposal_data_1.slot == proposer_slashing.proposal_data_2.slot, + BlockProcessingError::BadProposerSlashing + ); + ensure!( + proposer_slashing.proposal_data_1.shard == proposer_slashing.proposal_data_2.shard, + BlockProcessingError::BadProposerSlashing + ); + ensure!( + proposer_slashing.proposal_data_1.block_root + != proposer_slashing.proposal_data_2.block_root, + BlockProcessingError::BadProposerSlashing + ); + ensure!( + proposer.penalized_slot > self.slot, + BlockProcessingError::BadProposerSlashing + ); + ensure!( + bls_verify( + &proposer.pubkey, + &proposer_slashing.proposal_data_1.hash_tree_root(), + &proposer_slashing.proposal_signature_1, + get_domain( + &self.fork_data, + proposer_slashing.proposal_data_1.slot, + DOMAIN_PROPOSAL + ) + ), + BlockProcessingError::BadProposerSlashing + ); + ensure!( + bls_verify( + &proposer.pubkey, + &proposer_slashing.proposal_data_2.hash_tree_root(), + &proposer_slashing.proposal_signature_2, + get_domain( + &self.fork_data, + proposer_slashing.proposal_data_2.slot, + DOMAIN_PROPOSAL + ) + ), + BlockProcessingError::BadProposerSlashing + ); + penalize_validator(&self, proposer_slashing.proposer_index as usize); + } + + /* + * Attestations + */ + ensure!( + block.body.attestations.len() as u64 <= spec.max_attestations, + BlockProcessingError::MaxAttestationsExceeded + ); + + for attestation in &block.body.attestations { + self.validate_attestation(attestation, spec)?; + + let pending_attestation = PendingAttestation { + data: attestation.data.clone(), + aggregation_bitfield: attestation.aggregation_bitfield.clone(), + custody_bitfield: attestation.custody_bitfield.clone(), + slot_included: self.slot, + }; + self.latest_attestations.push(pending_attestation); + } + + debug!( + "{} attestations verified & processed.", + block.body.attestations.len() + ); + + /* + * Deposits + */ + ensure!( + block.body.deposits.len() as u64 <= spec.max_deposits, + BlockProcessingError::MaxDepositsExceeded + ); + + // TODO: process deposits. 
+
+        /*
+         * Exits
+         */
+
+        ensure!(
+            block.body.exits.len() as u64 <= spec.max_exits,
+            BlockProcessingError::MaxExitsExceeded
+        );
+
+        for exit in &block.body.exits {
+            let validator = self
+                .validator_registry
+                .get(exit.validator_index as usize)
+                .ok_or(BlockProcessingError::BadExit)?;
+            ensure!(
+                validator.exit_slot > self.slot + spec.entry_exit_delay,
+                BlockProcessingError::BadExit
+            );
+            ensure!(self.slot >= exit.slot, BlockProcessingError::BadExit);
+            let exit_message = {
+                let exit_struct = Exit {
+                    slot: exit.slot,
+                    validator_index: exit.validator_index,
+                    signature: spec.empty_signature.clone(),
+                };
+                exit_struct.hash_tree_root()
+            };
+            ensure!(
+                bls_verify(
+                    &validator.pubkey,
+                    &exit_message,
+                    &exit.signature,
+                    get_domain(&self.fork_data, exit.slot, DOMAIN_EXIT)
+                ),
+                BlockProcessingError::BadExit
+            );
+            initiate_validator_exit(&self, exit.validator_index);
+        }
+
+        /*
+         * Custody
+         */
+        ensure!(
+            block.body.custody_reseeds.is_empty(),
+            BlockProcessingError::BadCustodyReseeds
+        );
+        ensure!(
+            block.body.custody_challenges.is_empty(),
+            BlockProcessingError::BadCustodyChallenges
+        );
+        ensure!(
+            block.body.custody_responses.is_empty(),
+            BlockProcessingError::BadCustodyResponses
+        );
+
+        debug!("State transition complete.");
+
+        Ok(())
+    }
+
+    pub fn get_shuffling(&self, seed: Hash256, slot: u64, spec: &ChainSpec) -> Vec<Vec<usize>> {
+        let slot = slot - (slot % spec.epoch_length);
+
+        let active_validator_indices = get_active_validator_indices(&self.validator_registry, slot);
+
+        let committees_per_slot =
+            self.get_committee_count_per_slot(active_validator_indices.len(), spec);
+
+        // TODO: check that Hash256 matches 'int_to_bytes32'.
+        let seed = seed ^ Hash256::from(slot);
+        let shuffled_active_validator_indices =
+            shuffle(&seed, active_validator_indices).expect("Max validator count exceeded!");
+
+        shuffled_active_validator_indices
+            .honey_badger_split((committees_per_slot * spec.epoch_length) as usize)
+            .filter_map(|slice: &[usize]| Some(slice.to_vec()))
+            .collect()
+    }
+
+    /// Returns the beacon proposer index for the `slot`.
+    /// If the state does not contain an index for a beacon proposer at the requested `slot`, an
+    /// error is returned.
+ pub fn get_beacon_proposer_index( + &self, + slot: u64, + spec: &ChainSpec, + ) -> Result { + let committees = self.get_crosslink_committees_at_slot(slot, spec)?; + committees + .first() + .ok_or(CommitteesError::InsufficientNumberOfValidators) + .and_then(|(first_committee, _)| { + let index = (slot as usize) + .checked_rem(first_committee.len()) + .ok_or(CommitteesError::InsufficientNumberOfValidators)?; + // NOTE: next index will not panic as we have already returned if this is the case + Ok(first_committee[index]) + }) + } + + pub fn per_epoch_processing(&mut self, spec: &ChainSpec) -> Result<(), EpochError> { + debug!( + "Starting per-epoch processing on epoch {}...", + self.current_epoch(spec) + ); + /* + * All Validators + */ + let active_validator_indices = + get_active_validator_indices(&self.validator_registry, self.slot); + let total_balance = self.get_effective_balances(&active_validator_indices[..], spec); + + debug!( + "{} validators with a total balance of {} wei.", + active_validator_indices.len(), + total_balance + ); + + let current_epoch_attestations: Vec<&PendingAttestation> = self + .latest_attestations + .par_iter() + .filter(|a| a.data.slot / spec.epoch_length == self.current_epoch(spec)) + .collect(); + + debug!( + "Current epoch attestations: {}", + current_epoch_attestations.len() + ); + + /* + * Validators attesting during the current epoch. + */ + if self.latest_block_roots.is_empty() { + return Err(EpochError::NoBlockRoots); + } + + let current_epoch_boundary_attestations: Vec<&PendingAttestation> = + current_epoch_attestations + .par_iter() + .filter(|a| { + match self.get_block_root(self.current_epoch_start_slot(spec), spec) { + Some(block_root) => { + (a.data.epoch_boundary_root == *block_root) + && (a.data.justified_slot == self.justified_slot) + } + // Protected by a check that latest_block_roots isn't empty. + // + // TODO: provide detailed reasoning. + None => unreachable!(), + } + }) + .cloned() + .collect(); + + let current_epoch_boundary_attester_indices = self + .get_attestation_participants_union(¤t_epoch_boundary_attestations[..], spec)?; + let current_epoch_boundary_attesting_balance = + self.get_effective_balances(¤t_epoch_boundary_attester_indices[..], spec); + + debug!( + "Current epoch boundary attesters: {}", + current_epoch_boundary_attester_indices.len() + ); + + /* + * Validators attesting during the previous epoch + */ + + /* + * Validators that made an attestation during the previous epoch + */ + let previous_epoch_attestations: Vec<&PendingAttestation> = self + .latest_attestations + .par_iter() + .filter(|a| { + //TODO: ensure these saturating subs are correct. 
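+                // Note: `previous_epoch` saturates at zero, so during the first epoch this
+                // filter also matches current-epoch attestations; presumably that is the edge
+                // case the TODO above refers to.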
+ a.data.slot / spec.epoch_length == self.previous_epoch(spec) + }) + .collect(); + + debug!( + "previous epoch attestations: {}", + previous_epoch_attestations.len() + ); + + let previous_epoch_attester_indices = + self.get_attestation_participants_union(&previous_epoch_attestations[..], spec)?; + + /* + * Validators targetting the previous justified slot + */ + let previous_epoch_justified_attestations: Vec<&PendingAttestation> = { + let mut a: Vec<&PendingAttestation> = current_epoch_attestations + .iter() + .filter(|a| a.data.justified_slot == self.previous_justified_slot) + .cloned() + .collect(); + let mut b: Vec<&PendingAttestation> = previous_epoch_attestations + .iter() + .filter(|a| a.data.justified_slot == self.previous_justified_slot) + .cloned() + .collect(); + a.append(&mut b); + a + }; + + let previous_epoch_justified_attester_indices = self + .get_attestation_participants_union(&previous_epoch_justified_attestations[..], spec)?; + let previous_epoch_justified_attesting_balance = + self.get_effective_balances(&previous_epoch_justified_attester_indices[..], spec); + + /* + * Validators justifying the epoch boundary block at the start of the previous epoch + */ + let previous_epoch_boundary_attestations: Vec<&PendingAttestation> = + previous_epoch_justified_attestations + .iter() + .filter(|a| { + match self.get_block_root(self.previous_epoch_start_slot(spec), spec) { + Some(block_root) => a.data.epoch_boundary_root == *block_root, + // Protected by a check that latest_block_roots isn't empty. + // + // TODO: provide detailed reasoning. + None => unreachable!(), + } + }) + .cloned() + .collect(); + + let previous_epoch_boundary_attester_indices = self + .get_attestation_participants_union(&previous_epoch_boundary_attestations[..], spec)?; + let previous_epoch_boundary_attesting_balance = + self.get_effective_balances(&previous_epoch_boundary_attester_indices[..], spec); + + /* + * Validators attesting to the expected beacon chain head during the previous epoch. + */ + let previous_epoch_head_attestations: Vec<&PendingAttestation> = + previous_epoch_attestations + .iter() + .filter(|a| { + match self.get_block_root(a.data.slot, spec) { + Some(block_root) => a.data.beacon_block_root == *block_root, + // Protected by a check that latest_block_roots isn't empty. + // + // TODO: provide detailed reasoning. + None => unreachable!(), + } + }) + .cloned() + .collect(); + + let previous_epoch_head_attester_indices = + self.get_attestation_participants_union(&previous_epoch_head_attestations[..], spec)?; + let previous_epoch_head_attesting_balance = + self.get_effective_balances(&previous_epoch_head_attester_indices[..], spec); + + debug!( + "previous_epoch_head_attester_balance of {} wei.", + previous_epoch_head_attesting_balance + ); + + /* + * Eth1 Data + */ + if self.slot % spec.eth1_data_voting_period == 0 { + for eth1_data_vote in &self.eth1_data_votes { + if eth1_data_vote.vote_count * 2 > spec.eth1_data_voting_period { + self.latest_eth1_data = eth1_data_vote.eth1_data.clone(); + } + } + self.eth1_data_votes = vec![]; + } + + /* + * Justification + */ + self.previous_justified_slot = self.justified_slot; + let (new_bitfield, _) = self.justification_bitfield.overflowing_mul(2); + self.justification_bitfield = new_bitfield; + + // If >= 2/3 of validators voted for the previous epoch boundary + if (3 * previous_epoch_boundary_attesting_balance) >= (2 * total_balance) { + // TODO: check saturating_sub is correct. 
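+            // Note: for slots earlier than `2 * epoch_length` the saturating subtraction below
+            // clamps `justified_slot` to 0 (the genesis slot), e.g. with epoch_length = 64 and
+            // slot = 100, 100 - 128 saturates to 0.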
+ self.justification_bitfield |= 2; + self.justified_slot = self.slot.saturating_sub(2 * spec.epoch_length); + debug!(">= 2/3 voted for previous epoch boundary"); + } + + // If >= 2/3 of validators voted for the current epoch boundary + if (3 * current_epoch_boundary_attesting_balance) >= (2 * total_balance) { + // TODO: check saturating_sub is correct. + self.justification_bitfield |= 1; + self.justified_slot = self.slot.saturating_sub(1 * spec.epoch_length); + debug!(">= 2/3 voted for current epoch boundary"); + } + + if (self.previous_justified_slot == self.slot.saturating_sub(2 * spec.epoch_length)) + && (self.justification_bitfield % 4 == 3) + { + self.finalized_slot = self.previous_justified_slot; + } + if (self.previous_justified_slot == self.slot.saturating_sub(3 * spec.epoch_length)) + && (self.justification_bitfield % 8 == 7) + { + self.finalized_slot = self.previous_justified_slot; + } + if (self.previous_justified_slot == self.slot.saturating_sub(4 * spec.epoch_length)) + && (self.justification_bitfield % 16 == 14) + { + self.finalized_slot = self.previous_justified_slot; + } + if (self.previous_justified_slot == self.slot.saturating_sub(4 * spec.epoch_length)) + && (self.justification_bitfield % 16 == 15) + { + self.finalized_slot = self.previous_justified_slot; + } + + debug!( + "Finalized slot {}, justified slot {}.", + self.finalized_slot, self.justified_slot + ); + + /* + * Crosslinks + */ + + // Cached for later lookups. + let mut winning_root_for_shards: HashMap> = + HashMap::new(); + + // for slot in self.slot.saturating_sub(2 * spec.epoch_length)..self.slot { + for slot in self.get_previous_epoch_boundaries(spec) { + let crosslink_committees_at_slot = self.get_crosslink_committees_at_slot(slot, spec)?; + + for (crosslink_committee, shard) in crosslink_committees_at_slot { + let shard = shard as u64; + + let winning_root = self.winning_root( + shard, + ¤t_epoch_attestations, + &previous_epoch_attestations, + spec, + ); + + if let Ok(winning_root) = &winning_root { + let total_committee_balance = + self.get_effective_balances(&crosslink_committee[..], spec); + + if (3 * winning_root.total_attesting_balance) >= (2 * total_committee_balance) { + self.latest_crosslinks[shard as usize] = Crosslink { + slot: self.slot, + shard_block_root: winning_root.shard_block_root, + } + } + } + winning_root_for_shards.insert(shard, winning_root); + } + } + + debug!( + "Found {} winning shard roots.", + winning_root_for_shards.len() + ); + + /* + * Rewards and Penalities + */ + let base_reward_quotient = total_balance.integer_sqrt(); + if base_reward_quotient == 0 { + return Err(EpochError::BaseRewardQuotientIsZero); + } + + /* + * Justification and finalization + */ + let epochs_since_finality = + self.slot.saturating_sub(self.finalized_slot) / spec.epoch_length; + + // TODO: fix this extra map + let previous_epoch_justified_attester_indices_hashset: HashSet = + HashSet::from_iter(previous_epoch_justified_attester_indices.iter().map(|i| *i)); + let previous_epoch_boundary_attester_indices_hashset: HashSet = + HashSet::from_iter(previous_epoch_boundary_attester_indices.iter().map(|i| *i)); + let previous_epoch_head_attester_indices_hashset: HashSet = + HashSet::from_iter(previous_epoch_head_attester_indices.iter().map(|i| *i)); + let previous_epoch_attester_indices_hashset: HashSet = + HashSet::from_iter(previous_epoch_attester_indices.iter().map(|i| *i)); + + debug!("previous epoch justified attesters: {}, previous epoch boundary attesters: {}, previous epoch head attesters: {}, previous 
epoch attesters: {}", previous_epoch_justified_attester_indices.len(), previous_epoch_boundary_attester_indices.len(), previous_epoch_head_attester_indices.len(), previous_epoch_attester_indices.len()); + + debug!("{} epochs since finality.", epochs_since_finality); + + if epochs_since_finality <= 4 { + for index in 0..self.validator_balances.len() { + let base_reward = self.base_reward(index, base_reward_quotient, spec); + + if previous_epoch_justified_attester_indices_hashset.contains(&index) { + safe_add_assign!( + self.validator_balances[index], + base_reward * previous_epoch_justified_attesting_balance / total_balance + ); + } else { + safe_sub_assign!(self.validator_balances[index], base_reward); + } + + if previous_epoch_boundary_attester_indices_hashset.contains(&index) { + safe_add_assign!( + self.validator_balances[index], + base_reward * previous_epoch_boundary_attesting_balance / total_balance + ); + } else { + safe_sub_assign!(self.validator_balances[index], base_reward); + } + + if previous_epoch_head_attester_indices_hashset.contains(&index) { + safe_add_assign!( + self.validator_balances[index], + base_reward * previous_epoch_head_attesting_balance / total_balance + ); + } else { + safe_sub_assign!(self.validator_balances[index], base_reward); + } + } + + for index in previous_epoch_attester_indices { + let base_reward = self.base_reward(index, base_reward_quotient, spec); + let inclusion_distance = + self.inclusion_distance(&previous_epoch_attestations, index, spec)?; + + safe_add_assign!( + self.validator_balances[index], + base_reward * spec.min_attestation_inclusion_delay / inclusion_distance + ) + } + } else { + for index in 0..self.validator_balances.len() { + let inactivity_penalty = self.inactivity_penalty( + index, + epochs_since_finality, + base_reward_quotient, + spec, + ); + + if !previous_epoch_justified_attester_indices_hashset.contains(&index) { + safe_sub_assign!(self.validator_balances[index], inactivity_penalty); + } + + if !previous_epoch_boundary_attester_indices_hashset.contains(&index) { + safe_sub_assign!(self.validator_balances[index], inactivity_penalty); + } + + if !previous_epoch_head_attester_indices_hashset.contains(&index) { + safe_sub_assign!(self.validator_balances[index], inactivity_penalty); + } + } + + for index in previous_epoch_attester_indices { + let base_reward = self.base_reward(index, base_reward_quotient, spec); + let inclusion_distance = + self.inclusion_distance(&previous_epoch_attestations, index, spec)?; + + safe_sub_assign!( + self.validator_balances[index], + base_reward + - base_reward * spec.min_attestation_inclusion_delay / inclusion_distance + ); + } + } + + debug!("Processed validator justification and finalization rewards/penalities."); + + /* + * Attestation inclusion + */ + for &index in &previous_epoch_attester_indices_hashset { + let inclusion_slot = + self.inclusion_slot(&previous_epoch_attestations[..], index, spec)?; + let proposer_index = self + .get_beacon_proposer_index(inclusion_slot, spec) + .map_err(|_| EpochError::UnableToDetermineProducer)?; + let base_reward = self.base_reward(proposer_index, base_reward_quotient, spec); + safe_add_assign!( + self.validator_balances[proposer_index], + base_reward / spec.includer_reward_quotient + ); + } + + debug!( + "Previous epoch attesters: {}.", + previous_epoch_attester_indices_hashset.len() + ); + + /* + * Crosslinks + */ + for slot in self.get_previous_epoch_boundaries(spec) { + let crosslink_committees_at_slot = self.get_crosslink_committees_at_slot(slot, spec)?; 
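+            // Second pass over the previous-epoch committees: reward or penalise validators
+            // against the winning roots cached in `winning_root_for_shards` above.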
+ + for (_crosslink_committee, shard) in crosslink_committees_at_slot { + let shard = shard as u64; + + if let Some(Ok(winning_root)) = winning_root_for_shards.get(&shard) { + // TODO: remove the map. + let attesting_validator_indices: HashSet = HashSet::from_iter( + winning_root.attesting_validator_indices.iter().map(|i| *i), + ); + + for index in 0..self.validator_balances.len() { + let base_reward = self.base_reward(index, base_reward_quotient, spec); + + if attesting_validator_indices.contains(&index) { + safe_add_assign!( + self.validator_balances[index], + base_reward * winning_root.total_attesting_balance + / winning_root.total_balance + ); + } else { + safe_sub_assign!(self.validator_balances[index], base_reward); + } + } + + for index in &winning_root.attesting_validator_indices { + let base_reward = self.base_reward(*index, base_reward_quotient, spec); + safe_add_assign!( + self.validator_balances[*index], + base_reward * winning_root.total_attesting_balance + / winning_root.total_balance + ); + } + } + } + } + + /* + * Ejections + */ + self.process_ejections(); + + /* + * Validator Registry + */ + self.previous_epoch_calculation_slot = self.current_epoch_calculation_slot; + self.previous_epoch_start_shard = self.current_epoch_start_shard; + self.previous_epoch_seed = self.current_epoch_seed; + + let should_update_validator_registy = if self.finalized_slot + > self.validator_registry_update_slot + { + (0..self.get_current_epoch_committee_count_per_slot(spec)).all(|i| { + let shard = (self.current_epoch_start_shard + i as u64) % spec.shard_count; + self.latest_crosslinks[shard as usize].slot > self.validator_registry_update_slot + }) + } else { + false + }; + + if should_update_validator_registy { + self.update_validator_registry(spec); + + self.current_epoch_calculation_slot = self.slot; + self.current_epoch_start_shard = (self.current_epoch_start_shard + + self.get_current_epoch_committee_count_per_slot(spec) as u64 * spec.epoch_length) + % spec.shard_count; + self.current_epoch_seed = self.get_randao_mix( + self.current_epoch_calculation_slot + .saturating_sub(spec.seed_lookahead), + spec, + ); + } else { + let epochs_since_last_registry_change = + (self.slot - self.validator_registry_update_slot) / spec.epoch_length; + if epochs_since_last_registry_change.is_power_of_two() { + self.current_epoch_calculation_slot = self.slot; + self.current_epoch_seed = self.get_randao_mix( + self.current_epoch_calculation_slot + .saturating_sub(spec.seed_lookahead), + spec, + ); + } + } + + self.process_penalties_and_exits(spec); + + let e = self.slot / spec.epoch_length; + self.latest_penalized_balances[((e + 1) % spec.latest_penalized_exit_length) as usize] = + self.latest_penalized_balances[(e % spec.latest_penalized_exit_length) as usize]; + + self.latest_attestations = self + .latest_attestations + .iter() + .filter(|a| a.data.slot / spec.epoch_length >= self.current_epoch(spec)) + .cloned() + .collect(); + + debug!("Epoch transition complete."); + + Ok(()) + } + + fn process_penalties_and_exits(&mut self, spec: &ChainSpec) { + let active_validator_indices = + get_active_validator_indices(&self.validator_registry, self.slot); + let total_balance = self.get_effective_balances(&active_validator_indices[..], spec); + + for index in 0..self.validator_balances.len() { + let validator = &self.validator_registry[index]; + + if (self.slot / spec.epoch_length) + == (validator.penalized_slot / spec.epoch_length) + + spec.latest_penalized_exit_length / 2 + { + let e = (self.slot / spec.epoch_length) 
% spec.latest_penalized_exit_length; + let total_at_start = self.latest_penalized_balances + [((e + 1) % spec.latest_penalized_exit_length) as usize]; + let total_at_end = self.latest_penalized_balances[e as usize]; + let total_penalities = total_at_end.saturating_sub(total_at_start); + let penalty = self.get_effective_balance(index, spec) + * std::cmp::min(total_penalities * 3, total_balance) + / total_balance; + safe_sub_assign!(self.validator_balances[index], penalty); + } + } + + let eligible = |index: usize| { + let validator = &self.validator_registry[index]; + + if validator.penalized_slot <= self.slot { + let penalized_withdrawal_time = + spec.latest_penalized_exit_length * spec.epoch_length / 2; + self.slot >= validator.penalized_slot + penalized_withdrawal_time + } else { + self.slot >= validator.exit_slot + spec.min_validator_withdrawal_time + } + }; + + let mut eligable_indices: Vec = (0..self.validator_registry.len()) + .filter(|i| eligible(*i)) + .collect(); + eligable_indices.sort_by_key(|i| self.validator_registry[*i].exit_count); + let mut withdrawn_so_far = 0; + for index in eligable_indices { + self.prepare_validator_for_withdrawal(index); + withdrawn_so_far += 1; + if withdrawn_so_far >= spec.max_withdrawals_per_epoch { + break; + } + } + } + + fn prepare_validator_for_withdrawal(&mut self, index: usize) { + //TODO: we're not ANDing here, we're setting. Potentially wrong. + self.validator_registry[index].status_flags = Some(StatusFlags::Withdrawable); + } + + fn get_randao_mix(&mut self, slot: u64, spec: &ChainSpec) -> Hash256 { + assert!(self.slot < slot + spec.latest_randao_mixes_length); + assert!(slot <= self.slot); + self.latest_randao_mixes[(slot & spec.latest_randao_mixes_length) as usize] + } + + fn update_validator_registry(&mut self, spec: &ChainSpec) { + let active_validator_indices = + get_active_validator_indices(&self.validator_registry, self.slot); + let total_balance = self.get_effective_balances(&active_validator_indices[..], spec); + + let max_balance_churn = std::cmp::max( + spec.max_deposit, + total_balance / (2 * spec.max_balance_churn_quotient), + ); + + let mut balance_churn = 0; + for index in 0..self.validator_registry.len() { + let validator = &self.validator_registry[index]; + + if (validator.activation_slot > self.slot + spec.entry_exit_delay) + && self.validator_balances[index] >= spec.max_deposit + { + balance_churn += self.get_effective_balance(index, spec); + if balance_churn > max_balance_churn { + break; + } + + self.activate_validator(index, false, spec); + } + } + + let mut balance_churn = 0; + for index in 0..self.validator_registry.len() { + let validator = &self.validator_registry[index]; + + if (validator.exit_slot > self.slot + spec.entry_exit_delay) + && validator.status_flags == Some(StatusFlags::InitiatedExit) + { + balance_churn += self.get_effective_balance(index, spec); + if balance_churn > max_balance_churn { + break; + } + + self.exit_validator(index, spec); + } + } + + self.validator_registry_update_slot = self.slot; + } + + fn exit_validator(&mut self, validator_index: usize, spec: &ChainSpec) { + if self.validator_registry[validator_index].exit_slot + <= self.entry_exit_effect_slot(self.slot, spec) + { + return; + } + + self.validator_registry[validator_index].exit_slot = + self.entry_exit_effect_slot(self.slot, spec); + + self.validator_registry_exit_count += 1; + self.validator_registry[validator_index].exit_count = self.validator_registry_exit_count; + } + + fn activate_validator(&mut self, validator_index: usize, 
is_genesis: bool, spec: &ChainSpec) { + self.validator_registry[validator_index].activation_slot = if is_genesis { + spec.genesis_slot + } else { + self.entry_exit_effect_slot(self.slot, spec) + } + } + + fn entry_exit_effect_slot(&self, slot: u64, spec: &ChainSpec) -> u64 { + (slot - slot % spec.epoch_length) + spec.epoch_length + spec.entry_exit_delay + } + + fn process_ejections(&self) { + //TODO: stubbed out. + } + + fn inactivity_penalty( + &self, + validator_index: usize, + epochs_since_finality: u64, + base_reward_quotient: u64, + spec: &ChainSpec, + ) -> u64 { + let effective_balance = self.get_effective_balance(validator_index, spec); + self.base_reward(validator_index, base_reward_quotient, spec) + + effective_balance * epochs_since_finality / spec.inactivity_penalty_quotient / 2 + } + + fn inclusion_distance( + &self, + attestations: &[&PendingAttestation], + validator_index: usize, + spec: &ChainSpec, + ) -> Result { + let attestation = + self.earliest_included_attestation(attestations, validator_index, spec)?; + Ok(attestation + .slot_included + .saturating_sub(attestation.data.slot)) + } + + fn inclusion_slot( + &self, + attestations: &[&PendingAttestation], + validator_index: usize, + spec: &ChainSpec, + ) -> Result { + let attestation = + self.earliest_included_attestation(attestations, validator_index, spec)?; + Ok(attestation.slot_included) + } + + fn earliest_included_attestation( + &self, + attestations: &[&PendingAttestation], + validator_index: usize, + spec: &ChainSpec, + ) -> Result { + let mut included_attestations = vec![]; + + for (i, a) in attestations.iter().enumerate() { + let participants = + self.get_attestation_participants(&a.data, &a.aggregation_bitfield, spec)?; + if participants + .iter() + .find(|i| **i == validator_index) + .is_some() + { + included_attestations.push(i); + } + } + + let earliest_attestation_index = included_attestations + .iter() + .min_by_key(|i| attestations[**i].slot_included) + .ok_or_else(|| InclusionError::NoIncludedAttestations)?; + Ok(attestations[*earliest_attestation_index].clone()) + } + + fn base_reward( + &self, + validator_index: usize, + base_reward_quotient: u64, + spec: &ChainSpec, + ) -> u64 { + self.get_effective_balance(validator_index, spec) / base_reward_quotient / 5 + } + + pub fn get_effective_balances(&self, validator_indices: &[usize], spec: &ChainSpec) -> u64 { + validator_indices + .iter() + .fold(0, |acc, i| acc + self.get_effective_balance(*i, spec)) + } + + pub fn get_effective_balance(&self, validator_index: usize, spec: &ChainSpec) -> u64 { + std::cmp::min(self.validator_balances[validator_index], spec.max_deposit) + } + + pub fn get_block_root(&self, slot: u64, spec: &ChainSpec) -> Option<&Hash256> { + if self.slot <= slot + spec.latest_block_roots_length && slot <= self.slot { + self.latest_block_roots + .get((slot % spec.latest_block_roots_length) as usize) + } else { + None + } + } + + pub(crate) fn winning_root( + &self, + shard: u64, + current_epoch_attestations: &[&PendingAttestation], + previous_epoch_attestations: &[&PendingAttestation], + spec: &ChainSpec, + ) -> Result { + let mut attestations = current_epoch_attestations.to_vec(); + attestations.append(&mut previous_epoch_attestations.to_vec()); + + let mut candidates: HashMap = HashMap::new(); + + let mut highest_seen_balance = 0; + + for a in &attestations { + if a.data.shard != shard { + continue; + } + + let shard_block_root = &a.data.shard_block_root; + + if candidates.contains_key(shard_block_root) { + continue; + } + + // TODO: 
`cargo fmt` makes this rather ugly; tidy up. + let attesting_validator_indices = attestations.iter().try_fold::<_, _, Result< + _, + AttestationParticipantsError, + >>( + vec![], + |mut acc, a| { + if (a.data.shard == shard) && (a.data.shard_block_root == *shard_block_root) { + acc.append(&mut self.get_attestation_participants( + &a.data, + &a.aggregation_bitfield, + spec, + )?); + } + Ok(acc) + }, + )?; + + let total_balance: u64 = attesting_validator_indices + .iter() + .fold(0, |acc, i| acc + self.get_effective_balance(*i, spec)); + + let total_attesting_balance: u64 = attesting_validator_indices + .iter() + .fold(0, |acc, i| acc + self.get_effective_balance(*i, spec)); + + if total_attesting_balance > highest_seen_balance { + highest_seen_balance = total_attesting_balance; + } + + let candidate_root = WinningRoot { + shard_block_root: shard_block_root.clone(), + attesting_validator_indices, + total_attesting_balance, + total_balance, + }; + + candidates.insert(*shard_block_root, candidate_root); + } + + Ok(candidates + .iter() + .filter_map(|(_hash, candidate)| { + if candidate.total_attesting_balance == highest_seen_balance { + Some(candidate) + } else { + None + } + }) + .min_by_key(|candidate| candidate.shard_block_root) + .ok_or_else(|| WinningRootError::NoWinningRoot)? + // TODO: avoid clone. + .clone()) + } + + pub fn get_attestation_participants_union( + &self, + attestations: &[&PendingAttestation], + spec: &ChainSpec, + ) -> Result, AttestationParticipantsError> { + let mut all_participants = attestations.iter().try_fold::<_, _, Result< + Vec, + AttestationParticipantsError, + >>(vec![], |mut acc, a| { + acc.append(&mut self.get_attestation_participants( + &a.data, + &a.aggregation_bitfield, + spec, + )?); + Ok(acc) + })?; + all_participants.sort_unstable(); + all_participants.dedup(); + Ok(all_participants) + } + + // TODO: analyse for efficiency improvments. This implementation is naive. + pub fn get_attestation_participants( + &self, + attestation_data: &AttestationData, + aggregation_bitfield: &Bitfield, + spec: &ChainSpec, + ) -> Result, AttestationParticipantsError> { + let crosslink_committees = + self.get_crosslink_committees_at_slot(attestation_data.slot, spec)?; + + let committee_index: usize = crosslink_committees + .iter() + .position(|(_committee, shard)| *shard == attestation_data.shard) + .ok_or_else(|| AttestationParticipantsError::NoCommitteeForShard)?; + let (crosslink_committee, _shard) = &crosslink_committees[committee_index]; + + /* + * TODO: that bitfield length is valid. 
+ * + */ + + let mut participants = vec![]; + for (i, validator_index) in crosslink_committee.iter().enumerate() { + if aggregation_bitfield.get(i).unwrap() { + participants.push(*validator_index); + } + } + Ok(participants) + } + + pub fn validate_attestation( + &self, + attestation: &Attestation, + spec: &ChainSpec, + ) -> Result<(), AttestationValidationError> { + self.validate_attestation_signature_optional(attestation, spec, true) + } + + pub fn validate_attestation_without_signature( + &self, + attestation: &Attestation, + spec: &ChainSpec, + ) -> Result<(), AttestationValidationError> { + self.validate_attestation_signature_optional(attestation, spec, false) + } + + fn validate_attestation_signature_optional( + &self, + attestation: &Attestation, + spec: &ChainSpec, + verify_signature: bool, + ) -> Result<(), AttestationValidationError> { + ensure!( + attestation.data.slot + spec.min_attestation_inclusion_delay <= self.slot, + AttestationValidationError::IncludedTooEarly + ); + ensure!( + attestation.data.slot + spec.epoch_length >= self.slot, + AttestationValidationError::IncludedTooLate + ); + if attestation.data.slot >= self.current_epoch_start_slot(spec) { + ensure!( + attestation.data.justified_slot == self.justified_slot, + AttestationValidationError::WrongJustifiedSlot + ); + } else { + ensure!( + attestation.data.justified_slot == self.previous_justified_slot, + AttestationValidationError::WrongJustifiedSlot + ); + } + ensure!( + attestation.data.justified_block_root + == *self + .get_block_root(attestation.data.justified_slot, &spec) + .ok_or(AttestationValidationError::NoBlockRoot)?, + AttestationValidationError::WrongJustifiedRoot + ); + ensure!( + (attestation.data.latest_crosslink_root + == self.latest_crosslinks[attestation.data.shard as usize].shard_block_root) + || (attestation.data.shard_block_root + == self.latest_crosslinks[attestation.data.shard as usize].shard_block_root), + AttestationValidationError::BadLatestCrosslinkRoot + ); + if verify_signature { + let participants = self.get_attestation_participants( + &attestation.data, + &attestation.aggregation_bitfield, + spec, + )?; + let mut group_public_key = AggregatePublicKey::new(); + for participant in participants { + group_public_key.add( + self.validator_registry[participant as usize] + .pubkey + .as_raw(), + ) + } + ensure!( + bls_verify_aggregate( + &group_public_key, + &attestation.signable_message(PHASE_0_CUSTODY_BIT), + &attestation.aggregate_signature, + get_domain(&self.fork_data, attestation.data.slot, DOMAIN_ATTESTATION) + ), + AttestationValidationError::BadSignature + ); + } + ensure!( + attestation.data.shard_block_root == spec.zero_hash, + AttestationValidationError::ShardBlockRootNotZero + ); + Ok(()) + } +} + +fn merkle_root(_input: &[Hash256]) -> Hash256 { + Hash256::zero() +} + +fn initiate_validator_exit(_state: &BeaconState, _index: u32) { + // TODO: stubbed out. +} + +fn penalize_validator(_state: &BeaconState, _proposer_index: usize) { + // TODO: stubbed out. +} + +fn get_domain(_fork: &Fork, _slot: u64, _domain_type: u64) -> u64 { + // TODO: stubbed out. 
+ 0 +} + +fn bls_verify(pubkey: &PublicKey, message: &[u8], signature: &Signature, _domain: u64) -> bool { + // TODO: add domain + signature.verify(message, pubkey) +} + +impl From for AttestationValidationError { + fn from(e: AttestationParticipantsError) -> AttestationValidationError { + AttestationValidationError::AttestationParticipantsError(e) + } +} + +impl From for WinningRootError { + fn from(e: AttestationParticipantsError) -> WinningRootError { + WinningRootError::AttestationParticipantsError(e) + } +} + +impl From for AttestationParticipantsError { + fn from(e: CommitteesError) -> AttestationParticipantsError { + AttestationParticipantsError::CommitteesError(e) + } +} + +impl From for BlockProcessingError { + fn from(e: AttestationValidationError) -> BlockProcessingError { + BlockProcessingError::InvalidAttestation(e) + } +} + +impl From for BlockProcessingError { + fn from(e: CommitteesError) -> BlockProcessingError { + BlockProcessingError::CommitteesError(e) + } +} + +impl From for BlockProcessingError { + fn from(e: SlotProcessingError) -> BlockProcessingError { + BlockProcessingError::SlotProcessingError(e) + } +} + +impl From for SlotProcessingError { + fn from(e: CommitteesError) -> SlotProcessingError { + SlotProcessingError::CommitteesError(e) + } +} + +impl From for SlotProcessingError { + fn from(e: EpochError) -> SlotProcessingError { + SlotProcessingError::EpochProcessingError(e) + } +} + +impl From for InclusionError { + fn from(e: AttestationParticipantsError) -> InclusionError { + InclusionError::AttestationParticipantsError(e) + } +} + +impl From for EpochError { + fn from(e: InclusionError) -> EpochError { + EpochError::InclusionError(e) + } +} + +impl From for EpochError { + fn from(e: CommitteesError) -> EpochError { + EpochError::CommitteesError(e) + } +} + +impl From for EpochError { + fn from(e: AttestationParticipantsError) -> EpochError { + EpochError::AttestationParticipantsError(e) + } +} + +impl From for Error { + fn from(e: CommitteesError) -> Error { + Error::CommitteesError(e) } } @@ -82,8 +1693,8 @@ impl Encodable for BeaconState { s.append(&self.current_epoch_start_shard); s.append(&self.previous_epoch_calculation_slot); s.append(&self.current_epoch_calculation_slot); - s.append(&self.previous_epoch_randao_mix); - s.append(&self.current_epoch_randao_mix); + s.append(&self.previous_epoch_seed); + s.append(&self.current_epoch_seed); s.append(&self.custody_challenges); s.append(&self.previous_justified_slot); s.append(&self.justified_slot); @@ -91,7 +1702,7 @@ impl Encodable for BeaconState { s.append(&self.finalized_slot); s.append(&self.latest_crosslinks); s.append(&self.latest_block_roots); - s.append(&self.latest_penalized_exit_balances); + s.append(&self.latest_penalized_balances); s.append(&self.latest_attestations); s.append(&self.batched_block_roots); s.append(&self.latest_eth1_data); @@ -115,8 +1726,8 @@ impl Decodable for BeaconState { let (current_epoch_start_shard, i) = <_>::ssz_decode(bytes, i)?; let (previous_epoch_calculation_slot, i) = <_>::ssz_decode(bytes, i)?; let (current_epoch_calculation_slot, i) = <_>::ssz_decode(bytes, i)?; - let (previous_epoch_randao_mix, i) = <_>::ssz_decode(bytes, i)?; - let (current_epoch_randao_mix, i) = <_>::ssz_decode(bytes, i)?; + let (previous_epoch_seed, i) = <_>::ssz_decode(bytes, i)?; + let (current_epoch_seed, i) = <_>::ssz_decode(bytes, i)?; let (custody_challenges, i) = <_>::ssz_decode(bytes, i)?; let (previous_justified_slot, i) = <_>::ssz_decode(bytes, i)?; let (justified_slot, i) = 
<_>::ssz_decode(bytes, i)?; @@ -124,7 +1735,7 @@ impl Decodable for BeaconState { let (finalized_slot, i) = <_>::ssz_decode(bytes, i)?; let (latest_crosslinks, i) = <_>::ssz_decode(bytes, i)?; let (latest_block_roots, i) = <_>::ssz_decode(bytes, i)?; - let (latest_penalized_exit_balances, i) = <_>::ssz_decode(bytes, i)?; + let (latest_penalized_balances, i) = <_>::ssz_decode(bytes, i)?; let (latest_attestations, i) = <_>::ssz_decode(bytes, i)?; let (batched_block_roots, i) = <_>::ssz_decode(bytes, i)?; let (latest_eth1_data, i) = <_>::ssz_decode(bytes, i)?; @@ -146,8 +1757,8 @@ impl Decodable for BeaconState { current_epoch_start_shard, previous_epoch_calculation_slot, current_epoch_calculation_slot, - previous_epoch_randao_mix, - current_epoch_randao_mix, + previous_epoch_seed, + current_epoch_seed, custody_challenges, previous_justified_slot, justified_slot, @@ -155,7 +1766,7 @@ impl Decodable for BeaconState { finalized_slot, latest_crosslinks, latest_block_roots, - latest_penalized_exit_balances, + latest_penalized_balances, latest_attestations, batched_block_roots, latest_eth1_data, @@ -183,8 +1794,8 @@ impl TreeHash for BeaconState { result.append(&mut self.current_epoch_start_shard.hash_tree_root()); result.append(&mut self.previous_epoch_calculation_slot.hash_tree_root()); result.append(&mut self.current_epoch_calculation_slot.hash_tree_root()); - result.append(&mut self.previous_epoch_randao_mix.hash_tree_root()); - result.append(&mut self.current_epoch_randao_mix.hash_tree_root()); + result.append(&mut self.previous_epoch_seed.hash_tree_root()); + result.append(&mut self.current_epoch_seed.hash_tree_root()); result.append(&mut self.custody_challenges.hash_tree_root()); result.append(&mut self.previous_justified_slot.hash_tree_root()); result.append(&mut self.justified_slot.hash_tree_root()); @@ -192,7 +1803,7 @@ impl TreeHash for BeaconState { result.append(&mut self.finalized_slot.hash_tree_root()); result.append(&mut self.latest_crosslinks.hash_tree_root()); result.append(&mut self.latest_block_roots.hash_tree_root()); - result.append(&mut self.latest_penalized_exit_balances.hash_tree_root()); + result.append(&mut self.latest_penalized_balances.hash_tree_root()); result.append(&mut self.latest_attestations.hash_tree_root()); result.append(&mut self.batched_block_roots.hash_tree_root()); result.append(&mut self.latest_eth1_data.hash_tree_root()); @@ -218,8 +1829,8 @@ impl TestRandom for BeaconState { current_epoch_start_shard: <_>::random_for_test(rng), previous_epoch_calculation_slot: <_>::random_for_test(rng), current_epoch_calculation_slot: <_>::random_for_test(rng), - previous_epoch_randao_mix: <_>::random_for_test(rng), - current_epoch_randao_mix: <_>::random_for_test(rng), + previous_epoch_seed: <_>::random_for_test(rng), + current_epoch_seed: <_>::random_for_test(rng), custody_challenges: <_>::random_for_test(rng), previous_justified_slot: <_>::random_for_test(rng), justified_slot: <_>::random_for_test(rng), @@ -227,7 +1838,7 @@ impl TestRandom for BeaconState { finalized_slot: <_>::random_for_test(rng), latest_crosslinks: <_>::random_for_test(rng), latest_block_roots: <_>::random_for_test(rng), - latest_penalized_exit_balances: <_>::random_for_test(rng), + latest_penalized_balances: <_>::random_for_test(rng), latest_attestations: <_>::random_for_test(rng), batched_block_roots: <_>::random_for_test(rng), latest_eth1_data: <_>::random_for_test(rng), @@ -238,9 +1849,9 @@ impl TestRandom for BeaconState { #[cfg(test)] mod tests { - use super::super::ssz::ssz_encode; use 
super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/casper_slashing.rs b/eth2/types/src/casper_slashing.rs index 62bad76dd..0eab069b4 100644 --- a/eth2/types/src/casper_slashing.rs +++ b/eth2/types/src/casper_slashing.rs @@ -1,9 +1,10 @@ -use super::ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use super::SlashableVoteData; use crate::test_utils::TestRandom; use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; -#[derive(Debug, PartialEq, Clone)] +#[derive(Debug, PartialEq, Clone, Serialize)] pub struct CasperSlashing { pub slashable_vote_data_1: SlashableVoteData, pub slashable_vote_data_2: SlashableVoteData, @@ -51,9 +52,9 @@ impl TestRandom for CasperSlashing { #[cfg(test)] mod tests { - use super::super::ssz::ssz_encode; use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/crosslink.rs b/eth2/types/src/crosslink.rs index dfb02993d..f727d7b43 100644 --- a/eth2/types/src/crosslink.rs +++ b/eth2/types/src/crosslink.rs @@ -1,9 +1,10 @@ -use super::ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use super::Hash256; use crate::test_utils::TestRandom; use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq, Serialize)] pub struct Crosslink { pub slot: u64, pub shard_block_root: Hash256, @@ -61,9 +62,9 @@ impl TestRandom for Crosslink { #[cfg(test)] mod tests { - use super::super::ssz::ssz_encode; use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/deposit.rs b/eth2/types/src/deposit.rs index d2f112ec8..85b002101 100644 --- a/eth2/types/src/deposit.rs +++ b/eth2/types/src/deposit.rs @@ -1,9 +1,10 @@ -use super::ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use super::{DepositData, Hash256}; use crate::test_utils::TestRandom; use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; -#[derive(Debug, PartialEq, Clone)] +#[derive(Debug, PartialEq, Clone, Serialize)] pub struct Deposit { pub merkle_branch: Vec, pub merkle_tree_index: u64, @@ -57,9 +58,9 @@ impl TestRandom for Deposit { #[cfg(test)] mod tests { - use super::super::ssz::ssz_encode; use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/deposit_data.rs b/eth2/types/src/deposit_data.rs index 21ca6cd51..5c8c302f4 100644 --- a/eth2/types/src/deposit_data.rs +++ b/eth2/types/src/deposit_data.rs @@ -1,9 +1,10 @@ -use super::ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use super::DepositInput; use crate::test_utils::TestRandom; use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; -#[derive(Debug, PartialEq, Clone)] +#[derive(Debug, PartialEq, Clone, Serialize)] pub struct DepositData { pub amount: u64, pub timestamp: u64, @@ -57,9 +58,9 @@ impl TestRandom for DepositData { #[cfg(test)] mod tests { - use super::super::ssz::ssz_encode; use super::*; use 
crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/deposit_input.rs b/eth2/types/src/deposit_input.rs index 5377926d6..fc53baae9 100644 --- a/eth2/types/src/deposit_input.rs +++ b/eth2/types/src/deposit_input.rs @@ -1,10 +1,11 @@ -use super::ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use super::Hash256; use crate::test_utils::TestRandom; use bls::{PublicKey, Signature}; use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; -#[derive(Debug, PartialEq, Clone)] +#[derive(Debug, PartialEq, Clone, Serialize)] pub struct DepositInput { pub pubkey: PublicKey, pub withdrawal_credentials: Hash256, @@ -58,9 +59,9 @@ impl TestRandom for DepositInput { #[cfg(test)] mod tests { - use super::super::ssz::ssz_encode; use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/eth1_data.rs b/eth2/types/src/eth1_data.rs index 6d9e67a4a..6e9bb7d26 100644 --- a/eth2/types/src/eth1_data.rs +++ b/eth2/types/src/eth1_data.rs @@ -1,10 +1,11 @@ -use super::ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use super::Hash256; use crate::test_utils::TestRandom; use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; // Note: this is refer to as DepositRootVote in specs -#[derive(Debug, PartialEq, Clone, Default)] +#[derive(Debug, PartialEq, Clone, Default, Serialize)] pub struct Eth1Data { pub deposit_root: Hash256, pub block_hash: Hash256, @@ -52,9 +53,9 @@ impl TestRandom for Eth1Data { #[cfg(test)] mod tests { - use super::super::ssz::ssz_encode; use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/eth1_data_vote.rs b/eth2/types/src/eth1_data_vote.rs index 09a9a7954..2bfee4d02 100644 --- a/eth2/types/src/eth1_data_vote.rs +++ b/eth2/types/src/eth1_data_vote.rs @@ -1,10 +1,11 @@ -use super::ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use super::Eth1Data; use crate::test_utils::TestRandom; use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; // Note: this is refer to as DepositRootVote in specs -#[derive(Debug, PartialEq, Clone, Default)] +#[derive(Debug, PartialEq, Clone, Default, Serialize)] pub struct Eth1DataVote { pub eth1_data: Eth1Data, pub vote_count: u64, @@ -52,9 +53,9 @@ impl TestRandom for Eth1DataVote { #[cfg(test)] mod tests { - use super::super::ssz::ssz_encode; use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/exit.rs b/eth2/types/src/exit.rs index f70d26e41..97f1fd286 100644 --- a/eth2/types/src/exit.rs +++ b/eth2/types/src/exit.rs @@ -1,9 +1,10 @@ -use super::ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use crate::test_utils::TestRandom; use bls::Signature; use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; -#[derive(Debug, PartialEq, Clone)] +#[derive(Debug, PartialEq, Clone, Serialize)] pub struct Exit { pub slot: u64, pub validator_index: u32, @@ -57,9 +58,9 @@ impl TestRandom for Exit { #[cfg(test)] 
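The bodies of the repeated `test_ssz_round_trip` tests are not shown in this patch view. Across the type modules they all follow the same shape: build a value with `TestRandom`, SSZ-encode it, decode it back, and assert equality. A rough sketch of that pattern, using `Exit` (the module just below) and an arbitrary RNG seed; the exact body in the repository may differ:

use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable};

#[test]
pub fn test_ssz_round_trip() {
    // Deterministic RNG so the round-trip is reproducible; the seed value is arbitrary.
    let mut rng = XorShiftRng::from_seed([42; 16]);
    let original = Exit::random_for_test(&mut rng);

    // Encode to SSZ bytes, then decode starting at offset 0.
    let bytes = ssz_encode(&original);
    let (decoded, _) = Exit::ssz_decode(&bytes, 0).unwrap();

    assert_eq!(original, decoded);
}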
mod tests { - use super::super::ssz::ssz_encode; use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/fork.rs b/eth2/types/src/fork.rs index 7915299a1..c5a06caea 100644 --- a/eth2/types/src/fork.rs +++ b/eth2/types/src/fork.rs @@ -1,8 +1,9 @@ -use super::ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use crate::test_utils::TestRandom; use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; -#[derive(Debug, Clone, PartialEq, Default)] +#[derive(Debug, Clone, PartialEq, Default, Serialize)] pub struct Fork { pub pre_fork_version: u64, pub post_fork_version: u64, @@ -56,9 +57,9 @@ impl TestRandom for Fork { #[cfg(test)] mod tests { - use super::super::ssz::ssz_encode; use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/free_attestation.rs b/eth2/types/src/free_attestation.rs new file mode 100644 index 000000000..16d4f6728 --- /dev/null +++ b/eth2/types/src/free_attestation.rs @@ -0,0 +1,12 @@ +/// Note: this object does not actually exist in the spec. +/// +/// We use it for managing attestations that have not been aggregated. +use super::{AttestationData, Signature}; +use serde_derive::Serialize; + +#[derive(Debug, Clone, PartialEq, Serialize)] +pub struct FreeAttestation { + pub data: AttestationData, + pub signature: Signature, + pub validator_index: u64, +} diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index 1018e38af..79dd18255 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -1,12 +1,8 @@ -extern crate bls; -extern crate boolean_bitfield; -extern crate ethereum_types; -extern crate ssz; - pub mod test_utils; pub mod attestation; pub mod attestation_data; +pub mod attestation_data_and_custody_bit; pub mod beacon_block; pub mod beacon_block_body; pub mod beacon_state; @@ -19,6 +15,7 @@ pub mod eth1_data; pub mod eth1_data_vote; pub mod exit; pub mod fork; +pub mod free_attestation; pub mod pending_attestation; pub mod proposal_signed_data; pub mod proposer_slashing; @@ -33,11 +30,12 @@ pub mod validator_registry_delta_block; pub mod readers; -use self::ethereum_types::{H160, H256, U256}; +use ethereum_types::{H160, H256, U256}; use std::collections::HashMap; pub use crate::attestation::Attestation; pub use crate::attestation_data::AttestationData; +pub use crate::attestation_data_and_custody_bit::AttestationDataAndCustodyBit; pub use crate::beacon_block::BeaconBlock; pub use crate::beacon_block_body::BeaconBlockBody; pub use crate::beacon_state::BeaconState; @@ -50,6 +48,7 @@ pub use crate::eth1_data::Eth1Data; pub use crate::eth1_data_vote::Eth1DataVote; pub use crate::exit::Exit; pub use crate::fork::Fork; +pub use crate::free_attestation::FreeAttestation; pub use crate::pending_attestation::PendingAttestation; pub use crate::proposal_signed_data::ProposalSignedData; pub use crate::proposer_slashing::ProposerSlashing; @@ -72,4 +71,4 @@ pub type AttesterMap = HashMap<(u64, u64), Vec>; /// Maps a slot to a block proposer. 
pub type ProposerMap = HashMap; -pub use bls::{AggregatePublicKey, AggregateSignature, PublicKey, Signature}; +pub use bls::{AggregatePublicKey, AggregateSignature, Keypair, PublicKey, Signature}; diff --git a/eth2/types/src/pending_attestation.rs b/eth2/types/src/pending_attestation.rs index 89ac7b07c..d2af52826 100644 --- a/eth2/types/src/pending_attestation.rs +++ b/eth2/types/src/pending_attestation.rs @@ -1,9 +1,10 @@ -use super::ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use super::{AttestationData, Bitfield}; use crate::test_utils::TestRandom; use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize)] pub struct PendingAttestation { pub data: AttestationData, pub aggregation_bitfield: Bitfield, @@ -63,9 +64,9 @@ impl TestRandom for PendingAttestation { #[cfg(test)] mod tests { - use super::super::ssz::ssz_encode; use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/proposal_signed_data.rs b/eth2/types/src/proposal_signed_data.rs index 41f0cda81..829a16987 100644 --- a/eth2/types/src/proposal_signed_data.rs +++ b/eth2/types/src/proposal_signed_data.rs @@ -1,9 +1,10 @@ -use super::ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use super::Hash256; use crate::test_utils::TestRandom; use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; -#[derive(Debug, PartialEq, Clone, Default)] +#[derive(Debug, PartialEq, Clone, Default, Serialize)] pub struct ProposalSignedData { pub slot: u64, pub shard: u64, @@ -57,9 +58,9 @@ impl TestRandom for ProposalSignedData { #[cfg(test)] mod tests { - use super::super::ssz::ssz_encode; use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/proposer_slashing.rs b/eth2/types/src/proposer_slashing.rs index 8d9a843e2..a82a37074 100644 --- a/eth2/types/src/proposer_slashing.rs +++ b/eth2/types/src/proposer_slashing.rs @@ -1,10 +1,11 @@ -use super::ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use super::ProposalSignedData; use crate::test_utils::TestRandom; use bls::Signature; use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; -#[derive(Debug, PartialEq, Clone)] +#[derive(Debug, PartialEq, Clone, Serialize)] pub struct ProposerSlashing { pub proposer_index: u32, pub proposal_data_1: ProposalSignedData, @@ -70,9 +71,9 @@ impl TestRandom for ProposerSlashing { #[cfg(test)] mod tests { - use super::super::ssz::ssz_encode; use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/readers/block_reader.rs b/eth2/types/src/readers/block_reader.rs index 91a2852ac..d87bc5caf 100644 --- a/eth2/types/src/readers/block_reader.rs +++ b/eth2/types/src/readers/block_reader.rs @@ -1,3 +1,4 @@ +use super::state_reader::BeaconStateReader; use crate::{BeaconBlock, Hash256}; use std::fmt::Debug; diff --git a/eth2/types/src/shard_committee.rs b/eth2/types/src/shard_committee.rs index 3e7202b2a..3632cb0c1 100644 --- a/eth2/types/src/shard_committee.rs +++ b/eth2/types/src/shard_committee.rs @@ -1,8 +1,9 
@@ -use super::ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use crate::test_utils::TestRandom; use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq, Serialize)] pub struct ShardCommittee { pub shard: u64, pub committee: Vec, @@ -44,9 +45,9 @@ impl TestRandom for ShardCommittee { #[cfg(test)] mod tests { - use super::super::ssz::ssz_encode; use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/shard_reassignment_record.rs b/eth2/types/src/shard_reassignment_record.rs index 8f48692fc..a239233df 100644 --- a/eth2/types/src/shard_reassignment_record.rs +++ b/eth2/types/src/shard_reassignment_record.rs @@ -1,8 +1,9 @@ -use super::ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use crate::test_utils::TestRandom; use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; -#[derive(Debug, PartialEq, Clone)] +#[derive(Debug, PartialEq, Clone, Serialize)] pub struct ShardReassignmentRecord { pub validator_index: u64, pub shard: u64, @@ -56,9 +57,9 @@ impl TestRandom for ShardReassignmentRecord { #[cfg(test)] mod tests { - use super::super::ssz::ssz_encode; use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/slashable_vote_data.rs b/eth2/types/src/slashable_vote_data.rs index 3f088034f..acffca26d 100644 --- a/eth2/types/src/slashable_vote_data.rs +++ b/eth2/types/src/slashable_vote_data.rs @@ -1,10 +1,11 @@ -use super::ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use super::AttestationData; use crate::test_utils::TestRandom; use bls::AggregateSignature; use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; -#[derive(Debug, PartialEq, Clone)] +#[derive(Debug, PartialEq, Clone, Serialize)] pub struct SlashableVoteData { pub custody_bit_0_indices: Vec, pub custody_bit_1_indices: Vec, @@ -64,9 +65,9 @@ impl TestRandom for SlashableVoteData { #[cfg(test)] mod tests { - use super::super::ssz::ssz_encode; use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/spec/mod.rs b/eth2/types/src/spec/mod.rs index 1bf5b3d50..077b6bef5 100644 --- a/eth2/types/src/spec/mod.rs +++ b/eth2/types/src/spec/mod.rs @@ -3,7 +3,7 @@ mod foundation; use crate::{Address, Eth1Data, Hash256, Validator}; use bls::Signature; -#[derive(PartialEq, Debug)] +#[derive(PartialEq, Debug, Clone)] pub struct ChainSpec { /* * Misc diff --git a/eth2/types/src/special_record.rs b/eth2/types/src/special_record.rs index 21e9c88f6..2ab6f2b5b 100644 --- a/eth2/types/src/special_record.rs +++ b/eth2/types/src/special_record.rs @@ -1,9 +1,10 @@ +use serde_derive::Serialize; use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; /// The value of the "type" field of SpecialRecord. /// /// Note: this value must serialize to a u8 and therefore must not be greater than 255. 
-#[derive(Debug, PartialEq, Clone, Copy)] +#[derive(Debug, PartialEq, Clone, Copy, Serialize)] pub enum SpecialRecordKind { Logout = 0, CasperSlashing = 1, diff --git a/eth2/types/src/validator.rs b/eth2/types/src/validator.rs index 256be68df..5c7d0ad30 100644 --- a/eth2/types/src/validator.rs +++ b/eth2/types/src/validator.rs @@ -1,13 +1,13 @@ -use super::bls::PublicKey; use super::Hash256; -use crate::test_utils::TestRandom; +use crate::{test_utils::TestRandom, PublicKey}; use rand::RngCore; +use serde_derive::Serialize; use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; const STATUS_FLAG_INITIATED_EXIT: u8 = 1; const STATUS_FLAG_WITHDRAWABLE: u8 = 2; -#[derive(Debug, PartialEq, Clone, Copy)] +#[derive(Debug, PartialEq, Clone, Copy, Serialize)] pub enum StatusFlags { InitiatedExit, Withdrawable, @@ -43,7 +43,7 @@ fn status_flag_from_byte(flag: u8) -> Result, StatusFlagsDec } } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize)] pub struct Validator { pub pubkey: PublicKey, pub withdrawal_credentials: Hash256, @@ -180,9 +180,9 @@ impl TestRandom for Validator { #[cfg(test)] mod tests { - use super::super::ssz::ssz_encode; use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/validator_registry_delta_block.rs b/eth2/types/src/validator_registry_delta_block.rs index c8bf41c94..196da754d 100644 --- a/eth2/types/src/validator_registry_delta_block.rs +++ b/eth2/types/src/validator_registry_delta_block.rs @@ -2,10 +2,11 @@ use super::Hash256; use crate::test_utils::TestRandom; use bls::PublicKey; use rand::RngCore; +use serde_derive::Serialize; use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; // The information gathered from the PoW chain validator registration function. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize)] pub struct ValidatorRegistryDeltaBlock { pub latest_registry_delta_root: Hash256, pub validator_index: u32, @@ -84,9 +85,9 @@ impl TestRandom for ValidatorRegistryDeltaBlock { #[cfg(test)] mod tests { - use super::super::ssz::ssz_encode; use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/utils/bls/Cargo.toml b/eth2/utils/bls/Cargo.toml index 5148cb7f2..465510c59 100644 --- a/eth2/utils/bls/Cargo.toml +++ b/eth2/utils/bls/Cargo.toml @@ -8,4 +8,5 @@ edition = "2018" bls-aggregates = { git = "https://github.com/sigp/signature-schemes", tag = "v0.3.0" } hashing = { path = "../hashing" } hex = "0.3" +serde = "1.0" ssz = { path = "../ssz" } diff --git a/eth2/utils/bls/src/aggregate_signature.rs b/eth2/utils/bls/src/aggregate_signature.rs index 6bccf128e..6fed183f0 100644 --- a/eth2/utils/bls/src/aggregate_signature.rs +++ b/eth2/utils/bls/src/aggregate_signature.rs @@ -1,6 +1,9 @@ -use super::ssz::{decode_ssz_list, hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use super::{AggregatePublicKey, Signature}; use bls_aggregates::AggregateSignature as RawAggregateSignature; +use serde::ser::{Serialize, Serializer}; +use ssz::{ + decode_ssz_list, hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash, +}; /// A BLS aggregate signature. 
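The hand-written `Serialize` impls added in this patch (for `AggregateSignature` just below, and for `PublicKey`, `Signature` and `BooleanBitfield` later on) all emit the value's SSZ encoding via `serializer.serialize_bytes`, while plain structs simply derive `Serialize`. A hedged sketch of what this enables, e.g. dumping a type as JSON with `serde_json` (already listed as a `beacon_chain` dependency); the helper name is illustrative:

use types::FreeAttestation;

// Any type made Serialize by this patch can be rendered as JSON for debugging
// or test fixtures; BLS values appear as byte arrays because their impls call
// serializer.serialize_bytes(&ssz_encode(self)).
fn dump_free_attestation(fa: &FreeAttestation) -> serde_json::Result<String> {
    serde_json::to_string_pretty(fa)
}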
/// @@ -44,6 +47,15 @@ impl Decodable for AggregateSignature { } } +impl Serialize for AggregateSignature { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_bytes(&ssz_encode(self)) + } +} + impl TreeHash for AggregateSignature { fn hash_tree_root(&self) -> Vec { hash(&self.0.as_bytes()) @@ -52,9 +64,9 @@ impl TreeHash for AggregateSignature { #[cfg(test)] mod tests { - use super::super::ssz::ssz_encode; use super::super::{Keypair, Signature}; use super::*; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/utils/bls/src/lib.rs b/eth2/utils/bls/src/lib.rs index d7a3ff15d..646047d18 100644 --- a/eth2/utils/bls/src/lib.rs +++ b/eth2/utils/bls/src/lib.rs @@ -18,7 +18,7 @@ pub use self::bls_aggregates::AggregatePublicKey; pub const BLS_AGG_SIG_BYTE_SIZE: usize = 97; -use hashing::canonical_hash; +use hashing::hash; use ssz::ssz_encode; use std::default::Default; @@ -30,13 +30,23 @@ fn extend_if_needed(hash: &mut Vec) { /// For some signature and public key, ensure that the signature message was the public key and it /// was signed by the secret key that corresponds to that public key. pub fn verify_proof_of_possession(sig: &Signature, pubkey: &PublicKey) -> bool { - let mut hash = canonical_hash(&ssz_encode(pubkey)); + let mut hash = hash(&ssz_encode(pubkey)); extend_if_needed(&mut hash); sig.verify_hashed(&hash, &pubkey) } pub fn create_proof_of_possession(keypair: &Keypair) -> Signature { - let mut hash = canonical_hash(&ssz_encode(&keypair.pk)); + let mut hash = hash(&ssz_encode(&keypair.pk)); extend_if_needed(&mut hash); Signature::new_hashed(&hash, &keypair.sk) } + +pub fn bls_verify_aggregate( + pubkey: &AggregatePublicKey, + message: &[u8], + signature: &AggregateSignature, + _domain: u64, +) -> bool { + // TODO: add domain + signature.verify(message, pubkey) +} diff --git a/eth2/utils/bls/src/public_key.rs b/eth2/utils/bls/src/public_key.rs index d0635e6f5..0c2ad81bb 100644 --- a/eth2/utils/bls/src/public_key.rs +++ b/eth2/utils/bls/src/public_key.rs @@ -1,6 +1,7 @@ use super::SecretKey; use bls_aggregates::PublicKey as RawPublicKey; use hex::encode as hex_encode; +use serde::ser::{Serialize, Serializer}; use ssz::{ decode_ssz_list, hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash, }; @@ -55,6 +56,15 @@ impl Decodable for PublicKey { } } +impl Serialize for PublicKey { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_bytes(&ssz_encode(self)) + } +} + impl TreeHash for PublicKey { fn hash_tree_root(&self) -> Vec { hash(&self.0.as_bytes()) @@ -75,8 +85,8 @@ impl Hash for PublicKey { #[cfg(test)] mod tests { - use super::super::ssz::ssz_encode; use super::*; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/utils/bls/src/secret_key.rs b/eth2/utils/bls/src/secret_key.rs index 3327d94d6..4ff9f8684 100644 --- a/eth2/utils/bls/src/secret_key.rs +++ b/eth2/utils/bls/src/secret_key.rs @@ -48,8 +48,8 @@ impl TreeHash for SecretKey { #[cfg(test)] mod tests { - use super::super::ssz::ssz_encode; use super::*; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/utils/bls/src/signature.rs b/eth2/utils/bls/src/signature.rs index 9e4945bb7..396e4eab7 100644 --- a/eth2/utils/bls/src/signature.rs +++ b/eth2/utils/bls/src/signature.rs @@ -1,6 +1,9 @@ -use super::ssz::{decode_ssz_list, hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use super::{PublicKey, SecretKey}; use 
bls_aggregates::Signature as RawSignature; +use serde::ser::{Serialize, Serializer}; +use ssz::{ + decode_ssz_list, hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash, +}; /// A single BLS signature. /// @@ -63,11 +66,20 @@ impl TreeHash for Signature { } } +impl Serialize for Signature { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_bytes(&ssz_encode(self)) + } +} + #[cfg(test)] mod tests { - use super::super::ssz::ssz_encode; use super::super::Keypair; use super::*; + use ssz::ssz_encode; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/utils/boolean-bitfield/Cargo.toml b/eth2/utils/boolean-bitfield/Cargo.toml index b3d05f979..d94b9f7b1 100644 --- a/eth2/utils/boolean-bitfield/Cargo.toml +++ b/eth2/utils/boolean-bitfield/Cargo.toml @@ -7,3 +7,5 @@ edition = "2018" [dependencies] ssz = { path = "../ssz" } bit-vec = "0.5.0" +serde = "1.0" +serde_derive = "1.0" diff --git a/eth2/utils/boolean-bitfield/src/lib.rs b/eth2/utils/boolean-bitfield/src/lib.rs index 33c461361..16992c3fa 100644 --- a/eth2/utils/boolean-bitfield/src/lib.rs +++ b/eth2/utils/boolean-bitfield/src/lib.rs @@ -3,6 +3,7 @@ extern crate ssz; use bit_vec::BitVec; +use serde::ser::{Serialize, Serializer}; use std::cmp; use std::default; @@ -113,6 +114,28 @@ impl cmp::PartialEq for BooleanBitfield { } } +/// Create a new bitfield that is a union of two other bitfields. +/// +/// For example `union(0101, 1000) == 1101` +impl std::ops::BitAnd for BooleanBitfield { + type Output = Self; + + fn bitand(self, other: Self) -> Self { + let (biggest, smallest) = if self.len() > other.len() { + (&self, &other) + } else { + (&other, &self) + }; + let mut new = biggest.clone(); + for i in 0..smallest.len() { + if let Ok(true) = smallest.get(i) { + new.set(i, true); + } + } + new + } +} + impl ssz::Encodable for BooleanBitfield { // ssz_append encodes Self according to the `ssz` spec. 
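// Note on the `BitAnd` impl added above: although it hangs off the `&`
// operator, its body clones the longer bitfield and then sets every bit that
// is set in the shorter one, so the result is the *union* (logical OR) of the
// two inputs. That matches the impl's own doc comment ("union(0101, 1000) ==
// 1101") and the `test_bitand` case further down, where [2, 8, 1] & [4, 8, 16]
// == [6, 8, 17], i.e. a byte-wise OR.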
fn ssz_append(&self, s: &mut ssz::SszStream) { @@ -149,6 +172,15 @@ impl ssz::Decodable for BooleanBitfield { } } +impl Serialize for BooleanBitfield { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_bytes(&ssz::ssz_encode(self)) + } +} + impl ssz::TreeHash for BooleanBitfield { fn hash_tree_root(&self) -> Vec { self.to_bytes().hash_tree_root() @@ -365,4 +397,12 @@ mod tests { let (decoded, _) = BooleanBitfield::ssz_decode(&ssz, 0).unwrap(); assert_eq!(original, decoded); } + + #[test] + fn test_bitand() { + let a = BooleanBitfield::from_bytes(&vec![2, 8, 1][..]); + let b = BooleanBitfield::from_bytes(&vec![4, 8, 16][..]); + let c = BooleanBitfield::from_bytes(&vec![6, 8, 17][..]); + assert_eq!(c, a & b); + } } diff --git a/eth2/utils/hashing/src/lib.rs b/eth2/utils/hashing/src/lib.rs index 02203dc16..b2bd5a279 100644 --- a/eth2/utils/hashing/src/lib.rs +++ b/eth2/utils/hashing/src/lib.rs @@ -1,8 +1,6 @@ -extern crate tiny_keccak; - use tiny_keccak::Keccak; -pub fn canonical_hash(input: &[u8]) -> Vec { +pub fn hash(input: &[u8]) -> Vec { let mut keccak = Keccak::new_keccak256(); keccak.update(input); let mut result = vec![0; 32]; @@ -19,7 +17,7 @@ mod tests { fn test_hashing() { let input: Vec = From::from("hello"); - let output = canonical_hash(input.as_ref()); + let output = hash(input.as_ref()); let expected = &[ 0x1c, 0x8a, 0xff, 0x95, 0x06, 0x85, 0xc2, 0xed, 0x4b, 0xc3, 0x17, 0x4f, 0x34, 0x72, 0x28, 0x7b, 0x56, 0xd9, 0x51, 0x7b, 0x9c, 0x94, 0x81, 0x27, 0x31, 0x9a, 0x09, 0xa7, diff --git a/eth2/utils/slot_clock/src/system_time_slot_clock.rs b/eth2/utils/slot_clock/src/system_time_slot_clock.rs index 8c1e2f66c..ba62b3b93 100644 --- a/eth2/utils/slot_clock/src/system_time_slot_clock.rs +++ b/eth2/utils/slot_clock/src/system_time_slot_clock.rs @@ -3,13 +3,14 @@ use std::time::{Duration, SystemTime}; pub use std::time::SystemTimeError; -#[derive(Debug)] +#[derive(Debug, PartialEq)] pub enum Error { SlotDurationIsZero, - SystemTimeError(SystemTimeError), + SystemTimeError(String), } /// Determines the present slot based upon the present system time. +#[derive(Clone)] pub struct SystemTimeSlotClock { genesis_seconds: u64, slot_duration_seconds: u64, @@ -51,7 +52,7 @@ impl SlotClock for SystemTimeSlotClock { impl From for Error { fn from(e: SystemTimeError) -> Error { - Error::SystemTimeError(e) + Error::SystemTimeError(format!("{:?}", e)) } } diff --git a/eth2/utils/slot_clock/src/testing_slot_clock.rs b/eth2/utils/slot_clock/src/testing_slot_clock.rs index de1d7ddb3..330d47f1a 100644 --- a/eth2/utils/slot_clock/src/testing_slot_clock.rs +++ b/eth2/utils/slot_clock/src/testing_slot_clock.rs @@ -1,11 +1,12 @@ use super::SlotClock; +use std::sync::RwLock; #[derive(Debug, PartialEq)] pub enum Error {} /// Determines the present slot based upon the present system time. pub struct TestingSlotClock { - slot: u64, + slot: RwLock, } impl TestingSlotClock { @@ -13,11 +14,13 @@ impl TestingSlotClock { /// /// Returns an Error if `slot_duration_seconds == 0`. 
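Wrapping the slot in an `RwLock` is what allows `set_slot` (below) to take `&self` rather than `&mut self`: a test can hand a shared handle to the clock (for example behind an `Arc`) to the component under test and still advance the slot afterwards. A minimal sketch under that assumption; the `Arc` usage and import paths are illustrative, not taken from the patch:

use std::sync::Arc;
use slot_clock::{SlotClock, TestingSlotClock};

fn shared_clock_sketch() {
    let clock = Arc::new(TestingSlotClock::new(10));
    let handle = clock.clone(); // e.g. handed to a BeaconChain in a test harness

    // Advancing the slot through one handle is visible through the other.
    clock.set_slot(11);
    assert_eq!(handle.present_slot(), Ok(Some(11)));
}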
pub fn new(slot: u64) -> TestingSlotClock { - TestingSlotClock { slot } + TestingSlotClock { + slot: RwLock::new(slot), + } } - pub fn set_slot(&mut self, slot: u64) { - self.slot = slot; + pub fn set_slot(&self, slot: u64) { + *self.slot.write().expect("TestingSlotClock poisoned.") = slot; } } @@ -25,7 +28,8 @@ impl SlotClock for TestingSlotClock { type Error = Error; fn present_slot(&self) -> Result, Error> { - Ok(Some(self.slot)) + let slot = *self.slot.read().expect("TestingSlotClock poisoned."); + Ok(Some(slot)) } } @@ -35,7 +39,7 @@ mod tests { #[test] fn test_slot_now() { - let mut clock = TestingSlotClock::new(10); + let clock = TestingSlotClock::new(10); assert_eq!(clock.present_slot(), Ok(Some(10))); clock.set_slot(123); assert_eq!(clock.present_slot(), Ok(Some(123))); diff --git a/eth2/utils/ssz/src/impl_tree_hash.rs b/eth2/utils/ssz/src/impl_tree_hash.rs index 9463283cb..578977eec 100644 --- a/eth2/utils/ssz/src/impl_tree_hash.rs +++ b/eth2/utils/ssz/src/impl_tree_hash.rs @@ -1,5 +1,6 @@ use super::ethereum_types::{Address, H256}; -use super::{hash, merkle_hash, ssz_encode, TreeHash}; +use super::{merkle_hash, ssz_encode, TreeHash}; +use hashing::hash; impl TreeHash for u8 { fn hash_tree_root(&self) -> Vec { diff --git a/eth2/utils/ssz/src/lib.rs b/eth2/utils/ssz/src/lib.rs index 206040c2d..a6baa35a7 100644 --- a/eth2/utils/ssz/src/lib.rs +++ b/eth2/utils/ssz/src/lib.rs @@ -20,7 +20,9 @@ mod impl_tree_hash; pub use crate::decode::{decode_ssz, decode_ssz_list, Decodable, DecodeError}; pub use crate::encode::{Encodable, SszStream}; -pub use crate::tree_hash::{hash, merkle_hash, TreeHash}; +pub use crate::tree_hash::{merkle_hash, TreeHash}; + +pub use hashing::hash; pub const LENGTH_BYTES: usize = 4; pub const MAX_LIST_SIZE: usize = 1 << (4 * 8); diff --git a/eth2/utils/ssz/src/tree_hash.rs b/eth2/utils/ssz/src/tree_hash.rs index 0ec280a6f..a9ab0f467 100644 --- a/eth2/utils/ssz/src/tree_hash.rs +++ b/eth2/utils/ssz/src/tree_hash.rs @@ -1,4 +1,4 @@ -use hashing::canonical_hash; +use hashing::hash; const SSZ_CHUNK_SIZE: usize = 128; const HASHSIZE: usize = 32; @@ -65,10 +65,6 @@ fn list_to_blob(list: &mut Vec>) -> (usize, Vec) { (chunk_size, data) } -pub fn hash(data: &[u8]) -> Vec { - canonical_hash(data) -} - #[cfg(test)] mod tests { use super::*; diff --git a/eth2/utils/vec_shuffle/src/lib.rs b/eth2/utils/vec_shuffle/src/lib.rs index f5c2b7ebd..78bb8aa10 100644 --- a/eth2/utils/vec_shuffle/src/lib.rs +++ b/eth2/utils/vec_shuffle/src/lib.rs @@ -45,7 +45,7 @@ mod tests { use std::{fs::File, io::prelude::*, path::PathBuf}; - use super::{hashing::canonical_hash, *}; + use super::{hashing::hash, *}; #[test] fn test_shuffling() { @@ -70,7 +70,7 @@ mod tests { let seed_bytes = test_case["seed"].as_str().unwrap().as_bytes(); let seed = if seed_bytes.len() > 0 { - canonical_hash(seed_bytes) + hash(seed_bytes) } else { vec![] }; diff --git a/eth2/utils/vec_shuffle/src/rng.rs b/eth2/utils/vec_shuffle/src/rng.rs index e338647de..7a4a785ff 100644 --- a/eth2/utils/vec_shuffle/src/rng.rs +++ b/eth2/utils/vec_shuffle/src/rng.rs @@ -1,4 +1,4 @@ -use super::hashing::canonical_hash; +use super::hashing::hash; const SEED_SIZE_BYTES: usize = 32; const RAND_BYTES: usize = 3; // 24 / 8 @@ -16,7 +16,7 @@ impl ShuffleRng { /// Create a new instance given some "seed" bytes. pub fn new(initial_seed: &[u8]) -> Self { Self { - seed: canonical_hash(initial_seed), + seed: hash(initial_seed), idx: 0, rand_max: RAND_MAX, } @@ -24,7 +24,7 @@ impl ShuffleRng { /// "Regenerates" the seed by hashing it. 
fn rehash_seed(&mut self) { - self.seed = canonical_hash(&self.seed); + self.seed = hash(&self.seed); self.idx = 0; } diff --git a/eth2/validator_shuffling/Cargo.toml b/eth2/validator_shuffling/Cargo.toml deleted file mode 100644 index ae2babf1a..000000000 --- a/eth2/validator_shuffling/Cargo.toml +++ /dev/null @@ -1,10 +0,0 @@ -[package] -name = "validator_shuffling" -version = "0.1.0" -authors = ["Paul Hauner "] -edition = "2018" - -[dependencies] -honey-badger-split = { path = "../utils/honey-badger-split" } -types = { path = "../types" } -vec_shuffle = { path = "../utils/vec_shuffle" } diff --git a/eth2/validator_shuffling/src/lib.rs b/eth2/validator_shuffling/src/lib.rs deleted file mode 100644 index 2307dd301..000000000 --- a/eth2/validator_shuffling/src/lib.rs +++ /dev/null @@ -1,3 +0,0 @@ -mod shuffle; - -pub use crate::shuffle::{shard_and_committees_for_cycle, ValidatorAssignmentError}; diff --git a/eth2/validator_shuffling/src/shuffle.rs b/eth2/validator_shuffling/src/shuffle.rs deleted file mode 100644 index ea2cba7f4..000000000 --- a/eth2/validator_shuffling/src/shuffle.rs +++ /dev/null @@ -1,278 +0,0 @@ -use std::cmp::min; - -use honey_badger_split::SplitExt; -use types::{validator_registry::get_active_validator_indices, ChainSpec}; -use types::{ShardCommittee, Validator}; -use vec_shuffle::{shuffle, ShuffleErr}; - -type DelegatedCycle = Vec>; - -#[derive(Debug, PartialEq)] -pub enum ValidatorAssignmentError { - TooManyValidators, - TooFewShards, -} - -/// Delegates active validators into slots for a given cycle, given a random seed. -/// Returns a vector or ShardAndComitte vectors representing the shards and committiees for -/// each slot. -/// References get_new_shuffling (ethereum 2.1 specification) -pub fn shard_and_committees_for_cycle( - seed: &[u8], - validators: &[Validator], - crosslinking_shard_start: u16, - spec: &ChainSpec, -) -> Result { - let shuffled_validator_indices = { - let validator_indices = get_active_validator_indices(validators, 0); - shuffle(seed, validator_indices)? - }; - let shard_indices: Vec = (0_usize..spec.shard_count as usize).into_iter().collect(); - let crosslinking_shard_start = crosslinking_shard_start as usize; - let epoch_length = spec.epoch_length as usize; - let min_committee_size = spec.target_committee_size as usize; - generate_cycle( - &shuffled_validator_indices, - &shard_indices, - crosslinking_shard_start, - epoch_length, - min_committee_size, - ) -} - -/// Given the validator list, delegates the validators into slots and comittees for a given cycle. 
-fn generate_cycle( - validator_indices: &[usize], - shard_indices: &[usize], - crosslinking_shard_start: usize, - epoch_length: usize, - min_committee_size: usize, -) -> Result { - let validator_count = validator_indices.len(); - let shard_count = shard_indices.len(); - - if shard_count / epoch_length == 0 { - return Err(ValidatorAssignmentError::TooFewShards); - } - - let (committees_per_slot, slots_per_committee) = { - if validator_count >= epoch_length * min_committee_size { - let committees_per_slot = min( - validator_count / epoch_length / (min_committee_size * 2) + 1, - shard_count / epoch_length, - ); - let slots_per_committee = 1; - (committees_per_slot, slots_per_committee) - } else { - let committees_per_slot = 1; - let mut slots_per_committee = 1; - while (validator_count * slots_per_committee < epoch_length * min_committee_size) - & (slots_per_committee < epoch_length) - { - slots_per_committee *= 2; - } - (committees_per_slot, slots_per_committee) - } - }; - - let cycle = validator_indices - .honey_badger_split(epoch_length) - .enumerate() - .map(|(i, slot_indices)| { - let shard_start = - crosslinking_shard_start + i * committees_per_slot / slots_per_committee; - slot_indices - .honey_badger_split(committees_per_slot) - .enumerate() - .map(|(j, shard_indices)| ShardCommittee { - shard: ((shard_start + j) % shard_count) as u64, - committee: shard_indices.to_vec(), - }) - .collect() - }) - .collect(); - Ok(cycle) -} - -impl From for ValidatorAssignmentError { - fn from(e: ShuffleErr) -> ValidatorAssignmentError { - match e { - ShuffleErr::ExceedsListLength => ValidatorAssignmentError::TooManyValidators, - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - fn generate_cycle_helper( - validator_count: &usize, - shard_count: &usize, - crosslinking_shard_start: usize, - epoch_length: usize, - min_committee_size: usize, - ) -> ( - Vec, - Vec, - Result, - ) { - let validator_indices: Vec = (0_usize..*validator_count).into_iter().collect(); - let shard_indices: Vec = (0_usize..*shard_count).into_iter().collect(); - let result = generate_cycle( - &validator_indices, - &shard_indices, - crosslinking_shard_start, - epoch_length, - min_committee_size, - ); - (validator_indices, shard_indices, result) - } - - #[allow(dead_code)] - fn print_cycle(cycle: &DelegatedCycle) { - cycle.iter().enumerate().for_each(|(i, slot)| { - println!("slot {:?}", &i); - slot.iter().enumerate().for_each(|(i, sac)| { - println!( - "#{:?}\tshard={}\tcommittee.len()={}", - &i, - &sac.shard, - &sac.committee.len() - ) - }) - }); - } - - fn flatten_validators(cycle: &DelegatedCycle) -> Vec { - let mut flattened = vec![]; - for slot in cycle.iter() { - for sac in slot.iter() { - for validator in sac.committee.iter() { - flattened.push(*validator); - } - } - } - flattened - } - - fn flatten_and_dedup_shards(cycle: &DelegatedCycle) -> Vec { - let mut flattened = vec![]; - for slot in cycle.iter() { - for sac in slot.iter() { - flattened.push(sac.shard as usize); - } - } - flattened.dedup(); - flattened - } - - fn flatten_shards_in_slots(cycle: &DelegatedCycle) -> Vec> { - let mut shards_in_slots: Vec> = vec![]; - for slot in cycle.iter() { - let mut shards: Vec = vec![]; - for sac in slot.iter() { - shards.push(sac.shard as usize); - } - shards_in_slots.push(shards); - } - shards_in_slots - } - - // TODO: Improve these tests to check committee lengths - #[test] - fn test_generate_cycle() { - let validator_count: usize = 100; - let shard_count: usize = 20; - let crosslinking_shard_start: usize = 0; - let 
epoch_length: usize = 20; - let min_committee_size: usize = 10; - let (validators, shards, result) = generate_cycle_helper( - &validator_count, - &shard_count, - crosslinking_shard_start, - epoch_length, - min_committee_size, - ); - let cycle = result.unwrap(); - - let assigned_validators = flatten_validators(&cycle); - let assigned_shards = flatten_and_dedup_shards(&cycle); - let shards_in_slots = flatten_shards_in_slots(&cycle); - let expected_shards = shards.get(0..10).unwrap(); - assert_eq!( - assigned_validators, validators, - "Validator assignment incorrect" - ); - assert_eq!( - assigned_shards, expected_shards, - "Shard assignment incorrect" - ); - - let expected_shards_in_slots: Vec> = vec![ - vec![0], - vec![0], // Each line is 2 slots.. - vec![1], - vec![1], - vec![2], - vec![2], - vec![3], - vec![3], - vec![4], - vec![4], - vec![5], - vec![5], - vec![6], - vec![6], - vec![7], - vec![7], - vec![8], - vec![8], - vec![9], - vec![9], - ]; - // assert!(compare_shards_in_slots(&cycle, &expected_shards_in_slots)); - assert_eq!( - expected_shards_in_slots, shards_in_slots, - "Shard assignment incorrect." - ) - } - - #[test] - // Check that the committees per slot is upper bounded by shard count - fn test_generate_cycle_committees_bounded() { - let validator_count: usize = 523; - let shard_count: usize = 31; - let crosslinking_shard_start: usize = 0; - let epoch_length: usize = 11; - let min_committee_size: usize = 5; - let (validators, shards, result) = generate_cycle_helper( - &validator_count, - &shard_count, - crosslinking_shard_start, - epoch_length, - min_committee_size, - ); - let cycle = result.unwrap(); - let assigned_validators = flatten_validators(&cycle); - let assigned_shards = flatten_and_dedup_shards(&cycle); - let shards_in_slots = flatten_shards_in_slots(&cycle); - let expected_shards = shards.get(0..22).unwrap(); - let expected_shards_in_slots: Vec> = (0_usize..11_usize) - .map(|x| vec![2 * x, 2 * x + 1]) - .collect(); - assert_eq!( - assigned_validators, validators, - "Validator assignment incorrect" - ); - assert_eq!( - assigned_shards, expected_shards, - "Shard assignment incorrect" - ); - // assert!(compare_shards_in_slots(&cycle, &expected_shards_in_slots)); - assert_eq!( - expected_shards_in_slots, shards_in_slots, - "Shard assignment incorrect." - ) - } -} diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index dfec240c4..8ab515e15 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -5,6 +5,7 @@ authors = ["Paul Hauner "] edition = "2018" [dependencies] +block_producer = { path = "../eth2/block_producer" } bls = { path = "../eth2/utils/bls" } clap = "2.32.0" dirs = "1.0.3" diff --git a/validator_client/src/block_producer/test_node.rs b/validator_client/src/block_producer/test_node.rs deleted file mode 100644 index e99613e8f..000000000 --- a/validator_client/src/block_producer/test_node.rs +++ /dev/null @@ -1,47 +0,0 @@ -use super::traits::{BeaconNode, BeaconNodeError}; -use std::sync::RwLock; -use types::BeaconBlock; - -type ProduceResult = Result, BeaconNodeError>; -type PublishResult = Result; - -/// A test-only struct used to simulate a Beacon Node. -#[derive(Default)] -pub struct TestBeaconNode { - pub produce_input: RwLock>, - pub produce_result: RwLock>, - pub publish_input: RwLock>, - pub publish_result: RwLock>, -} - -impl TestBeaconNode { - /// Set the result to be returned when `produce_beacon_block` is called. 
- pub fn set_next_produce_result(&self, result: ProduceResult) { - *self.produce_result.write().unwrap() = Some(result); - } - - /// Set the result to be returned when `publish_beacon_block` is called. - pub fn set_next_publish_result(&self, result: PublishResult) { - *self.publish_result.write().unwrap() = Some(result); - } -} - -impl BeaconNode for TestBeaconNode { - /// Returns the value specified by the `set_next_produce_result`. - fn produce_beacon_block(&self, slot: u64) -> ProduceResult { - *self.produce_input.write().unwrap() = Some(slot); - match *self.produce_result.read().unwrap() { - Some(ref r) => r.clone(), - None => panic!("TestBeaconNode: produce_result == None"), - } - } - - /// Returns the value specified by the `set_next_publish_result`. - fn publish_beacon_block(&self, block: BeaconBlock) -> PublishResult { - *self.publish_input.write().unwrap() = Some(block); - match *self.publish_result.read().unwrap() { - Some(ref r) => r.clone(), - None => panic!("TestBeaconNode: publish_result == None"), - } - } -} diff --git a/validator_client/src/block_producer/traits.rs b/validator_client/src/block_producer/traits.rs deleted file mode 100644 index be1c73bda..000000000 --- a/validator_client/src/block_producer/traits.rs +++ /dev/null @@ -1,19 +0,0 @@ -use types::BeaconBlock; - -#[derive(Debug, PartialEq, Clone)] -pub enum BeaconNodeError { - RemoteFailure(String), - DecodeFailure, -} - -/// Defines the methods required to produce and publish blocks on a Beacon Node. -pub trait BeaconNode: Send + Sync { - /// Request that the node produces a block. - /// - /// Returns Ok(None) if the Beacon Node is unable to produce at the given slot. - fn produce_beacon_block(&self, slot: u64) -> Result, BeaconNodeError>; - /// Request that the node publishes a block. - /// - /// Returns `true` if the publish was sucessful. - fn publish_beacon_block(&self, block: BeaconBlock) -> Result; -} diff --git a/validator_client/src/block_producer/grpc.rs b/validator_client/src/block_producer_service/beacon_block_grpc_client.rs similarity index 69% rename from validator_client/src/block_producer/grpc.rs rename to validator_client/src/block_producer_service/beacon_block_grpc_client.rs index 87f1b2cff..9ac8e779c 100644 --- a/validator_client/src/block_producer/grpc.rs +++ b/validator_client/src/block_producer_service/beacon_block_grpc_client.rs @@ -1,21 +1,45 @@ -use super::traits::{BeaconNode, BeaconNodeError}; +use block_producer::{BeaconNode, BeaconNodeError, PublishOutcome}; use protos::services::{ BeaconBlock as GrpcBeaconBlock, ProduceBeaconBlockRequest, PublishBeaconBlockRequest, }; use protos::services_grpc::BeaconBlockServiceClient; use ssz::{ssz_encode, Decodable}; -use types::{BeaconBlock, BeaconBlockBody, Eth1Data, Hash256, Signature}; +use std::sync::Arc; +use types::{BeaconBlock, BeaconBlockBody, Eth1Data, Hash256, PublicKey, Signature}; -impl BeaconNode for BeaconBlockServiceClient { +/// A newtype designed to wrap the gRPC-generated service so the `BeaconNode` trait may be +/// implemented upon it. +pub struct BeaconBlockGrpcClient { + client: Arc, +} + +impl BeaconBlockGrpcClient { + pub fn new(client: Arc) -> Self { + Self { client } + } +} + +impl BeaconNode for BeaconBlockGrpcClient { + fn proposer_nonce(&self, pubkey: &PublicKey) -> Result { + // TODO: this might not be required. + // + // See: https://github.com/ethereum/eth2.0-specs/pull/496 + panic!("Not implemented.") + } /// Request a Beacon Node (BN) to produce a new block at the supplied slot. 
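The local `BeaconNode` trait deleted above is superseded by the one exported from the new `eth2/block_producer` crate, which this file now imports. That crate is not part of this diff, but the call sites in `beacon_block_grpc_client.rs` pin down its shape: a `proposer_nonce` lookup, a `produce_beacon_block` that also takes the `randao_reveal`, and a `publish_beacon_block` that returns a `PublishOutcome` rather than a bare `bool`. A hedged sketch of what the relocated trait presumably looks like (inferred from those call sites, not copied from the crate):

```rust
// Inferred sketch only: the real definitions live in `eth2/block_producer`,
// which is not shown in this diff. Depends on the workspace `types` crate.
use types::{BeaconBlock, PublicKey, Signature};

#[derive(Debug, PartialEq, Clone)]
pub enum BeaconNodeError {
    RemoteFailure(String),
    DecodeFailure,
}

#[derive(Debug, PartialEq, Clone)]
pub enum PublishOutcome {
    // Variants observed in this diff; the real enum may carry more.
    ValidBlock,
    InvalidBlock(String),
}

/// The interface the block producer expects a Beacon Node to provide.
pub trait BeaconNode: Send + Sync {
    /// Fetch the proposer nonce for a validator (return type assumed to be `u64`).
    fn proposer_nonce(&self, pubkey: &PublicKey) -> Result<u64, BeaconNodeError>;

    /// Request an unsigned block at `slot`; `Ok(None)` if the node cannot
    /// produce one (e.g. it does not know the parent block).
    fn produce_beacon_block(
        &self,
        slot: u64,
        randao_reveal: &Signature,
    ) -> Result<Option<BeaconBlock>, BeaconNodeError>;

    /// Publish a block that has been signed by the validator client.
    fn publish_beacon_block(&self, block: BeaconBlock)
        -> Result<PublishOutcome, BeaconNodeError>;
}
```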
/// /// Returns `None` if it is not possible to produce at the supplied slot. For example, if the /// BN is unable to find a parent block. - fn produce_beacon_block(&self, slot: u64) -> Result, BeaconNodeError> { + fn produce_beacon_block( + &self, + slot: u64, + randao_reveal: &Signature, + ) -> Result, BeaconNodeError> { let mut req = ProduceBeaconBlockRequest::new(); req.set_slot(slot); let reply = self + .client .produce_beacon_block(&req) .map_err(|err| BeaconNodeError::RemoteFailure(format!("{:?}", err)))?; @@ -59,7 +83,7 @@ impl BeaconNode for BeaconBlockServiceClient { /// /// Generally, this will be called after a `produce_beacon_block` call with a block that has /// been completed (signed) by the validator client. - fn publish_beacon_block(&self, block: BeaconBlock) -> Result { + fn publish_beacon_block(&self, block: BeaconBlock) -> Result { let mut req = PublishBeaconBlockRequest::new(); // TODO: this conversion is incomplete; fix it. @@ -72,9 +96,15 @@ impl BeaconNode for BeaconBlockServiceClient { req.set_block(grpc_block); let reply = self + .client .publish_beacon_block(&req) .map_err(|err| BeaconNodeError::RemoteFailure(format!("{:?}", err)))?; - Ok(reply.get_success()) + if reply.get_success() { + Ok(PublishOutcome::ValidBlock) + } else { + // TODO: distinguish between different errors + Ok(PublishOutcome::InvalidBlock("Publish failed".to_string())) + } } } diff --git a/validator_client/src/block_producer/service.rs b/validator_client/src/block_producer_service/block_producer_service.rs similarity index 68% rename from validator_client/src/block_producer/service.rs rename to validator_client/src/block_producer_service/block_producer_service.rs index ffdb33029..5e335e383 100644 --- a/validator_client/src/block_producer/service.rs +++ b/validator_client/src/block_producer_service/block_producer_service.rs @@ -1,15 +1,17 @@ -use super::traits::BeaconNode; -use super::{BlockProducer, PollOutcome as BlockProducerPollOutcome, SlotClock}; +use block_producer::{ + BeaconNode, BlockProducer, DutiesReader, PollOutcome as BlockProducerPollOutcome, Signer, +}; use slog::{error, info, warn, Logger}; +use slot_clock::SlotClock; use std::time::Duration; -pub struct BlockProducerService { - pub block_producer: BlockProducer, +pub struct BlockProducerService { + pub block_producer: BlockProducer, pub poll_interval_millis: u64, pub log: Logger, } -impl BlockProducerService { +impl BlockProducerService { /// Run a loop which polls the block producer each `poll_interval_millis` millseconds. /// /// Logs the results of the polls. 
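Implementing the trait on a dedicated `BeaconBlockGrpcClient` newtype, rather than directly on the generated `BeaconBlockServiceClient`, is forced by Rust's orphan rule: now that `BeaconNode` lives in the external `block_producer` crate, the validator client owns neither the trait nor the gRPC type, so it needs a locally owned wrapper to carry the impl. A small, self-contained illustration of the pattern (all names are placeholders, not taken from the codebase):

```rust
use std::sync::Arc;

// Stand-ins for the foreign pieces: the trait (from `block_producer`) and the
// generated gRPC client (from `protos`).
trait Node {
    fn ping(&self) -> bool;
}

struct GeneratedClient;

impl GeneratedClient {
    fn ping_remote(&self) -> bool {
        true
    }
}

/// A newtype owned by this crate, so the (foreign) trait may be implemented
/// for the (foreign) client — the same role `BeaconBlockGrpcClient` plays.
struct GrpcNode {
    client: Arc<GeneratedClient>,
}

impl Node for GrpcNode {
    fn ping(&self) -> bool {
        // Delegate to the wrapped client, as the real impl delegates each
        // trait method to `self.client`.
        self.client.ping_remote()
    }
}

fn main() {
    let node = GrpcNode {
        client: Arc::new(GeneratedClient),
    };
    assert!(node.ping());
}
```

Holding the wrapped client behind an `Arc` also lets the same gRPC channel created in `main.rs` be shared with other services without an extra lock.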
@@ -37,6 +39,12 @@ impl BlockProducerService { Ok(BlockProducerPollOutcome::BeaconNodeUnableToProduceBlock(slot)) => { error!(self.log, "Beacon node unable to produce block"; "slot" => slot) } + Ok(BlockProducerPollOutcome::SignerRejection(slot)) => { + error!(self.log, "The cryptographic signer refused to sign the block"; "slot" => slot) + } + Ok(BlockProducerPollOutcome::ValidatorIsUnknown(slot)) => { + error!(self.log, "The Beacon Node does not recognise the validator"; "slot" => slot) + } }; std::thread::sleep(Duration::from_millis(self.poll_interval_millis)); diff --git a/validator_client/src/block_producer_service/mod.rs b/validator_client/src/block_producer_service/mod.rs new file mode 100644 index 000000000..52aac688b --- /dev/null +++ b/validator_client/src/block_producer_service/mod.rs @@ -0,0 +1,5 @@ +mod beacon_block_grpc_client; +mod block_producer_service; + +pub use self::beacon_block_grpc_client::BeaconBlockGrpcClient; +pub use self::block_producer_service::BlockProducerService; diff --git a/validator_client/src/duties/epoch_duties.rs b/validator_client/src/duties/epoch_duties.rs new file mode 100644 index 000000000..ea3c8ae4a --- /dev/null +++ b/validator_client/src/duties/epoch_duties.rs @@ -0,0 +1,81 @@ +use block_producer::{DutiesReader, DutiesReaderError}; +use std::collections::HashMap; +use std::sync::RwLock; + +/// The information required for a validator to propose and attest during some epoch. +/// +/// Generally obtained from a Beacon Node, this information contains the validators canonical index +/// (thier sequence in the global validator induction process) and the "shuffling" for that index +/// for some epoch. +#[derive(Debug, PartialEq, Clone, Copy, Default)] +pub struct EpochDuties { + pub validator_index: u64, + pub block_production_slot: Option, + // Future shard info +} + +impl EpochDuties { + /// Returns `true` if the supplied `slot` is a slot in which the validator should produce a + /// block. + pub fn is_block_production_slot(&self, slot: u64) -> bool { + match self.block_production_slot { + Some(s) if s == slot => true, + _ => false, + } + } +} + +pub enum EpochDutiesMapError { + Poisoned, +} + +/// Maps an `epoch` to some `EpochDuties` for a single validator. +pub struct EpochDutiesMap { + pub epoch_length: u64, + pub map: RwLock>, +} + +impl EpochDutiesMap { + pub fn new(epoch_length: u64) -> Self { + Self { + epoch_length, + map: RwLock::new(HashMap::new()), + } + } + + pub fn get(&self, epoch: u64) -> Result, EpochDutiesMapError> { + let map = self.map.read().map_err(|_| EpochDutiesMapError::Poisoned)?; + match map.get(&epoch) { + Some(duties) => Ok(Some(duties.clone())), + None => Ok(None), + } + } + + pub fn insert( + &self, + epoch: u64, + epoch_duties: EpochDuties, + ) -> Result, EpochDutiesMapError> { + let mut map = self + .map + .write() + .map_err(|_| EpochDutiesMapError::Poisoned)?; + Ok(map.insert(epoch, epoch_duties)) + } +} + +impl DutiesReader for EpochDutiesMap { + fn is_block_production_slot(&self, slot: u64) -> Result { + let epoch = slot + .checked_div(self.epoch_length) + .ok_or_else(|| DutiesReaderError::EpochLengthIsZero)?; + + let map = self.map.read().map_err(|_| DutiesReaderError::Poisoned)?; + let duties = map + .get(&epoch) + .ok_or_else(|| DutiesReaderError::UnknownEpoch)?; + Ok(duties.is_block_production_slot(slot)) + } +} + +// TODO: add tests. 
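The new `epoch_duties.rs` ends on a `TODO: add tests`. A sketch of what a first test could look like, written only against the types shown in that file (it would sit in a `#[cfg(test)]` module at the bottom; the `DutiesReaderError` variants come from the unshown `block_producer` crate, so the assertions avoid naming them):

```rust
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn resolves_block_production_slot_via_the_epoch_map() {
        let epoch_length = 8;
        let map = EpochDutiesMap::new(epoch_length);

        // Duties for epoch 2: this validator proposes at slot 19 (19 / 8 == 2).
        let duties = EpochDuties {
            validator_index: 0,
            block_production_slot: Some(19),
        };
        assert!(map.insert(2, duties).is_ok());

        // `DutiesReader::is_block_production_slot` derives the epoch from the
        // slot before looking the duties up.
        assert_eq!(map.is_block_production_slot(19).ok(), Some(true));
        assert_eq!(map.is_block_production_slot(18).ok(), Some(false));

        // A slot in an epoch with no stored duties yields an error
        // (`UnknownEpoch`) rather than `false`.
        assert!(map.is_block_production_slot(7).is_err());
    }
}
```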
diff --git a/validator_client/src/duties/mod.rs b/validator_client/src/duties/mod.rs index 851c2a033..711a1e29c 100644 --- a/validator_client/src/duties/mod.rs +++ b/validator_client/src/duties/mod.rs @@ -1,44 +1,19 @@ +mod epoch_duties; mod grpc; mod service; #[cfg(test)] mod test_node; mod traits; +pub use self::epoch_duties::EpochDutiesMap; +use self::epoch_duties::{EpochDuties, EpochDutiesMapError}; +pub use self::service::DutiesManagerService; use self::traits::{BeaconNode, BeaconNodeError}; use bls::PublicKey; use slot_clock::SlotClock; -use std::collections::HashMap; -use std::sync::{Arc, RwLock}; +use std::sync::Arc; use types::ChainSpec; -pub use self::service::DutiesManagerService; - -/// The information required for a validator to propose and attest during some epoch. -/// -/// Generally obtained from a Beacon Node, this information contains the validators canonical index -/// (thier sequence in the global validator induction process) and the "shuffling" for that index -/// for some epoch. -#[derive(Debug, PartialEq, Clone, Copy, Default)] -pub struct EpochDuties { - pub validator_index: u64, - pub block_production_slot: Option, - // Future shard info -} - -impl EpochDuties { - /// Returns `true` if the supplied `slot` is a slot in which the validator should produce a - /// block. - pub fn is_block_production_slot(&self, slot: u64) -> bool { - match self.block_production_slot { - Some(s) if s == slot => true, - _ => false, - } - } -} - -/// Maps an `epoch` to some `EpochDuties` for a single validator. -pub type EpochDutiesMap = HashMap; - #[derive(Debug, PartialEq, Clone, Copy)] pub enum PollOutcome { /// The `EpochDuties` were not updated during this poll. @@ -58,7 +33,6 @@ pub enum Error { SlotClockError, SlotUnknowable, EpochMapPoisoned, - SlotClockPoisoned, EpochLengthIsZero, BeaconNodeError(BeaconNodeError), } @@ -68,11 +42,11 @@ pub enum Error { /// /// There is a single `DutiesManager` per validator instance. pub struct DutiesManager { - pub duties_map: Arc>, + pub duties_map: Arc, /// The validator's public key. pub pubkey: PublicKey, pub spec: Arc, - pub slot_clock: Arc>, + pub slot_clock: Arc, pub beacon_node: Arc, } @@ -84,8 +58,6 @@ impl DutiesManager { pub fn poll(&self) -> Result { let slot = self .slot_clock - .read() - .map_err(|_| Error::SlotClockPoisoned)? .present_slot() .map_err(|_| Error::SlotClockError)? .ok_or(Error::SlotUnknowable)?; @@ -95,14 +67,9 @@ impl DutiesManager { .ok_or(Error::EpochLengthIsZero)?; if let Some(duties) = self.beacon_node.request_shuffling(epoch, &self.pubkey)? { - let mut map = self - .duties_map - .write() - .map_err(|_| Error::EpochMapPoisoned)?; - // If these duties were known, check to see if they're updates or identical. - let result = if let Some(known_duties) = map.get(&epoch) { - if *known_duties == duties { + let result = if let Some(known_duties) = self.duties_map.get(epoch)? 
{ + if known_duties == duties { Ok(PollOutcome::NoChange(epoch)) } else { Ok(PollOutcome::DutiesChanged(epoch, duties)) @@ -110,7 +77,7 @@ impl DutiesManager { } else { Ok(PollOutcome::NewDuties(epoch, duties)) }; - map.insert(epoch, duties); + self.duties_map.insert(epoch, duties)?; result } else { Ok(PollOutcome::UnknownValidatorOrEpoch(epoch)) @@ -124,6 +91,14 @@ impl From for Error { } } +impl From for Error { + fn from(e: EpochDutiesMapError) -> Error { + match e { + EpochDutiesMapError::Poisoned => Error::EpochMapPoisoned, + } + } +} + #[cfg(test)] mod tests { use super::test_node::TestBeaconNode; @@ -139,9 +114,9 @@ mod tests { #[test] pub fn polling() { let spec = Arc::new(ChainSpec::foundation()); - let duties_map = Arc::new(RwLock::new(EpochDutiesMap::new())); + let duties_map = Arc::new(EpochDutiesMap::new(spec.epoch_length)); let keypair = Keypair::random(); - let slot_clock = Arc::new(RwLock::new(TestingSlotClock::new(0))); + let slot_clock = Arc::new(TestingSlotClock::new(0)); let beacon_node = Arc::new(TestBeaconNode::default()); let manager = DutiesManager { diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index 56c705494..0e1fd4a6b 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -1,6 +1,7 @@ +use self::block_producer_service::{BeaconBlockGrpcClient, BlockProducerService}; use self::duties::{DutiesManager, DutiesManagerService, EpochDutiesMap}; -use crate::block_producer::{BlockProducer, BlockProducerService}; use crate::config::ClientConfig; +use block_producer::{test_utils::LocalSigner, BlockProducer}; use bls::Keypair; use clap::{App, Arg}; use grpcio::{ChannelBuilder, EnvBuilder}; @@ -8,11 +9,11 @@ use protos::services_grpc::{BeaconBlockServiceClient, ValidatorServiceClient}; use slog::{error, info, o, Drain}; use slot_clock::SystemTimeSlotClock; use std::path::PathBuf; -use std::sync::{Arc, RwLock}; +use std::sync::Arc; use std::thread; use types::ChainSpec; -mod block_producer; +mod block_producer_service; mod config; mod duties; @@ -91,7 +92,7 @@ fn main() { info!(log, "Genesis time"; "unix_epoch_seconds" => spec.genesis_time); let clock = SystemTimeSlotClock::new(spec.genesis_time, spec.slot_duration) .expect("Unable to instantiate SystemTimeSlotClock."); - Arc::new(RwLock::new(clock)) + Arc::new(clock) }; let poll_interval_millis = spec.slot_duration * 1000 / 10; // 10% epoch time precision. @@ -107,7 +108,7 @@ fn main() { for keypair in keypairs { info!(log, "Starting validator services"; "validator" => keypair.pk.concatenated_hex_id()); - let duties_map = Arc::new(RwLock::new(EpochDutiesMap::new())); + let duties_map = Arc::new(EpochDutiesMap::new(spec.epoch_length)); // Spawn a new thread to maintain the validator's `EpochDuties`. let duties_manager_thread = { @@ -138,12 +139,15 @@ fn main() { // Spawn a new thread to perform block production for the validator. 
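The `main.rs` changes in this hunk follow directly from the duties refactor above: because `EpochDutiesMap` now carries its own `RwLock` and the slot clock can be polled through `&self`, callers share plain `Arc`s instead of `Arc<RwLock<…>>`, and the now-redundant `SlotClockPoisoned` error variant disappears along with the outer lock. A generic sketch of that "move the lock inside the type" pattern, using a hypothetical `SharedMap` rather than anything from the codebase:

```rust
use std::collections::HashMap;
use std::sync::{Arc, RwLock};

/// Hypothetical stand-in for `EpochDutiesMap`: the `RwLock` lives inside the
/// type, so callers share it behind a plain `Arc` rather than `Arc<RwLock<_>>`.
struct SharedMap {
    inner: RwLock<HashMap<u64, u64>>,
}

impl SharedMap {
    fn new() -> Self {
        Self {
            inner: RwLock::new(HashMap::new()),
        }
    }

    /// Lock poisoning is translated into a domain error once, in here,
    /// instead of at every call site (cf. the removed `SlotClockPoisoned`).
    fn get(&self, key: u64) -> Result<Option<u64>, &'static str> {
        let map = self.inner.read().map_err(|_| "poisoned")?;
        Ok(map.get(&key).copied())
    }

    fn insert(&self, key: u64, value: u64) -> Result<Option<u64>, &'static str> {
        let mut map = self.inner.write().map_err(|_| "poisoned")?;
        Ok(map.insert(key, value))
    }
}

fn main() {
    // Callers now share a plain `Arc<SharedMap>`; no outer RwLock is required.
    let map = Arc::new(SharedMap::new());
    map.insert(1, 42).unwrap();
    assert_eq!(map.get(1).unwrap(), Some(42));
}
```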
let producer_thread = { let spec = spec.clone(); + let pubkey = keypair.pk.clone(); + let signer = Arc::new(LocalSigner::new(keypair.clone())); let duties_map = duties_map.clone(); let slot_clock = slot_clock.clone(); let log = log.clone(); - let client = beacon_block_grpc_client.clone(); + let client = Arc::new(BeaconBlockGrpcClient::new(beacon_block_grpc_client.clone())); thread::spawn(move || { - let block_producer = BlockProducer::new(spec, duties_map, slot_clock, client); + let block_producer = + BlockProducer::new(spec, pubkey, duties_map, slot_clock, client, signer); let mut block_producer_service = BlockProducerService { block_producer, poll_interval_millis,