diff --git a/Cargo.toml b/Cargo.toml index d92b1a303..c5aae7f43 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,12 +9,14 @@ members = [ "eth2/utils/boolean-bitfield", "eth2/utils/hashing", "eth2/utils/honey-badger-split", + "eth2/utils/merkle_proof", "eth2/utils/int_to_bytes", "eth2/utils/slot_clock", "eth2/utils/ssz", "eth2/utils/ssz_derive", "eth2/utils/swap_or_not_shuffle", "eth2/utils/fisher_yates_shuffle", + "eth2/utils/test_random_derive", "beacon_node", "beacon_node/db", "beacon_node/beacon_chain", diff --git a/README.md b/README.md index 7759c1166..6da6732ad 100644 --- a/README.md +++ b/README.md @@ -29,6 +29,19 @@ If you'd like some background on Sigma Prime, please see the [Lighthouse Update \#00](https://lighthouse.sigmaprime.io/update-00.html) blog post or the [company website](https://sigmaprime.io). +### Directory Structure + +- [`beacon_node/`](beacon_node/): the "Beacon Node" binary and crates exclusively + associated with it. +- [`docs/`](docs/): documentation related to the repository. This includes contributor + guides, etc. (It does not include code documentation, which can be produced with `cargo doc`). +- [`eth2/`](eth2/): Crates containing common logic across the Lighthouse project. For + example: Ethereum 2.0 types ([`BeaconBlock`](eth2/types/src/beacon_block.rs), [`BeaconState`](eth2/types/src/beacon_state.rs), etc) and + SimpleSerialize (SSZ). +- [`protos/`](protos/): protobuf/gRPC definitions that are common across the Lighthouse project. +- [`validator_client/`](validator_client/): the "Validator Client" binary and crates exclusively + associated with it. + ### Components The following list describes some of the components actively under development @@ -61,7 +74,7 @@ by the team: from the Ethereum Foundation to develop *simpleserialize* (SSZ), a purpose-built serialization format for sending information across a network. Check out the [SSZ -implementation](https://github.com/sigp/lighthouse/tree/master/beacon_chain/utils/ssz) +implementation](https://github.com/ethereum/eth2.0-specs/blob/00aa553fee95963b74fbec84dbd274d7247b8a0e/specs/simple-serialize.md) and this [research](https://github.com/sigp/serialization_sandbox/blob/report/report/serialization_report.md) on serialization formats for more information. @@ -79,16 +92,6 @@ In addition to these components we are also working on database schemas, RPC frameworks, specification development, database optimizations (e.g., bloom-filters), and tons of other interesting stuff (at least we think so). -### Directory Structure - -Here we provide an overview of the directory structure: - -- `beacon_chain/`: contains logic derived directly from the specification. - E.g., shuffling algorithms, state transition logic and structs, block -validation, BLS crypto, etc. -- `lighthouse/`: contains logic specific to this client implementation. E.g., - CLI parsing, RPC end-points, databases, etc. 
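As a rough illustration of how the binaries consume the shared `eth2/` crates listed above, the sketch below mirrors the pattern this diff uses whenever the beacon node persists a block: SSZ-encode it and key it by its root. It is illustrative only and assumes the `types` and `ssz` path crates from the workspace are in scope.

```rust
use ssz::ssz_encode;               // eth2/utils/ssz
use types::{BeaconBlock, Hash256}; // eth2/types

/// Compute the (key, value) pair the beacon node writes to its block store,
/// mirroring `block_store.put(&block_root, &ssz_encode(&block)[..])` later in
/// this diff.
fn block_db_entry(block: &BeaconBlock) -> (Hash256, Vec<u8>) {
    (block.canonical_root(), ssz_encode(block))
}
```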
- ### Running **NOTE: The cryptography libraries used in this implementation are diff --git a/beacon_node/beacon_chain/src/attestation_aggregator.rs b/beacon_node/beacon_chain/src/attestation_aggregator.rs index fa2ec87ab..75cfd7ee5 100644 --- a/beacon_node/beacon_chain/src/attestation_aggregator.rs +++ b/beacon_node/beacon_chain/src/attestation_aggregator.rs @@ -1,10 +1,8 @@ -use crate::cached_beacon_state::CachedBeaconState; -use state_processing::validate_attestation_without_signature; +use log::trace; +use ssz::TreeHash; +use state_processing::per_block_processing::validate_attestation_without_signature; use std::collections::{HashMap, HashSet}; -use types::{ - beacon_state::BeaconStateError, AggregateSignature, Attestation, AttestationData, BeaconState, - Bitfield, ChainSpec, FreeAttestation, Signature, -}; +use types::*; const PHASE_0_CUSTODY_BIT: bool = false; @@ -42,21 +40,28 @@ pub enum Message { BadSignature, /// The given `slot` does not match the validators committee assignment. BadSlot, - /// The given `shard` does not match the validators committee assignment. + /// The given `shard` does not match the validators committee assignment, or is not included in + /// a committee for the given slot. BadShard, + /// Attestation is from the epoch prior to this, ignoring. + TooOld, } -macro_rules! some_or_invalid { - ($expression: expr, $error: expr) => { - match $expression { - Some(x) => x, - None => { - return Ok(Outcome { - valid: false, - message: $error, - }); - } - } +macro_rules! valid_outcome { + ($error: expr) => { + return Ok(Outcome { + valid: true, + message: $error, + }); + }; +} + +macro_rules! invalid_outcome { + ($error: expr) => { + return Ok(Outcome { + valid: false, + message: $error, + }); }; } @@ -77,49 +82,61 @@ impl AttestationAggregator { /// - The signature is verified against that of the validator at `validator_index`. 
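For context on how callers treat the `Outcome`/`Message` pair above: the chain checks `outcome.valid` to stop invalid attestations from propagating, and `message` explains the result. A minimal sketch of consuming an outcome, assuming the aggregator's `Outcome` and `Message` types (and their fields) are publicly visible, as their use elsewhere in this diff suggests:

```rust
/// Illustrative only: turn an aggregation `Outcome` into a log-friendly string.
fn describe_outcome(outcome: &Outcome) -> &'static str {
    if !outcome.valid {
        return match outcome.message {
            Message::BadValidatorIndex => "unknown validator index",
            Message::BadSignature => "signature did not verify",
            Message::BadSlot => "slot does not match committee assignment",
            Message::BadShard => "shard does not match committee assignment",
            Message::TooOld => "attestation is from a prior epoch",
            _ => "invalid for another reason",
        };
    }
    match outcome.message {
        Message::NewAttestationCreated => "first signature for this attestation data",
        Message::Aggregated => "signature aggregated into an existing attestation",
        Message::AggregationNotRequired => "already aggregated; nothing to do",
        _ => "valid",
    }
}
```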
pub fn process_free_attestation( &mut self, - cached_state: &CachedBeaconState, + state: &BeaconState, free_attestation: &FreeAttestation, spec: &ChainSpec, ) -> Result { - let (slot, shard, committee_index) = some_or_invalid!( - cached_state.attestation_slot_and_shard_for_validator( - free_attestation.validator_index as usize, - spec, - )?, - Message::BadValidatorIndex + let attestation_duties = match state.attestation_slot_and_shard_for_validator( + free_attestation.validator_index as usize, + spec, + ) { + Err(BeaconStateError::EpochCacheUninitialized(e)) => { + panic!("Attempted to access unbuilt cache {:?}.", e) + } + Err(BeaconStateError::EpochOutOfBounds) => invalid_outcome!(Message::TooOld), + Err(BeaconStateError::ShardOutOfBounds) => invalid_outcome!(Message::BadShard), + Err(e) => return Err(e), + Ok(None) => invalid_outcome!(Message::BadValidatorIndex), + Ok(Some(attestation_duties)) => attestation_duties, + }; + + let (slot, shard, committee_index) = attestation_duties; + + trace!( + "slot: {}, shard: {}, committee_index: {}, val_index: {}", + slot, + shard, + committee_index, + free_attestation.validator_index ); if free_attestation.data.slot != slot { - return Ok(Outcome { - valid: false, - message: Message::BadSlot, - }); + invalid_outcome!(Message::BadSlot); } if free_attestation.data.shard != shard { - return Ok(Outcome { - valid: false, - message: Message::BadShard, - }); + invalid_outcome!(Message::BadShard); } - let signable_message = free_attestation.data.signable_message(PHASE_0_CUSTODY_BIT); + let signable_message = AttestationDataAndCustodyBit { + data: free_attestation.data.clone(), + custody_bit: PHASE_0_CUSTODY_BIT, + } + .hash_tree_root(); - let validator_record = some_or_invalid!( - cached_state - .state - .validator_registry - .get(free_attestation.validator_index as usize), - Message::BadValidatorIndex - ); - - if !free_attestation - .signature - .verify(&signable_message, &validator_record.pubkey) + let validator_record = match state + .validator_registry + .get(free_attestation.validator_index as usize) { - return Ok(Outcome { - valid: false, - message: Message::BadSignature, - }); + None => invalid_outcome!(Message::BadValidatorIndex), + Some(validator_record) => validator_record, + }; + + if !free_attestation.signature.verify( + &signable_message, + spec.get_domain(state.current_epoch(spec), Domain::Attestation, &state.fork), + &validator_record.pubkey, + ) { + invalid_outcome!(Message::BadSignature); } if let Some(existing_attestation) = self.store.get(&signable_message) { @@ -129,15 +146,9 @@ impl AttestationAggregator { committee_index as usize, ) { self.store.insert(signable_message, updated_attestation); - Ok(Outcome { - valid: true, - message: Message::Aggregated, - }) + valid_outcome!(Message::Aggregated); } else { - Ok(Outcome { - valid: true, - message: Message::AggregationNotRequired, - }) + valid_outcome!(Message::AggregationNotRequired); } } else { let mut aggregate_signature = AggregateSignature::new(); @@ -151,10 +162,7 @@ impl AttestationAggregator { aggregate_signature, }; self.store.insert(signable_message, new_attestation); - Ok(Outcome { - valid: true, - message: Message::NewAttestationCreated, - }) + valid_outcome!(Message::NewAttestationCreated); } } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 40e30b2fb..3d2efa8ae 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1,6 +1,6 @@ use 
crate::attestation_aggregator::{AttestationAggregator, Outcome as AggregationOutcome}; -use crate::cached_beacon_state::CachedBeaconState; use crate::checkpoint::CheckPoint; +use crate::errors::{BeaconChainError as Error, BlockProductionError}; use db::{ stores::{BeaconBlockStore, BeaconStateStore}, ClientDB, DBError, @@ -11,28 +11,15 @@ use parking_lot::{RwLock, RwLockReadGuard}; use slot_clock::SlotClock; use ssz::ssz_encode; use state_processing::{ - BlockProcessable, BlockProcessingError, SlotProcessable, SlotProcessingError, + per_block_processing, per_block_processing_without_verifying_block_signature, + per_slot_processing, BlockProcessingError, SlotProcessingError, }; use std::sync::Arc; use types::{ - beacon_state::BeaconStateError, readers::{BeaconBlockReader, BeaconStateReader}, - AttestationData, BeaconBlock, BeaconBlockBody, BeaconState, ChainSpec, Crosslink, Deposit, - Epoch, Eth1Data, FreeAttestation, Hash256, PublicKey, Signature, Slot, + *, }; -#[derive(Debug, PartialEq)] -pub enum Error { - InsufficientValidators, - BadRecentBlockRoots, - BeaconStateError(BeaconStateError), - DBInconsistent(String), - DBError(String), - ForkChoiceError(ForkChoiceError), - MissingBeaconBlock(Hash256), - MissingBeaconState(Hash256), -} - #[derive(Debug, PartialEq)] pub enum ValidBlock { /// The block was successfully processed. @@ -67,10 +54,14 @@ pub struct BeaconChain { pub state_store: Arc>, pub slot_clock: U, pub attestation_aggregator: RwLock, + pub deposits_for_inclusion: RwLock>, + pub exits_for_inclusion: RwLock>, + pub transfers_for_inclusion: RwLock>, + pub proposer_slashings_for_inclusion: RwLock>, + pub attester_slashings_for_inclusion: RwLock>, canonical_head: RwLock, finalized_head: RwLock, pub state: RwLock, - pub cached_state: RwLock, pub spec: ChainSpec, pub fork_choice: RwLock, } @@ -82,6 +73,7 @@ where F: ForkChoice, { /// Instantiate a new Beacon Chain, from genesis. + #[allow(clippy::too_many_arguments)] // Will be re-factored in the coming weeks. pub fn genesis( state_store: Arc>, block_store: Arc>, @@ -96,7 +88,7 @@ where return Err(Error::InsufficientValidators); } - let genesis_state = BeaconState::genesis( + let mut genesis_state = BeaconState::genesis( genesis_time, initial_validator_deposits, latest_eth1_data, @@ -109,32 +101,37 @@ where let block_root = genesis_block.canonical_root(); block_store.put(&block_root, &ssz_encode(&genesis_block)[..])?; - let cached_state = RwLock::new(CachedBeaconState::from_beacon_state( - genesis_state.clone(), - spec.clone(), - )?); - let finalized_head = RwLock::new(CheckPoint::new( genesis_block.clone(), block_root, + // TODO: this is a memory waste; remove full clone. genesis_state.clone(), state_root, )); let canonical_head = RwLock::new(CheckPoint::new( genesis_block.clone(), block_root, + // TODO: this is a memory waste; remove full clone. 
genesis_state.clone(), state_root, )); let attestation_aggregator = RwLock::new(AttestationAggregator::new()); + genesis_state.build_epoch_cache(RelativeEpoch::Previous, &spec)?; + genesis_state.build_epoch_cache(RelativeEpoch::Current, &spec)?; + genesis_state.build_epoch_cache(RelativeEpoch::Next, &spec)?; + Ok(Self { block_store, state_store, slot_clock, attestation_aggregator, - state: RwLock::new(genesis_state.clone()), - cached_state, + deposits_for_inclusion: RwLock::new(vec![]), + exits_for_inclusion: RwLock::new(vec![]), + transfers_for_inclusion: RwLock::new(vec![]), + proposer_slashings_for_inclusion: RwLock::new(vec![]), + attester_slashings_for_inclusion: RwLock::new(vec![]), + state: RwLock::new(genesis_state), finalized_head, canonical_head, spec, @@ -150,6 +147,10 @@ where new_beacon_state: BeaconState, new_beacon_state_root: Hash256, ) { + debug!( + "Updating canonical head with block at slot: {}", + new_beacon_block.slot + ); let mut head = self.canonical_head.write(); head.update( new_beacon_block, @@ -206,9 +207,7 @@ where let state_slot = self.state.read().slot; let head_block_root = self.head().beacon_block_root; for _ in state_slot.as_u64()..slot.as_u64() { - self.state - .write() - .per_slot_processing(head_block_root, &self.spec)?; + per_slot_processing(&mut *self.state.write(), head_block_root, &self.spec)?; } Ok(()) } @@ -288,7 +287,7 @@ where validator_index ); if let Some((slot, shard, _committee)) = self - .cached_state + .state .read() .attestation_slot_and_shard_for_validator(validator_index, &self.spec)? { @@ -306,7 +305,7 @@ where .state .read() .get_block_root( - justified_epoch.start_slot(self.spec.epoch_length), + justified_epoch.start_slot(self.spec.slots_per_epoch), &self.spec, ) .ok_or_else(|| Error::BadRecentBlockRoots)?; @@ -325,10 +324,10 @@ where shard, beacon_block_root: self.head().beacon_block_root, epoch_boundary_root, - shard_block_root: Hash256::zero(), + crosslink_data_root: Hash256::zero(), latest_crosslink: Crosslink { - epoch: self.state.read().slot.epoch(self.spec.epoch_length), - shard_block_root: Hash256::zero(), + epoch: self.state.read().slot.epoch(self.spec.slots_per_epoch), + crosslink_data_root: Hash256::zero(), }, justified_epoch, justified_block_root, @@ -346,7 +345,7 @@ where let aggregation_outcome = self .attestation_aggregator .write() - .process_free_attestation(&self.cached_state.read(), &free_attestation, &self.spec)?; + .process_free_attestation(&self.state.read(), &free_attestation, &self.spec)?; // return if the attestation is invalid if !aggregation_outcome.valid { @@ -362,6 +361,222 @@ where Ok(aggregation_outcome) } + /// Accept some deposit and queue it for inclusion in an appropriate block. + pub fn receive_deposit_for_inclusion(&self, deposit: Deposit) { + // TODO: deposits are not checked for validity; check them. + // + // https://github.com/sigp/lighthouse/issues/276 + self.deposits_for_inclusion.write().push(deposit); + } + + /// Return a vec of deposits suitable for inclusion in some block. + pub fn get_deposits_for_block(&self) -> Vec { + // TODO: deposits are indiscriminately included; check them for validity. + // + // https://github.com/sigp/lighthouse/issues/275 + self.deposits_for_inclusion.read().clone() + } + + /// Takes a list of `Deposits` that were included in recent blocks and removes them from the + /// inclusion queue. + /// + /// This ensures that `Deposits` are not included twice in successive blocks. 
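The deposit queue above is the first of five near-identical "inclusion queues" added to `BeaconChain` (deposits, exits, transfers, proposer slashings, attester slashings), each with a receive / get-for-block / set-as-included trio like the method that follows. A standalone sketch of the pattern, using a plain `Vec` rather than the chain's `RwLock`-guarded fields:

```rust
/// Standalone illustration of the inclusion-queue pattern used below: items
/// are queued as they arrive, handed out wholesale for block production, then
/// pruned once they appear in a processed block.
struct InclusionQueue<T: PartialEq + Clone> {
    queued: Vec<T>,
}

impl<T: PartialEq + Clone> InclusionQueue<T> {
    /// Queue an item for inclusion in a future block.
    fn receive(&mut self, item: T) {
        self.queued.push(item);
    }

    /// Everything currently eligible for inclusion in the next block.
    fn for_block(&self) -> Vec<T> {
        self.queued.clone()
    }

    /// Drop anything that made it into a block so it is not re-submitted.
    fn set_as_included(&mut self, included: &[T]) {
        self.queued.retain(|queued| !included.contains(queued));
    }
}
```

The real methods collect matching indices and `remove` them one by one; `retain` is simply the most compact way to express the same de-duplication intent.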
+ pub fn set_deposits_as_included(&self, included_deposits: &[Deposit]) { + // TODO: method does not take forks into account; consider this. + // + // https://github.com/sigp/lighthouse/issues/275 + let mut indices_to_delete = vec![]; + + for included in included_deposits { + for (i, for_inclusion) in self.deposits_for_inclusion.read().iter().enumerate() { + if included == for_inclusion { + indices_to_delete.push(i); + } + } + } + + let deposits_for_inclusion = &mut self.deposits_for_inclusion.write(); + for i in indices_to_delete { + deposits_for_inclusion.remove(i); + } + } + + /// Accept some exit and queue it for inclusion in an appropriate block. + pub fn receive_exit_for_inclusion(&self, exit: VoluntaryExit) { + // TODO: exits are not checked for validity; check them. + // + // https://github.com/sigp/lighthouse/issues/276 + self.exits_for_inclusion.write().push(exit); + } + + /// Return a vec of exits suitable for inclusion in some block. + pub fn get_exits_for_block(&self) -> Vec { + // TODO: exits are indiscriminately included; check them for validity. + // + // https://github.com/sigp/lighthouse/issues/275 + self.exits_for_inclusion.read().clone() + } + + /// Takes a list of `Deposits` that were included in recent blocks and removes them from the + /// inclusion queue. + /// + /// This ensures that `Deposits` are not included twice in successive blocks. + pub fn set_exits_as_included(&self, included_exits: &[VoluntaryExit]) { + // TODO: method does not take forks into account; consider this. + let mut indices_to_delete = vec![]; + + for included in included_exits { + for (i, for_inclusion) in self.exits_for_inclusion.read().iter().enumerate() { + if included == for_inclusion { + indices_to_delete.push(i); + } + } + } + + let exits_for_inclusion = &mut self.exits_for_inclusion.write(); + for i in indices_to_delete { + exits_for_inclusion.remove(i); + } + } + + /// Accept some transfer and queue it for inclusion in an appropriate block. + pub fn receive_transfer_for_inclusion(&self, transfer: Transfer) { + // TODO: transfers are not checked for validity; check them. + // + // https://github.com/sigp/lighthouse/issues/276 + self.transfers_for_inclusion.write().push(transfer); + } + + /// Return a vec of transfers suitable for inclusion in some block. + pub fn get_transfers_for_block(&self) -> Vec { + // TODO: transfers are indiscriminately included; check them for validity. + // + // https://github.com/sigp/lighthouse/issues/275 + self.transfers_for_inclusion.read().clone() + } + + /// Takes a list of `Deposits` that were included in recent blocks and removes them from the + /// inclusion queue. + /// + /// This ensures that `Deposits` are not included twice in successive blocks. + pub fn set_transfers_as_included(&self, included_transfers: &[Transfer]) { + // TODO: method does not take forks into account; consider this. + let mut indices_to_delete = vec![]; + + for included in included_transfers { + for (i, for_inclusion) in self.transfers_for_inclusion.read().iter().enumerate() { + if included == for_inclusion { + indices_to_delete.push(i); + } + } + } + + let transfers_for_inclusion = &mut self.transfers_for_inclusion.write(); + for i in indices_to_delete { + transfers_for_inclusion.remove(i); + } + } + + /// Accept some proposer slashing and queue it for inclusion in an appropriate block. + pub fn receive_proposer_slashing_for_inclusion(&self, proposer_slashing: ProposerSlashing) { + // TODO: proposer_slashings are not checked for validity; check them. 
+ // + // https://github.com/sigp/lighthouse/issues/276 + self.proposer_slashings_for_inclusion + .write() + .push(proposer_slashing); + } + + /// Return a vec of proposer slashings suitable for inclusion in some block. + pub fn get_proposer_slashings_for_block(&self) -> Vec { + // TODO: proposer_slashings are indiscriminately included; check them for validity. + // + // https://github.com/sigp/lighthouse/issues/275 + self.proposer_slashings_for_inclusion.read().clone() + } + + /// Takes a list of `ProposerSlashings` that were included in recent blocks and removes them + /// from the inclusion queue. + /// + /// This ensures that `ProposerSlashings` are not included twice in successive blocks. + pub fn set_proposer_slashings_as_included( + &self, + included_proposer_slashings: &[ProposerSlashing], + ) { + // TODO: method does not take forks into account; consider this. + // + // https://github.com/sigp/lighthouse/issues/275 + let mut indices_to_delete = vec![]; + + for included in included_proposer_slashings { + for (i, for_inclusion) in self + .proposer_slashings_for_inclusion + .read() + .iter() + .enumerate() + { + if included == for_inclusion { + indices_to_delete.push(i); + } + } + } + + let proposer_slashings_for_inclusion = &mut self.proposer_slashings_for_inclusion.write(); + for i in indices_to_delete { + proposer_slashings_for_inclusion.remove(i); + } + } + + /// Accept some attester slashing and queue it for inclusion in an appropriate block. + pub fn receive_attester_slashing_for_inclusion(&self, attester_slashing: AttesterSlashing) { + // TODO: attester_slashings are not checked for validity; check them. + // + // https://github.com/sigp/lighthouse/issues/276 + self.attester_slashings_for_inclusion + .write() + .push(attester_slashing); + } + + /// Return a vec of attester slashings suitable for inclusion in some block. + pub fn get_attester_slashings_for_block(&self) -> Vec { + // TODO: attester_slashings are indiscriminately included; check them for validity. + // + // https://github.com/sigp/lighthouse/issues/275 + self.attester_slashings_for_inclusion.read().clone() + } + + /// Takes a list of `AttesterSlashings` that were included in recent blocks and removes them + /// from the inclusion queue. + /// + /// This ensures that `AttesterSlashings` are not included twice in successive blocks. + pub fn set_attester_slashings_as_included( + &self, + included_attester_slashings: &[AttesterSlashing], + ) { + // TODO: method does not take forks into account; consider this. + // + // https://github.com/sigp/lighthouse/issues/275 + let mut indices_to_delete = vec![]; + + for included in included_attester_slashings { + for (i, for_inclusion) in self + .attester_slashings_for_inclusion + .read() + .iter() + .enumerate() + { + if included == for_inclusion { + indices_to_delete.push(i); + } + } + } + + let attester_slashings_for_inclusion = &mut self.attester_slashings_for_inclusion.write(); + for i in indices_to_delete { + attester_slashings_for_inclusion.remove(i); + } + } + /// Dumps the entire canonical chain, from the head to genesis to a vector for analysis. /// /// This could be a very expensive operation and should only be done in testing/analysis @@ -410,6 +625,8 @@ where last_slot = slot; } + dump.reverse(); + Ok(dump) } @@ -459,7 +676,7 @@ where // Transition the parent state to the present slot. 
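Both the slot catch-up loop earlier in this file and the block-processing path that follows advance a state one slot at a time with `per_slot_processing`. A reduced sketch of that loop, assuming the `(state, previous_block_root, spec)` argument order and the `SlotProcessingError` error type implied by its uses in this diff:

```rust
use state_processing::{per_slot_processing, SlotProcessingError};
use types::{BeaconState, ChainSpec, Hash256, Slot};

/// Advance `state` until it reaches `target_slot`, applying the per-slot
/// state transition for every slot in between (including skipped slots).
fn catch_up_to_slot(
    state: &mut BeaconState,
    previous_block_root: Hash256,
    target_slot: Slot,
    spec: &ChainSpec,
) -> Result<(), SlotProcessingError> {
    for _ in state.slot.as_u64()..target_slot.as_u64() {
        per_slot_processing(state, previous_block_root, spec)?;
    }
    Ok(())
}
```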
let mut state = parent_state; for _ in state.slot.as_u64()..present_slot.as_u64() { - if let Err(e) = state.per_slot_processing(parent_block_root, &self.spec) { + if let Err(e) = per_slot_processing(&mut state, parent_block_root, &self.spec) { return Ok(BlockProcessingOutcome::InvalidBlock( InvalidBlock::SlotProcessingError(e), )); @@ -468,7 +685,7 @@ where // Apply the received block to its parent state (which has been transitioned into this // slot). - if let Err(e) = state.per_block_processing(&block, &self.spec) { + if let Err(e) = per_block_processing(&mut state, &block, &self.spec) { return Ok(BlockProcessingOutcome::InvalidBlock( InvalidBlock::PerBlockProcessingError(e), )); @@ -486,6 +703,13 @@ where self.block_store.put(&block_root, &ssz_encode(&block)[..])?; self.state_store.put(&state_root, &ssz_encode(&state)[..])?; + // Update the inclusion queues so they aren't re-submitted. + self.set_deposits_as_included(&block.body.deposits[..]); + self.set_transfers_as_included(&block.body.transfers[..]); + self.set_exits_as_included(&block.body.voluntary_exits[..]); + self.set_proposer_slashings_as_included(&block.body.proposer_slashings[..]); + self.set_attester_slashings_as_included(&block.body.attester_slashings[..]); + // run the fork_choice add_block logic self.fork_choice .write() @@ -496,17 +720,9 @@ where // TODO: this is a first-in-best-dressed scenario that is not ideal; fork_choice should be // run instead. if self.head().beacon_block_root == parent_block_root { - self.update_canonical_head( - block.clone(), - block_root.clone(), - state.clone(), - state_root, - ); + self.update_canonical_head(block.clone(), block_root, state.clone(), state_root); // Update the local state variable. - *self.state.write() = state.clone(); - // Update the cached state variable. - *self.cached_state.write() = - CachedBeaconState::from_beacon_state(state.clone(), self.spec.clone())?; + *self.state.write() = state; } Ok(BlockProcessingOutcome::ValidBlock(ValidBlock::Processed)) @@ -516,7 +732,10 @@ where /// /// The produced block will not be inherently valid, it must be signed by a block producer. /// Block signing is out of the scope of this function and should be done by a separate program. - pub fn produce_block(&self, randao_reveal: Signature) -> Option<(BeaconBlock, BeaconState)> { + pub fn produce_block( + &self, + randao_reveal: Signature, + ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { debug!("Producing block at slot {}...", self.state.read().slot); let mut state = self.state.read().clone(); @@ -533,7 +752,9 @@ where attestations.len() ); - let parent_root = *state.get_block_root(state.slot.saturating_sub(1_u64), &self.spec)?; + let parent_root = *state + .get_block_root(state.slot.saturating_sub(1_u64), &self.spec) + .ok_or_else(|| BlockProductionError::UnableToGetBlockRootFromState)?; let mut block = BeaconBlock { slot: state.slot, @@ -547,31 +768,24 @@ where }, signature: self.spec.empty_signature.clone(), // To be completed by a validator. 
body: BeaconBlockBody { - proposer_slashings: vec![], - attester_slashings: vec![], + proposer_slashings: self.get_proposer_slashings_for_block(), + attester_slashings: self.get_attester_slashings_for_block(), attestations, - deposits: vec![], - exits: vec![], + deposits: self.get_deposits_for_block(), + voluntary_exits: self.get_exits_for_block(), + transfers: self.get_transfers_for_block(), }, }; trace!("BeaconChain::produce_block: updating state for new block.",); - let result = - state.per_block_processing_without_verifying_block_signature(&block, &self.spec); - trace!( - "BeaconNode::produce_block: state processing result: {:?}", - result - ); - result.ok()?; + per_block_processing_without_verifying_block_signature(&mut state, &block, &self.spec)?; let state_root = state.canonical_root(); block.state_root = state_root; - trace!("Block produced."); - - Some((block, state)) + Ok((block, state)) } // TODO: Left this as is, modify later diff --git a/beacon_node/beacon_chain/src/cached_beacon_state.rs b/beacon_node/beacon_chain/src/cached_beacon_state.rs deleted file mode 100644 index e14e9fe99..000000000 --- a/beacon_node/beacon_chain/src/cached_beacon_state.rs +++ /dev/null @@ -1,150 +0,0 @@ -use log::{debug, trace}; -use std::collections::HashMap; -use types::{beacon_state::BeaconStateError, BeaconState, ChainSpec, Epoch, Slot}; - -pub const CACHE_PREVIOUS: bool = false; -pub const CACHE_CURRENT: bool = true; -pub const CACHE_NEXT: bool = false; - -pub type CrosslinkCommittees = Vec<(Vec, u64)>; -pub type Shard = u64; -pub type CommitteeIndex = u64; -pub type AttestationDuty = (Slot, Shard, CommitteeIndex); -pub type AttestationDutyMap = HashMap; - -// TODO: CachedBeaconState is presently duplicating `BeaconState` and `ChainSpec`. This is a -// massive memory waste, switch them to references. 
- -pub struct CachedBeaconState { - pub state: BeaconState, - committees: Vec>, - attestation_duties: Vec, - next_epoch: Epoch, - current_epoch: Epoch, - previous_epoch: Epoch, - spec: ChainSpec, -} - -impl CachedBeaconState { - pub fn from_beacon_state( - state: BeaconState, - spec: ChainSpec, - ) -> Result { - let current_epoch = state.current_epoch(&spec); - let previous_epoch = if current_epoch == spec.genesis_epoch { - current_epoch - } else { - current_epoch.saturating_sub(1_u64) - }; - let next_epoch = state.next_epoch(&spec); - - let mut committees: Vec> = Vec::with_capacity(3); - let mut attestation_duties: Vec = Vec::with_capacity(3); - - if CACHE_PREVIOUS { - debug!("from_beacon_state: building previous epoch cache."); - let cache = build_epoch_cache(&state, previous_epoch, &spec)?; - committees.push(cache.committees); - attestation_duties.push(cache.attestation_duty_map); - } else { - committees.push(vec![]); - attestation_duties.push(HashMap::new()); - } - if CACHE_CURRENT { - debug!("from_beacon_state: building current epoch cache."); - let cache = build_epoch_cache(&state, current_epoch, &spec)?; - committees.push(cache.committees); - attestation_duties.push(cache.attestation_duty_map); - } else { - committees.push(vec![]); - attestation_duties.push(HashMap::new()); - } - if CACHE_NEXT { - debug!("from_beacon_state: building next epoch cache."); - let cache = build_epoch_cache(&state, next_epoch, &spec)?; - committees.push(cache.committees); - attestation_duties.push(cache.attestation_duty_map); - } else { - committees.push(vec![]); - attestation_duties.push(HashMap::new()); - } - - Ok(Self { - state, - committees, - attestation_duties, - next_epoch, - current_epoch, - previous_epoch, - spec, - }) - } - - fn slot_to_cache_index(&self, slot: Slot) -> Option { - trace!("slot_to_cache_index: cache lookup"); - match slot.epoch(self.spec.epoch_length) { - epoch if (epoch == self.previous_epoch) & CACHE_PREVIOUS => Some(0), - epoch if (epoch == self.current_epoch) & CACHE_CURRENT => Some(1), - epoch if (epoch == self.next_epoch) & CACHE_NEXT => Some(2), - _ => None, - } - } - - /// Returns the `slot`, `shard` and `committee_index` for which a validator must produce an - /// attestation. - /// - /// Cached method. - /// - /// Spec v0.2.0 - pub fn attestation_slot_and_shard_for_validator( - &self, - validator_index: usize, - _spec: &ChainSpec, - ) -> Result, BeaconStateError> { - // Get the result for this epoch. - let cache_index = self - .slot_to_cache_index(self.state.slot) - .expect("Current epoch should always have a cache index."); - - let duties = self.attestation_duties[cache_index] - .get(&(validator_index as u64)) - .and_then(|tuple| Some(*tuple)); - - Ok(duties) - } -} - -struct EpochCacheResult { - committees: Vec, - attestation_duty_map: AttestationDutyMap, -} - -fn build_epoch_cache( - state: &BeaconState, - epoch: Epoch, - spec: &ChainSpec, -) -> Result { - let mut epoch_committees: Vec = - Vec::with_capacity(spec.epoch_length as usize); - let mut attestation_duty_map: AttestationDutyMap = HashMap::new(); - - for slot in epoch.slot_iter(spec.epoch_length) { - let slot_committees = state.get_crosslink_committees_at_slot(slot, false, spec)?; - - for (committee, shard) in slot_committees { - for (committee_index, validator_index) in committee.iter().enumerate() { - attestation_duty_map.insert( - *validator_index as u64, - (slot, shard, committee_index as u64), - ); - } - } - - epoch_committees.push(state.get_crosslink_committees_at_slot(slot, false, spec)?) 
- } - - Ok(EpochCacheResult { - committees: epoch_committees, - attestation_duty_map, - }) -} diff --git a/beacon_node/beacon_chain/src/checkpoint.rs b/beacon_node/beacon_chain/src/checkpoint.rs index bef97d2ed..828e462de 100644 --- a/beacon_node/beacon_chain/src/checkpoint.rs +++ b/beacon_node/beacon_chain/src/checkpoint.rs @@ -3,7 +3,7 @@ use types::{BeaconBlock, BeaconState, Hash256}; /// Represents some block and it's associated state. Generally, this will be used for tracking the /// head, justified head and finalized head. -#[derive(PartialEq, Clone, Serialize)] +#[derive(Clone, Serialize)] pub struct CheckPoint { pub beacon_block: BeaconBlock, pub beacon_block_root: Hash256, diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs new file mode 100644 index 000000000..58c3f87ae --- /dev/null +++ b/beacon_node/beacon_chain/src/errors.rs @@ -0,0 +1,33 @@ +use fork_choice::ForkChoiceError; +use state_processing::BlockProcessingError; +use types::*; + +macro_rules! easy_from_to { + ($from: ident, $to: ident) => { + impl From<$from> for $to { + fn from(e: $from) -> $to { + $to::$from(e) + } + } + }; +} + +#[derive(Debug, PartialEq)] +pub enum BeaconChainError { + InsufficientValidators, + BadRecentBlockRoots, + BeaconStateError(BeaconStateError), + DBInconsistent(String), + DBError(String), + ForkChoiceError(ForkChoiceError), + MissingBeaconBlock(Hash256), + MissingBeaconState(Hash256), +} + +#[derive(Debug, PartialEq)] +pub enum BlockProductionError { + UnableToGetBlockRootFromState, + BlockProcessingError(BlockProcessingError), +} + +easy_from_to!(BlockProcessingError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index fdacdd2b1..0e879a415 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -1,8 +1,9 @@ mod attestation_aggregator; mod beacon_chain; -mod cached_beacon_state; mod checkpoint; +mod errors; -pub use self::beacon_chain::{BeaconChain, Error}; +pub use self::beacon_chain::{BeaconChain, BlockProcessingOutcome, InvalidBlock, ValidBlock}; pub use self::checkpoint::CheckPoint; +pub use self::errors::BeaconChainError; pub use fork_choice::{ForkChoice, ForkChoiceAlgorithm, ForkChoiceError}; diff --git a/beacon_node/beacon_chain/test_harness/Cargo.toml b/beacon_node/beacon_chain/test_harness/Cargo.toml index 77b52ccf6..bd7a58270 100644 --- a/beacon_node/beacon_chain/test_harness/Cargo.toml +++ b/beacon_node/beacon_chain/test_harness/Cargo.toml @@ -4,12 +4,21 @@ version = "0.1.0" authors = ["Paul Hauner "] edition = "2018" +[[bin]] +name = "test_harness" +path = "src/bin.rs" + +[lib] +name = "test_harness" +path = "src/lib.rs" + [[bench]] name = "state_transition" harness = false [dev-dependencies] criterion = "0.2" +state_processing = { path = "../../../eth2/state_processing" } [dependencies] attester = { path = "../../../eth2/attester" } @@ -17,6 +26,7 @@ beacon_chain = { path = "../../beacon_chain" } block_proposer = { path = "../../../eth2/block_proposer" } bls = { path = "../../../eth2/utils/bls" } boolean-bitfield = { path = "../../../eth2/utils/boolean-bitfield" } +clap = "2.32.0" db = { path = "../../db" } parking_lot = "0.7" failure = "0.1" @@ -32,3 +42,4 @@ serde_json = "1.0" slot_clock = { path = "../../../eth2/utils/slot_clock" } ssz = { path = "../../../eth2/utils/ssz" } types = { path = "../../../eth2/types" } +yaml-rust = "0.4.2" diff --git a/beacon_node/beacon_chain/test_harness/README.md 
b/beacon_node/beacon_chain/test_harness/README.md
new file mode 100644
index 000000000..9dfd90d60
--- /dev/null
+++ b/beacon_node/beacon_chain/test_harness/README.md
@@ -0,0 +1,150 @@
+# Test Harness
+
+Provides a testing environment for the `BeaconChain`, `Attester` and `BlockProposer` objects.
+
+This environment bypasses networking and client run-times and connects the `Attester` and `Proposer`
+directly to the `BeaconChain` via an `Arc`.
+
+The `BeaconChainHarness` contains a single `BeaconChain` instance and many `ValidatorHarness`
+instances. All of the `ValidatorHarness` instances work to advance the `BeaconChain` by
+producing blocks and attestations.
+
+The crate consists of a library and a binary; examples for using both are
+described below.
+
+## YAML
+
+Both the library and the binary are capable of parsing tests from a YAML file;
+in fact, this is the sole purpose of the binary.
+
+You can find YAML test cases [here](specs/). An example is included below:
+
+```yaml
+title: Validator Registry Tests
+summary: Tests deposit and slashing effects on validator registry.
+test_suite: validator_registry
+fork: tchaikovsky
+version: 1.0
+test_cases:
+  - config:
+      slots_per_epoch: 64
+      deposits_for_chain_start: 1000
+      num_slots: 64
+      skip_slots: [2, 3]
+      deposits:
+        # At slot 1, create a new validator deposit of 32 ETH.
+        - slot: 1
+          amount: 32
+        # Trigger more deposits...
+        - slot: 3
+          amount: 32
+        - slot: 5
+          amount: 32
+      proposer_slashings:
+        # At slot 2, trigger a proposer slashing for validator #42.
+        - slot: 2
+          validator_index: 42
+        # Trigger another slashing...
+        - slot: 8
+          validator_index: 13
+      attester_slashings:
+        # At slot 2, trigger an attester slashing for validators #11 and #12.
+        - slot: 2
+          validator_indices: [11, 12]
+        # Trigger another slashing...
+        - slot: 5
+          validator_indices: [14]
+    results:
+      num_skipped_slots: 2
+      states:
+        - slot: 63
+          num_validators: 1003
+          slashed_validators: [11, 12, 13, 14, 42]
+          exited_validators: []
+
+```
+
+Thanks to [prysm](http://github.com/prysmaticlabs/prysm) for coming up with the
+base YAML format.
+
+### Notes
+
+Wherever `slot` is used, it is actually the "slot height", or slots since
+genesis. This allows the tests to disregard the `GENESIS_EPOCH`.
+
+### Differences from Prysmatic's format
+
+1. The detail for `deposits`, `proposer_slashings` and `attester_slashings` is
+   omitted from the test specification. It is assumed that they are valid
+   objects.
+2. There is a `states` list in `results` that runs checks against any state
+   specified by a `slot` number. This is in contrast to the variables in
+   `results` that assume the last (highest) state should be inspected.
+
+#### Reasoning
+
+Respective reasoning for the above changes:
+
+1. This removes the concerns of the actual object structure from the tests.
+   This allows for more variation in the deposits/slashings objects without
+   needing to update the tests. Also, it makes it easier to create
+   tests.
+2. This gives more fine-grained control over the tests. It allows for checking
+   that certain events happened at certain times whilst making the tests only
+   slightly more verbose.
+
+_Notes: it may be useful to add an extra field to each slashing type to
+indicate if it should be valid or not. It also may be useful to add an option
+for double-vote/surround-vote attester slashings. The `amount` field was left
+on `deposits` as it changes the behaviour of state significantly._
+
+## Binary Usage Example
+
+Follow these steps to run as a binary:
+
+1.
Navigate to the root of this crate (where this readme is located) +2. Run `$ cargo run --release -- --yaml examples/validator_registry.yaml` + +_Note: the `--release` flag builds the binary without all the debugging +instrumentation. The test is much faster built using `--release`. As is +customary in cargo, the flags before `--` are passed to cargo and the flags +after are passed to the binary._ + +### CLI Options + +``` +Lighthouse Test Harness Runner 0.0.1 +Sigma Prime +Runs `test_harness` using a YAML test_case. + +USAGE: + test_harness --log-level --yaml + +FLAGS: + -h, --help Prints help information + -V, --version Prints version information + +OPTIONS: + --log-level Logging level. [default: debug] [possible values: error, warn, info, debug, trace] + --yaml YAML file test_case. +``` + + +## Library Usage Example + +```rust +use test_harness::BeaconChainHarness; +use types::ChainSpec; + +let validator_count = 8; +let spec = ChainSpec::few_validators(); + +let mut harness = BeaconChainHarness::new(spec, validator_count); + +harness.advance_chain_with_block(); + +let chain = harness.chain_dump().unwrap(); + +// One block should have been built on top of the genesis block. +assert_eq!(chain.len(), 2); +``` diff --git a/beacon_node/beacon_chain/test_harness/benches/state_transition.rs b/beacon_node/beacon_chain/test_harness/benches/state_transition.rs index 013ecfd1e..7d1c44653 100644 --- a/beacon_node/beacon_chain/test_harness/benches/state_transition.rs +++ b/beacon_node/beacon_chain/test_harness/benches/state_transition.rs @@ -1,6 +1,7 @@ use criterion::Criterion; use criterion::{black_box, criterion_group, criterion_main, Benchmark}; // use env_logger::{Builder, Env}; +use state_processing::SlotProcessable; use test_harness::BeaconChainHarness; use types::{ChainSpec, Hash256}; @@ -10,7 +11,7 @@ fn mid_epoch_state_transition(c: &mut Criterion) { let validator_count = 1000; let mut rig = BeaconChainHarness::new(ChainSpec::foundation(), validator_count); - let epoch_depth = (rig.spec.epoch_length * 2) + (rig.spec.epoch_length / 2); + let epoch_depth = (rig.spec.slots_per_epoch * 2) + (rig.spec.slots_per_epoch / 2); for _ in 0..epoch_depth { rig.advance_chain_with_block(); @@ -18,7 +19,7 @@ fn mid_epoch_state_transition(c: &mut Criterion) { let state = rig.beacon_chain.state.read().clone(); - assert!((state.slot + 1) % rig.spec.epoch_length != 0); + assert!((state.slot + 1) % rig.spec.slots_per_epoch != 0); c.bench_function("mid-epoch state transition 10k validators", move |b| { let state = state.clone(); @@ -35,7 +36,7 @@ fn epoch_boundary_state_transition(c: &mut Criterion) { let validator_count = 10000; let mut rig = BeaconChainHarness::new(ChainSpec::foundation(), validator_count); - let epoch_depth = rig.spec.epoch_length * 2; + let epoch_depth = rig.spec.slots_per_epoch * 2; for _ in 0..(epoch_depth - 1) { rig.advance_chain_with_block(); @@ -43,7 +44,7 @@ fn epoch_boundary_state_transition(c: &mut Criterion) { let state = rig.beacon_chain.state.read().clone(); - assert_eq!((state.slot + 1) % rig.spec.epoch_length, 0); + assert_eq!((state.slot + 1) % rig.spec.slots_per_epoch, 0); c.bench( "routines", diff --git a/beacon_node/beacon_chain/test_harness/specs/validator_registry.yaml b/beacon_node/beacon_chain/test_harness/specs/validator_registry.yaml new file mode 100644 index 000000000..aea7dcf31 --- /dev/null +++ b/beacon_node/beacon_chain/test_harness/specs/validator_registry.yaml @@ -0,0 +1,59 @@ +title: Validator Registry Tests +summary: Tests deposit and slashing effects on 
validator registry. +test_suite: validator_registry +fork: tchaikovsky +version: 1.0 +test_cases: + - config: + slots_per_epoch: 64 + deposits_for_chain_start: 1000 + num_slots: 64 + skip_slots: [2, 3] + deposits: + # At slot 1, create a new validator deposit of 5 ETH. + - slot: 1 + amount: 5000000000 + # Trigger more deposits... + - slot: 3 + amount: 5000000000 + - slot: 5 + amount: 32000000000 + exits: + # At slot 10, submit an exit for validator #50. + - slot: 10 + validator_index: 50 + transfers: + - slot: 6 + from: 1000 + to: 1001 + amount: 5000000000 + proposer_slashings: + # At slot 2, trigger a proposer slashing for validator #42. + - slot: 2 + validator_index: 42 + # Trigger another slashing... + - slot: 8 + validator_index: 13 + attester_slashings: + # At slot 2, trigger an attester slashing for validators #11 and #12. + - slot: 2 + validator_indices: [11, 12] + # Trigger another slashing... + - slot: 5 + validator_indices: [14] + results: + num_skipped_slots: 2 + states: + - slot: 63 + num_validators: 1003 + slashed_validators: [11, 12, 13, 14, 42] + exited_validators: [] + exit_initiated_validators: [50] + balances: + - validator_index: 1000 + comparison: "eq" + balance: 0 + - validator_index: 1001 + comparison: "eq" + balance: 10000000000 + diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs index a20441beb..f220619ce 100644 --- a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs +++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs @@ -1,7 +1,7 @@ use super::ValidatorHarness; -use beacon_chain::BeaconChain; -pub use beacon_chain::{CheckPoint, Error as BeaconChainError}; -use bls::create_proof_of_possession; +use beacon_chain::{BeaconChain, BlockProcessingOutcome}; +pub use beacon_chain::{BeaconChainError, CheckPoint}; +use bls::{create_proof_of_possession, get_withdrawal_credentials}; use db::{ stores::{BeaconBlockStore, BeaconStateStore}, MemoryDB, @@ -11,14 +11,9 @@ use log::debug; use rayon::prelude::*; use slot_clock::TestingSlotClock; use std::collections::HashSet; -use std::fs::File; -use std::io::prelude::*; use std::iter::FromIterator; use std::sync::Arc; -use types::{ - BeaconBlock, ChainSpec, Deposit, DepositData, DepositInput, Eth1Data, FreeAttestation, Hash256, - Keypair, Slot, -}; +use types::*; /// The beacon chain harness simulates a single beacon node with `validator_count` validators connected /// to it. Each validator is provided a borrow to the beacon chain, where it may read @@ -72,7 +67,13 @@ impl BeaconChainHarness { timestamp: genesis_time - 1, deposit_input: DepositInput { pubkey: keypair.pk.clone(), - withdrawal_credentials: Hash256::zero(), // Withdrawal not possible. + // Validator can withdraw using their main keypair. 
+ withdrawal_credentials: Hash256::from_slice( + &get_withdrawal_credentials( + &keypair.pk, + spec.bls_withdrawal_prefix_byte, + )[..], + ), proof_of_possession: create_proof_of_possession(&keypair), }, }, @@ -130,13 +131,13 @@ impl BeaconChainHarness { let nth_slot = slot - slot - .epoch(self.spec.epoch_length) - .start_slot(self.spec.epoch_length); - let nth_epoch = slot.epoch(self.spec.epoch_length) - self.spec.genesis_epoch; + .epoch(self.spec.slots_per_epoch) + .start_slot(self.spec.slots_per_epoch); + let nth_epoch = slot.epoch(self.spec.slots_per_epoch) - self.spec.genesis_epoch; debug!( "Advancing BeaconChain to slot {}, epoch {} (epoch height: {}, slot {} in epoch.).", slot, - slot.epoch(self.spec.epoch_length), + slot.epoch(self.spec.slots_per_epoch), nth_epoch, nth_slot ); @@ -157,7 +158,7 @@ impl BeaconChainHarness { .beacon_chain .state .read() - .get_crosslink_committees_at_slot(present_slot, false, &self.spec) + .get_crosslink_committees_at_slot(present_slot, &self.spec) .unwrap() .iter() .fold(vec![], |mut acc, (committee, _slot)| { @@ -223,7 +224,10 @@ impl BeaconChainHarness { debug!("Producing block..."); let block = self.produce_block(); debug!("Submitting block for processing..."); - self.beacon_chain.process_block(block).unwrap(); + match self.beacon_chain.process_block(block) { + Ok(BlockProcessingOutcome::ValidBlock(_)) => {} + other => panic!("block processing failed with {:?}", other), + }; debug!("...block processed by BeaconChain."); debug!("Producing free attestations..."); @@ -242,16 +246,76 @@ impl BeaconChainHarness { debug!("Free attestations processed."); } + /// Signs a message using some validators secret key with the `Fork` info from the latest state + /// of the `BeaconChain`. + /// + /// Useful for producing slashable messages and other objects that `BeaconChainHarness` does + /// not produce naturally. + pub fn validator_sign( + &self, + validator_index: usize, + message: &[u8], + epoch: Epoch, + domain_type: Domain, + ) -> Option { + let validator = self.validators.get(validator_index)?; + + let domain = self + .spec + .get_domain(epoch, domain_type, &self.beacon_chain.state.read().fork); + + Some(Signature::new(message, domain, &validator.keypair.sk)) + } + + /// Submit a deposit to the `BeaconChain` and, if given a keypair, create a new + /// `ValidatorHarness` instance for this validator. + /// + /// If a new `ValidatorHarness` was created, the validator should become fully operational as + /// if the validator were created during `BeaconChainHarness` instantiation. + pub fn add_deposit(&mut self, deposit: Deposit, keypair: Option) { + self.beacon_chain.receive_deposit_for_inclusion(deposit); + + // If a keypair is present, add a new `ValidatorHarness` to the rig. + if let Some(keypair) = keypair { + let validator = + ValidatorHarness::new(keypair, self.beacon_chain.clone(), self.spec.clone()); + self.validators.push(validator); + } + } + + /// Submit an exit to the `BeaconChain` for inclusion in some block. + /// + /// Note: the `ValidatorHarness` for this validator continues to exist. Once it is exited it + /// will stop receiving duties from the beacon chain and just do nothing when prompted to + /// produce/attest. + pub fn add_exit(&mut self, exit: VoluntaryExit) { + self.beacon_chain.receive_exit_for_inclusion(exit); + } + + /// Submit an transfer to the `BeaconChain` for inclusion in some block. 
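The `validator_sign` helper above combines the two ingredients every signed object in these tests needs: a fork-aware domain from `ChainSpec::get_domain` and a BLS signature over the message. A reduced sketch of that flow, assuming `Keypair` and `Signature` are re-exported by `types` as the existing imports in this diff suggest:

```rust
use types::{ChainSpec, Domain, Epoch, Fork, Keypair, Signature};

/// Sign `message` for the given epoch and domain type, mirroring what
/// `validator_sign` does with the fork taken from the current beacon state.
fn sign_with_domain(
    keypair: &Keypair,
    message: &[u8],
    epoch: Epoch,
    domain_type: Domain,
    fork: &Fork,
    spec: &ChainSpec,
) -> Signature {
    let domain = spec.get_domain(epoch, domain_type, fork);
    Signature::new(message, domain, &keypair.sk)
}
```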
+ pub fn add_transfer(&mut self, transfer: Transfer) { + self.beacon_chain.receive_transfer_for_inclusion(transfer); + } + + /// Submit a proposer slashing to the `BeaconChain` for inclusion in some block. + pub fn add_proposer_slashing(&mut self, proposer_slashing: ProposerSlashing) { + self.beacon_chain + .receive_proposer_slashing_for_inclusion(proposer_slashing); + } + + /// Submit an attester slashing to the `BeaconChain` for inclusion in some block. + pub fn add_attester_slashing(&mut self, attester_slashing: AttesterSlashing) { + self.beacon_chain + .receive_attester_slashing_for_inclusion(attester_slashing); + } + + /// Executes the fork choice rule on the `BeaconChain`, selecting a new canonical head. + pub fn run_fork_choice(&mut self) { + self.beacon_chain.fork_choice().unwrap() + } + /// Dump all blocks and states from the canonical beacon chain. pub fn chain_dump(&self) -> Result, BeaconChainError> { self.beacon_chain.chain_dump() } - - /// Write the output of `chain_dump` to a JSON file. - pub fn dump_to_file(&self, filename: String, chain_dump: &[CheckPoint]) { - let json = serde_json::to_string(chain_dump).unwrap(); - let mut file = File::create(filename).unwrap(); - file.write_all(json.as_bytes()) - .expect("Failed writing dump to file."); - } } diff --git a/beacon_node/beacon_chain/test_harness/src/bin.rs b/beacon_node/beacon_chain/test_harness/src/bin.rs new file mode 100644 index 000000000..283cb0dfa --- /dev/null +++ b/beacon_node/beacon_chain/test_harness/src/bin.rs @@ -0,0 +1,69 @@ +use clap::{App, Arg}; +use env_logger::{Builder, Env}; +use std::{fs::File, io::prelude::*}; +use test_case::TestCase; +use yaml_rust::YamlLoader; + +mod beacon_chain_harness; +mod test_case; +mod validator_harness; + +use validator_harness::ValidatorHarness; + +fn main() { + let matches = App::new("Lighthouse Test Harness Runner") + .version("0.0.1") + .author("Sigma Prime ") + .about("Runs `test_harness` using a YAML test_case.") + .arg( + Arg::with_name("yaml") + .long("yaml") + .value_name("FILE") + .help("YAML file test_case.") + .required(true), + ) + .arg( + Arg::with_name("log") + .long("log-level") + .value_name("LOG_LEVEL") + .help("Logging level.") + .possible_values(&["error", "warn", "info", "debug", "trace"]) + .default_value("debug") + .required(true), + ) + .get_matches(); + + if let Some(log_level) = matches.value_of("log") { + Builder::from_env(Env::default().default_filter_or(log_level)).init(); + } + + if let Some(yaml_file) = matches.value_of("yaml") { + let docs = { + let mut file = File::open(yaml_file).unwrap(); + + let mut yaml_str = String::new(); + file.read_to_string(&mut yaml_str).unwrap(); + + YamlLoader::load_from_str(&yaml_str).unwrap() + }; + + for doc in &docs { + // For each `test_cases` YAML in the document, build a `TestCase`, execute it and + // assert that the execution result matches the test_case description. + // + // In effect, for each `test_case` a new `BeaconChainHarness` is created from genesis + // and a new `BeaconChain` is built as per the test_case. + // + // After the `BeaconChain` has been built out as per the test_case, a dump of all blocks + // and states in the chain is obtained and checked against the `results` specified in + // the `test_case`. + // + // If any of the expectations in the results are not met, the process + // panics with a message. 
+ for test_case in doc["test_cases"].as_vec().unwrap() { + let test_case = TestCase::from_yaml(test_case); + test_case.assert_result_valid(test_case.execute()) + } + } + } +} diff --git a/beacon_node/beacon_chain/test_harness/src/lib.rs b/beacon_node/beacon_chain/test_harness/src/lib.rs index b04fc6996..0703fd4a5 100644 --- a/beacon_node/beacon_chain/test_harness/src/lib.rs +++ b/beacon_node/beacon_chain/test_harness/src/lib.rs @@ -1,4 +1,32 @@ +//! Provides a testing environment for the `BeaconChain`, `Attester` and `BlockProposer` objects. +//! +//! This environment bypasses networking and client run-times and connects the `Attester` and `Proposer` +//! directly to the `BeaconChain` via an `Arc`. +//! +//! The `BeaconChainHarness` contains a single `BeaconChain` instance and many `ValidatorHarness` +//! instances. All of the `ValidatorHarness` instances work to advance the `BeaconChain` by +//! producing blocks and attestations. +//! +//! Example: +//! ``` +//! use test_harness::BeaconChainHarness; +//! use types::ChainSpec; +//! +//! let validator_count = 8; +//! let spec = ChainSpec::few_validators(); +//! +//! let mut harness = BeaconChainHarness::new(spec, validator_count); +//! +//! harness.advance_chain_with_block(); +//! +//! let chain = harness.chain_dump().unwrap(); +//! +//! // One block should have been built on top of the genesis block. +//! assert_eq!(chain.len(), 2); +//! ``` + mod beacon_chain_harness; +pub mod test_case; mod validator_harness; pub use self::beacon_chain_harness::BeaconChainHarness; diff --git a/beacon_node/beacon_chain/test_harness/src/test_case.rs b/beacon_node/beacon_chain/test_harness/src/test_case.rs new file mode 100644 index 000000000..b2709edfc --- /dev/null +++ b/beacon_node/beacon_chain/test_harness/src/test_case.rs @@ -0,0 +1,335 @@ +//! Defines execution and testing specs for a `BeaconChainHarness` instance. Supports loading from +//! a YAML file. + +use crate::beacon_chain_harness::BeaconChainHarness; +use beacon_chain::CheckPoint; +use bls::{create_proof_of_possession, get_withdrawal_credentials}; +use log::{info, warn}; +use ssz::SignedRoot; +use types::*; + +use types::{ + attester_slashing::AttesterSlashingBuilder, proposer_slashing::ProposerSlashingBuilder, +}; +use yaml_rust::Yaml; + +mod config; +mod results; +mod state_check; +mod yaml_helpers; + +pub use config::Config; +pub use results::Results; +pub use state_check::StateCheck; + +/// Defines the execution and testing of a `BeaconChainHarness` instantiation. +/// +/// Typical workflow is: +/// +/// 1. Instantiate the `TestCase` from YAML: `let test_case = TestCase::from_yaml(&my_yaml);` +/// 2. Execute the test_case: `let result = test_case.execute();` +/// 3. Test the results against the test_case: `test_case.assert_result_valid(result);` +#[derive(Debug)] +pub struct TestCase { + /// Defines the execution. + pub config: Config, + /// Defines tests to run against the execution result. + pub results: Results, +} + +/// The result of executing a `TestCase`. +/// +pub struct ExecutionResult { + /// The canonical beacon chain generated from the execution. + pub chain: Vec, + /// The spec used for execution. + pub spec: ChainSpec, +} + +impl TestCase { + /// Load the test case from a YAML document. + pub fn from_yaml(test_case: &Yaml) -> Self { + Self { + results: Results::from_yaml(&test_case["results"]), + config: Config::from_yaml(&test_case["config"]), + } + } + + /// Return a `ChainSpec::foundation()`. 
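The three-step workflow documented above (build from YAML, execute, assert) is exactly what `src/bin.rs` drives from the command line; the same thing can be done from library code. A hedged sketch, assuming `TestCase` is reachable as `test_harness::test_case::TestCase` via the `pub mod test_case` export:

```rust
use test_harness::test_case::TestCase;
use yaml_rust::YamlLoader;

/// Parse a YAML document and run every entry under `test_cases`, just as the
/// `test_harness` binary does after reading the file from disk.
fn run_yaml_test_cases(yaml_str: &str) {
    let docs = YamlLoader::load_from_str(yaml_str).expect("invalid YAML");

    for doc in &docs {
        for test_case in doc["test_cases"].as_vec().expect("missing test_cases") {
            let test_case = TestCase::from_yaml(test_case);
            test_case.assert_result_valid(test_case.execute());
        }
    }
}
```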
+ /// + /// If specified in `config`, returns it with a modified `slots_per_epoch`. + fn spec(&self) -> ChainSpec { + let mut spec = ChainSpec::foundation(); + + if let Some(n) = self.config.slots_per_epoch { + spec.slots_per_epoch = n; + } + + spec + } + + /// Executes the test case, returning an `ExecutionResult`. + #[allow(clippy::cyclomatic_complexity)] + pub fn execute(&self) -> ExecutionResult { + let spec = self.spec(); + let validator_count = self.config.deposits_for_chain_start; + let slots = self.config.num_slots; + + info!( + "Building BeaconChainHarness with {} validators...", + validator_count + ); + + let mut harness = BeaconChainHarness::new(spec, validator_count); + + info!("Starting simulation across {} slots...", slots); + + // Start at 1 because genesis counts as a slot. + for slot_height in 1..slots { + // Used to ensure that deposits in the same slot have incremental deposit indices. + let mut deposit_index_offset = 0; + + // Feed deposits to the BeaconChain. + if let Some(ref deposits) = self.config.deposits { + for (slot, amount) in deposits { + if *slot == slot_height { + info!("Including deposit at slot height {}.", slot_height); + let (deposit, keypair) = + build_deposit(&harness, *amount, deposit_index_offset); + harness.add_deposit(deposit, Some(keypair.clone())); + deposit_index_offset += 1; + } + } + } + + // Feed proposer slashings to the BeaconChain. + if let Some(ref slashings) = self.config.proposer_slashings { + for (slot, validator_index) in slashings { + if *slot == slot_height { + info!( + "Including proposer slashing at slot height {} for validator #{}.", + slot_height, validator_index + ); + let slashing = build_proposer_slashing(&harness, *validator_index); + harness.add_proposer_slashing(slashing); + } + } + } + + // Feed attester slashings to the BeaconChain. + if let Some(ref slashings) = self.config.attester_slashings { + for (slot, validator_indices) in slashings { + if *slot == slot_height { + info!( + "Including attester slashing at slot height {} for validators {:?}.", + slot_height, validator_indices + ); + let slashing = + build_double_vote_attester_slashing(&harness, &validator_indices[..]); + harness.add_attester_slashing(slashing); + } + } + } + + // Feed exits to the BeaconChain. + if let Some(ref exits) = self.config.exits { + for (slot, validator_index) in exits { + if *slot == slot_height { + info!( + "Including exit at slot height {} for validator {}.", + slot_height, validator_index + ); + let exit = build_exit(&harness, *validator_index); + harness.add_exit(exit); + } + } + } + + // Feed transfers to the BeaconChain. + if let Some(ref transfers) = self.config.transfers { + for (slot, from, to, amount) in transfers { + if *slot == slot_height { + info!( + "Including transfer at slot height {} from validator {}.", + slot_height, from + ); + let transfer = build_transfer(&harness, *from, *to, *amount); + harness.add_transfer(transfer); + } + } + } + + // Build a block or skip a slot. 
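Stripped of the operation-feeding above, `execute()` reduces to a per-slot choice between skipping and producing, which is the "build a block or skip a slot" step below. A minimal sketch of that loop, assuming the harness methods used here are public and that `config` carries the fields shown earlier:

```rust
use test_harness::test_case::Config;
use test_harness::BeaconChainHarness;

/// Skeleton of the per-slot driver: either skip the slot (no block is
/// produced) or have the validators produce a block and the chain process it.
fn drive(harness: &mut BeaconChainHarness, config: &Config) {
    for slot_height in 1..config.num_slots {
        let skip = config
            .skip_slots
            .as_ref()
            .map_or(false, |skips| skips.contains(&slot_height));

        if skip {
            harness.increment_beacon_chain_slot();
        } else {
            harness.advance_chain_with_block();
        }
    }
}
```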
+ match self.config.skip_slots { + Some(ref skip_slots) if skip_slots.contains(&slot_height) => { + warn!("Skipping slot at height {}.", slot_height); + harness.increment_beacon_chain_slot(); + } + _ => { + info!("Producing block at slot height {}.", slot_height); + harness.advance_chain_with_block(); + } + } + } + + harness.run_fork_choice(); + + info!("Test execution complete!"); + + info!("Building chain dump for analysis..."); + + ExecutionResult { + chain: harness.chain_dump().expect("Chain dump failed."), + spec: (*harness.spec).clone(), + } + } + + /// Checks that the `ExecutionResult` is consistent with the specifications in `self.results`. + /// + /// # Panics + /// + /// Panics with a message if any result does not match exepectations. + pub fn assert_result_valid(&self, execution_result: ExecutionResult) { + info!("Verifying test results..."); + let spec = &execution_result.spec; + + if let Some(num_skipped_slots) = self.results.num_skipped_slots { + assert_eq!( + execution_result.chain.len(), + self.config.num_slots as usize - num_skipped_slots, + "actual skipped slots != expected." + ); + info!( + "OK: Chain length is {} ({} skipped slots).", + execution_result.chain.len(), + num_skipped_slots + ); + } + + if let Some(ref state_checks) = self.results.state_checks { + for checkpoint in &execution_result.chain { + let state = &checkpoint.beacon_state; + + for state_check in state_checks { + let adjusted_state_slot = + state.slot - spec.genesis_epoch.start_slot(spec.slots_per_epoch); + + if state_check.slot == adjusted_state_slot { + state_check.assert_valid(state, spec); + } + } + } + } + } +} + +/// Builds a `Deposit` this is valid for the given `BeaconChainHarness` at its next slot. +fn build_transfer(harness: &BeaconChainHarness, from: u64, to: u64, amount: u64) -> Transfer { + let slot = harness.beacon_chain.state.read().slot + 1; + + let mut transfer = Transfer { + from, + to, + amount, + fee: 0, + slot, + pubkey: harness.validators[from as usize].keypair.pk.clone(), + signature: Signature::empty_signature(), + }; + + let message = transfer.signed_root(); + let epoch = slot.epoch(harness.spec.slots_per_epoch); + + transfer.signature = harness + .validator_sign(from as usize, &message[..], epoch, Domain::Transfer) + .expect("Unable to sign Transfer"); + + transfer +} + +/// Builds a `Deposit` this is valid for the given `BeaconChainHarness`. +/// +/// `index_offset` is used to ensure that `deposit.index == state.index` when adding multiple +/// deposits. +fn build_deposit( + harness: &BeaconChainHarness, + amount: u64, + index_offset: u64, +) -> (Deposit, Keypair) { + let keypair = Keypair::random(); + let proof_of_possession = create_proof_of_possession(&keypair); + let index = harness.beacon_chain.state.read().deposit_index + index_offset; + let withdrawal_credentials = Hash256::from_slice( + &get_withdrawal_credentials(&keypair.pk, harness.spec.bls_withdrawal_prefix_byte)[..], + ); + + let deposit = Deposit { + // Note: `branch` and `index` will need to be updated once the spec defines their + // validity. + branch: vec![], + index, + deposit_data: DepositData { + amount, + timestamp: 1, + deposit_input: DepositInput { + pubkey: keypair.pk.clone(), + withdrawal_credentials, + proof_of_possession, + }, + }, + }; + + (deposit, keypair) +} + +/// Builds a `VoluntaryExit` this is valid for the given `BeaconChainHarness`. 
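Both the harness's genesis deposits and `build_deposit` above derive the withdrawal credentials from the validator's own public key. Factored out, that derivation looks like the following (assuming `PublicKey` is re-exported by `types`, as the earlier imports in this diff indicate):

```rust
use bls::get_withdrawal_credentials;
use types::{ChainSpec, Hash256, PublicKey};

/// Withdrawal credentials tied to a validator's BLS public key, as used for
/// the genesis validators and for deposits created by `build_deposit`.
fn withdrawal_credentials(pubkey: &PublicKey, spec: &ChainSpec) -> Hash256 {
    Hash256::from_slice(
        &get_withdrawal_credentials(pubkey, spec.bls_withdrawal_prefix_byte)[..],
    )
}
```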
+fn build_exit(harness: &BeaconChainHarness, validator_index: u64) -> VoluntaryExit {
+    let epoch = harness
+        .beacon_chain
+        .state
+        .read()
+        .current_epoch(&harness.spec);
+
+    let mut exit = VoluntaryExit {
+        epoch,
+        validator_index,
+        signature: Signature::empty_signature(),
+    };
+
+    let message = exit.signed_root();
+
+    exit.signature = harness
+        .validator_sign(validator_index as usize, &message[..], epoch, Domain::Exit)
+        .expect("Unable to sign VoluntaryExit");
+
+    exit
+}
+
+/// Builds an `AttesterSlashing` for some `validator_indices`.
+///
+/// Signs the message using a `BeaconChainHarness`.
+fn build_double_vote_attester_slashing(
+    harness: &BeaconChainHarness,
+    validator_indices: &[u64],
+) -> AttesterSlashing {
+    let signer = |validator_index: u64, message: &[u8], epoch: Epoch, domain: Domain| {
+        harness
+            .validator_sign(validator_index as usize, message, epoch, domain)
+            .expect("Unable to sign AttesterSlashing")
+    };
+
+    AttesterSlashingBuilder::double_vote(validator_indices, signer)
+}
+
+/// Builds a `ProposerSlashing` for some `validator_index`.
+///
+/// Signs the message using a `BeaconChainHarness`.
+fn build_proposer_slashing(harness: &BeaconChainHarness, validator_index: u64) -> ProposerSlashing {
+    let signer = |validator_index: u64, message: &[u8], epoch: Epoch, domain: Domain| {
+        harness
+            .validator_sign(validator_index as usize, message, epoch, domain)
+            .expect("Unable to sign ProposerSlashing")
+    };
+
+    ProposerSlashingBuilder::double_vote(validator_index, signer, &harness.spec)
+}
diff --git a/beacon_node/beacon_chain/test_harness/src/test_case/config.rs b/beacon_node/beacon_chain/test_harness/src/test_case/config.rs
new file mode 100644
index 000000000..f336b9d53
--- /dev/null
+++ b/beacon_node/beacon_chain/test_harness/src/test_case/config.rs
@@ -0,0 +1,132 @@
+use super::yaml_helpers::{as_u64, as_usize, as_vec_u64};
+use types::*;
+use yaml_rust::Yaml;
+
+pub type ValidatorIndex = u64;
+pub type ValidatorIndices = Vec<ValidatorIndex>;
+pub type GweiAmount = u64;
+
+pub type DepositTuple = (SlotHeight, GweiAmount);
+pub type ExitTuple = (SlotHeight, ValidatorIndex);
+pub type ProposerSlashingTuple = (SlotHeight, ValidatorIndex);
+pub type AttesterSlashingTuple = (SlotHeight, ValidatorIndices);
+/// (slot_height, from, to, amount)
+pub type TransferTuple = (SlotHeight, ValidatorIndex, ValidatorIndex, GweiAmount);
+
+/// Defines the execution of a `BeaconStateHarness` across a series of slots.
+#[derive(Debug)]
+pub struct Config {
+    /// Initial validators.
+    pub deposits_for_chain_start: usize,
+    /// Number of slots in an epoch.
+    pub slots_per_epoch: Option<u64>,
+    /// Number of slots that should be skipped due to an inactive validator.
+    pub skip_slots: Option<Vec<u64>>,
+    /// Number of slots to build before ending execution.
+    pub num_slots: u64,
+    /// Deposits to be included during execution.
+    pub deposits: Option<Vec<DepositTuple>>,
+    /// Proposer slashings to be included during execution.
+    pub proposer_slashings: Option<Vec<ProposerSlashingTuple>>,
+    /// Attester slashings to be included during execution.
+    pub attester_slashings: Option<Vec<AttesterSlashingTuple>>,
+    /// Exits to be included during execution.
+    pub exits: Option<Vec<ExitTuple>>,
+    /// Transfers to be included during execution.
+    pub transfers: Option<Vec<TransferTuple>>,
+}
+
+impl Config {
+    /// Load from a YAML document.
+    ///
+    /// Expects to receive the `config` section of the document.
+    pub fn from_yaml(yaml: &Yaml) -> Self {
+        Self {
+            deposits_for_chain_start: as_usize(&yaml, "deposits_for_chain_start")
+                .expect("Must specify validator count"),
+            slots_per_epoch: as_u64(&yaml, "slots_per_epoch"),
+            num_slots: as_u64(&yaml, "num_slots").expect("Must specify `config.num_slots`"),
+            skip_slots: as_vec_u64(yaml, "skip_slots"),
+            deposits: parse_deposits(&yaml),
+            proposer_slashings: parse_proposer_slashings(&yaml),
+            attester_slashings: parse_attester_slashings(&yaml),
+            exits: parse_exits(&yaml),
+            transfers: parse_transfers(&yaml),
+        }
+    }
+}
+
+/// Parse the `transfers` section of the YAML document.
+fn parse_transfers(yaml: &Yaml) -> Option<Vec<TransferTuple>> {
+    let mut tuples = vec![];
+
+    for exit in yaml["transfers"].as_vec()? {
+        let slot = as_u64(exit, "slot").expect("Incomplete transfer (slot)");
+        let from = as_u64(exit, "from").expect("Incomplete transfer (from)");
+        let to = as_u64(exit, "to").expect("Incomplete transfer (to)");
+        let amount = as_u64(exit, "amount").expect("Incomplete transfer (amount)");
+
+        tuples.push((SlotHeight::from(slot), from, to, amount));
+    }
+
+    Some(tuples)
+}
+
+/// Parse the `exits` section of the YAML document.
+fn parse_exits(yaml: &Yaml) -> Option<Vec<ExitTuple>> {
+    let mut tuples = vec![];
+
+    for exit in yaml["exits"].as_vec()? {
+        let slot = as_u64(exit, "slot").expect("Incomplete exit (slot)");
+        let validator_index =
+            as_u64(exit, "validator_index").expect("Incomplete exit (validator_index)");
+
+        tuples.push((SlotHeight::from(slot), validator_index));
+    }
+
+    Some(tuples)
+}
+
+/// Parse the `attester_slashings` section of the YAML document.
+fn parse_attester_slashings(yaml: &Yaml) -> Option<Vec<AttesterSlashingTuple>> {
+    let mut slashings = vec![];
+
+    for slashing in yaml["attester_slashings"].as_vec()? {
+        let slot = as_u64(slashing, "slot").expect("Incomplete attester_slashing (slot)");
+        let validator_indices = as_vec_u64(slashing, "validator_indices")
+            .expect("Incomplete attester_slashing (validator_indices)");
+
+        slashings.push((SlotHeight::from(slot), validator_indices));
+    }
+
+    Some(slashings)
+}
+
+/// Parse the `proposer_slashings` section of the YAML document.
+fn parse_proposer_slashings(yaml: &Yaml) -> Option<Vec<ProposerSlashingTuple>> {
+    let mut slashings = vec![];
+
+    for slashing in yaml["proposer_slashings"].as_vec()? {
+        let slot = as_u64(slashing, "slot").expect("Incomplete proposer slashing (slot)");
+        let validator_index = as_u64(slashing, "validator_index")
+            .expect("Incomplete proposer slashing (validator_index)");
+
+        slashings.push((SlotHeight::from(slot), validator_index));
+    }
+
+    Some(slashings)
+}
+
+/// Parse the `deposits` section of the YAML document.
+fn parse_deposits(yaml: &Yaml) -> Option<Vec<DepositTuple>> {
+    let mut deposits = vec![];
+
+    for deposit in yaml["deposits"].as_vec()? {
+        let slot = as_u64(deposit, "slot").expect("Incomplete deposit (slot)");
+        let amount = as_u64(deposit, "amount").expect("Incomplete deposit (amount)");
+
+        deposits.push((SlotHeight::from(slot), amount))
+    }
+
+    Some(deposits)
+}
diff --git a/beacon_node/beacon_chain/test_harness/src/test_case/results.rs b/beacon_node/beacon_chain/test_harness/src/test_case/results.rs
new file mode 100644
index 000000000..596418c0f
--- /dev/null
+++ b/beacon_node/beacon_chain/test_harness/src/test_case/results.rs
@@ -0,0 +1,34 @@
+use super::state_check::StateCheck;
+use super::yaml_helpers::as_usize;
+use yaml_rust::Yaml;
+
+/// A series of tests to be carried out upon an `ExecutionResult`, returned from executing a
+/// `TestCase`.
+#[derive(Debug)] +pub struct Results { + pub num_skipped_slots: Option, + pub state_checks: Option>, +} + +impl Results { + /// Load from a YAML document. + /// + /// Expects the `results` section of the YAML document. + pub fn from_yaml(yaml: &Yaml) -> Self { + Self { + num_skipped_slots: as_usize(yaml, "num_skipped_slots"), + state_checks: parse_state_checks(yaml), + } + } +} + +/// Parse the `state_checks` section of the YAML document. +fn parse_state_checks(yaml: &Yaml) -> Option> { + let mut states = vec![]; + + for state_yaml in yaml["states"].as_vec()? { + states.push(StateCheck::from_yaml(state_yaml)); + } + + Some(states) +} diff --git a/beacon_node/beacon_chain/test_harness/src/test_case/state_check.rs b/beacon_node/beacon_chain/test_harness/src/test_case/state_check.rs new file mode 100644 index 000000000..4d2bfd07d --- /dev/null +++ b/beacon_node/beacon_chain/test_harness/src/test_case/state_check.rs @@ -0,0 +1,178 @@ +use super::yaml_helpers::{as_u64, as_usize, as_vec_u64}; +use log::info; +use types::*; +use yaml_rust::Yaml; + +type ValidatorIndex = u64; +type BalanceGwei = u64; + +type BalanceCheckTuple = (ValidatorIndex, String, BalanceGwei); + +/// Tests to be conducted upon a `BeaconState` object generated during the execution of a +/// `TestCase`. +#[derive(Debug)] +pub struct StateCheck { + /// Checked against `beacon_state.slot`. + pub slot: Slot, + /// Checked against `beacon_state.validator_registry.len()`. + pub num_validators: Option, + /// A list of validator indices which have been penalized. Must be in ascending order. + pub slashed_validators: Option>, + /// A list of validator indices which have been fully exited. Must be in ascending order. + pub exited_validators: Option>, + /// A list of validator indices which have had an exit initiated. Must be in ascending order. + pub exit_initiated_validators: Option>, + /// A list of balances to check. + pub balances: Option>, +} + +impl StateCheck { + /// Load from a YAML document. + /// + /// Expects the `state_check` section of the YAML document. + pub fn from_yaml(yaml: &Yaml) -> Self { + Self { + slot: Slot::from(as_u64(&yaml, "slot").expect("State must specify slot")), + num_validators: as_usize(&yaml, "num_validators"), + slashed_validators: as_vec_u64(&yaml, "slashed_validators"), + exited_validators: as_vec_u64(&yaml, "exited_validators"), + exit_initiated_validators: as_vec_u64(&yaml, "exit_initiated_validators"), + balances: parse_balances(&yaml), + } + } + + /// Performs all checks against a `BeaconState` + /// + /// # Panics + /// + /// Panics with an error message if any test fails. + pub fn assert_valid(&self, state: &BeaconState, spec: &ChainSpec) { + let state_epoch = state.slot.epoch(spec.slots_per_epoch); + + info!("Running state check for slot height {}.", self.slot); + + // Check the state slot. + assert_eq!( + self.slot, + state.slot - spec.genesis_epoch.start_slot(spec.slots_per_epoch), + "State slot is invalid." + ); + + if let Some(num_validators) = self.num_validators { + assert_eq!( + state.validator_registry.len(), + num_validators, + "State validator count != expected." + ); + info!("OK: num_validators = {}.", num_validators); + } + + // Check for slashed validators. 
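Taken together, `Config::from_yaml`, `Results::from_yaml` and `StateCheck::from_yaml` above imply a YAML test-case layout roughly like the one below. The leaf field names are lifted from the parsing code; the overall document layout is an assumption about the repository's test files. The sketch uses only `yaml-rust`, the same parser the harness uses:

```rust
use yaml_rust::YamlLoader;

fn main() {
    // Assumed overall layout; the leaf keys come from the parsers above.
    let doc = r#"
config:
  deposits_for_chain_start: 8
  num_slots: 64
  skip_slots: [10, 11]
  deposits:
    - slot: 5
      amount: 32000000000
results:
  num_skipped_slots: 2
  states:
    - slot: 63
      num_validators: 9
"#;

    let yaml = &YamlLoader::load_from_str(doc).unwrap()[0];
    assert_eq!(yaml["config"]["num_slots"].as_i64(), Some(64));
    assert_eq!(yaml["config"]["deposits"][0]["amount"].as_i64(), Some(32_000_000_000));
    assert_eq!(yaml["results"]["states"][0]["num_validators"].as_i64(), Some(9));
}
```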
+        if let Some(ref slashed_validators) = self.slashed_validators {
+            let actually_slashed_validators: Vec<u64> = state
+                .validator_registry
+                .iter()
+                .enumerate()
+                .filter_map(|(i, validator)| {
+                    if validator.slashed {
+                        Some(i as u64)
+                    } else {
+                        None
+                    }
+                })
+                .collect();
+            assert_eq!(
+                actually_slashed_validators, *slashed_validators,
+                "Slashed validators != expected."
+            );
+            info!("OK: slashed_validators = {:?}.", slashed_validators);
+        }
+
+        // Check for exited validators.
+        if let Some(ref exited_validators) = self.exited_validators {
+            let actually_exited_validators: Vec<u64> = state
+                .validator_registry
+                .iter()
+                .enumerate()
+                .filter_map(|(i, validator)| {
+                    if validator.is_exited_at(state_epoch) {
+                        Some(i as u64)
+                    } else {
+                        None
+                    }
+                })
+                .collect();
+            assert_eq!(
+                actually_exited_validators, *exited_validators,
+                "Exited validators != expected."
+            );
+            info!("OK: exited_validators = {:?}.", exited_validators);
+        }
+
+        // Check for validators that have initiated exit.
+        if let Some(ref exit_initiated_validators) = self.exit_initiated_validators {
+            let actual: Vec<u64> = state
+                .validator_registry
+                .iter()
+                .enumerate()
+                .filter_map(|(i, validator)| {
+                    if validator.initiated_exit {
+                        Some(i as u64)
+                    } else {
+                        None
+                    }
+                })
+                .collect();
+            assert_eq!(
+                actual, *exit_initiated_validators,
+                "Exit initiated validators != expected."
+            );
+            info!(
+                "OK: exit_initiated_validators = {:?}.",
+                exit_initiated_validators
+            );
+        }
+
+        // Check validator balances.
+        if let Some(ref balances) = self.balances {
+            for (index, comparison, expected) in balances {
+                let actual = *state
+                    .validator_balances
+                    .get(*index as usize)
+                    .expect("Balance check specifies unknown validator");
+
+                let result = match comparison.as_ref() {
+                    "eq" => actual == *expected,
+                    _ => panic!("Unknown balance comparison (use `eq`)"),
+                };
+                assert!(
+                    result,
+                    format!(
+                        "Validator balance for {}: {} !{} {}.",
+                        index, actual, comparison, expected
+                    )
+                );
+                info!("OK: validator balance for {:?}.", index);
+            }
+        }
+    }
+}
+
+/// Parse the `balances` section of the YAML document.
+fn parse_balances(yaml: &Yaml) -> Option<Vec<BalanceCheckTuple>> {
+    let mut tuples = vec![];
+
+    for exit in yaml["balances"].as_vec()?
{ + let from = + as_u64(exit, "validator_index").expect("Incomplete balance check (validator_index)"); + let comparison = exit["comparison"] + .clone() + .into_string() + .expect("Incomplete balance check (amount)"); + let balance = as_u64(exit, "balance").expect("Incomplete balance check (balance)"); + + tuples.push((from, comparison, balance)); + } + + Some(tuples) +} diff --git a/beacon_node/beacon_chain/test_harness/src/test_case/yaml_helpers.rs b/beacon_node/beacon_chain/test_harness/src/test_case/yaml_helpers.rs new file mode 100644 index 000000000..c499b3c0f --- /dev/null +++ b/beacon_node/beacon_chain/test_harness/src/test_case/yaml_helpers.rs @@ -0,0 +1,19 @@ +use yaml_rust::Yaml; + +pub fn as_usize(yaml: &Yaml, key: &str) -> Option { + yaml[key].as_i64().and_then(|n| Some(n as usize)) +} + +pub fn as_u64(yaml: &Yaml, key: &str) -> Option { + yaml[key].as_i64().and_then(|n| Some(n as u64)) +} + +pub fn as_vec_u64(yaml: &Yaml, key: &str) -> Option> { + yaml[key].clone().into_vec().and_then(|vec| { + Some( + vec.iter() + .map(|item| item.as_i64().unwrap() as u64) + .collect(), + ) + }) +} diff --git a/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_beacon_node.rs b/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_beacon_node.rs index 06d3e7c72..d2de354d7 100644 --- a/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_beacon_node.rs +++ b/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_beacon_node.rs @@ -80,8 +80,8 @@ impl BeaconBlockNode for DirectBeaconN let (block, _state) = self .beacon_chain .produce_block(randao_reveal.clone()) - .ok_or_else(|| { - BeaconBlockNodeError::RemoteFailure("Did not produce block.".to_string()) + .map_err(|e| { + BeaconBlockNodeError::RemoteFailure(format!("Did not produce block: {:?}", e)) })?; if block.slot == slot { diff --git a/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_duties.rs b/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_duties.rs index 5bed59531..dec93c334 100644 --- a/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_duties.rs +++ b/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_duties.rs @@ -9,7 +9,7 @@ use db::ClientDB; use fork_choice::ForkChoice; use slot_clock::SlotClock; use std::sync::Arc; -use types::{PublicKey, Slot}; +use types::{Fork, PublicKey, Slot}; /// Connects directly to a borrowed `BeaconChain` and reads attester/proposer duties directly from /// it. @@ -40,6 +40,10 @@ impl ProducerDutiesReader for DirectDu Err(_) => Err(ProducerDutiesReaderError::UnknownEpoch), } } + + fn fork(&self) -> Result { + Ok(self.beacon_chain.state.read().fork.clone()) + } } impl AttesterDutiesReader for DirectDuties { diff --git a/beacon_node/beacon_chain/test_harness/src/validator_harness/local_signer.rs b/beacon_node/beacon_chain/test_harness/src/validator_harness/local_signer.rs index aa46a1c9a..803af5045 100644 --- a/beacon_node/beacon_chain/test_harness/src/validator_harness/local_signer.rs +++ b/beacon_node/beacon_chain/test_harness/src/validator_harness/local_signer.rs @@ -1,47 +1,36 @@ use attester::Signer as AttesterSigner; use block_proposer::Signer as BlockProposerSigner; -use std::sync::RwLock; use types::{Keypair, Signature}; /// A test-only struct used to perform signing for a proposer or attester. pub struct LocalSigner { keypair: Keypair, - should_sign: RwLock, } impl LocalSigner { /// Produce a new TestSigner with signing enabled by default. 
pub fn new(keypair: Keypair) -> Self { - Self { - keypair, - should_sign: RwLock::new(true), - } - } - - /// If set to `false`, the service will refuse to sign all messages. Otherwise, all messages - /// will be signed. - pub fn enable_signing(&self, enabled: bool) { - *self.should_sign.write().unwrap() = enabled; + Self { keypair } } /// Sign some message. - fn bls_sign(&self, message: &[u8]) -> Option { - Some(Signature::new(message, &self.keypair.sk)) + fn bls_sign(&self, message: &[u8], domain: u64) -> Option { + Some(Signature::new(message, domain, &self.keypair.sk)) } } impl BlockProposerSigner for LocalSigner { - fn sign_block_proposal(&self, message: &[u8]) -> Option { - self.bls_sign(message) + fn sign_block_proposal(&self, message: &[u8], domain: u64) -> Option { + self.bls_sign(message, domain) } - fn sign_randao_reveal(&self, message: &[u8]) -> Option { - self.bls_sign(message) + fn sign_randao_reveal(&self, message: &[u8], domain: u64) -> Option { + self.bls_sign(message, domain) } } impl AttesterSigner for LocalSigner { - fn sign_attestation_message(&self, message: &[u8]) -> Option { - self.bls_sign(message) + fn sign_attestation_message(&self, message: &[u8], domain: u64) -> Option { + self.bls_sign(message, domain) } } diff --git a/beacon_node/beacon_chain/test_harness/src/validator_harness/mod.rs b/beacon_node/beacon_chain/test_harness/src/validator_harness/mod.rs index 60c2f8ecf..91a679463 100644 --- a/beacon_node/beacon_chain/test_harness/src/validator_harness/mod.rs +++ b/beacon_node/beacon_chain/test_harness/src/validator_harness/mod.rs @@ -28,24 +28,28 @@ pub enum AttestationProduceError { PollError(AttestationPollError), } +type TestingBlockProducer = BlockProducer< + TestingSlotClock, + DirectBeaconNode>, + DirectDuties>, + LocalSigner, +>; + +type TestingAttester = Attester< + TestingSlotClock, + DirectBeaconNode>, + DirectDuties>, + LocalSigner, +>; + /// A `BlockProducer` and `Attester` which sign using a common keypair. /// /// The test validator connects directly to a borrowed `BeaconChain` struct. It is useful for /// testing that the core proposer and attester logic is functioning. Also for supporting beacon /// chain tests. pub struct ValidatorHarness { - pub block_producer: BlockProducer< - TestingSlotClock, - DirectBeaconNode>, - DirectDuties>, - LocalSigner, - >, - pub attester: Attester< - TestingSlotClock, - DirectBeaconNode>, - DirectDuties>, - LocalSigner, - >, + pub block_producer: TestingBlockProducer, + pub attester: TestingAttester, pub spec: Arc, pub epoch_map: Arc>>, pub keypair: Keypair, diff --git a/beacon_node/beacon_chain/test_harness/tests/chain.rs b/beacon_node/beacon_chain/test_harness/tests/chain.rs index 1a08ffcf1..e72c3a5aa 100644 --- a/beacon_node/beacon_chain/test_harness/tests/chain.rs +++ b/beacon_node/beacon_chain/test_harness/tests/chain.rs @@ -29,15 +29,16 @@ fn it_can_produce_past_first_epoch_boundary() { debug!("Harness built, tests starting.."); - let blocks = harness.spec.epoch_length * 2 + 1; + let blocks = harness.spec.slots_per_epoch * 2 + 1; for i in 0..blocks { harness.advance_chain_with_block(); debug!("Produced block {}/{}.", i + 1, blocks); } + + harness.run_fork_choice(); + let dump = harness.chain_dump().expect("Chain dump failed."); assert_eq!(dump.len() as u64, blocks + 1); // + 1 for genesis block. 
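The `TestingBlockProducer`/`TestingAttester` aliases introduced in `validator_harness/mod.rs` above are a standard way to keep generic-heavy fields readable; a self-contained illustration of the pattern (all names here are stand-ins, not the harness's real types):

```rust
// Stand-in generic struct playing the role of `BlockProducer<...>` and its parameters.
struct Producer<Clock, Node> {
    clock: Clock,
    node: Node,
}

struct TestClock;
struct DirectNode;

// Name the full instantiation once...
type TestingProducer = Producer<TestClock, DirectNode>;

// ...so the struct that owns it stays short and readable.
struct Harness {
    block_producer: TestingProducer,
}

fn main() {
    let harness = Harness {
        block_producer: Producer {
            clock: TestClock,
            node: DirectNode,
        },
    };
    let _ = &harness.block_producer.clock;
    let _ = &harness.block_producer.node;
}
```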
- - harness.dump_to_file("/tmp/chaindump.json".to_string(), &dump); } diff --git a/beacon_node/db/src/stores/beacon_block_store.rs b/beacon_node/db/src/stores/beacon_block_store.rs index 8a1fc2b0d..bd5149cfd 100644 --- a/beacon_node/db/src/stores/beacon_block_store.rs +++ b/beacon_node/db/src/stores/beacon_block_store.rs @@ -134,9 +134,9 @@ mod tests { let store = BeaconBlockStore::new(db.clone()); let ssz = "definitly not a valid block".as_bytes(); - let hash = &Hash256::from("some hash".as_bytes()); + let hash = &Hash256::from([0xAA; 32]); - db.put(DB_COLUMN, hash, ssz).unwrap(); + db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); assert_eq!( store.block_at_slot(hash, Slot::from(42_u64)), Err(BeaconBlockAtSlotError::DBError( @@ -151,10 +151,10 @@ mod tests { let store = BeaconBlockStore::new(db.clone()); let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from("some hash".as_bytes()); - let other_hash = &Hash256::from("another hash".as_bytes()); + let hash = &Hash256::from([0xAA; 32]); + let other_hash = &Hash256::from([0xBB; 32]); - db.put(DB_COLUMN, hash, ssz).unwrap(); + db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); assert_eq!( store.block_at_slot(other_hash, Slot::from(42_u64)), Err(BeaconBlockAtSlotError::UnknownBeaconBlock(*other_hash)) @@ -169,18 +169,15 @@ mod tests { let thread_count = 10; let write_count = 10; - // We're expecting the product of these numbers to fit in one byte. - assert!(thread_count * write_count <= 255); - let mut handles = vec![]; for t in 0..thread_count { let wc = write_count; let bs = bs.clone(); let handle = thread::spawn(move || { for w in 0..wc { - let key = (t * w) as u8; + let key = t * w; let val = 42; - bs.put(&[key][..].into(), &vec![val]).unwrap(); + bs.put(&Hash256::from_low_u64_le(key), &vec![val]).unwrap(); } }); handles.push(handle); @@ -192,9 +189,9 @@ mod tests { for t in 0..thread_count { for w in 0..write_count { - let key = (t * w) as u8; - assert!(bs.exists(&[key][..].into()).unwrap()); - let val = bs.get(&[key][..].into()).unwrap().unwrap(); + let key = t * w; + assert!(bs.exists(&Hash256::from_low_u64_le(key)).unwrap()); + let val = bs.get(&Hash256::from_low_u64_le(key)).unwrap().unwrap(); assert_eq!(vec![42], val); } } @@ -208,19 +205,20 @@ mod tests { // Specify test block parameters. let hashes = [ - Hash256::from(&[0; 32][..]), - Hash256::from(&[1; 32][..]), - Hash256::from(&[2; 32][..]), - Hash256::from(&[3; 32][..]), - Hash256::from(&[4; 32][..]), + Hash256::from([0; 32]), + Hash256::from([1; 32]), + Hash256::from([2; 32]), + Hash256::from([3; 32]), + Hash256::from([4; 32]), ]; let parent_hashes = [ - Hash256::from(&[255; 32][..]), // Genesis block. - Hash256::from(&[0; 32][..]), - Hash256::from(&[1; 32][..]), - Hash256::from(&[2; 32][..]), - Hash256::from(&[3; 32][..]), + Hash256::from([255; 32]), // Genesis block. + Hash256::from([0; 32]), + Hash256::from([1; 32]), + Hash256::from([2; 32]), + Hash256::from([3; 32]), ]; + let unknown_hash = Hash256::from([101; 32]); // different from all above let slots: Vec = vec![0, 1, 3, 4, 5].iter().map(|x| Slot::new(*x)).collect(); // Generate a vec of random blocks and store them in the DB. 
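The test changes above drop string-seeded hashes in favour of fixed-size constructors (`Hash256::from([u8; 32])`, `Hash256::from_low_u64_le`) and hand raw bytes to the DB via `as_bytes()`. A short sketch of those calls, assuming `types::Hash256` is the 32-byte `H256` from the `ethereum-types` crate:

```rust
use ethereum_types::H256;

fn main() {
    // Every byte set to 0xAA, matching the constant-filled keys used in the tests.
    let key_a = H256::from([0xAA; 32]);
    // A u64 written into the low-order bytes (little-endian), the rest left zero.
    let key_b = H256::from_low_u64_le(7);

    // `as_bytes()` is the raw byte view that the database layer expects.
    assert_eq!(key_a.as_bytes().len(), 32);
    assert_eq!(key_a.as_bytes()[0], 0xAA);
    assert_ne!(key_a, key_b);
}
```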
@@ -233,7 +231,7 @@ mod tests { block.slot = slots[i]; let ssz = ssz_encode(&block); - db.put(DB_COLUMN, &hashes[i], &ssz).unwrap(); + db.put(DB_COLUMN, hashes[i].as_bytes(), &ssz).unwrap(); blocks.push(block); } @@ -255,11 +253,10 @@ mod tests { let ssz = bs.block_at_slot(&hashes[4], Slot::new(6)).unwrap(); assert_eq!(ssz, None); - let bad_hash = &Hash256::from("unknown".as_bytes()); - let ssz = bs.block_at_slot(bad_hash, Slot::new(2)); + let ssz = bs.block_at_slot(&unknown_hash, Slot::new(2)); assert_eq!( ssz, - Err(BeaconBlockAtSlotError::UnknownBeaconBlock(*bad_hash)) + Err(BeaconBlockAtSlotError::UnknownBeaconBlock(unknown_hash)) ); } } diff --git a/beacon_node/db/src/stores/macros.rs b/beacon_node/db/src/stores/macros.rs index 36b8aef8e..6c53e40ee 100644 --- a/beacon_node/db/src/stores/macros.rs +++ b/beacon_node/db/src/stores/macros.rs @@ -2,25 +2,25 @@ macro_rules! impl_crud_for_store { ($store: ident, $db_column: expr) => { impl $store { pub fn put(&self, hash: &Hash256, ssz: &[u8]) -> Result<(), DBError> { - self.db.put($db_column, hash, ssz) + self.db.put($db_column, hash.as_bytes(), ssz) } pub fn get(&self, hash: &Hash256) -> Result>, DBError> { - self.db.get($db_column, hash) + self.db.get($db_column, hash.as_bytes()) } pub fn exists(&self, hash: &Hash256) -> Result { - self.db.exists($db_column, hash) + self.db.exists($db_column, hash.as_bytes()) } pub fn delete(&self, hash: &Hash256) -> Result<(), DBError> { - self.db.delete($db_column, hash) + self.db.delete($db_column, hash.as_bytes()) } } }; } -#[allow(unused_macros)] +#[cfg(test)] macro_rules! test_crud_for_store { ($store: ident, $db_column: expr) => { #[test] @@ -29,10 +29,10 @@ macro_rules! test_crud_for_store { let store = $store::new(db.clone()); let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from("some hash".as_bytes()); + let hash = &Hash256::from([0xAA; 32]); store.put(hash, ssz).unwrap(); - assert_eq!(db.get(DB_COLUMN, hash).unwrap().unwrap(), ssz); + assert_eq!(db.get(DB_COLUMN, hash.as_bytes()).unwrap().unwrap(), ssz); } #[test] @@ -41,9 +41,9 @@ macro_rules! test_crud_for_store { let store = $store::new(db.clone()); let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from("some hash".as_bytes()); + let hash = &Hash256::from([0xAA; 32]); - db.put(DB_COLUMN, hash, ssz).unwrap(); + db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); assert_eq!(store.get(hash).unwrap().unwrap(), ssz); } @@ -53,10 +53,10 @@ macro_rules! test_crud_for_store { let store = $store::new(db.clone()); let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from("some hash".as_bytes()); - let other_hash = &Hash256::from("another hash".as_bytes()); + let hash = &Hash256::from([0xAA; 32]); + let other_hash = &Hash256::from([0xBB; 32]); - db.put(DB_COLUMN, other_hash, ssz).unwrap(); + db.put(DB_COLUMN, other_hash.as_bytes(), ssz).unwrap(); assert_eq!(store.get(hash).unwrap(), None); } @@ -66,9 +66,9 @@ macro_rules! test_crud_for_store { let store = $store::new(db.clone()); let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from("some hash".as_bytes()); + let hash = &Hash256::from([0xAA; 32]); - db.put(DB_COLUMN, hash, ssz).unwrap(); + db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); assert!(store.exists(hash).unwrap()); } @@ -78,10 +78,10 @@ macro_rules! 
test_crud_for_store { let store = $store::new(db.clone()); let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from("some hash".as_bytes()); - let other_hash = &Hash256::from("another hash".as_bytes()); + let hash = &Hash256::from([0xAA; 32]); + let other_hash = &Hash256::from([0xBB; 32]); - db.put(DB_COLUMN, hash, ssz).unwrap(); + db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); assert!(!store.exists(other_hash).unwrap()); } @@ -91,13 +91,13 @@ macro_rules! test_crud_for_store { let store = $store::new(db.clone()); let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from("some hash".as_bytes()); + let hash = &Hash256::from([0xAA; 32]); - db.put(DB_COLUMN, hash, ssz).unwrap(); - assert!(db.exists(DB_COLUMN, hash).unwrap()); + db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); + assert!(db.exists(DB_COLUMN, hash.as_bytes()).unwrap()); store.delete(hash).unwrap(); - assert!(!db.exists(DB_COLUMN, hash).unwrap()); + assert!(!db.exists(DB_COLUMN, hash.as_bytes()).unwrap()); } }; } diff --git a/beacon_node/db/src/stores/pow_chain_store.rs b/beacon_node/db/src/stores/pow_chain_store.rs index a7c77bab5..5c8b97907 100644 --- a/beacon_node/db/src/stores/pow_chain_store.rs +++ b/beacon_node/db/src/stores/pow_chain_store.rs @@ -37,7 +37,7 @@ mod tests { let db = Arc::new(MemoryDB::open()); let store = PoWChainStore::new(db.clone()); - let hash = &Hash256::from("some hash".as_bytes()).to_vec(); + let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec(); store.put_block_hash(hash).unwrap(); assert!(db.exists(DB_COLUMN, hash).unwrap()); @@ -48,7 +48,7 @@ mod tests { let db = Arc::new(MemoryDB::open()); let store = PoWChainStore::new(db.clone()); - let hash = &Hash256::from("some hash".as_bytes()).to_vec(); + let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec(); db.put(DB_COLUMN, hash, &[0]).unwrap(); assert!(store.block_hash_exists(hash).unwrap()); @@ -59,8 +59,8 @@ mod tests { let db = Arc::new(MemoryDB::open()); let store = PoWChainStore::new(db.clone()); - let hash = &Hash256::from("some hash".as_bytes()).to_vec(); - let other_hash = &Hash256::from("another hash".as_bytes()).to_vec(); + let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec(); + let other_hash = &Hash256::from([0xBB; 32]).as_bytes().to_vec(); db.put(DB_COLUMN, hash, &[0]).unwrap(); assert!(!store.block_hash_exists(other_hash).unwrap()); diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index b9ef2c8a7..072315b6b 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -78,7 +78,7 @@ fn main() { // Slot clock let genesis_time = 1_549_935_547; // 12th Feb 2018 (arbitrary value in the past). - let slot_clock = SystemTimeSlotClock::new(genesis_time, spec.slot_duration) + let slot_clock = SystemTimeSlotClock::new(genesis_time, spec.seconds_per_slot) .expect("Unable to load SystemTimeSlotClock"); // Choose the fork choice let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone()); diff --git a/docs/lighthouse.md b/docs/lighthouse.md index 8ca2387f8..16da13b56 100644 --- a/docs/lighthouse.md +++ b/docs/lighthouse.md @@ -67,7 +67,7 @@ into individual crates wherever possible. Generally, tests can be kept in the same file, as is typical in Rust. Integration tests should be placed in the `tests` directory in the crate's -root. Particularity large (line-count) tests should be placed into a separate +root. Particularly large (line-count) tests should be placed into a separate file. A function is not considered complete until a test exists for it. 
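The contributor docs above ask for unit tests alongside the code and integration tests under `tests/`; the in-file convention looks like this minimal, generic example:

```rust
/// A trivial function used only to illustrate the in-file test convention.
pub fn add_one(x: u64) -> u64 {
    x + 1
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn adds_one() {
        assert_eq!(add_one(41), 42);
    }
}
```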
We produce diff --git a/docs/onboarding.md b/docs/onboarding.md index 8af3b0a83..275f95484 100644 --- a/docs/onboarding.md +++ b/docs/onboarding.md @@ -122,7 +122,7 @@ project. * **Module**: A collection of items: functions, structs, traits, and even other modules. Modules allow you to hierarchically split code into logical units and manage visibility. -* **Attribute**: Metadaata applied to some module, crate or item. +* **Attribute**: Metadata applied to some module, crate or item. * **Macros**: Macros are powerful meta-programming statements that get expanded into source code that gets compiled with the rest of the code (Unlike `C` macros that are pre-processed, Rust macros form an Abstract Syntax Tree). @@ -185,7 +185,7 @@ check your code. | Function / Method | ``snake_case`` | | Macro Names | ``snake_case`` | | Constants | ``SCREAMING_SNAKE_CASE`` | -| Forbidden name | Trialing Underscore: ``name_`` | +| Forbidden name | Trailing Underscore: ``name_`` | Other general rust docs: diff --git a/eth2/README.md b/eth2/README.md new file mode 100644 index 000000000..cf041e987 --- /dev/null +++ b/eth2/README.md @@ -0,0 +1,37 @@ +# Ethereum 2.0 Common Crates + +Rust crates containing logic common across the Lighthouse project. + +## Per-Crate Summary + +- [`attester/`](attester/): Core logic for attesting to beacon and shard blocks. +- [`block_proposer/`](block_proposer/): Core logic for proposing beacon blocks. +- [`fork_choice/`](fork_choice/): A collection of fork-choice algorithms for + the Beacon Chain. +- [`state_processing/`](state_processing/): Provides per-slot, per-block, and + per-epoch state processing. +- [`types/`](types/): Defines base Ethereum 2.0 types (e.g., `BeaconBlock`, + `BeaconState`, etc). +- [`utils/`](utils/): + - [`bls`](utils/bls/): A wrapper for an external BLS encryption library. + - [`boolean-bitfield`](utils/boolean-bitfield/): Provides an expandable vector + of bools, specifically for use in Eth2. + - [`fisher-yates-shuffle`](utils/fisher-yates-shuffle/): shuffles a list + pseudo-randomly. + - [`hashing`](utils/hashing/): A wrapper for external hashing libraries. + - [`honey-badger-split`](utils/honey-badger-split/): Splits a list in `n` + parts without giving AF about the length of the list, `n`, or anything + else. + - [`int-to-bytes`](utils/int-to-bytes/): Simple library which converts ints + into byte-strings of various lengths. + - [`slot_clock`](utils/slot_clock/): translates the system time into Beacon + Chain "slots". (Also provides another slot clock that's useful during + testing.) + - [`ssz`](utils/ssz/): an implementation of the SimpleSerialize + serialization/deserialization protocol used by Eth 2.0. + - [`ssz_derive`](utils/ssz_derive/): provides procedural macros for + deriving SSZ `Encodable`, `Decodable`, and `TreeHash` methods. + - [`swap_or_not_shuffle`](utils/swap_or_not_shuffle/): a list-shuffling + method which is slow, but allows for a subset of indices to be shuffled. + - [`test_random_derive`](utils/test_random_derive/): provides procedural + macros for deriving the `TestRandom` trait defined in `types`. 
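The `slot_clock` entry above (and the `SystemTimeSlotClock::new(genesis_time, spec.seconds_per_slot)` call earlier in this change) boils down to simple wall-clock arithmetic; an illustrative std-only sketch, not the crate's actual API:

```rust
/// present_slot = (now - genesis_time) / seconds_per_slot, or None before genesis.
fn present_slot(now: u64, genesis_time: u64, seconds_per_slot: u64) -> Option<u64> {
    now.checked_sub(genesis_time)
        .map(|elapsed| elapsed / seconds_per_slot)
}

fn main() {
    let genesis_time = 1_549_935_547;
    // 25 seconds after genesis with 6-second slots puts us in slot 4.
    assert_eq!(present_slot(genesis_time + 25, genesis_time, 6), Some(4));
    // Before genesis there is no current slot.
    assert_eq!(present_slot(genesis_time - 10, genesis_time, 6), None);
}
```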
diff --git a/eth2/attester/src/lib.rs b/eth2/attester/src/lib.rs index 7352dd2ea..8838f022d 100644 --- a/eth2/attester/src/lib.rs +++ b/eth2/attester/src/lib.rs @@ -2,14 +2,16 @@ pub mod test_utils; mod traits; use slot_clock::SlotClock; +use ssz::TreeHash; use std::sync::Arc; -use types::{AttestationData, FreeAttestation, Signature, Slot}; +use types::{AttestationData, AttestationDataAndCustodyBit, FreeAttestation, Signature, Slot}; pub use self::traits::{ BeaconNode, BeaconNodeError, DutiesReader, DutiesReaderError, PublishOutcome, Signer, }; const PHASE_0_CUSTODY_BIT: bool = false; +const DOMAIN_ATTESTATION: u64 = 1; #[derive(Debug, PartialEq)] pub enum PollOutcome { @@ -136,8 +138,14 @@ impl Attester Option { self.store_produce(attestation_data); + let message = AttestationDataAndCustodyBit { + data: attestation_data.clone(), + custody_bit: PHASE_0_CUSTODY_BIT, + } + .hash_tree_root(); + self.signer - .sign_attestation_message(&attestation_data.signable_message(PHASE_0_CUSTODY_BIT)[..]) + .sign_attestation_message(&message[..], DOMAIN_ATTESTATION) } /// Returns `true` if signing some attestation_data is safe (non-slashable). @@ -192,9 +200,9 @@ mod tests { let beacon_node = Arc::new(SimulatedBeaconNode::default()); let signer = Arc::new(LocalSigner::new(Keypair::random())); - let mut duties = EpochMap::new(spec.epoch_length); + let mut duties = EpochMap::new(spec.slots_per_epoch); let attest_slot = Slot::new(100); - let attest_epoch = attest_slot / spec.epoch_length; + let attest_epoch = attest_slot / spec.slots_per_epoch; let attest_shard = 12; duties.insert_attestation_shard(attest_slot, attest_shard); duties.set_validator_index(Some(2)); @@ -240,7 +248,7 @@ mod tests { ); // In an epoch without known duties... - let slot = (attest_epoch + 1) * spec.epoch_length; + let slot = (attest_epoch + 1) * spec.slots_per_epoch; slot_clock.set_slot(slot.into()); assert_eq!( attester.poll(), diff --git a/eth2/attester/src/test_utils/epoch_map.rs b/eth2/attester/src/test_utils/epoch_map.rs index f0dc4312e..0b5827d64 100644 --- a/eth2/attester/src/test_utils/epoch_map.rs +++ b/eth2/attester/src/test_utils/epoch_map.rs @@ -3,22 +3,22 @@ use std::collections::HashMap; use types::{Epoch, Slot}; pub struct EpochMap { - epoch_length: u64, + slots_per_epoch: u64, validator_index: Option, map: HashMap, } impl EpochMap { - pub fn new(epoch_length: u64) -> Self { + pub fn new(slots_per_epoch: u64) -> Self { Self { - epoch_length, + slots_per_epoch, validator_index: None, map: HashMap::new(), } } pub fn insert_attestation_shard(&mut self, slot: Slot, shard: u64) { - let epoch = slot.epoch(self.epoch_length); + let epoch = slot.epoch(self.slots_per_epoch); self.map.insert(epoch, (slot, shard)); } @@ -29,7 +29,7 @@ impl EpochMap { impl DutiesReader for EpochMap { fn attestation_shard(&self, slot: Slot) -> Result, DutiesReaderError> { - let epoch = slot.epoch(self.epoch_length); + let epoch = slot.epoch(self.slots_per_epoch); match self.map.get(&epoch) { Some((attest_slot, attest_shard)) if *attest_slot == slot => Ok(Some(*attest_shard)), diff --git a/eth2/attester/src/test_utils/local_signer.rs b/eth2/attester/src/test_utils/local_signer.rs index c256d1050..896d90775 100644 --- a/eth2/attester/src/test_utils/local_signer.rs +++ b/eth2/attester/src/test_utils/local_signer.rs @@ -25,7 +25,7 @@ impl LocalSigner { } impl Signer for LocalSigner { - fn sign_attestation_message(&self, message: &[u8]) -> Option { - Some(Signature::new(message, &self.keypair.sk)) + fn sign_attestation_message(&self, message: &[u8], 
domain: u64) -> Option { + Some(Signature::new(message, domain, &self.keypair.sk)) } } diff --git a/eth2/attester/src/traits.rs b/eth2/attester/src/traits.rs index 53bce3aaa..6062460cb 100644 --- a/eth2/attester/src/traits.rs +++ b/eth2/attester/src/traits.rs @@ -45,5 +45,5 @@ pub trait DutiesReader: Send + Sync { /// Signs message using an internally-maintained private key. pub trait Signer { - fn sign_attestation_message(&self, message: &[u8]) -> Option; + fn sign_attestation_message(&self, message: &[u8], domain: u64) -> Option; } diff --git a/eth2/block_proposer/src/lib.rs b/eth2/block_proposer/src/lib.rs index cf71edd99..5cddbaedc 100644 --- a/eth2/block_proposer/src/lib.rs +++ b/eth2/block_proposer/src/lib.rs @@ -1,10 +1,10 @@ pub mod test_utils; mod traits; -use int_to_bytes::int_to_bytes32; use slot_clock::SlotClock; +use ssz::{SignedRoot, TreeHash}; use std::sync::Arc; -use types::{BeaconBlock, ChainSpec, Slot}; +use types::{BeaconBlock, ChainSpec, Domain, Hash256, Proposal, Slot}; pub use self::traits::{ BeaconNode, BeaconNodeError, DutiesReader, DutiesReaderError, PublishOutcome, Signer, @@ -28,6 +28,8 @@ pub enum PollOutcome { SignerRejection(Slot), /// The public key for this validator is not an active validator. ValidatorIsUnknown(Slot), + /// Unable to determine a `Fork` for signature domain generation. + UnableToGetFork(Slot), } #[derive(Debug, PartialEq)] @@ -130,11 +132,20 @@ impl BlockProducer Result { + let fork = match self.epoch_map.fork() { + Ok(fork) => fork, + Err(_) => return Ok(PollOutcome::UnableToGetFork(slot)), + }; + let randao_reveal = { // TODO: add domain, etc to this message. Also ensure result matches `into_to_bytes32`. - let message = int_to_bytes32(slot.epoch(self.spec.epoch_length).as_u64()); + let message = slot.epoch(self.spec.slots_per_epoch).hash_tree_root(); - match self.signer.sign_randao_reveal(&message) { + match self.signer.sign_randao_reveal( + &message, + self.spec + .get_domain(slot.epoch(self.spec.slots_per_epoch), Domain::Randao, &fork), + ) { None => return Ok(PollOutcome::SignerRejection(slot)), Some(signature) => signature, } @@ -145,7 +156,12 @@ impl BlockProducer BlockProducer Option { + fn sign_block(&mut self, mut block: BeaconBlock, domain: u64) -> Option { self.store_produce(&block); + let proposal = Proposal { + slot: block.slot, + shard: self.spec.beacon_chain_shard_number, + block_root: Hash256::from_slice(&block.signed_root()[..]), + signature: block.signature.clone(), + }; + match self .signer - .sign_block_proposal(&block.proposal_root(&self.spec)[..]) + .sign_block_proposal(&proposal.signed_root()[..], domain) { None => None, Some(signature) => { @@ -230,9 +253,9 @@ mod tests { let beacon_node = Arc::new(SimulatedBeaconNode::default()); let signer = Arc::new(LocalSigner::new(Keypair::random())); - let mut epoch_map = EpochMap::new(spec.epoch_length); + let mut epoch_map = EpochMap::new(spec.slots_per_epoch); let produce_slot = Slot::new(100); - let produce_epoch = produce_slot.epoch(spec.epoch_length); + let produce_epoch = produce_slot.epoch(spec.slots_per_epoch); epoch_map.map.insert(produce_epoch, produce_slot); let epoch_map = Arc::new(epoch_map); @@ -277,7 +300,7 @@ mod tests { ); // In an epoch without known duties... 
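The renamed `slots_per_epoch` drives the slot-to-epoch arithmetic used in these tests (`epoch = slot / slots_per_epoch`); a plain-integer sketch, taking `slots_per_epoch = 64` purely for illustration rather than from the spec constants:

```rust
fn epoch_of(slot: u64, slots_per_epoch: u64) -> u64 {
    slot / slots_per_epoch
}

fn main() {
    let slots_per_epoch = 64;

    // The duties tests pin a duty at slot 100, which lands in epoch 1.
    let attest_slot = 100;
    let attest_epoch = epoch_of(attest_slot, slots_per_epoch);
    assert_eq!(attest_epoch, 1);

    // "An epoch without known duties" is reached at the first slot of the next epoch.
    let next_epoch_slot = (attest_epoch + 1) * slots_per_epoch;
    assert_eq!(next_epoch_slot, 128);
    assert_eq!(epoch_of(next_epoch_slot, slots_per_epoch), 2);
}
```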
- let slot = (produce_epoch.as_u64() + 1) * spec.epoch_length; + let slot = (produce_epoch.as_u64() + 1) * spec.slots_per_epoch; slot_clock.set_slot(slot); assert_eq!( block_proposer.poll(), diff --git a/eth2/block_proposer/src/test_utils/epoch_map.rs b/eth2/block_proposer/src/test_utils/epoch_map.rs index e9ed9b68a..6658c7526 100644 --- a/eth2/block_proposer/src/test_utils/epoch_map.rs +++ b/eth2/block_proposer/src/test_utils/epoch_map.rs @@ -1,16 +1,16 @@ use crate::{DutiesReader, DutiesReaderError}; use std::collections::HashMap; -use types::{Epoch, Slot}; +use types::{Epoch, Fork, Slot}; pub struct EpochMap { - epoch_length: u64, + slots_per_epoch: u64, pub map: HashMap, } impl EpochMap { - pub fn new(epoch_length: u64) -> Self { + pub fn new(slots_per_epoch: u64) -> Self { Self { - epoch_length, + slots_per_epoch, map: HashMap::new(), } } @@ -18,11 +18,19 @@ impl EpochMap { impl DutiesReader for EpochMap { fn is_block_production_slot(&self, slot: Slot) -> Result { - let epoch = slot.epoch(self.epoch_length); + let epoch = slot.epoch(self.slots_per_epoch); match self.map.get(&epoch) { Some(s) if *s == slot => Ok(true), Some(s) if *s != slot => Ok(false), _ => Err(DutiesReaderError::UnknownEpoch), } } + + fn fork(&self) -> Result { + Ok(Fork { + previous_version: 0, + current_version: 0, + epoch: Epoch::new(0), + }) + } } diff --git a/eth2/block_proposer/src/test_utils/local_signer.rs b/eth2/block_proposer/src/test_utils/local_signer.rs index 0ebefa29d..d7f490c30 100644 --- a/eth2/block_proposer/src/test_utils/local_signer.rs +++ b/eth2/block_proposer/src/test_utils/local_signer.rs @@ -25,11 +25,11 @@ impl LocalSigner { } impl Signer for LocalSigner { - fn sign_block_proposal(&self, message: &[u8]) -> Option { - Some(Signature::new(message, &self.keypair.sk)) + fn sign_block_proposal(&self, message: &[u8], domain: u64) -> Option { + Some(Signature::new(message, domain, &self.keypair.sk)) } - fn sign_randao_reveal(&self, message: &[u8]) -> Option { - Some(Signature::new(message, &self.keypair.sk)) + fn sign_randao_reveal(&self, message: &[u8], domain: u64) -> Option { + Some(Signature::new(message, domain, &self.keypair.sk)) } } diff --git a/eth2/block_proposer/src/traits.rs b/eth2/block_proposer/src/traits.rs index 5eb27bce7..1c0da9acf 100644 --- a/eth2/block_proposer/src/traits.rs +++ b/eth2/block_proposer/src/traits.rs @@ -1,4 +1,4 @@ -use types::{BeaconBlock, Signature, Slot}; +use types::{BeaconBlock, Fork, Signature, Slot}; #[derive(Debug, PartialEq, Clone)] pub enum BeaconNodeError { @@ -40,10 +40,11 @@ pub enum DutiesReaderError { /// Informs a validator of their duties (e.g., block production). pub trait DutiesReader: Send + Sync { fn is_block_production_slot(&self, slot: Slot) -> Result; + fn fork(&self) -> Result; } /// Signs message using an internally-maintained private key. 
pub trait Signer { - fn sign_block_proposal(&self, message: &[u8]) -> Option; - fn sign_randao_reveal(&self, message: &[u8]) -> Option; + fn sign_block_proposal(&self, message: &[u8], domain: u64) -> Option; + fn sign_randao_reveal(&self, message: &[u8], domain: u64) -> Option; } diff --git a/eth2/fork_choice/Cargo.toml b/eth2/fork_choice/Cargo.toml index 210f3c235..819b84055 100644 --- a/eth2/fork_choice/Cargo.toml +++ b/eth2/fork_choice/Cargo.toml @@ -8,7 +8,6 @@ edition = "2018" db = { path = "../../beacon_node/db" } ssz = { path = "../utils/ssz" } types = { path = "../types" } -fast-math = "0.1.1" log = "0.4.6" bit-vec = "0.5.0" diff --git a/eth2/fork_choice/src/bitwise_lmd_ghost.rs b/eth2/fork_choice/src/bitwise_lmd_ghost.rs index e1d246e92..fd1c3dea4 100644 --- a/eth2/fork_choice/src/bitwise_lmd_ghost.rs +++ b/eth2/fork_choice/src/bitwise_lmd_ghost.rs @@ -1,5 +1,5 @@ +//! The optimised bitwise LMD-GHOST fork choice rule. extern crate bit_vec; -extern crate fast_math; use crate::{ForkChoice, ForkChoiceError}; use bit_vec::BitVec; @@ -7,7 +7,6 @@ use db::{ stores::{BeaconBlockStore, BeaconStateStore}, ClientDB, }; -use fast_math::log2_raw; use log::{debug, trace}; use std::collections::HashMap; use std::sync::Arc; @@ -19,33 +18,28 @@ use types::{ //TODO: Pruning - Children //TODO: Handle Syncing -/// The optimised bitwise LMD-GHOST fork choice rule. -/// NOTE: This uses u32 to represent difference between block heights. Thus this is only -/// applicable for block height differences in the range of a u32. -/// This can potentially be parallelized in some parts. -// we use fast log2, a log2 lookup table is implemented in Vitaliks code, potentially do -// the comparison. Log2_raw takes 2ns according to the documentation. +// NOTE: This uses u32 to represent difference between block heights. Thus this is only +// applicable for block height differences in the range of a u32. +// This can potentially be parallelized in some parts. + +/// Compute the base-2 logarithm of an integer, floored (rounded down) #[inline] -fn log2_int(x: u32) -> u32 { +fn log2_int(x: u64) -> u32 { if x == 0 { return 0; } - assert!( - x <= std::f32::MAX as u32, - "Height too large for fast log in bitwise fork choice" - ); - log2_raw(x as f32) as u32 + 63 - x.leading_zeros() } -fn power_of_2_below(x: u32) -> u32 { - 2u32.pow(log2_int(x)) +fn power_of_2_below(x: u64) -> u64 { + 2u64.pow(log2_int(x)) } /// Stores the necessary data structures to run the optimised bitwise lmd ghost algorithm. pub struct BitwiseLMDGhost { /// A cache of known ancestors at given heights for a specific block. //TODO: Consider FnvHashMap - cache: HashMap, Hash256>, + cache: HashMap, Hash256>, /// Log lookup table for blocks to their ancestors. 
//TODO: Verify we only want/need a size 16 log lookup ancestors: Vec>, @@ -101,7 +95,7 @@ where let active_validator_indices = get_active_validator_indices( ¤t_state.validator_registry[..], - block_slot.epoch(spec.epoch_length), + block_slot.epoch(spec.slots_per_epoch), ); for index in active_validator_indices { @@ -147,7 +141,7 @@ where } } // check if the result is stored in our cache - let cache_key = CacheKey::new(&block_hash, target_height.as_u32()); + let cache_key = CacheKey::new(&block_hash, target_height.as_u64()); if let Some(ancestor) = self.cache.get(&cache_key) { return Some(*ancestor); } @@ -155,7 +149,7 @@ where // not in the cache recursively search for ancestors using a log-lookup if let Some(ancestor) = { let ancestor_lookup = self.ancestors - [log2_int((block_height - target_height - 1u64).as_u32()) as usize] + [log2_int((block_height - target_height - 1u64).as_u64()) as usize] .get(&block_hash) //TODO: Panic if we can't lookup and fork choice fails .expect("All blocks should be added to the ancestor log lookup table"); @@ -192,7 +186,7 @@ where } // Check if there is a clear block winner at this height. If so return it. for (hash, votes) in current_votes.iter() { - if *votes >= total_vote_count / 2 { + if *votes > total_vote_count / 2 { // we have a clear winner, return it return Some(*hash); } @@ -216,7 +210,7 @@ where trace!("Child vote length: {}", votes.len()); for (candidate, votes) in votes.iter() { - let candidate_bit: BitVec = BitVec::from_bytes(&candidate); + let candidate_bit: BitVec = BitVec::from_bytes(candidate.as_bytes()); // if the bitmasks don't match, exclude candidate if !bitmask.iter().eq(candidate_bit.iter().take(bit)) { @@ -371,18 +365,21 @@ impl ForkChoice for BitwiseLMDGhost { // if there are no children, we are done, return the current_head let children = match self.children.get(¤t_head) { Some(children) => children.clone(), - None => return Ok(current_head), + None => { + debug!("Head found: {}", current_head); + return Ok(current_head); + } }; // logarithmic lookup blocks to see if there are obvious winners, if so, // progress to the next iteration. 
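Dropping `fast-math` works because, for a non-zero `u64` whose highest set bit is at position k, `leading_zeros()` returns 63 - k, so `63 - x.leading_zeros()` is exactly floor(log2 x). A self-contained check of the two helpers as defined above:

```rust
fn log2_int(x: u64) -> u32 {
    if x == 0 {
        return 0;
    }
    63 - x.leading_zeros()
}

fn power_of_2_below(x: u64) -> u64 {
    2u64.pow(log2_int(x))
}

fn main() {
    assert_eq!(log2_int(1), 0);
    assert_eq!(log2_int(2), 1);
    assert_eq!(log2_int(63), 5);
    assert_eq!(log2_int(64), 6);
    assert_eq!(power_of_2_below(63), 32);
    // Heights well past the old f32-based limit are now handled exactly.
    assert_eq!(power_of_2_below((1u64 << 40) + 5), 1 << 40);
}
```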
let mut step = - power_of_2_below(self.max_known_height.saturating_sub(block_height).as_u32()) / 2; + power_of_2_below(self.max_known_height.saturating_sub(block_height).as_u64()) / 2; while step > 0 { trace!("Current Step: {}", step); if let Some(clear_winner) = self.get_clear_winner( &latest_votes, - block_height - (block_height % u64::from(step)) + u64::from(step), + block_height - (block_height % step) + step, spec, ) { current_head = clear_winner; @@ -391,7 +388,7 @@ impl ForkChoice for BitwiseLMDGhost { step /= 2; } if step > 0 { - trace!("Found clear winner in log lookup"); + trace!("Found clear winner: {}", current_head); } // if our skip lookup failed and we only have one child, progress to that child else if children.len() == 1 { @@ -466,7 +463,6 @@ mod tests { #[test] pub fn test_power_of_2_below() { - println!("{:?}", std::f32::MAX); assert_eq!(power_of_2_below(4), 4); assert_eq!(power_of_2_below(5), 4); assert_eq!(power_of_2_below(7), 4); @@ -475,4 +471,12 @@ mod tests { assert_eq!(power_of_2_below(33), 32); assert_eq!(power_of_2_below(63), 32); } + + #[test] + pub fn test_power_of_2_below_large() { + let pow: u64 = 1 << 24; + for x in (pow - 20)..(pow + 20) { + assert!(power_of_2_below(x) <= x, "{}", x); + } + } } diff --git a/eth2/fork_choice/src/protolambda_lmd_ghost.rs b/eth2/fork_choice/src/protolambda_lmd_ghost.rs new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/eth2/fork_choice/src/protolambda_lmd_ghost.rs @@ -0,0 +1 @@ + diff --git a/eth2/fork_choice/src/slow_lmd_ghost.rs b/eth2/fork_choice/src/slow_lmd_ghost.rs index 3aafb3924..ab4cd2ada 100644 --- a/eth2/fork_choice/src/slow_lmd_ghost.rs +++ b/eth2/fork_choice/src/slow_lmd_ghost.rs @@ -64,7 +64,7 @@ where let active_validator_indices = get_active_validator_indices( ¤t_state.validator_registry[..], - block_slot.epoch(spec.epoch_length), + block_slot.epoch(spec.slots_per_epoch), ); for index in active_validator_indices { diff --git a/eth2/fork_choice/tests/bitwise_lmd_ghost_test_vectors.yaml b/eth2/fork_choice/tests/bitwise_lmd_ghost_test_vectors.yaml index 1578673cd..3233137ab 100644 --- a/eth2/fork_choice/tests/bitwise_lmd_ghost_test_vectors.yaml +++ b/eth2/fork_choice/tests/bitwise_lmd_ghost_test_vectors.yaml @@ -35,3 +35,31 @@ test_cases: - b3: 3 heads: - id: 'b2' +- blocks: + - id: 'b0' + parent: 'b0' + - id: 'b1' + parent: 'b0' + - id: 'b2' + parent: 'b0' + - id: 'b3' + parent: 'b1' + - id: 'b4' + parent: 'b1' + - id: 'b5' + parent: 'b1' + - id: 'b6' + parent: 'b2' + - id: 'b7' + parent: 'b6' + weights: + - b0: 0 + - b1: 3 + - b2: 2 + - b3: 1 + - b4: 1 + - b5: 1 + - b6: 2 + - b7: 2 + heads: + - id: 'b4' diff --git a/eth2/fork_choice/tests/tests.rs b/eth2/fork_choice/tests/tests.rs index 1d93cd0db..a3cab6a7c 100644 --- a/eth2/fork_choice/tests/tests.rs +++ b/eth2/fork_choice/tests/tests.rs @@ -81,7 +81,8 @@ fn test_yaml_vectors( attester_slashings: vec![], attestations: vec![], deposits: vec![], - exits: vec![], + voluntary_exits: vec![], + transfers: vec![], }; // process the tests @@ -249,9 +250,9 @@ fn setup_inital_state( withdrawal_credentials: zero_hash, activation_epoch: Epoch::from(0u64), exit_epoch: spec.far_future_epoch, - withdrawal_epoch: spec.far_future_epoch, - penalized_epoch: spec.far_future_epoch, - status_flags: None, + withdrawable_epoch: spec.far_future_epoch, + initiated_exit: false, + slashed: false, }; // activate the validators for _ in 0..no_validators { diff --git a/eth2/state_processing/Cargo.toml b/eth2/state_processing/Cargo.toml index b6b0ea57c..c51ce8372 100644 --- 
a/eth2/state_processing/Cargo.toml +++ b/eth2/state_processing/Cargo.toml @@ -4,11 +4,22 @@ version = "0.1.0" authors = ["Paul Hauner "] edition = "2018" +[[bench]] +name = "benches" +harness = false + +[dev-dependencies] +criterion = "0.2" +env_logger = "0.6.0" + [dependencies] +bls = { path = "../utils/bls" } hashing = { path = "../utils/hashing" } int_to_bytes = { path = "../utils/int_to_bytes" } integer-sqrt = "0.1" log = "0.4" +merkle_proof = { path = "../utils/merkle_proof" } ssz = { path = "../utils/ssz" } +ssz_derive = { path = "../utils/ssz_derive" } types = { path = "../types" } rayon = "1.0" diff --git a/eth2/state_processing/benches/benches.rs b/eth2/state_processing/benches/benches.rs new file mode 100644 index 000000000..682259eef --- /dev/null +++ b/eth2/state_processing/benches/benches.rs @@ -0,0 +1,65 @@ +use criterion::Criterion; +use criterion::{black_box, criterion_group, criterion_main, Benchmark}; +// use env_logger::{Builder, Env}; +use state_processing::SlotProcessable; +use types::beacon_state::BeaconStateBuilder; +use types::*; + +fn epoch_processing(c: &mut Criterion) { + // Builder::from_env(Env::default().default_filter_or("debug")).init(); + + let mut builder = BeaconStateBuilder::new(16_384); + + builder.build_fast().unwrap(); + builder.teleport_to_end_of_epoch(builder.spec.genesis_epoch + 4); + + let mut state = builder.cloned_state(); + + // Build all the caches so the following state does _not_ include the cache-building time. + state + .build_epoch_cache(RelativeEpoch::Previous, &builder.spec) + .unwrap(); + state + .build_epoch_cache(RelativeEpoch::Current, &builder.spec) + .unwrap(); + state + .build_epoch_cache(RelativeEpoch::Next, &builder.spec) + .unwrap(); + + let cached_state = state.clone(); + + // Drop all the caches so the following state includes the cache-building time. + state.drop_cache(RelativeEpoch::Previous); + state.drop_cache(RelativeEpoch::Current); + state.drop_cache(RelativeEpoch::Next); + + let cacheless_state = state; + + let spec_a = builder.spec.clone(); + let spec_b = builder.spec.clone(); + + c.bench( + "epoch processing", + Benchmark::new("with pre-built caches", move |b| { + b.iter_with_setup( + || cached_state.clone(), + |mut state| black_box(state.per_slot_processing(Hash256::zero(), &spec_a).unwrap()), + ) + }) + .sample_size(10), + ); + + c.bench( + "epoch processing", + Benchmark::new("without pre-built caches", move |b| { + b.iter_with_setup( + || cacheless_state.clone(), + |mut state| black_box(state.per_slot_processing(Hash256::zero(), &spec_b).unwrap()), + ) + }) + .sample_size(10), + ); +} + +criterion_group!(benches, epoch_processing,); +criterion_main!(benches); diff --git a/eth2/state_processing/src/block_processable.rs b/eth2/state_processing/src/block_processable.rs deleted file mode 100644 index 539711c69..000000000 --- a/eth2/state_processing/src/block_processable.rs +++ /dev/null @@ -1,424 +0,0 @@ -use crate::SlotProcessingError; -use hashing::hash; -use int_to_bytes::int_to_bytes32; -use log::{debug, trace}; -use ssz::{ssz_encode, TreeHash}; -use types::{ - beacon_state::{AttestationParticipantsError, BeaconStateError}, - AggregatePublicKey, Attestation, BeaconBlock, BeaconState, ChainSpec, Crosslink, Epoch, Exit, - Fork, Hash256, PendingAttestation, PublicKey, Signature, -}; - -// TODO: define elsehwere. 
-const DOMAIN_PROPOSAL: u64 = 2; -const DOMAIN_EXIT: u64 = 3; -const DOMAIN_RANDAO: u64 = 4; -const PHASE_0_CUSTODY_BIT: bool = false; -const DOMAIN_ATTESTATION: u64 = 1; - -#[derive(Debug, PartialEq)] -pub enum Error { - DBError(String), - StateAlreadyTransitioned, - PresentSlotIsNone, - UnableToDecodeBlock, - MissingParentState(Hash256), - InvalidParentState(Hash256), - MissingBeaconBlock(Hash256), - InvalidBeaconBlock(Hash256), - MissingParentBlock(Hash256), - NoBlockProducer, - StateSlotMismatch, - BadBlockSignature, - BadRandaoSignature, - MaxProposerSlashingsExceeded, - BadProposerSlashing, - MaxAttestationsExceeded, - InvalidAttestation(AttestationValidationError), - NoBlockRoot, - MaxDepositsExceeded, - MaxExitsExceeded, - BadExit, - BadCustodyReseeds, - BadCustodyChallenges, - BadCustodyResponses, - BeaconStateError(BeaconStateError), - SlotProcessingError(SlotProcessingError), -} - -#[derive(Debug, PartialEq)] -pub enum AttestationValidationError { - IncludedTooEarly, - IncludedTooLate, - WrongJustifiedSlot, - WrongJustifiedRoot, - BadLatestCrosslinkRoot, - BadSignature, - ShardBlockRootNotZero, - NoBlockRoot, - AttestationParticipantsError(AttestationParticipantsError), -} - -macro_rules! ensure { - ($condition: expr, $result: expr) => { - if !$condition { - return Err($result); - } - }; -} - -pub trait BlockProcessable { - fn per_block_processing(&mut self, block: &BeaconBlock, spec: &ChainSpec) -> Result<(), Error>; - fn per_block_processing_without_verifying_block_signature( - &mut self, - block: &BeaconBlock, - spec: &ChainSpec, - ) -> Result<(), Error>; -} - -impl BlockProcessable for BeaconState { - fn per_block_processing(&mut self, block: &BeaconBlock, spec: &ChainSpec) -> Result<(), Error> { - per_block_processing_signature_optional(self, block, true, spec) - } - - fn per_block_processing_without_verifying_block_signature( - &mut self, - block: &BeaconBlock, - spec: &ChainSpec, - ) -> Result<(), Error> { - per_block_processing_signature_optional(self, block, false, spec) - } -} - -fn per_block_processing_signature_optional( - state: &mut BeaconState, - block: &BeaconBlock, - verify_block_signature: bool, - spec: &ChainSpec, -) -> Result<(), Error> { - ensure!(block.slot == state.slot, Error::StateSlotMismatch); - - /* - * Proposer Signature - */ - let block_proposer_index = state - .get_beacon_proposer_index(block.slot, spec) - .map_err(|_| Error::NoBlockProducer)?; - let block_proposer = &state.validator_registry[block_proposer_index]; - - if verify_block_signature { - ensure!( - bls_verify( - &block_proposer.pubkey, - &block.proposal_root(spec)[..], - &block.signature, - get_domain(&state.fork, state.current_epoch(spec), DOMAIN_PROPOSAL) - ), - Error::BadBlockSignature - ); - } - - /* - * RANDAO - */ - ensure!( - bls_verify( - &block_proposer.pubkey, - &int_to_bytes32(state.current_epoch(spec).as_u64()), - &block.randao_reveal, - get_domain(&state.fork, state.current_epoch(spec), DOMAIN_RANDAO) - ), - Error::BadRandaoSignature - ); - - // TODO: check this is correct. - let new_mix = { - let mut mix = state.latest_randao_mixes - [state.slot.as_usize() % spec.latest_randao_mixes_length] - .to_vec(); - mix.append(&mut ssz_encode(&block.randao_reveal)); - Hash256::from(&hash(&mix)[..]) - }; - - state.latest_randao_mixes[state.slot.as_usize() % spec.latest_randao_mixes_length] = new_mix; - - /* - * Eth1 data - */ - // TODO: Eth1 data processing. 
- - /* - * Proposer slashings - */ - ensure!( - block.body.proposer_slashings.len() as u64 <= spec.max_proposer_slashings, - Error::MaxProposerSlashingsExceeded - ); - for proposer_slashing in &block.body.proposer_slashings { - let proposer = state - .validator_registry - .get(proposer_slashing.proposer_index as usize) - .ok_or(Error::BadProposerSlashing)?; - ensure!( - proposer_slashing.proposal_data_1.slot == proposer_slashing.proposal_data_2.slot, - Error::BadProposerSlashing - ); - ensure!( - proposer_slashing.proposal_data_1.shard == proposer_slashing.proposal_data_2.shard, - Error::BadProposerSlashing - ); - ensure!( - proposer_slashing.proposal_data_1.block_root - != proposer_slashing.proposal_data_2.block_root, - Error::BadProposerSlashing - ); - ensure!( - proposer.penalized_epoch > state.current_epoch(spec), - Error::BadProposerSlashing - ); - ensure!( - bls_verify( - &proposer.pubkey, - &proposer_slashing.proposal_data_1.hash_tree_root(), - &proposer_slashing.proposal_signature_1, - get_domain( - &state.fork, - proposer_slashing - .proposal_data_1 - .slot - .epoch(spec.epoch_length), - DOMAIN_PROPOSAL - ) - ), - Error::BadProposerSlashing - ); - ensure!( - bls_verify( - &proposer.pubkey, - &proposer_slashing.proposal_data_2.hash_tree_root(), - &proposer_slashing.proposal_signature_2, - get_domain( - &state.fork, - proposer_slashing - .proposal_data_2 - .slot - .epoch(spec.epoch_length), - DOMAIN_PROPOSAL - ) - ), - Error::BadProposerSlashing - ); - state.penalize_validator(proposer_slashing.proposer_index as usize, spec)?; - } - - /* - * Attestations - */ - ensure!( - block.body.attestations.len() as u64 <= spec.max_attestations, - Error::MaxAttestationsExceeded - ); - - debug!("Verifying {} attestations.", block.body.attestations.len()); - - for attestation in &block.body.attestations { - validate_attestation(&state, attestation, spec)?; - - let pending_attestation = PendingAttestation { - data: attestation.data.clone(), - aggregation_bitfield: attestation.aggregation_bitfield.clone(), - custody_bitfield: attestation.custody_bitfield.clone(), - inclusion_slot: state.slot, - }; - state.latest_attestations.push(pending_attestation); - } - - /* - * Deposits - */ - ensure!( - block.body.deposits.len() as u64 <= spec.max_deposits, - Error::MaxDepositsExceeded - ); - - // TODO: process deposits. 
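// This TODO is resolved by `process_deposits` in the new `per_block_processing` module
// (added later in this diff): the deposit count is capped at `spec.max_deposits`, each
// deposit is checked with `verify_deposit` (Merkle-proof verification gated behind the
// `VERIFY_DEPOSIT_MERKLE_PROOFS` constant), then applied via `state.process_deposit`, and
// `state.deposit_index` is incremented. Roughly:
//
//     for (i, deposit) in deposits.iter().enumerate() {
//         verify_deposit(state, deposit, VERIFY_DEPOSIT_MERKLE_PROOFS, spec)
//             .map_err(|e| e.into_with_index(i))?;
//         // ...then state.process_deposit(...)? and state.deposit_index += 1;
//     }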
- - /* - * Exits - */ - ensure!( - block.body.exits.len() as u64 <= spec.max_exits, - Error::MaxExitsExceeded - ); - - for exit in &block.body.exits { - let validator = state - .validator_registry - .get(exit.validator_index as usize) - .ok_or(Error::BadExit)?; - ensure!( - validator.exit_epoch - > state.get_entry_exit_effect_epoch(state.current_epoch(spec), spec), - Error::BadExit - ); - ensure!(state.current_epoch(spec) >= exit.epoch, Error::BadExit); - let exit_message = { - let exit_struct = Exit { - epoch: exit.epoch, - validator_index: exit.validator_index, - signature: spec.empty_signature.clone(), - }; - exit_struct.hash_tree_root() - }; - ensure!( - bls_verify( - &validator.pubkey, - &exit_message, - &exit.signature, - get_domain(&state.fork, exit.epoch, DOMAIN_EXIT) - ), - Error::BadProposerSlashing - ); - state.initiate_validator_exit(exit.validator_index as usize); - } - - debug!("State transition complete."); - - Ok(()) -} - -pub fn validate_attestation( - state: &BeaconState, - attestation: &Attestation, - spec: &ChainSpec, -) -> Result<(), AttestationValidationError> { - validate_attestation_signature_optional(state, attestation, spec, true) -} - -pub fn validate_attestation_without_signature( - state: &BeaconState, - attestation: &Attestation, - spec: &ChainSpec, -) -> Result<(), AttestationValidationError> { - validate_attestation_signature_optional(state, attestation, spec, false) -} - -fn validate_attestation_signature_optional( - state: &BeaconState, - attestation: &Attestation, - spec: &ChainSpec, - verify_signature: bool, -) -> Result<(), AttestationValidationError> { - trace!( - "validate_attestation_signature_optional: attestation epoch: {}", - attestation.data.slot.epoch(spec.epoch_length) - ); - ensure!( - attestation.data.slot + spec.min_attestation_inclusion_delay <= state.slot, - AttestationValidationError::IncludedTooEarly - ); - ensure!( - attestation.data.slot + spec.epoch_length >= state.slot, - AttestationValidationError::IncludedTooLate - ); - if attestation.data.slot >= state.current_epoch_start_slot(spec) { - ensure!( - attestation.data.justified_epoch == state.justified_epoch, - AttestationValidationError::WrongJustifiedSlot - ); - } else { - ensure!( - attestation.data.justified_epoch == state.previous_justified_epoch, - AttestationValidationError::WrongJustifiedSlot - ); - } - ensure!( - attestation.data.justified_block_root - == *state - .get_block_root( - attestation - .data - .justified_epoch - .start_slot(spec.epoch_length), - &spec - ) - .ok_or(AttestationValidationError::NoBlockRoot)?, - AttestationValidationError::WrongJustifiedRoot - ); - let potential_crosslink = Crosslink { - shard_block_root: attestation.data.shard_block_root, - epoch: attestation.data.slot.epoch(spec.epoch_length), - }; - ensure!( - (attestation.data.latest_crosslink - == state.latest_crosslinks[attestation.data.shard as usize]) - | (attestation.data.latest_crosslink == potential_crosslink), - AttestationValidationError::BadLatestCrosslinkRoot - ); - if verify_signature { - let participants = state.get_attestation_participants( - &attestation.data, - &attestation.aggregation_bitfield, - spec, - )?; - let mut group_public_key = AggregatePublicKey::new(); - for participant in participants { - group_public_key.add( - state.validator_registry[participant as usize] - .pubkey - .as_raw(), - ) - } - ensure!( - attestation.verify_signature( - &group_public_key, - PHASE_0_CUSTODY_BIT, - get_domain( - &state.fork, - attestation.data.slot.epoch(spec.epoch_length), - 
DOMAIN_ATTESTATION, - ) - ), - AttestationValidationError::BadSignature - ); - } - ensure!( - attestation.data.shard_block_root == spec.zero_hash, - AttestationValidationError::ShardBlockRootNotZero - ); - Ok(()) -} - -fn get_domain(_fork: &Fork, _epoch: Epoch, _domain_type: u64) -> u64 { - // TODO: stubbed out. - 0 -} - -fn bls_verify(pubkey: &PublicKey, message: &[u8], signature: &Signature, _domain: u64) -> bool { - // TODO: add domain - signature.verify(message, pubkey) -} - -impl From for Error { - fn from(e: AttestationValidationError) -> Error { - Error::InvalidAttestation(e) - } -} - -impl From for Error { - fn from(e: BeaconStateError) -> Error { - Error::BeaconStateError(e) - } -} - -impl From for Error { - fn from(e: SlotProcessingError) -> Error { - Error::SlotProcessingError(e) - } -} - -impl From for AttestationValidationError { - fn from(e: AttestationParticipantsError) -> AttestationValidationError { - AttestationValidationError::AttestationParticipantsError(e) - } -} diff --git a/eth2/state_processing/src/epoch_processable.rs b/eth2/state_processing/src/epoch_processable.rs deleted file mode 100644 index 11b2b224d..000000000 --- a/eth2/state_processing/src/epoch_processable.rs +++ /dev/null @@ -1,729 +0,0 @@ -use integer_sqrt::IntegerSquareRoot; -use log::{debug, trace}; -use rayon::prelude::*; -use ssz::TreeHash; -use std::collections::{HashMap, HashSet}; -use std::iter::FromIterator; -use types::{ - beacon_state::{AttestationParticipantsError, BeaconStateError, InclusionError}, - validator_registry::get_active_validator_indices, - BeaconState, ChainSpec, Crosslink, Epoch, Hash256, PendingAttestation, -}; - -macro_rules! safe_add_assign { - ($a: expr, $b: expr) => { - $a = $a.saturating_add($b); - }; -} -macro_rules! safe_sub_assign { - ($a: expr, $b: expr) => { - $a = $a.saturating_sub($b); - }; -} - -#[derive(Debug, PartialEq)] -pub enum Error { - UnableToDetermineProducer, - NoBlockRoots, - BaseRewardQuotientIsZero, - NoRandaoSeed, - BeaconStateError(BeaconStateError), - AttestationParticipantsError(AttestationParticipantsError), - InclusionError(InclusionError), - WinningRootError(WinningRootError), -} - -#[derive(Debug, PartialEq)] -pub enum WinningRootError { - NoWinningRoot, - AttestationParticipantsError(AttestationParticipantsError), -} - -#[derive(Clone)] -pub struct WinningRoot { - pub shard_block_root: Hash256, - pub attesting_validator_indices: Vec, - pub total_balance: u64, - pub total_attesting_balance: u64, -} - -pub trait EpochProcessable { - fn per_epoch_processing(&mut self, spec: &ChainSpec) -> Result<(), Error>; -} - -impl EpochProcessable for BeaconState { - // Cyclomatic complexity is ignored. It would be ideal to split this function apart, however it - // remains monolithic to allow for easier spec updates. Once the spec is more stable we can - // optimise. - #[allow(clippy::cyclomatic_complexity)] - fn per_epoch_processing(&mut self, spec: &ChainSpec) -> Result<(), Error> { - let current_epoch = self.current_epoch(spec); - let previous_epoch = self.previous_epoch(spec); - let next_epoch = self.next_epoch(spec); - - debug!( - "Starting per-epoch processing on epoch {}...", - self.current_epoch(spec) - ); - - /* - * Validators attesting during the current epoch. 
- */ - let active_validator_indices = get_active_validator_indices( - &self.validator_registry, - self.slot.epoch(spec.epoch_length), - ); - let current_total_balance = self.get_total_balance(&active_validator_indices[..], spec); - - trace!( - "{} validators with a total balance of {} wei.", - active_validator_indices.len(), - current_total_balance - ); - - let current_epoch_attestations: Vec<&PendingAttestation> = self - .latest_attestations - .par_iter() - .filter(|a| { - (a.data.slot / spec.epoch_length).epoch(spec.epoch_length) - == self.current_epoch(spec) - }) - .collect(); - - trace!( - "Current epoch attestations: {}", - current_epoch_attestations.len() - ); - - let current_epoch_boundary_attestations: Vec<&PendingAttestation> = - current_epoch_attestations - .par_iter() - .filter( - |a| match self.get_block_root(self.current_epoch_start_slot(spec), spec) { - Some(block_root) => { - (a.data.epoch_boundary_root == *block_root) - && (a.data.justified_epoch == self.justified_epoch) - } - None => unreachable!(), - }, - ) - .cloned() - .collect(); - - let current_epoch_boundary_attester_indices = self - .get_attestation_participants_union(¤t_epoch_boundary_attestations[..], spec)?; - let current_epoch_boundary_attesting_balance = - self.get_total_balance(¤t_epoch_boundary_attester_indices[..], spec); - - trace!( - "Current epoch boundary attesters: {}", - current_epoch_boundary_attester_indices.len() - ); - - /* - * Validators attesting during the previous epoch - */ - - /* - * Validators that made an attestation during the previous epoch - */ - let previous_epoch_attestations: Vec<&PendingAttestation> = self - .latest_attestations - .par_iter() - .filter(|a| { - //TODO: ensure these saturating subs are correct. - (a.data.slot / spec.epoch_length).epoch(spec.epoch_length) - == self.previous_epoch(spec) - }) - .collect(); - - debug!( - "previous epoch attestations: {}", - previous_epoch_attestations.len() - ); - - let previous_epoch_attester_indices = - self.get_attestation_participants_union(&previous_epoch_attestations[..], spec)?; - let previous_total_balance = self.get_total_balance( - &get_active_validator_indices(&self.validator_registry, previous_epoch), - spec, - ); - - /* - * Validators targetting the previous justified slot - */ - let previous_epoch_justified_attestations: Vec<&PendingAttestation> = { - let mut a: Vec<&PendingAttestation> = current_epoch_attestations - .iter() - .filter(|a| a.data.justified_epoch == self.previous_justified_epoch) - .cloned() - .collect(); - let mut b: Vec<&PendingAttestation> = previous_epoch_attestations - .iter() - .filter(|a| a.data.justified_epoch == self.previous_justified_epoch) - .cloned() - .collect(); - a.append(&mut b); - a - }; - - let previous_epoch_justified_attester_indices = self - .get_attestation_participants_union(&previous_epoch_justified_attestations[..], spec)?; - let previous_epoch_justified_attesting_balance = - self.get_total_balance(&previous_epoch_justified_attester_indices[..], spec); - - /* - * Validators justifying the epoch boundary block at the start of the previous epoch - */ - let previous_epoch_boundary_attestations: Vec<&PendingAttestation> = - previous_epoch_justified_attestations - .iter() - .filter( - |a| match self.get_block_root(self.previous_epoch_start_slot(spec), spec) { - Some(block_root) => a.data.epoch_boundary_root == *block_root, - None => unreachable!(), - }, - ) - .cloned() - .collect(); - - let previous_epoch_boundary_attester_indices = self - 
.get_attestation_participants_union(&previous_epoch_boundary_attestations[..], spec)?; - let previous_epoch_boundary_attesting_balance = - self.get_total_balance(&previous_epoch_boundary_attester_indices[..], spec); - - /* - * Validators attesting to the expected beacon chain head during the previous epoch. - */ - let previous_epoch_head_attestations: Vec<&PendingAttestation> = - previous_epoch_attestations - .iter() - .filter(|a| match self.get_block_root(a.data.slot, spec) { - Some(block_root) => a.data.beacon_block_root == *block_root, - None => unreachable!(), - }) - .cloned() - .collect(); - - let previous_epoch_head_attester_indices = - self.get_attestation_participants_union(&previous_epoch_head_attestations[..], spec)?; - let previous_epoch_head_attesting_balance = - self.get_total_balance(&previous_epoch_head_attester_indices[..], spec); - - debug!( - "previous_epoch_head_attester_balance of {} wei.", - previous_epoch_head_attesting_balance - ); - - /* - * Eth1 Data - */ - if self.next_epoch(spec) % spec.eth1_data_voting_period == 0 { - for eth1_data_vote in &self.eth1_data_votes { - if eth1_data_vote.vote_count * 2 > spec.eth1_data_voting_period { - self.latest_eth1_data = eth1_data_vote.eth1_data.clone(); - } - } - self.eth1_data_votes = vec![]; - } - - /* - * Justification - */ - - let mut new_justified_epoch = self.justified_epoch; - self.justification_bitfield <<= 1; - - // If > 2/3 of the total balance attested to the previous epoch boundary - // - // - Set the 2nd bit of the bitfield. - // - Set the previous epoch to be justified. - if (3 * previous_epoch_boundary_attesting_balance) >= (2 * current_total_balance) { - self.justification_bitfield |= 2; - new_justified_epoch = previous_epoch; - trace!(">= 2/3 voted for previous epoch boundary"); - } - // If > 2/3 of the total balance attested to the previous epoch boundary - // - // - Set the 1st bit of the bitfield. - // - Set the current epoch to be justified. - if (3 * current_epoch_boundary_attesting_balance) >= (2 * current_total_balance) { - self.justification_bitfield |= 1; - new_justified_epoch = current_epoch; - trace!(">= 2/3 voted for current epoch boundary"); - } - - // If: - // - // - All three epochs prior to this epoch have been justified. - // - The previous justified justified epoch was three epochs ago. - // - // Then, set the finalized epoch to be three epochs ago. - if ((self.justification_bitfield >> 1) % 8 == 0b111) - & (self.previous_justified_epoch == previous_epoch - 2) - { - self.finalized_epoch = self.previous_justified_epoch; - trace!("epoch - 3 was finalized (1st condition)."); - } - // If: - // - // - Both two epochs prior to this epoch have been justified. - // - The previous justified epoch was two epochs ago. - // - // Then, set the finalized epoch to two epochs ago. - if ((self.justification_bitfield >> 1) % 4 == 0b11) - & (self.previous_justified_epoch == previous_epoch - 1) - { - self.finalized_epoch = self.previous_justified_epoch; - trace!("epoch - 2 was finalized (2nd condition)."); - } - // If: - // - // - This epoch and the two prior have been justified. - // - The presently justified epoch was two epochs ago. - // - // Then, set the finalized epoch to two epochs ago. - if (self.justification_bitfield % 8 == 0b111) & (self.justified_epoch == previous_epoch - 1) - { - self.finalized_epoch = self.justified_epoch; - trace!("epoch - 2 was finalized (3rd condition)."); - } - // If: - // - // - This epoch and the epoch prior to it have been justified. 
- // - Set the previous epoch to be justified. - // - // Then, set the finalized epoch to be the previous epoch. - if (self.justification_bitfield % 4 == 0b11) & (self.justified_epoch == previous_epoch) { - self.finalized_epoch = self.justified_epoch; - trace!("epoch - 1 was finalized (4th condition)."); - } - - self.previous_justified_epoch = self.justified_epoch; - self.justified_epoch = new_justified_epoch; - - debug!( - "Finalized epoch {}, justified epoch {}.", - self.finalized_epoch, self.justified_epoch - ); - - /* - * Crosslinks - */ - - // Cached for later lookups. - let mut winning_root_for_shards: HashMap> = - HashMap::new(); - - // for slot in self.slot.saturating_sub(2 * spec.epoch_length)..self.slot { - for slot in self.previous_epoch(spec).slot_iter(spec.epoch_length) { - trace!( - "Finding winning root for slot: {} (epoch: {})", - slot, - slot.epoch(spec.epoch_length) - ); - let crosslink_committees_at_slot = - self.get_crosslink_committees_at_slot(slot, false, spec)?; - - for (crosslink_committee, shard) in crosslink_committees_at_slot { - let shard = shard as u64; - - let winning_root = winning_root( - self, - shard, - ¤t_epoch_attestations, - &previous_epoch_attestations, - spec, - ); - - if let Ok(winning_root) = &winning_root { - let total_committee_balance = - self.get_total_balance(&crosslink_committee[..], spec); - - if (3 * winning_root.total_attesting_balance) >= (2 * total_committee_balance) { - self.latest_crosslinks[shard as usize] = Crosslink { - epoch: current_epoch, - shard_block_root: winning_root.shard_block_root, - } - } - } - winning_root_for_shards.insert(shard, winning_root); - } - } - - trace!( - "Found {} winning shard roots.", - winning_root_for_shards.len() - ); - - /* - * Rewards and Penalities - */ - let base_reward_quotient = - previous_total_balance.integer_sqrt() / spec.base_reward_quotient; - if base_reward_quotient == 0 { - return Err(Error::BaseRewardQuotientIsZero); - } - - /* - * Justification and finalization - */ - let epochs_since_finality = next_epoch - self.finalized_epoch; - - let previous_epoch_justified_attester_indices_hashset: HashSet = - HashSet::from_iter(previous_epoch_justified_attester_indices.iter().cloned()); - let previous_epoch_boundary_attester_indices_hashset: HashSet = - HashSet::from_iter(previous_epoch_boundary_attester_indices.iter().cloned()); - let previous_epoch_head_attester_indices_hashset: HashSet = - HashSet::from_iter(previous_epoch_head_attester_indices.iter().cloned()); - let previous_epoch_attester_indices_hashset: HashSet = - HashSet::from_iter(previous_epoch_attester_indices.iter().cloned()); - let active_validator_indices_hashset: HashSet = - HashSet::from_iter(active_validator_indices.iter().cloned()); - - debug!("previous epoch justified attesters: {}, previous epoch boundary attesters: {}, previous epoch head attesters: {}, previous epoch attesters: {}", previous_epoch_justified_attester_indices.len(), previous_epoch_boundary_attester_indices.len(), previous_epoch_head_attester_indices.len(), previous_epoch_attester_indices.len()); - - debug!("{} epochs since finality.", epochs_since_finality); - - if epochs_since_finality <= 4 { - for index in 0..self.validator_balances.len() { - let base_reward = self.base_reward(index, base_reward_quotient, spec); - - if previous_epoch_justified_attester_indices_hashset.contains(&index) { - safe_add_assign!( - self.validator_balances[index], - base_reward * previous_epoch_justified_attesting_balance - / previous_total_balance - ); - } else if 
active_validator_indices_hashset.contains(&index) { - safe_sub_assign!(self.validator_balances[index], base_reward); - } - - if previous_epoch_boundary_attester_indices_hashset.contains(&index) { - safe_add_assign!( - self.validator_balances[index], - base_reward * previous_epoch_boundary_attesting_balance - / previous_total_balance - ); - } else if active_validator_indices_hashset.contains(&index) { - safe_sub_assign!(self.validator_balances[index], base_reward); - } - - if previous_epoch_head_attester_indices_hashset.contains(&index) { - safe_add_assign!( - self.validator_balances[index], - base_reward * previous_epoch_head_attesting_balance - / previous_total_balance - ); - } else if active_validator_indices_hashset.contains(&index) { - safe_sub_assign!(self.validator_balances[index], base_reward); - } - } - - for index in previous_epoch_attester_indices { - let base_reward = self.base_reward(index, base_reward_quotient, spec); - let inclusion_distance = - self.inclusion_distance(&previous_epoch_attestations, index, spec)?; - - safe_add_assign!( - self.validator_balances[index], - base_reward * spec.min_attestation_inclusion_delay / inclusion_distance - ) - } - } else { - for index in 0..self.validator_balances.len() { - let inactivity_penalty = self.inactivity_penalty( - index, - epochs_since_finality, - base_reward_quotient, - spec, - ); - if active_validator_indices_hashset.contains(&index) { - if !previous_epoch_justified_attester_indices_hashset.contains(&index) { - safe_sub_assign!(self.validator_balances[index], inactivity_penalty); - } - if !previous_epoch_boundary_attester_indices_hashset.contains(&index) { - safe_sub_assign!(self.validator_balances[index], inactivity_penalty); - } - if !previous_epoch_head_attester_indices_hashset.contains(&index) { - safe_sub_assign!(self.validator_balances[index], inactivity_penalty); - } - - if self.validator_registry[index].penalized_epoch <= current_epoch { - let base_reward = self.base_reward(index, base_reward_quotient, spec); - safe_sub_assign!( - self.validator_balances[index], - 2 * inactivity_penalty + base_reward - ); - } - } - } - - for index in previous_epoch_attester_indices { - let base_reward = self.base_reward(index, base_reward_quotient, spec); - let inclusion_distance = - self.inclusion_distance(&previous_epoch_attestations, index, spec)?; - - safe_sub_assign!( - self.validator_balances[index], - base_reward - - base_reward * spec.min_attestation_inclusion_delay / inclusion_distance - ); - } - } - - trace!("Processed validator justification and finalization rewards/penalities."); - - /* - * Attestation inclusion - */ - for &index in &previous_epoch_attester_indices_hashset { - let inclusion_slot = - self.inclusion_slot(&previous_epoch_attestations[..], index, spec)?; - let proposer_index = self - .get_beacon_proposer_index(inclusion_slot, spec) - .map_err(|_| Error::UnableToDetermineProducer)?; - let base_reward = self.base_reward(proposer_index, base_reward_quotient, spec); - safe_add_assign!( - self.validator_balances[proposer_index], - base_reward / spec.includer_reward_quotient - ); - } - - trace!( - "Previous epoch attesters: {}.", - previous_epoch_attester_indices_hashset.len() - ); - - /* - * Crosslinks - */ - for slot in self.previous_epoch(spec).slot_iter(spec.epoch_length) { - let crosslink_committees_at_slot = - self.get_crosslink_committees_at_slot(slot, false, spec)?; - - for (_crosslink_committee, shard) in crosslink_committees_at_slot { - let shard = shard as u64; - - if let Some(Ok(winning_root)) = 
winning_root_for_shards.get(&shard) { - // TODO: remove the map. - let attesting_validator_indices: HashSet = HashSet::from_iter( - winning_root.attesting_validator_indices.iter().cloned(), - ); - - for index in 0..self.validator_balances.len() { - let base_reward = self.base_reward(index, base_reward_quotient, spec); - - if attesting_validator_indices.contains(&index) { - safe_add_assign!( - self.validator_balances[index], - base_reward * winning_root.total_attesting_balance - / winning_root.total_balance - ); - } else { - safe_sub_assign!(self.validator_balances[index], base_reward); - } - } - - for index in &winning_root.attesting_validator_indices { - let base_reward = self.base_reward(*index, base_reward_quotient, spec); - safe_add_assign!( - self.validator_balances[*index], - base_reward * winning_root.total_attesting_balance - / winning_root.total_balance - ); - } - } - } - } - - /* - * Ejections - */ - self.process_ejections(spec); - - /* - * Validator Registry - */ - self.previous_calculation_epoch = self.current_calculation_epoch; - self.previous_epoch_start_shard = self.current_epoch_start_shard; - - debug!( - "setting previous_epoch_seed to : {}", - self.current_epoch_seed - ); - - self.previous_epoch_seed = self.current_epoch_seed; - - let should_update_validator_registy = if self.finalized_epoch - > self.validator_registry_update_epoch - { - (0..self.get_current_epoch_committee_count(spec)).all(|i| { - let shard = (self.current_epoch_start_shard + i as u64) % spec.shard_count; - self.latest_crosslinks[shard as usize].epoch > self.validator_registry_update_epoch - }) - } else { - false - }; - - if should_update_validator_registy { - trace!("updating validator registry."); - self.update_validator_registry(spec); - - self.current_calculation_epoch = next_epoch; - self.current_epoch_start_shard = (self.current_epoch_start_shard - + self.get_current_epoch_committee_count(spec) as u64) - % spec.shard_count; - self.current_epoch_seed = self.generate_seed(self.current_calculation_epoch, spec)? - } else { - trace!("not updating validator registry."); - let epochs_since_last_registry_update = - current_epoch - self.validator_registry_update_epoch; - if (epochs_since_last_registry_update > 1) - & epochs_since_last_registry_update.is_power_of_two() - { - self.current_calculation_epoch = next_epoch; - self.current_epoch_seed = - self.generate_seed(self.current_calculation_epoch, spec)? 
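// Note the backoff here: when the registry is not updated, the seed and calculation epoch
// are only refreshed when `epochs_since_last_registry_update` is a power of two greater
// than one, i.e. at 2, 4, 8, 16, ... epochs since the last registry update.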
- } - } - - self.process_penalties_and_exits(spec); - - self.latest_index_roots[(next_epoch.as_usize() + spec.entry_exit_delay as usize) - % spec.latest_index_roots_length] = hash_tree_root(get_active_validator_indices( - &self.validator_registry, - next_epoch + Epoch::from(spec.entry_exit_delay), - )); - self.latest_penalized_balances[next_epoch.as_usize() % spec.latest_penalized_exit_length] = - self.latest_penalized_balances - [current_epoch.as_usize() % spec.latest_penalized_exit_length]; - self.latest_randao_mixes[next_epoch.as_usize() % spec.latest_randao_mixes_length] = self - .get_randao_mix(current_epoch, spec) - .and_then(|x| Some(*x)) - .ok_or_else(|| Error::NoRandaoSeed)?; - self.latest_attestations = self - .latest_attestations - .iter() - .filter(|a| a.data.slot.epoch(spec.epoch_length) >= current_epoch) - .cloned() - .collect(); - - debug!("Epoch transition complete."); - - Ok(()) - } -} - -fn hash_tree_root(input: Vec) -> Hash256 { - Hash256::from(&input.hash_tree_root()[..]) -} - -fn winning_root( - state: &BeaconState, - shard: u64, - current_epoch_attestations: &[&PendingAttestation], - previous_epoch_attestations: &[&PendingAttestation], - spec: &ChainSpec, -) -> Result { - let mut attestations = current_epoch_attestations.to_vec(); - attestations.append(&mut previous_epoch_attestations.to_vec()); - - let mut candidates: HashMap = HashMap::new(); - - let mut highest_seen_balance = 0; - - for a in &attestations { - if a.data.shard != shard { - continue; - } - - let shard_block_root = &a.data.shard_block_root; - - if candidates.contains_key(shard_block_root) { - continue; - } - - // TODO: `cargo fmt` makes this rather ugly; tidy up. - let attesting_validator_indices = attestations.iter().try_fold::<_, _, Result< - _, - AttestationParticipantsError, - >>(vec![], |mut acc, a| { - if (a.data.shard == shard) && (a.data.shard_block_root == *shard_block_root) { - acc.append(&mut state.get_attestation_participants( - &a.data, - &a.aggregation_bitfield, - spec, - )?); - } - Ok(acc) - })?; - - let total_balance: u64 = attesting_validator_indices - .iter() - .fold(0, |acc, i| acc + state.get_effective_balance(*i, spec)); - - let total_attesting_balance: u64 = attesting_validator_indices - .iter() - .fold(0, |acc, i| acc + state.get_effective_balance(*i, spec)); - - if total_attesting_balance > highest_seen_balance { - highest_seen_balance = total_attesting_balance; - } - - let candidate_root = WinningRoot { - shard_block_root: *shard_block_root, - attesting_validator_indices, - total_attesting_balance, - total_balance, - }; - - candidates.insert(*shard_block_root, candidate_root); - } - - Ok(candidates - .iter() - .filter_map(|(_hash, candidate)| { - if candidate.total_attesting_balance == highest_seen_balance { - Some(candidate) - } else { - None - } - }) - .min_by_key(|candidate| candidate.shard_block_root) - .ok_or_else(|| WinningRootError::NoWinningRoot)? - // TODO: avoid clone. 
- .clone()) -} - -impl From for Error { - fn from(e: InclusionError) -> Error { - Error::InclusionError(e) - } -} - -impl From for Error { - fn from(e: BeaconStateError) -> Error { - Error::BeaconStateError(e) - } -} - -impl From for Error { - fn from(e: AttestationParticipantsError) -> Error { - Error::AttestationParticipantsError(e) - } -} - -impl From for WinningRootError { - fn from(e: AttestationParticipantsError) -> WinningRootError { - WinningRootError::AttestationParticipantsError(e) - } -} - -#[cfg(test)] -mod tests { - #[test] - fn it_works() { - assert_eq!(2 + 2, 4); - } -} diff --git a/eth2/state_processing/src/lib.rs b/eth2/state_processing/src/lib.rs index 18d1f7554..2b30844cb 100644 --- a/eth2/state_processing/src/lib.rs +++ b/eth2/state_processing/src/lib.rs @@ -1,10 +1,13 @@ -mod block_processable; -mod epoch_processable; -mod slot_processable; +#[macro_use] +mod macros; -pub use block_processable::{ - validate_attestation, validate_attestation_without_signature, BlockProcessable, - Error as BlockProcessingError, +pub mod per_block_processing; +pub mod per_epoch_processing; +pub mod per_slot_processing; + +pub use per_block_processing::{ + errors::{BlockInvalid, BlockProcessingError}, + per_block_processing, per_block_processing_without_verifying_block_signature, }; -pub use epoch_processable::{EpochProcessable, Error as EpochProcessingError}; -pub use slot_processable::{Error as SlotProcessingError, SlotProcessable}; +pub use per_epoch_processing::{errors::EpochProcessingError, per_epoch_processing}; +pub use per_slot_processing::{per_slot_processing, Error as SlotProcessingError}; diff --git a/eth2/state_processing/src/macros.rs b/eth2/state_processing/src/macros.rs new file mode 100644 index 000000000..93a42764b --- /dev/null +++ b/eth2/state_processing/src/macros.rs @@ -0,0 +1,24 @@ +macro_rules! verify { + ($condition: expr, $result: expr) => { + if !$condition { + return Err(Error::Invalid($result)); + } + }; +} + +macro_rules! invalid { + ($result: expr) => { + return Err(Error::Invalid($result)); + }; +} + +macro_rules! safe_add_assign { + ($a: expr, $b: expr) => { + $a = $a.saturating_add($b); + }; +} +macro_rules! safe_sub_assign { + ($a: expr, $b: expr) => { + $a = $a.saturating_sub($b); + }; +} diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs new file mode 100644 index 000000000..1ab1eed71 --- /dev/null +++ b/eth2/state_processing/src/per_block_processing.rs @@ -0,0 +1,386 @@ +use self::verify_proposer_slashing::verify_proposer_slashing; +use errors::{BlockInvalid as Invalid, BlockProcessingError as Error, IntoWithIndex}; +use hashing::hash; +use log::debug; +use ssz::{ssz_encode, SignedRoot, TreeHash}; +use types::*; + +pub use self::verify_attester_slashing::verify_attester_slashing; +pub use validate_attestation::{validate_attestation, validate_attestation_without_signature}; +pub use verify_deposit::verify_deposit; +pub use verify_exit::verify_exit; +pub use verify_transfer::{execute_transfer, verify_transfer}; + +pub mod errors; +mod validate_attestation; +mod verify_attester_slashing; +mod verify_deposit; +mod verify_exit; +mod verify_proposer_slashing; +mod verify_slashable_attestation; +mod verify_transfer; + +// Set to `true` to check the merkle proof that a deposit is in the eth1 deposit root. +// +// Presently disabled to make testing easier. +const VERIFY_DEPOSIT_MERKLE_PROOFS: bool = false; + +/// Updates the state for a new block, whilst validating that the block is valid. 
+/// +/// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise +/// returns an error describing why the block was invalid or how the function failed to execute. +/// +/// Spec v0.4.0 +pub fn per_block_processing( + state: &mut BeaconState, + block: &BeaconBlock, + spec: &ChainSpec, +) -> Result<(), Error> { + per_block_processing_signature_optional(state, block, true, spec) +} + +/// Updates the state for a new block, whilst validating that the block is valid, without actually +/// checking the block proposer signature. +/// +/// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise +/// returns an error describing why the block was invalid or how the function failed to execute. +/// +/// Spec v0.4.0 +pub fn per_block_processing_without_verifying_block_signature( + state: &mut BeaconState, + block: &BeaconBlock, + spec: &ChainSpec, +) -> Result<(), Error> { + per_block_processing_signature_optional(state, block, false, spec) +} + +/// Updates the state for a new block, whilst validating that the block is valid, optionally +/// checking the block proposer signature. +/// +/// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise +/// returns an error describing why the block was invalid or how the function failed to execute. +/// +/// Spec v0.4.0 +fn per_block_processing_signature_optional( + mut state: &mut BeaconState, + block: &BeaconBlock, + should_verify_block_signature: bool, + spec: &ChainSpec, +) -> Result<(), Error> { + // Verify that `block.slot == state.slot`. + verify!(block.slot == state.slot, Invalid::StateSlotMismatch); + + // Ensure the current epoch cache is built. + state.build_epoch_cache(RelativeEpoch::Current, spec)?; + + if should_verify_block_signature { + verify_block_signature(&state, &block, &spec)?; + } + process_randao(&mut state, &block, &spec)?; + process_eth1_data(&mut state, &block.eth1_data)?; + process_proposer_slashings(&mut state, &block.body.proposer_slashings[..], spec)?; + process_attester_slashings(&mut state, &block.body.attester_slashings[..], spec)?; + process_attestations(&mut state, &block.body.attestations[..], spec)?; + process_deposits(&mut state, &block.body.deposits[..], spec)?; + process_exits(&mut state, &block.body.voluntary_exits[..], spec)?; + process_transfers(&mut state, &block.body.transfers[..], spec)?; + + debug!("per_block_processing complete."); + + Ok(()) +} + +/// Verifies the signature of a block. +/// +/// Spec v0.4.0 +pub fn verify_block_signature( + state: &BeaconState, + block: &BeaconBlock, + spec: &ChainSpec, +) -> Result<(), Error> { + let block_proposer = + &state.validator_registry[state.get_beacon_proposer_index(block.slot, spec)?]; + + let proposal = Proposal { + slot: block.slot, + shard: spec.beacon_chain_shard_number, + block_root: Hash256::from_slice(&block.signed_root()[..]), + signature: block.signature.clone(), + }; + let domain = spec.get_domain( + block.slot.epoch(spec.slots_per_epoch), + Domain::Proposal, + &state.fork, + ); + + verify!( + proposal + .signature + .verify(&proposal.signed_root()[..], domain, &block_proposer.pubkey), + Invalid::BadSignature + ); + + Ok(()) +} + +/// Verifies the `randao_reveal` against the block's proposer pubkey and updates +/// `state.latest_randao_mixes`. 
+/// +/// Spec v0.4.0 +pub fn process_randao( + state: &mut BeaconState, + block: &BeaconBlock, + spec: &ChainSpec, +) -> Result<(), Error> { + // Let `proposer = state.validator_registry[get_beacon_proposer_index(state, state.slot)]`. + let block_proposer = + &state.validator_registry[state.get_beacon_proposer_index(block.slot, spec)?]; + + // Verify that `bls_verify(pubkey=proposer.pubkey, + // message_hash=hash_tree_root(get_current_epoch(state)), signature=block.randao_reveal, + // domain=get_domain(state.fork, get_current_epoch(state), DOMAIN_RANDAO))`. + verify!( + block.randao_reveal.verify( + &state.current_epoch(spec).hash_tree_root()[..], + spec.get_domain( + block.slot.epoch(spec.slots_per_epoch), + Domain::Randao, + &state.fork + ), + &block_proposer.pubkey + ), + Invalid::BadRandaoSignature + ); + + // Update the state's RANDAO mix with the one revealed in the block. + update_randao(state, &block.randao_reveal, spec)?; + + Ok(()) +} + +/// Update the `state.eth1_data_votes` based upon the `eth1_data` provided. +/// +/// Spec v0.4.0 +pub fn process_eth1_data(state: &mut BeaconState, eth1_data: &Eth1Data) -> Result<(), Error> { + // Either increment the eth1_data vote count, or add a new eth1_data. + let matching_eth1_vote_index = state + .eth1_data_votes + .iter() + .position(|vote| vote.eth1_data == *eth1_data); + if let Some(index) = matching_eth1_vote_index { + state.eth1_data_votes[index].vote_count += 1; + } else { + state.eth1_data_votes.push(Eth1DataVote { + eth1_data: eth1_data.clone(), + vote_count: 1, + }); + } + + Ok(()) +} + +/// Updates the present randao mix. +/// +/// Set `state.latest_randao_mixes[get_current_epoch(state) % LATEST_RANDAO_MIXES_LENGTH] = +/// xor(get_randao_mix(state, get_current_epoch(state)), hash(block.randao_reveal))`. +/// +/// Spec v0.4.0 +pub fn update_randao( + state: &mut BeaconState, + reveal: &Signature, + spec: &ChainSpec, +) -> Result<(), BeaconStateError> { + let hashed_reveal = { + let encoded_signature = ssz_encode(reveal); + Hash256::from_slice(&hash(&encoded_signature[..])[..]) + }; + + let current_epoch = state.slot.epoch(spec.slots_per_epoch); + + let current_mix = state + .get_randao_mix(current_epoch, spec) + .ok_or_else(|| BeaconStateError::InsufficientRandaoMixes)?; + + let new_mix = *current_mix ^ hashed_reveal; + + let index = current_epoch.as_usize() % spec.latest_randao_mixes_length; + + if index < state.latest_randao_mixes.len() { + state.latest_randao_mixes[index] = new_mix; + Ok(()) + } else { + Err(BeaconStateError::InsufficientRandaoMixes) + } +} + +/// Validates each `ProposerSlashing` and updates the state, short-circuiting on an invalid object. +/// +/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns +/// an `Err` describing the invalid object or cause of failure. +/// +/// Spec v0.4.0 +pub fn process_proposer_slashings( + state: &mut BeaconState, + proposer_slashings: &[ProposerSlashing], + spec: &ChainSpec, +) -> Result<(), Error> { + verify!( + proposer_slashings.len() as u64 <= spec.max_proposer_slashings, + Invalid::MaxProposerSlashingsExceeded + ); + for (i, proposer_slashing) in proposer_slashings.iter().enumerate() { + verify_proposer_slashing(proposer_slashing, &state, spec) + .map_err(|e| e.into_with_index(i))?; + state.slash_validator(proposer_slashing.proposer_index as usize, spec)?; + } + + Ok(()) +} + +/// Validates each `AttesterSlsashing` and updates the state, short-circuiting on an invalid object. 
+/// +/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns +/// an `Err` describing the invalid object or cause of failure. +/// +/// Spec v0.4.0 +pub fn process_attester_slashings( + state: &mut BeaconState, + attester_slashings: &[AttesterSlashing], + spec: &ChainSpec, +) -> Result<(), Error> { + verify!( + attester_slashings.len() as u64 <= spec.max_attester_slashings, + Invalid::MaxAttesterSlashingsExceed + ); + for (i, attester_slashing) in attester_slashings.iter().enumerate() { + let slashable_indices = verify_attester_slashing(&state, &attester_slashing, spec) + .map_err(|e| e.into_with_index(i))?; + for i in slashable_indices { + state.slash_validator(i as usize, spec)?; + } + } + + Ok(()) +} + +/// Validates each `Attestation` and updates the state, short-circuiting on an invalid object. +/// +/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns +/// an `Err` describing the invalid object or cause of failure. +/// +/// Spec v0.4.0 +pub fn process_attestations( + state: &mut BeaconState, + attestations: &[Attestation], + spec: &ChainSpec, +) -> Result<(), Error> { + verify!( + attestations.len() as u64 <= spec.max_attestations, + Invalid::MaxAttestationsExceeded + ); + for (i, attestation) in attestations.iter().enumerate() { + // Build the previous epoch cache only if required by an attestation. + if attestation.data.slot.epoch(spec.slots_per_epoch) == state.previous_epoch(spec) { + state.build_epoch_cache(RelativeEpoch::Previous, spec)?; + } + + validate_attestation(state, attestation, spec).map_err(|e| e.into_with_index(i))?; + + let pending_attestation = PendingAttestation { + data: attestation.data.clone(), + aggregation_bitfield: attestation.aggregation_bitfield.clone(), + custody_bitfield: attestation.custody_bitfield.clone(), + inclusion_slot: state.slot, + }; + state.latest_attestations.push(pending_attestation); + } + + Ok(()) +} + +/// Validates each `Deposit` and updates the state, short-circuiting on an invalid object. +/// +/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns +/// an `Err` describing the invalid object or cause of failure. +/// +/// Spec v0.4.0 +pub fn process_deposits( + state: &mut BeaconState, + deposits: &[Deposit], + spec: &ChainSpec, +) -> Result<(), Error> { + verify!( + deposits.len() as u64 <= spec.max_deposits, + Invalid::MaxDepositsExceeded + ); + for (i, deposit) in deposits.iter().enumerate() { + verify_deposit(state, deposit, VERIFY_DEPOSIT_MERKLE_PROOFS, spec) + .map_err(|e| e.into_with_index(i))?; + + state + .process_deposit( + deposit.deposit_data.deposit_input.pubkey.clone(), + deposit.deposit_data.amount, + deposit + .deposit_data + .deposit_input + .proof_of_possession + .clone(), + deposit.deposit_data.deposit_input.withdrawal_credentials, + None, + spec, + ) + .map_err(|_| Error::Invalid(Invalid::DepositProcessingFailed(i)))?; + + state.deposit_index += 1; + } + + Ok(()) +} + +/// Validates each `Exit` and updates the state, short-circuiting on an invalid object. +/// +/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns +/// an `Err` describing the invalid object or cause of failure. 
+/// +/// Spec v0.4.0 +pub fn process_exits( + state: &mut BeaconState, + voluntary_exits: &[VoluntaryExit], + spec: &ChainSpec, +) -> Result<(), Error> { + verify!( + voluntary_exits.len() as u64 <= spec.max_voluntary_exits, + Invalid::MaxExitsExceeded + ); + for (i, exit) in voluntary_exits.iter().enumerate() { + verify_exit(&state, exit, spec).map_err(|e| e.into_with_index(i))?; + + state.initiate_validator_exit(exit.validator_index as usize); + } + + Ok(()) +} + +/// Validates each `Transfer` and updates the state, short-circuiting on an invalid object. +/// +/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns +/// an `Err` describing the invalid object or cause of failure. +/// +/// Spec v0.4.0 +pub fn process_transfers( + state: &mut BeaconState, + transfers: &[Transfer], + spec: &ChainSpec, +) -> Result<(), Error> { + verify!( + transfers.len() as u64 <= spec.max_transfers, + Invalid::MaxTransfersExceed + ); + for (i, transfer) in transfers.iter().enumerate() { + verify_transfer(&state, transfer, spec).map_err(|e| e.into_with_index(i))?; + execute_transfer(state, transfer, spec).map_err(|e| e.into_with_index(i))?; + } + + Ok(()) +} diff --git a/eth2/state_processing/src/per_block_processing/errors.rs b/eth2/state_processing/src/per_block_processing/errors.rs new file mode 100644 index 000000000..b97d8bacc --- /dev/null +++ b/eth2/state_processing/src/per_block_processing/errors.rs @@ -0,0 +1,387 @@ +use types::*; + +macro_rules! impl_from_beacon_state_error { + ($type: ident) => { + impl From for $type { + fn from(e: BeaconStateError) -> $type { + $type::BeaconStateError(e) + } + } + }; +} + +macro_rules! impl_into_with_index_with_beacon_error { + ($error_type: ident, $invalid_type: ident) => { + impl IntoWithIndex for $error_type { + fn into_with_index(self, i: usize) -> BlockProcessingError { + match self { + $error_type::Invalid(e) => { + BlockProcessingError::Invalid(BlockInvalid::$invalid_type(i, e)) + } + $error_type::BeaconStateError(e) => BlockProcessingError::BeaconStateError(e), + } + } + } + }; +} + +macro_rules! impl_into_with_index_without_beacon_error { + ($error_type: ident, $invalid_type: ident) => { + impl IntoWithIndex for $error_type { + fn into_with_index(self, i: usize) -> BlockProcessingError { + match self { + $error_type::Invalid(e) => { + BlockProcessingError::Invalid(BlockInvalid::$invalid_type(i, e)) + } + } + } + } + }; +} + +/// A conversion that consumes `self` and adds an `index` variable to resulting struct. +/// +/// Used here to allow converting an error into an upstream error that points to the object that +/// caused the error. For example, pointing to the index of an attestation that caused the +/// `AttestationInvalid` error. +pub trait IntoWithIndex: Sized { + fn into_with_index(self, index: usize) -> T; +} + +/* + * Block Validation + */ + +/// The object is invalid or validation failed. +#[derive(Debug, PartialEq)] +pub enum BlockProcessingError { + /// Validation completed successfully and the object is invalid. + Invalid(BlockInvalid), + /// Encountered a `BeaconStateError` whilst attempting to determine validity. + BeaconStateError(BeaconStateError), +} + +impl_from_beacon_state_error!(BlockProcessingError); + +/// Describes why an object is invalid. 
+#[derive(Debug, PartialEq)] +pub enum BlockInvalid { + StateSlotMismatch, + BadSignature, + BadRandaoSignature, + MaxAttestationsExceeded, + MaxAttesterSlashingsExceed, + MaxProposerSlashingsExceeded, + MaxDepositsExceeded, + MaxExitsExceeded, + MaxTransfersExceed, + AttestationInvalid(usize, AttestationInvalid), + AttesterSlashingInvalid(usize, AttesterSlashingInvalid), + ProposerSlashingInvalid(usize, ProposerSlashingInvalid), + DepositInvalid(usize, DepositInvalid), + // TODO: merge this into the `DepositInvalid` error. + DepositProcessingFailed(usize), + ExitInvalid(usize, ExitInvalid), + TransferInvalid(usize, TransferInvalid), +} + +impl Into for BlockInvalid { + fn into(self) -> BlockProcessingError { + BlockProcessingError::Invalid(self) + } +} + +/* + * Attestation Validation + */ + +/// The object is invalid or validation failed. +#[derive(Debug, PartialEq)] +pub enum AttestationValidationError { + /// Validation completed successfully and the object is invalid. + Invalid(AttestationInvalid), + /// Encountered a `BeaconStateError` whilst attempting to determine validity. + BeaconStateError(BeaconStateError), +} + +/// Describes why an object is invalid. +#[derive(Debug, PartialEq)] +pub enum AttestationInvalid { + /// Attestation references a pre-genesis slot. + /// + /// (genesis_slot, attestation_slot) + PreGenesis(Slot, Slot), + /// Attestation included before the inclusion delay. + /// + /// (state_slot, inclusion_delay, attestation_slot) + IncludedTooEarly(Slot, u64, Slot), + /// Attestation slot is too far in the past to be included in a block. + /// + /// (state_slot, attestation_slot) + IncludedTooLate(Slot, Slot), + /// Attestation justified epoch does not match the states current or previous justified epoch. + /// + /// (attestation_justified_epoch, state_epoch, used_previous_epoch) + WrongJustifiedEpoch(Epoch, Epoch, bool), + /// Attestation justified epoch root does not match root known to the state. + /// + /// (state_justified_root, attestation_justified_root) + WrongJustifiedRoot(Hash256, Hash256), + /// Attestation crosslink root does not match the state crosslink root for the attestations + /// slot. + BadLatestCrosslinkRoot, + /// The custody bitfield has some bits set `true`. This is not allowed in phase 0. + CustodyBitfieldHasSetBits, + /// There are no set bits on the attestation -- an attestation must be signed by at least one + /// validator. + AggregationBitfieldIsEmpty, + /// The custody bitfield length is not the smallest possible size to represent the committee. + /// + /// (committee_len, bitfield_len) + BadCustodyBitfieldLength(usize, usize), + /// The aggregation bitfield length is not the smallest possible size to represent the committee. + /// + /// (committee_len, bitfield_len) + BadAggregationBitfieldLength(usize, usize), + /// There was no known committee for the given shard in the given slot. + /// + /// (attestation_data_shard, attestation_data_slot) + NoCommitteeForShard(u64, Slot), + /// The attestation signature verification failed. + BadSignature, + /// The shard block root was not set to zero. This is a phase 0 requirement. + ShardBlockRootNotZero, +} + +impl_from_beacon_state_error!(AttestationValidationError); +impl_into_with_index_with_beacon_error!(AttestationValidationError, AttestationInvalid); + +/* + * `AttesterSlashing` Validation + */ + +/// The object is invalid or validation failed. +#[derive(Debug, PartialEq)] +pub enum AttesterSlashingValidationError { + /// Validation completed successfully and the object is invalid. 
+ Invalid(AttesterSlashingInvalid), + /// Encountered a `BeaconStateError` whilst attempting to determine validity. + BeaconStateError(BeaconStateError), +} + +/// Describes why an object is invalid. +#[derive(Debug, PartialEq)] +pub enum AttesterSlashingInvalid { + /// The attestation data is identical, an attestation cannot conflict with itself. + AttestationDataIdentical, + /// The attestations were not in conflict. + NotSlashable, + /// The first `SlashableAttestation` was invalid. + SlashableAttestation1Invalid(SlashableAttestationInvalid), + /// The second `SlashableAttestation` was invalid. + SlashableAttestation2Invalid(SlashableAttestationInvalid), + /// The validator index is unknown. One cannot slash one who does not exist. + UnknownValidator(u64), + /// There were no indices able to be slashed. + NoSlashableIndices, +} + +impl_from_beacon_state_error!(AttesterSlashingValidationError); +impl_into_with_index_with_beacon_error!(AttesterSlashingValidationError, AttesterSlashingInvalid); + +/* + * `SlashableAttestation` Validation + */ + +/// The object is invalid or validation failed. +#[derive(Debug, PartialEq)] +pub enum SlashableAttestationValidationError { + /// Validation completed successfully and the object is invalid. + Invalid(SlashableAttestationInvalid), +} + +/// Describes why an object is invalid. +#[derive(Debug, PartialEq)] +pub enum SlashableAttestationInvalid { + /// The custody bitfield has some bits set `true`. This is not allowed in phase 0. + CustodyBitfieldHasSetBits, + /// No validator indices were specified. + NoValidatorIndices, + /// The validator indices were not in increasing order. + /// + /// The error occured between the given `index` and `index + 1` + BadValidatorIndicesOrdering(usize), + /// The custody bitfield length is not the smallest possible size to represent the validators. + /// + /// (validators_len, bitfield_len) + BadCustodyBitfieldLength(usize, usize), + /// The number of slashable indices exceed the global maximum. + /// + /// (max_indices, indices_given) + MaxIndicesExceed(usize, usize), + /// The validator index is unknown. One cannot slash one who does not exist. + UnknownValidator(u64), + /// The slashable attestation aggregate signature was not valid. + BadSignature, +} + +impl Into for SlashableAttestationValidationError { + fn into(self) -> SlashableAttestationInvalid { + match self { + SlashableAttestationValidationError::Invalid(e) => e, + } + } +} + +/* + * `ProposerSlashing` Validation + */ + +/// The object is invalid or validation failed. +#[derive(Debug, PartialEq)] +pub enum ProposerSlashingValidationError { + /// Validation completed successfully and the object is invalid. + Invalid(ProposerSlashingInvalid), +} + +/// Describes why an object is invalid. +#[derive(Debug, PartialEq)] +pub enum ProposerSlashingInvalid { + /// The proposer index is not a known validator. + ProposerUnknown(u64), + /// The two proposal have different slots. + /// + /// (proposal_1_slot, proposal_2_slot) + ProposalSlotMismatch(Slot, Slot), + /// The two proposal have different shards. + /// + /// (proposal_1_shard, proposal_2_shard) + ProposalShardMismatch(u64, u64), + /// The two proposal have different block roots. + /// + /// (proposal_1_root, proposal_2_root) + ProposalBlockRootMismatch(Hash256, Hash256), + /// The specified proposer has already been slashed. + ProposerAlreadySlashed, + /// The first proposal signature was invalid. + BadProposal1Signature, + /// The second proposal signature was invalid. 
+ BadProposal2Signature, +} + +impl_into_with_index_without_beacon_error!( + ProposerSlashingValidationError, + ProposerSlashingInvalid +); + +/* + * `Deposit` Validation + */ + +/// The object is invalid or validation failed. +#[derive(Debug, PartialEq)] +pub enum DepositValidationError { + /// Validation completed successfully and the object is invalid. + Invalid(DepositInvalid), +} + +/// Describes why an object is invalid. +#[derive(Debug, PartialEq)] +pub enum DepositInvalid { + /// The deposit index does not match the state index. + /// + /// (state_index, deposit_index) + BadIndex(u64, u64), + /// The specified `branch` and `index` did not form a valid proof that the deposit is included + /// in the eth1 deposit root. + BadMerkleProof, +} + +impl_into_with_index_without_beacon_error!(DepositValidationError, DepositInvalid); + +/* + * `Exit` Validation + */ + +/// The object is invalid or validation failed. +#[derive(Debug, PartialEq)] +pub enum ExitValidationError { + /// Validation completed successfully and the object is invalid. + Invalid(ExitInvalid), +} + +/// Describes why an object is invalid. +#[derive(Debug, PartialEq)] +pub enum ExitInvalid { + /// The specified validator is not in the state's validator registry. + ValidatorUnknown(u64), + AlreadyExited, + /// The exit is for a future epoch. + /// + /// (state_epoch, exit_epoch) + FutureEpoch(Epoch, Epoch), + /// The exit signature was not signed by the validator. + BadSignature, +} + +impl_into_with_index_without_beacon_error!(ExitValidationError, ExitInvalid); + +/* + * `Transfer` Validation + */ + +/// The object is invalid or validation failed. +#[derive(Debug, PartialEq)] +pub enum TransferValidationError { + /// Validation completed successfully and the object is invalid. + Invalid(TransferInvalid), + /// Encountered a `BeaconStateError` whilst attempting to determine validity. + BeaconStateError(BeaconStateError), +} + +/// Describes why an object is invalid. +#[derive(Debug, PartialEq)] +pub enum TransferInvalid { + /// The validator indicated by `transfer.from` is unknown. + FromValidatorUnknown(u64), + /// The validator indicated by `transfer.to` is unknown. + ToValidatorUnknown(u64), + /// The balance of `transfer.from` is insufficient. + /// + /// (required, available) + FromBalanceInsufficient(u64, u64), + /// Adding `transfer.fee` to `transfer.amount` causes an overflow. + /// + /// (transfer_fee, transfer_amount) + FeeOverflow(u64, u64), + /// This transfer would result in the `transfer.from` account to have `0 < balance < + /// min_deposit_amount` + /// + /// (resulting_amount, min_deposit_amount) + InvalidResultingFromBalance(u64, u64), + /// The state slot does not match `transfer.slot`. + /// + /// (state_slot, transfer_slot) + StateSlotMismatch(Slot, Slot), + /// The `transfer.from` validator has been activated and is not withdrawable. + /// + /// (from_validator) + FromValidatorIneligableForTransfer(u64), + /// The validators withdrawal credentials do not match `transfer.pubkey`. + /// + /// (state_credentials, transfer_pubkey_credentials) + WithdrawalCredentialsMismatch(Hash256, Hash256), + /// The deposit was not signed by `deposit.pubkey`. + BadSignature, + /// Overflow when adding to `transfer.to` balance. + /// + /// (to_balance, transfer_amount) + ToBalanceOverflow(u64, u64), + /// Overflow when adding to beacon proposer balance. 
+ /// + /// (proposer_balance, transfer_fee) + ProposerBalanceOverflow(u64, u64), +} + +impl_from_beacon_state_error!(TransferValidationError); +impl_into_with_index_with_beacon_error!(TransferValidationError, TransferInvalid); diff --git a/eth2/state_processing/src/per_block_processing/validate_attestation.rs b/eth2/state_processing/src/per_block_processing/validate_attestation.rs new file mode 100644 index 000000000..54bd2d332 --- /dev/null +++ b/eth2/state_processing/src/per_block_processing/validate_attestation.rs @@ -0,0 +1,255 @@ +use super::errors::{AttestationInvalid as Invalid, AttestationValidationError as Error}; +use ssz::TreeHash; +use types::beacon_state::helpers::*; +use types::*; + +/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the +/// given state. +/// +/// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity. +/// +/// Spec v0.4.0 +pub fn validate_attestation( + state: &BeaconState, + attestation: &Attestation, + spec: &ChainSpec, +) -> Result<(), Error> { + validate_attestation_signature_optional(state, attestation, spec, true) +} + +/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the +/// given state, without validating the aggregate signature. +/// +/// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity. +/// +/// Spec v0.4.0 +pub fn validate_attestation_without_signature( + state: &BeaconState, + attestation: &Attestation, + spec: &ChainSpec, +) -> Result<(), Error> { + validate_attestation_signature_optional(state, attestation, spec, false) +} + +/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the +/// given state, optionally validating the aggregate signature. +/// +/// +/// Spec v0.4.0 +fn validate_attestation_signature_optional( + state: &BeaconState, + attestation: &Attestation, + spec: &ChainSpec, + verify_signature: bool, +) -> Result<(), Error> { + // Verify that `attestation.data.slot >= GENESIS_SLOT`. + verify!( + attestation.data.slot >= spec.genesis_slot, + Invalid::PreGenesis(spec.genesis_slot, attestation.data.slot) + ); + + // Verify that `attestation.data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot`. + verify!( + attestation.data.slot + spec.min_attestation_inclusion_delay <= state.slot, + Invalid::IncludedTooEarly( + state.slot, + spec.min_attestation_inclusion_delay, + attestation.data.slot + ) + ); + + // Verify that `state.slot < attestation.data.slot + SLOTS_PER_EPOCH`. + verify!( + state.slot < attestation.data.slot + spec.slots_per_epoch, + Invalid::IncludedTooLate(state.slot, attestation.data.slot) + ); + + // Verify that `attestation.data.justified_epoch` is equal to `state.justified_epoch` if + // `slot_to_epoch(attestation.data.slot + 1) >= get_current_epoch(state) else + // state.previous_justified_epoch`. 
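// For illustration, assume spec.slots_per_epoch = 64 and (for readability) a genesis slot
// of 0, with the state in epoch 2 (slots 128..=191): an attestation at slot 128 gives
// (128 + 1).epoch(64) = 2 >= 2, so it is compared against `state.justified_epoch`. The
// `+ 1` also pulls in slot 127, the last slot of epoch 1, since (127 + 1).epoch(64) = 2.
// An attestation at slot 126 gives epoch 1 < 2, so it is compared against
// `state.previous_justified_epoch` instead.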
+ if (attestation.data.slot + 1).epoch(spec.slots_per_epoch) >= state.current_epoch(spec) { + verify!( + attestation.data.justified_epoch == state.justified_epoch, + Invalid::WrongJustifiedEpoch( + attestation.data.justified_epoch, + state.justified_epoch, + false + ) + ); + } else { + verify!( + attestation.data.justified_epoch == state.previous_justified_epoch, + Invalid::WrongJustifiedEpoch( + attestation.data.justified_epoch, + state.previous_justified_epoch, + true + ) + ); + } + + // Verify that `attestation.data.justified_block_root` is equal to `get_block_root(state, + // get_epoch_start_slot(attestation.data.justified_epoch))`. + let justified_block_root = *state + .get_block_root( + attestation + .data + .justified_epoch + .start_slot(spec.slots_per_epoch), + &spec, + ) + .ok_or(BeaconStateError::InsufficientBlockRoots)?; + verify!( + attestation.data.justified_block_root == justified_block_root, + Invalid::WrongJustifiedRoot(justified_block_root, attestation.data.justified_block_root) + ); + + // Verify that either: + // + // (i)`state.latest_crosslinks[attestation.data.shard] == attestation.data.latest_crosslink`, + // + // (ii) `state.latest_crosslinks[attestation.data.shard] == + // Crosslink(crosslink_data_root=attestation.data.crosslink_data_root, + // epoch=slot_to_epoch(attestation.data.slot))`. + let potential_crosslink = Crosslink { + crosslink_data_root: attestation.data.crosslink_data_root, + epoch: attestation.data.slot.epoch(spec.slots_per_epoch), + }; + verify!( + (attestation.data.latest_crosslink + == state.latest_crosslinks[attestation.data.shard as usize]) + | (state.latest_crosslinks[attestation.data.shard as usize] == potential_crosslink), + Invalid::BadLatestCrosslinkRoot + ); + + // Get the committee for this attestation + let (committee, _shard) = state + .get_crosslink_committees_at_slot(attestation.data.slot, spec)? + .iter() + .find(|(_committee, shard)| *shard == attestation.data.shard) + .ok_or_else(|| { + Error::Invalid(Invalid::NoCommitteeForShard( + attestation.data.shard, + attestation.data.slot, + )) + })?; + + // Custody bitfield is all zeros (phase 0 requirement). + verify!( + attestation.custody_bitfield.num_set_bits() == 0, + Invalid::CustodyBitfieldHasSetBits + ); + // Custody bitfield length is correct. + verify!( + verify_bitfield_length(&attestation.custody_bitfield, committee.len()), + Invalid::BadCustodyBitfieldLength(committee.len(), attestation.custody_bitfield.len()) + ); + // Aggregation bitfield isn't empty. + verify!( + attestation.aggregation_bitfield.num_set_bits() != 0, + Invalid::AggregationBitfieldIsEmpty + ); + // Aggregation bitfield length is correct. + verify!( + verify_bitfield_length(&attestation.aggregation_bitfield, committee.len()), + Invalid::BadAggregationBitfieldLength( + committee.len(), + attestation.aggregation_bitfield.len() + ) + ); + + if verify_signature { + let attestation_epoch = attestation.data.slot.epoch(spec.slots_per_epoch); + verify!( + verify_attestation_signature( + state, + committee, + attestation_epoch, + &attestation.custody_bitfield, + &attestation.data, + &attestation.aggregate_signature, + spec + ), + Invalid::BadSignature + ); + } + + // [TO BE REMOVED IN PHASE 1] Verify that `attestation.data.crosslink_data_root == ZERO_HASH`. + verify!( + attestation.data.crosslink_data_root == spec.zero_hash, + Invalid::ShardBlockRootNotZero + ); + + Ok(()) +} + +/// Verifies an aggregate signature for some given `AttestationData`, returning `true` if the +/// `aggregate_signature` is valid. 
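+/// Public keys are aggregated separately for each custody bit, and each aggregate is checked
+/// against the matching `AttestationDataAndCustodyBit` message.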
+/// +/// Returns `false` if: +/// - `aggregate_signature` was not signed correctly. +/// - `custody_bitfield` does not have a bit for each index of `committee`. +/// - A `validator_index` in `committee` is not in `state.validator_registry`. +/// +/// Spec v0.4.0 +fn verify_attestation_signature( + state: &BeaconState, + committee: &[usize], + attestation_epoch: Epoch, + custody_bitfield: &Bitfield, + attestation_data: &AttestationData, + aggregate_signature: &AggregateSignature, + spec: &ChainSpec, +) -> bool { + let mut aggregate_pubs = vec![AggregatePublicKey::new(); 2]; + let mut message_exists = vec![false; 2]; + + for (i, v) in committee.iter().enumerate() { + let custody_bit = match custody_bitfield.get(i) { + Ok(bit) => bit, + // Invalidate signature if custody_bitfield.len() < committee + Err(_) => return false, + }; + + message_exists[custody_bit as usize] = true; + + match state.validator_registry.get(*v as usize) { + Some(validator) => { + aggregate_pubs[custody_bit as usize].add(&validator.pubkey); + } + // Invalidate signature if validator index is unknown. + None => return false, + }; + } + + // Message when custody bitfield is `false` + let message_0 = AttestationDataAndCustodyBit { + data: attestation_data.clone(), + custody_bit: false, + } + .hash_tree_root(); + + // Message when custody bitfield is `true` + let message_1 = AttestationDataAndCustodyBit { + data: attestation_data.clone(), + custody_bit: true, + } + .hash_tree_root(); + + let mut messages = vec![]; + let mut keys = vec![]; + + // If any validator signed a message with a `false` custody bit. + if message_exists[0] { + messages.push(&message_0[..]); + keys.push(&aggregate_pubs[0]); + } + // If any validator signed a message with a `true` custody bit. + if message_exists[1] { + messages.push(&message_1[..]); + keys.push(&aggregate_pubs[1]); + } + + let domain = spec.get_domain(attestation_epoch, Domain::Attestation, &state.fork); + + aggregate_signature.verify_multiple(&messages[..], domain, &keys[..]) +} diff --git a/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs b/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs new file mode 100644 index 000000000..71ac97469 --- /dev/null +++ b/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs @@ -0,0 +1,49 @@ +use super::errors::{AttesterSlashingInvalid as Invalid, AttesterSlashingValidationError as Error}; +use super::verify_slashable_attestation::verify_slashable_attestation; +use types::*; + +/// Indicates if an `AttesterSlashing` is valid to be included in a block in the current epoch of the given +/// state. +/// +/// Returns `Ok(())` if the `AttesterSlashing` is valid, otherwise indicates the reason for invalidity. 
+///
+/// Spec v0.4.0
+pub fn verify_attester_slashing(
+    state: &BeaconState,
+    attester_slashing: &AttesterSlashing,
+    spec: &ChainSpec,
+) -> Result<Vec<u64>, Error> {
+    let slashable_attestation_1 = &attester_slashing.slashable_attestation_1;
+    let slashable_attestation_2 = &attester_slashing.slashable_attestation_2;
+
+    verify!(
+        slashable_attestation_1.data != slashable_attestation_2.data,
+        Invalid::AttestationDataIdentical
+    );
+    verify!(
+        slashable_attestation_1.is_double_vote(slashable_attestation_2, spec)
+            | slashable_attestation_1.is_surround_vote(slashable_attestation_2, spec),
+        Invalid::NotSlashable
+    );
+
+    verify_slashable_attestation(state, &slashable_attestation_1, spec)
+        .map_err(|e| Error::Invalid(Invalid::SlashableAttestation1Invalid(e.into())))?;
+    verify_slashable_attestation(state, &slashable_attestation_2, spec)
+        .map_err(|e| Error::Invalid(Invalid::SlashableAttestation2Invalid(e.into())))?;
+
+    let mut slashable_indices = vec![];
+    for i in &slashable_attestation_1.validator_indices {
+        let validator = state
+            .validator_registry
+            .get(*i as usize)
+            .ok_or_else(|| Error::Invalid(Invalid::UnknownValidator(*i)))?;
+
+        if slashable_attestation_2.validator_indices.contains(i) & !validator.slashed {
+            slashable_indices.push(*i);
+        }
+    }
+
+    verify!(!slashable_indices.is_empty(), Invalid::NoSlashableIndices);
+
+    Ok(slashable_indices)
+}
diff --git a/eth2/state_processing/src/per_block_processing/verify_deposit.rs b/eth2/state_processing/src/per_block_processing/verify_deposit.rs
new file mode 100644
index 000000000..69dae1533
--- /dev/null
+++ b/eth2/state_processing/src/per_block_processing/verify_deposit.rs
@@ -0,0 +1,73 @@
+use super::errors::{DepositInvalid as Invalid, DepositValidationError as Error};
+use hashing::hash;
+use merkle_proof::verify_merkle_proof;
+use ssz::ssz_encode;
+use ssz_derive::Encode;
+use types::*;
+
+/// Indicates if a `Deposit` is valid to be included in a block in the current epoch of the given
+/// state.
+///
+/// Returns `Ok(())` if the `Deposit` is valid, otherwise indicates the reason for invalidity.
+///
+/// Note: this function is incomplete.
+///
+/// Spec v0.4.0
+pub fn verify_deposit(
+    state: &BeaconState,
+    deposit: &Deposit,
+    verify_merkle_branch: bool,
+    spec: &ChainSpec,
+) -> Result<(), Error> {
+    verify!(
+        deposit.index == state.deposit_index,
+        Invalid::BadIndex(state.deposit_index, deposit.index)
+    );
+
+    if verify_merkle_branch {
+        verify!(
+            verify_deposit_merkle_proof(state, deposit, spec),
+            Invalid::BadMerkleProof
+        );
+    }
+
+    Ok(())
+}
+
+/// Verify that a deposit is included in the state's eth1 deposit root.
+///
+/// Spec v0.4.0
+fn verify_deposit_merkle_proof(state: &BeaconState, deposit: &Deposit, spec: &ChainSpec) -> bool {
+    let leaf = hash(&get_serialized_deposit_data(deposit));
+    verify_merkle_proof(
+        Hash256::from_slice(&leaf),
+        &deposit.branch,
+        spec.deposit_contract_tree_depth as usize,
+        deposit.index as usize,
+        state.latest_eth1_data.deposit_root,
+    )
+}
+
+/// Helper struct for easily getting the serialized data generated by the deposit contract.
+///
+/// Spec v0.4.0
+#[derive(Encode)]
+struct SerializedDepositData {
+    amount: u64,
+    timestamp: u64,
+    input: DepositInput,
+}
+
+/// Return the serialized data generated by the deposit contract that is used to generate the
+/// merkle proof.
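+/// The fields are SSZ-encoded in the order given by `SerializedDepositData` above: `amount`,
+/// then `timestamp`, then the deposit `input`.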
+/// +/// Spec v0.4.0 +fn get_serialized_deposit_data(deposit: &Deposit) -> Vec { + let serialized_deposit_data = SerializedDepositData { + amount: deposit.deposit_data.amount, + timestamp: deposit.deposit_data.timestamp, + input: deposit.deposit_data.deposit_input.clone(), + }; + + ssz_encode(&serialized_deposit_data) +} diff --git a/eth2/state_processing/src/per_block_processing/verify_exit.rs b/eth2/state_processing/src/per_block_processing/verify_exit.rs new file mode 100644 index 000000000..8cd54fb69 --- /dev/null +++ b/eth2/state_processing/src/per_block_processing/verify_exit.rs @@ -0,0 +1,42 @@ +use super::errors::{ExitInvalid as Invalid, ExitValidationError as Error}; +use ssz::SignedRoot; +use types::*; + +/// Indicates if an `Exit` is valid to be included in a block in the current epoch of the given +/// state. +/// +/// Returns `Ok(())` if the `Exit` is valid, otherwise indicates the reason for invalidity. +/// +/// Spec v0.4.0 +pub fn verify_exit( + state: &BeaconState, + exit: &VoluntaryExit, + spec: &ChainSpec, +) -> Result<(), Error> { + let validator = state + .validator_registry + .get(exit.validator_index as usize) + .ok_or_else(|| Error::Invalid(Invalid::ValidatorUnknown(exit.validator_index)))?; + + verify!( + validator.exit_epoch + > state.get_delayed_activation_exit_epoch(state.current_epoch(spec), spec), + Invalid::AlreadyExited + ); + + verify!( + state.current_epoch(spec) >= exit.epoch, + Invalid::FutureEpoch(state.current_epoch(spec), exit.epoch) + ); + + let message = exit.signed_root(); + let domain = spec.get_domain(exit.epoch, Domain::Exit, &state.fork); + + verify!( + exit.signature + .verify(&message[..], domain, &validator.pubkey), + Invalid::BadSignature + ); + + Ok(()) +} diff --git a/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs b/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs new file mode 100644 index 000000000..c3c0079a9 --- /dev/null +++ b/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs @@ -0,0 +1,87 @@ +use super::errors::{ProposerSlashingInvalid as Invalid, ProposerSlashingValidationError as Error}; +use ssz::SignedRoot; +use types::*; + +/// Indicates if a `ProposerSlashing` is valid to be included in a block in the current epoch of the given +/// state. +/// +/// Returns `Ok(())` if the `ProposerSlashing` is valid, otherwise indicates the reason for invalidity. 
+/// +/// Spec v0.4.0 +pub fn verify_proposer_slashing( + proposer_slashing: &ProposerSlashing, + state: &BeaconState, + spec: &ChainSpec, +) -> Result<(), Error> { + let proposer = state + .validator_registry + .get(proposer_slashing.proposer_index as usize) + .ok_or_else(|| { + Error::Invalid(Invalid::ProposerUnknown(proposer_slashing.proposer_index)) + })?; + + verify!( + proposer_slashing.proposal_1.slot == proposer_slashing.proposal_2.slot, + Invalid::ProposalSlotMismatch( + proposer_slashing.proposal_1.slot, + proposer_slashing.proposal_2.slot + ) + ); + + verify!( + proposer_slashing.proposal_1.shard == proposer_slashing.proposal_2.shard, + Invalid::ProposalShardMismatch( + proposer_slashing.proposal_1.shard, + proposer_slashing.proposal_2.shard + ) + ); + + verify!( + proposer_slashing.proposal_1.block_root != proposer_slashing.proposal_2.block_root, + Invalid::ProposalBlockRootMismatch( + proposer_slashing.proposal_1.block_root, + proposer_slashing.proposal_2.block_root + ) + ); + + verify!(!proposer.slashed, Invalid::ProposerAlreadySlashed); + + verify!( + verify_proposal_signature( + &proposer_slashing.proposal_1, + &proposer.pubkey, + &state.fork, + spec + ), + Invalid::BadProposal1Signature + ); + verify!( + verify_proposal_signature( + &proposer_slashing.proposal_2, + &proposer.pubkey, + &state.fork, + spec + ), + Invalid::BadProposal2Signature + ); + + Ok(()) +} + +/// Verifies the signature of a proposal. +/// +/// Returns `true` if the signature is valid. +fn verify_proposal_signature( + proposal: &Proposal, + pubkey: &PublicKey, + fork: &Fork, + spec: &ChainSpec, +) -> bool { + let message = proposal.signed_root(); + let domain = spec.get_domain( + proposal.slot.epoch(spec.slots_per_epoch), + Domain::Proposal, + fork, + ); + proposal.signature.verify(&message[..], domain, pubkey) +} diff --git a/eth2/state_processing/src/per_block_processing/verify_slashable_attestation.rs b/eth2/state_processing/src/per_block_processing/verify_slashable_attestation.rs new file mode 100644 index 000000000..f0d371043 --- /dev/null +++ b/eth2/state_processing/src/per_block_processing/verify_slashable_attestation.rs @@ -0,0 +1,112 @@ +use super::errors::{ + SlashableAttestationInvalid as Invalid, SlashableAttestationValidationError as Error, +}; +use ssz::TreeHash; +use types::beacon_state::helpers::verify_bitfield_length; +use types::*; + +/// Indicates if a `SlashableAttestation` is valid to be included in a block in the current epoch of the given +/// state. +/// +/// Returns `Ok(())` if the `SlashableAttestation` is valid, otherwise indicates the reason for invalidity. 
+/// +/// Spec v0.4.0 +pub fn verify_slashable_attestation( + state: &BeaconState, + slashable_attestation: &SlashableAttestation, + spec: &ChainSpec, +) -> Result<(), Error> { + if slashable_attestation.custody_bitfield.num_set_bits() > 0 { + invalid!(Invalid::CustodyBitfieldHasSetBits); + } + + if slashable_attestation.validator_indices.is_empty() { + invalid!(Invalid::NoValidatorIndices); + } + + for i in 0..(slashable_attestation.validator_indices.len() - 1) { + if slashable_attestation.validator_indices[i] + >= slashable_attestation.validator_indices[i + 1] + { + invalid!(Invalid::BadValidatorIndicesOrdering(i)); + } + } + + if !verify_bitfield_length( + &slashable_attestation.custody_bitfield, + slashable_attestation.validator_indices.len(), + ) { + invalid!(Invalid::BadCustodyBitfieldLength( + slashable_attestation.validator_indices.len(), + slashable_attestation.custody_bitfield.len() + )); + } + + if slashable_attestation.validator_indices.len() > spec.max_indices_per_slashable_vote as usize + { + invalid!(Invalid::MaxIndicesExceed( + spec.max_indices_per_slashable_vote as usize, + slashable_attestation.validator_indices.len() + )); + } + + // TODO: this signature verification could likely be replaced with: + // + // super::validate_attestation::validate_attestation_signature(..) + + let mut aggregate_pubs = vec![AggregatePublicKey::new(); 2]; + let mut message_exists = vec![false; 2]; + + for (i, v) in slashable_attestation.validator_indices.iter().enumerate() { + let custody_bit = match slashable_attestation.custody_bitfield.get(i) { + Ok(bit) => bit, + Err(_) => unreachable!(), + }; + + message_exists[custody_bit as usize] = true; + + match state.validator_registry.get(*v as usize) { + Some(validator) => { + aggregate_pubs[custody_bit as usize].add(&validator.pubkey); + } + None => invalid!(Invalid::UnknownValidator(*v)), + }; + } + + let message_0 = AttestationDataAndCustodyBit { + data: slashable_attestation.data.clone(), + custody_bit: false, + } + .hash_tree_root(); + let message_1 = AttestationDataAndCustodyBit { + data: slashable_attestation.data.clone(), + custody_bit: true, + } + .hash_tree_root(); + + let mut messages = vec![]; + let mut keys = vec![]; + + if message_exists[0] { + messages.push(&message_0[..]); + keys.push(&aggregate_pubs[0]); + } + if message_exists[1] { + messages.push(&message_1[..]); + keys.push(&aggregate_pubs[1]); + } + + let domain = { + let epoch = slashable_attestation.data.slot.epoch(spec.slots_per_epoch); + spec.get_domain(epoch, Domain::Attestation, &state.fork) + }; + + verify!( + slashable_attestation + .aggregate_signature + .verify_multiple(&messages[..], domain, &keys[..]), + Invalid::BadSignature + ); + + Ok(()) +} diff --git a/eth2/state_processing/src/per_block_processing/verify_transfer.rs b/eth2/state_processing/src/per_block_processing/verify_transfer.rs new file mode 100644 index 000000000..4746fc75c --- /dev/null +++ b/eth2/state_processing/src/per_block_processing/verify_transfer.rs @@ -0,0 +1,135 @@ +use super::errors::{TransferInvalid as Invalid, TransferValidationError as Error}; +use bls::get_withdrawal_credentials; +use ssz::SignedRoot; +use types::*; + +/// Indicates if a `Transfer` is valid to be included in a block in the current epoch of the given +/// state. +/// +/// Returns `Ok(())` if the `Transfer` is valid, otherwise indicates the reason for invalidity. +/// +/// Note: this function is incomplete. 
+/// +/// Spec v0.4.0 +pub fn verify_transfer( + state: &BeaconState, + transfer: &Transfer, + spec: &ChainSpec, +) -> Result<(), Error> { + let from_balance = *state + .validator_balances + .get(transfer.from as usize) + .ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.from)))?; + + let total_amount = transfer + .amount + .checked_add(transfer.fee) + .ok_or_else(|| Error::Invalid(Invalid::FeeOverflow(transfer.amount, transfer.fee)))?; + + verify!( + from_balance >= transfer.amount, + Invalid::FromBalanceInsufficient(transfer.amount, from_balance) + ); + + verify!( + from_balance >= transfer.fee, + Invalid::FromBalanceInsufficient(transfer.fee, from_balance) + ); + + verify!( + (from_balance == total_amount) + || (from_balance >= (total_amount + spec.min_deposit_amount)), + Invalid::InvalidResultingFromBalance(from_balance - total_amount, spec.min_deposit_amount) + ); + + verify!( + state.slot == transfer.slot, + Invalid::StateSlotMismatch(state.slot, transfer.slot) + ); + + let from_validator = state + .validator_registry + .get(transfer.from as usize) + .ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.from)))?; + let epoch = state.slot.epoch(spec.slots_per_epoch); + + verify!( + from_validator.is_withdrawable_at(epoch) + || from_validator.activation_epoch == spec.far_future_epoch, + Invalid::FromValidatorIneligableForTransfer(transfer.from) + ); + + let transfer_withdrawal_credentials = Hash256::from_slice( + &get_withdrawal_credentials(&transfer.pubkey, spec.bls_withdrawal_prefix_byte)[..], + ); + verify!( + from_validator.withdrawal_credentials == transfer_withdrawal_credentials, + Invalid::WithdrawalCredentialsMismatch( + from_validator.withdrawal_credentials, + transfer_withdrawal_credentials + ) + ); + + let message = transfer.signed_root(); + let domain = spec.get_domain( + transfer.slot.epoch(spec.slots_per_epoch), + Domain::Transfer, + &state.fork, + ); + + verify!( + transfer + .signature + .verify(&message[..], domain, &transfer.pubkey), + Invalid::BadSignature + ); + + Ok(()) +} + +/// Executes a transfer on the state. +/// +/// Does not check that the transfer is valid, however checks for overflow in all actions. 
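+/// Callers will generally want to run `verify_transfer` (above) before executing the transfer.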
+/// +/// Spec v0.4.0 +pub fn execute_transfer( + state: &mut BeaconState, + transfer: &Transfer, + spec: &ChainSpec, +) -> Result<(), Error> { + let from_balance = *state + .validator_balances + .get(transfer.from as usize) + .ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.from)))?; + let to_balance = *state + .validator_balances + .get(transfer.to as usize) + .ok_or_else(|| Error::Invalid(Invalid::ToValidatorUnknown(transfer.to)))?; + + let proposer_index = state.get_beacon_proposer_index(state.slot, spec)?; + let proposer_balance = state.validator_balances[proposer_index]; + + let total_amount = transfer + .amount + .checked_add(transfer.fee) + .ok_or_else(|| Error::Invalid(Invalid::FeeOverflow(transfer.amount, transfer.fee)))?; + + state.validator_balances[transfer.from as usize] = + from_balance.checked_sub(total_amount).ok_or_else(|| { + Error::Invalid(Invalid::FromBalanceInsufficient(total_amount, from_balance)) + })?; + + state.validator_balances[transfer.to as usize] = to_balance + .checked_add(transfer.amount) + .ok_or_else(|| Error::Invalid(Invalid::ToBalanceOverflow(to_balance, transfer.amount)))?; + + state.validator_balances[proposer_index] = + proposer_balance.checked_add(transfer.fee).ok_or_else(|| { + Error::Invalid(Invalid::ProposerBalanceOverflow( + proposer_balance, + transfer.fee, + )) + })?; + + Ok(()) +} diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs new file mode 100644 index 000000000..bd8aca3c4 --- /dev/null +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -0,0 +1,491 @@ +use attester_sets::AttesterSets; +use errors::EpochProcessingError as Error; +use inclusion_distance::{inclusion_distance, inclusion_slot}; +use integer_sqrt::IntegerSquareRoot; +use log::debug; +use rayon::prelude::*; +use ssz::TreeHash; +use std::collections::{HashMap, HashSet}; +use std::iter::FromIterator; +use types::{validator_registry::get_active_validator_indices, *}; +use winning_root::{winning_root, WinningRoot}; + +pub mod attester_sets; +pub mod errors; +pub mod inclusion_distance; +pub mod tests; +pub mod winning_root; + +pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { + let current_epoch = state.current_epoch(spec); + let previous_epoch = state.previous_epoch(spec); + let next_epoch = state.next_epoch(spec); + + debug!( + "Starting per-epoch processing on epoch {}...", + state.current_epoch(spec) + ); + + // Ensure all of the caches are built. 
+ state.build_epoch_cache(RelativeEpoch::Previous, spec)?; + state.build_epoch_cache(RelativeEpoch::Current, spec)?; + state.build_epoch_cache(RelativeEpoch::Next, spec)?; + + let attesters = AttesterSets::new(&state, spec)?; + + let active_validator_indices = get_active_validator_indices( + &state.validator_registry, + state.slot.epoch(spec.slots_per_epoch), + ); + + let current_total_balance = state.get_total_balance(&active_validator_indices[..], spec); + let previous_total_balance = state.get_total_balance( + &get_active_validator_indices(&state.validator_registry, previous_epoch)[..], + spec, + ); + + process_eth1_data(state, spec); + + process_justification( + state, + current_total_balance, + previous_total_balance, + attesters.previous_epoch_boundary.balance, + attesters.current_epoch_boundary.balance, + spec, + ); + + // Crosslinks + let winning_root_for_shards = process_crosslinks(state, spec)?; + + // Rewards and Penalities + let active_validator_indices_hashset: HashSet = + HashSet::from_iter(active_validator_indices.iter().cloned()); + process_rewards_and_penalities( + state, + active_validator_indices_hashset, + &attesters, + previous_total_balance, + &winning_root_for_shards, + spec, + )?; + + // Ejections + state.process_ejections(spec); + + // Validator Registry + process_validator_registry(state, spec)?; + + // Final updates + let active_tree_root = get_active_validator_indices( + &state.validator_registry, + next_epoch + Epoch::from(spec.activation_exit_delay), + ) + .hash_tree_root(); + state.latest_active_index_roots[(next_epoch.as_usize() + + spec.activation_exit_delay as usize) + % spec.latest_active_index_roots_length] = Hash256::from_slice(&active_tree_root[..]); + + state.latest_slashed_balances[next_epoch.as_usize() % spec.latest_slashed_exit_length] = + state.latest_slashed_balances[current_epoch.as_usize() % spec.latest_slashed_exit_length]; + state.latest_randao_mixes[next_epoch.as_usize() % spec.latest_randao_mixes_length] = state + .get_randao_mix(current_epoch, spec) + .and_then(|x| Some(*x)) + .ok_or_else(|| Error::NoRandaoSeed)?; + state.latest_attestations = state + .latest_attestations + .iter() + .filter(|a| a.data.slot.epoch(spec.slots_per_epoch) >= current_epoch) + .cloned() + .collect(); + + // Rotate the epoch caches to suit the epoch transition. + state.advance_caches(); + + debug!("Epoch transition complete."); + + Ok(()) +} + +/// Spec v0.4.0 +fn process_eth1_data(state: &mut BeaconState, spec: &ChainSpec) { + let next_epoch = state.next_epoch(spec); + let voting_period = spec.epochs_per_eth1_voting_period; + + if next_epoch % voting_period == 0 { + for eth1_data_vote in &state.eth1_data_votes { + if eth1_data_vote.vote_count * 2 > voting_period { + state.latest_eth1_data = eth1_data_vote.eth1_data.clone(); + } + } + state.eth1_data_votes = vec![]; + } +} + +/// Spec v0.4.0 +fn process_justification( + state: &mut BeaconState, + current_total_balance: u64, + previous_total_balance: u64, + previous_epoch_boundary_attesting_balance: u64, + current_epoch_boundary_attesting_balance: u64, + spec: &ChainSpec, +) { + let previous_epoch = state.previous_epoch(spec); + let current_epoch = state.current_epoch(spec); + + let mut new_justified_epoch = state.justified_epoch; + state.justification_bitfield <<= 1; + + // If > 2/3 of the total balance attested to the previous epoch boundary + // + // - Set the 2nd bit of the bitfield. + // - Set the previous epoch to be justified. 
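+    //
+    // Note: `3 * attesting_balance >= 2 * total_balance` is the integer form of
+    // `attesting_balance / total_balance >= 2/3`, avoiding any division.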
+    if (3 * previous_epoch_boundary_attesting_balance) >= (2 * previous_total_balance) {
+        state.justification_bitfield |= 2;
+        new_justified_epoch = previous_epoch;
+    }
+    // If > 2/3 of the total balance attested to the current epoch boundary
+    //
+    // - Set the 1st bit of the bitfield.
+    // - Set the current epoch to be justified.
+    if (3 * current_epoch_boundary_attesting_balance) >= (2 * current_total_balance) {
+        state.justification_bitfield |= 1;
+        new_justified_epoch = current_epoch;
+    }
+
+    // If:
+    //
+    // - All three epochs prior to this epoch have been justified.
+    // - The previously justified epoch was three epochs ago.
+    //
+    // Then, set the finalized epoch to be three epochs ago.
+    if ((state.justification_bitfield >> 1) % 8 == 0b111)
+        & (state.previous_justified_epoch == previous_epoch - 2)
+    {
+        state.finalized_epoch = state.previous_justified_epoch;
+    }
+    // If:
+    //
+    // - The two epochs prior to this epoch have both been justified.
+    // - The previous justified epoch was two epochs ago.
+    //
+    // Then, set the finalized epoch to two epochs ago.
+    if ((state.justification_bitfield >> 1) % 4 == 0b11)
+        & (state.previous_justified_epoch == previous_epoch - 1)
+    {
+        state.finalized_epoch = state.previous_justified_epoch;
+    }
+    // If:
+    //
+    // - This epoch and the two prior have been justified.
+    // - The presently justified epoch was two epochs ago.
+    //
+    // Then, set the finalized epoch to two epochs ago.
+    if (state.justification_bitfield % 8 == 0b111) & (state.justified_epoch == previous_epoch - 1) {
+        state.finalized_epoch = state.justified_epoch;
+    }
+    // If:
+    //
+    // - This epoch and the epoch prior to it have been justified.
+    // - The presently justified epoch is the previous epoch.
+    //
+    // Then, set the finalized epoch to be the previous epoch.
+    if (state.justification_bitfield % 4 == 0b11) & (state.justified_epoch == previous_epoch) {
+        state.finalized_epoch = state.justified_epoch;
+    }
+
+    state.previous_justified_epoch = state.justified_epoch;
+    state.justified_epoch = new_justified_epoch;
+}
+
+pub type WinningRootHashSet = HashMap<u64, WinningRoot>;
+
+fn process_crosslinks(
+    state: &mut BeaconState,
+    spec: &ChainSpec,
+) -> Result<WinningRootHashSet, Error> {
+    let current_epoch_attestations: Vec<&PendingAttestation> = state
+        .latest_attestations
+        .par_iter()
+        .filter(|a| a.data.slot.epoch(spec.slots_per_epoch) == state.current_epoch(spec))
+        .collect();
+
+    let previous_epoch_attestations: Vec<&PendingAttestation> = state
+        .latest_attestations
+        .par_iter()
+        .filter(|a| a.data.slot.epoch(spec.slots_per_epoch) == state.previous_epoch(spec))
+        .collect();
+
+    let mut winning_root_for_shards: WinningRootHashSet = HashMap::new();
+
+    let previous_and_current_epoch_slots: Vec<Slot> = state
+        .previous_epoch(spec)
+        .slot_iter(spec.slots_per_epoch)
+        .chain(state.current_epoch(spec).slot_iter(spec.slots_per_epoch))
+        .collect();
+
+    for slot in previous_and_current_epoch_slots {
+        // Clone removes the borrow which becomes an issue when mutating `state.balances`.
+        let crosslink_committees_at_slot =
+            state.get_crosslink_committees_at_slot(slot, spec)?.clone();
+
+        for (crosslink_committee, shard) in crosslink_committees_at_slot {
+            let shard = shard as u64;
+
+            let winning_root = winning_root(
+                state,
+                shard,
+                &current_epoch_attestations[..],
+                &previous_epoch_attestations[..],
+                spec,
+            )?;
+
+            if let Some(winning_root) = winning_root {
+                let total_committee_balance = state.get_total_balance(&crosslink_committee, spec);
+
+                // TODO: I think this has a bug.
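+                //
+                // The crosslink is only updated when the winning root attracted at least 2/3 of
+                // the committee's balance, using the same `3 * x >= 2 * y` integer comparison as
+                // the justification logic above.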
+ if (3 * winning_root.total_attesting_balance) >= (2 * total_committee_balance) { + state.latest_crosslinks[shard as usize] = Crosslink { + epoch: state.current_epoch(spec), + crosslink_data_root: winning_root.crosslink_data_root, + } + } + winning_root_for_shards.insert(shard, winning_root); + } + } + } + + Ok(winning_root_for_shards) +} + +/// Spec v0.4.0 +fn process_rewards_and_penalities( + state: &mut BeaconState, + active_validator_indices: HashSet, + attesters: &AttesterSets, + previous_total_balance: u64, + winning_root_for_shards: &WinningRootHashSet, + spec: &ChainSpec, +) -> Result<(), Error> { + let next_epoch = state.next_epoch(spec); + + let previous_epoch_attestations: Vec<&PendingAttestation> = state + .latest_attestations + .par_iter() + .filter(|a| a.data.slot.epoch(spec.slots_per_epoch) == state.previous_epoch(spec)) + .collect(); + + let base_reward_quotient = previous_total_balance.integer_sqrt() / spec.base_reward_quotient; + + if base_reward_quotient == 0 { + return Err(Error::BaseRewardQuotientIsZero); + } + + // Justification and finalization + + let epochs_since_finality = next_epoch - state.finalized_epoch; + + if epochs_since_finality <= 4 { + for index in 0..state.validator_balances.len() { + let base_reward = state.base_reward(index, base_reward_quotient, spec); + + // Expected FFG source + if attesters.previous_epoch.indices.contains(&index) { + safe_add_assign!( + state.validator_balances[index], + base_reward * attesters.previous_epoch.balance / previous_total_balance + ); + } else if active_validator_indices.contains(&index) { + safe_sub_assign!(state.validator_balances[index], base_reward); + } + + // Expected FFG target + if attesters.previous_epoch_boundary.indices.contains(&index) { + safe_add_assign!( + state.validator_balances[index], + base_reward * attesters.previous_epoch_boundary.balance + / previous_total_balance + ); + } else if active_validator_indices.contains(&index) { + safe_sub_assign!(state.validator_balances[index], base_reward); + } + + // Expected beacon chain head + if attesters.previous_epoch_head.indices.contains(&index) { + safe_add_assign!( + state.validator_balances[index], + base_reward * attesters.previous_epoch_head.balance / previous_total_balance + ); + } else if active_validator_indices.contains(&index) { + safe_sub_assign!(state.validator_balances[index], base_reward); + } + } + + // Inclusion distance + for &index in &attesters.previous_epoch.indices { + let base_reward = state.base_reward(index, base_reward_quotient, spec); + let inclusion_distance = + inclusion_distance(state, &previous_epoch_attestations, index, spec)?; + + safe_add_assign!( + state.validator_balances[index], + base_reward * spec.min_attestation_inclusion_delay / inclusion_distance + ) + } + } else { + for index in 0..state.validator_balances.len() { + let inactivity_penalty = + state.inactivity_penalty(index, epochs_since_finality, base_reward_quotient, spec); + + if active_validator_indices.contains(&index) { + if !attesters.previous_epoch.indices.contains(&index) { + safe_sub_assign!(state.validator_balances[index], inactivity_penalty); + } + if !attesters.previous_epoch_boundary.indices.contains(&index) { + safe_sub_assign!(state.validator_balances[index], inactivity_penalty); + } + if !attesters.previous_epoch_head.indices.contains(&index) { + safe_sub_assign!(state.validator_balances[index], inactivity_penalty); + } + + if state.validator_registry[index].slashed { + let base_reward = state.base_reward(index, base_reward_quotient, spec); + 
safe_sub_assign!( + state.validator_balances[index], + 2 * inactivity_penalty + base_reward + ); + } + } + } + + for &index in &attesters.previous_epoch.indices { + let base_reward = state.base_reward(index, base_reward_quotient, spec); + let inclusion_distance = + inclusion_distance(state, &previous_epoch_attestations, index, spec)?; + + safe_sub_assign!( + state.validator_balances[index], + base_reward + - base_reward * spec.min_attestation_inclusion_delay / inclusion_distance + ); + } + } + + // Attestation inclusion + + for &index in &attesters.previous_epoch.indices { + let inclusion_slot = inclusion_slot(state, &previous_epoch_attestations[..], index, spec)?; + + let proposer_index = state + .get_beacon_proposer_index(inclusion_slot, spec) + .map_err(|_| Error::UnableToDetermineProducer)?; + + let base_reward = state.base_reward(proposer_index, base_reward_quotient, spec); + + safe_add_assign!( + state.validator_balances[proposer_index], + base_reward / spec.attestation_inclusion_reward_quotient + ); + } + + //Crosslinks + + for slot in state.previous_epoch(spec).slot_iter(spec.slots_per_epoch) { + // Clone removes the borrow which becomes an issue when mutating `state.balances`. + let crosslink_committees_at_slot = + state.get_crosslink_committees_at_slot(slot, spec)?.clone(); + + for (crosslink_committee, shard) in crosslink_committees_at_slot { + let shard = shard as u64; + + // Note: I'm a little uncertain of the logic here -- I am waiting for spec v0.5.0 to + // clear it up. + // + // What happens here is: + // + // - If there was some crosslink root elected by the super-majority of this committee, + // then we reward all who voted for that root and penalize all that did not. + // - However, if there _was not_ some super-majority-voted crosslink root, then penalize + // all the validators. + // + // I'm not quite sure that the second case (no super-majority crosslink) is correct. + if let Some(winning_root) = winning_root_for_shards.get(&shard) { + // Hash set de-dedups and (hopefully) offers a speed improvement from faster + // lookups. 
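+                //
+                // Committee members who attested for the winning root receive a reward scaled by
+                // the winning root's attesting balance; all other members of the committee are
+                // penalised by their full base reward.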
+ let attesting_validator_indices: HashSet = + HashSet::from_iter(winning_root.attesting_validator_indices.iter().cloned()); + + for &index in &crosslink_committee { + let base_reward = state.base_reward(index, base_reward_quotient, spec); + + let total_balance = state.get_total_balance(&crosslink_committee, spec); + + if attesting_validator_indices.contains(&index) { + safe_add_assign!( + state.validator_balances[index], + base_reward * winning_root.total_attesting_balance / total_balance + ); + } else { + safe_sub_assign!(state.validator_balances[index], base_reward); + } + } + } else { + for &index in &crosslink_committee { + let base_reward = state.base_reward(index, base_reward_quotient, spec); + + safe_sub_assign!(state.validator_balances[index], base_reward); + } + } + } + } + + Ok(()) +} + +// Spec v0.4.0 +fn process_validator_registry(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { + let current_epoch = state.current_epoch(spec); + let next_epoch = state.next_epoch(spec); + + state.previous_shuffling_epoch = state.current_shuffling_epoch; + state.previous_shuffling_start_shard = state.current_shuffling_start_shard; + + state.previous_shuffling_seed = state.current_shuffling_seed; + + let should_update_validator_registy = if state.finalized_epoch + > state.validator_registry_update_epoch + { + (0..state.get_current_epoch_committee_count(spec)).all(|i| { + let shard = (state.current_shuffling_start_shard + i as u64) % spec.shard_count; + state.latest_crosslinks[shard as usize].epoch > state.validator_registry_update_epoch + }) + } else { + false + }; + + if should_update_validator_registy { + state.update_validator_registry(spec); + + state.current_shuffling_epoch = next_epoch; + state.current_shuffling_start_shard = (state.current_shuffling_start_shard + + state.get_current_epoch_committee_count(spec) as u64) + % spec.shard_count; + state.current_shuffling_seed = state.generate_seed(state.current_shuffling_epoch, spec)? + } else { + let epochs_since_last_registry_update = + current_epoch - state.validator_registry_update_epoch; + if (epochs_since_last_registry_update > 1) + & epochs_since_last_registry_update.is_power_of_two() + { + state.current_shuffling_epoch = next_epoch; + state.current_shuffling_seed = + state.generate_seed(state.current_shuffling_epoch, spec)? 
+        }
+    }
+
+    state.process_slashings(spec);
+    state.process_exit_queue(spec);
+
+    Ok(())
+}
diff --git a/eth2/state_processing/src/per_epoch_processing/attester_sets.rs b/eth2/state_processing/src/per_epoch_processing/attester_sets.rs
new file mode 100644
index 000000000..2b674e1bc
--- /dev/null
+++ b/eth2/state_processing/src/per_epoch_processing/attester_sets.rs
@@ -0,0 +1,98 @@
+use std::collections::HashSet;
+use types::*;
+
+#[derive(Default)]
+pub struct Attesters {
+    pub indices: HashSet<usize>,
+    pub balance: u64,
+}
+
+impl Attesters {
+    fn add(&mut self, additional_indices: &[usize], additional_balance: u64) {
+        self.indices.reserve(additional_indices.len());
+        for i in additional_indices {
+            self.indices.insert(*i);
+        }
+        self.balance = self.balance.saturating_add(additional_balance);
+    }
+}
+
+pub struct AttesterSets {
+    pub current_epoch: Attesters,
+    pub current_epoch_boundary: Attesters,
+    pub previous_epoch: Attesters,
+    pub previous_epoch_boundary: Attesters,
+    pub previous_epoch_head: Attesters,
+}
+
+impl AttesterSets {
+    pub fn new(state: &BeaconState, spec: &ChainSpec) -> Result<Self, BeaconStateError> {
+        let mut current_epoch = Attesters::default();
+        let mut current_epoch_boundary = Attesters::default();
+        let mut previous_epoch = Attesters::default();
+        let mut previous_epoch_boundary = Attesters::default();
+        let mut previous_epoch_head = Attesters::default();
+
+        for a in &state.latest_attestations {
+            let attesting_indices =
+                state.get_attestation_participants(&a.data, &a.aggregation_bitfield, spec)?;
+            let attesting_balance = state.get_total_balance(&attesting_indices, spec);
+
+            if is_from_epoch(a, state.current_epoch(spec), spec) {
+                current_epoch.add(&attesting_indices, attesting_balance);
+
+                if has_common_epoch_boundary_root(a, state, state.current_epoch(spec), spec)? {
+                    current_epoch_boundary.add(&attesting_indices, attesting_balance);
+                }
+            } else if is_from_epoch(a, state.previous_epoch(spec), spec) {
+                previous_epoch.add(&attesting_indices, attesting_balance);
+
+                if has_common_epoch_boundary_root(a, state, state.previous_epoch(spec), spec)? {
+                    previous_epoch_boundary.add(&attesting_indices, attesting_balance);
+                }
+
+                if has_common_beacon_block_root(a, state, spec)? {
+                    previous_epoch_head.add(&attesting_indices, attesting_balance);
+                }
+            }
+        }
+
+        Ok(Self {
+            current_epoch,
+            current_epoch_boundary,
+            previous_epoch,
+            previous_epoch_boundary,
+            previous_epoch_head,
+        })
+    }
+}
+
+fn is_from_epoch(a: &PendingAttestation, epoch: Epoch, spec: &ChainSpec) -> bool {
+    a.data.slot.epoch(spec.slots_per_epoch) == epoch
+}
+
+fn has_common_epoch_boundary_root(
+    a: &PendingAttestation,
+    state: &BeaconState,
+    epoch: Epoch,
+    spec: &ChainSpec,
+) -> Result<bool, BeaconStateError> {
+    let slot = epoch.start_slot(spec.slots_per_epoch);
+    let state_boundary_root = *state
+        .get_block_root(slot, spec)
+        .ok_or_else(|| BeaconStateError::InsufficientBlockRoots)?;
+
+    Ok(a.data.epoch_boundary_root == state_boundary_root)
+}
+
+fn has_common_beacon_block_root(
+    a: &PendingAttestation,
+    state: &BeaconState,
+    spec: &ChainSpec,
+) -> Result<bool, BeaconStateError> {
+    let state_block_root = *state
+        .get_block_root(a.data.slot, spec)
+        .ok_or_else(|| BeaconStateError::InsufficientBlockRoots)?;
+
+    Ok(a.data.beacon_block_root == state_block_root)
+}
diff --git a/eth2/state_processing/src/per_epoch_processing/errors.rs b/eth2/state_processing/src/per_epoch_processing/errors.rs
new file mode 100644
index 000000000..51e9b253c
--- /dev/null
+++ b/eth2/state_processing/src/per_epoch_processing/errors.rs
@@ -0,0 +1,36 @@
+use types::*;
+
+#[derive(Debug, PartialEq)]
+pub enum EpochProcessingError {
+    UnableToDetermineProducer,
+    NoBlockRoots,
+    BaseRewardQuotientIsZero,
+    NoRandaoSeed,
+    BeaconStateError(BeaconStateError),
+    InclusionError(InclusionError),
+}
+
+impl From<InclusionError> for EpochProcessingError {
+    fn from(e: InclusionError) -> EpochProcessingError {
+        EpochProcessingError::InclusionError(e)
+    }
+}
+
+impl From<BeaconStateError> for EpochProcessingError {
+    fn from(e: BeaconStateError) -> EpochProcessingError {
+        EpochProcessingError::BeaconStateError(e)
+    }
+}
+
+#[derive(Debug, PartialEq)]
+pub enum InclusionError {
+    /// The validator did not participate in an attestation in this period.
+    NoAttestationsForValidator,
+    BeaconStateError(BeaconStateError),
+}
+
+impl From<BeaconStateError> for InclusionError {
+    fn from(e: BeaconStateError) -> InclusionError {
+        InclusionError::BeaconStateError(e)
+    }
+}
diff --git a/eth2/state_processing/src/per_epoch_processing/inclusion_distance.rs b/eth2/state_processing/src/per_epoch_processing/inclusion_distance.rs
new file mode 100644
index 000000000..243dc67f0
--- /dev/null
+++ b/eth2/state_processing/src/per_epoch_processing/inclusion_distance.rs
@@ -0,0 +1,61 @@
+use super::errors::InclusionError;
+use types::*;
+
+/// Returns the distance between the first included attestation for some validator and this
+/// slot.
+///
+/// Note: In the spec this is defined "inline", not as a helper function.
+///
+/// Spec v0.4.0
+pub fn inclusion_distance(
+    state: &BeaconState,
+    attestations: &[&PendingAttestation],
+    validator_index: usize,
+    spec: &ChainSpec,
+) -> Result<u64, InclusionError> {
+    let attestation = earliest_included_attestation(state, attestations, validator_index, spec)?;
+    Ok((attestation.inclusion_slot - attestation.data.slot).as_u64())
+}
+
+/// Returns the slot of the earliest included attestation for some validator.
+///
+/// Note: In the spec this is defined "inline", not as a helper function.
+///
+/// Spec v0.4.0
+pub fn inclusion_slot(
+    state: &BeaconState,
+    attestations: &[&PendingAttestation],
+    validator_index: usize,
+    spec: &ChainSpec,
+) -> Result<Slot, InclusionError> {
+    let attestation = earliest_included_attestation(state, attestations, validator_index, spec)?;
+    Ok(attestation.inclusion_slot)
+}
+
+/// Finds the earliest included attestation for some validator.
+///
+/// Note: In the spec this is defined "inline", not as a helper function.
+///
+/// Spec v0.4.0
+fn earliest_included_attestation(
+    state: &BeaconState,
+    attestations: &[&PendingAttestation],
+    validator_index: usize,
+    spec: &ChainSpec,
+) -> Result<PendingAttestation, InclusionError> {
+    let mut included_attestations = vec![];
+
+    for (i, a) in attestations.iter().enumerate() {
+        let participants =
+            state.get_attestation_participants(&a.data, &a.aggregation_bitfield, spec)?;
+        if participants.iter().any(|i| *i == validator_index) {
+            included_attestations.push(i);
+        }
+    }
+
+    let earliest_attestation_index = included_attestations
+        .iter()
+        .min_by_key(|i| attestations[**i].inclusion_slot)
+        .ok_or_else(|| InclusionError::NoAttestationsForValidator)?;
+    Ok(attestations[*earliest_attestation_index].clone())
+}
diff --git a/eth2/state_processing/src/per_epoch_processing/tests.rs b/eth2/state_processing/src/per_epoch_processing/tests.rs
new file mode 100644
index 000000000..627df858b
--- /dev/null
+++ b/eth2/state_processing/src/per_epoch_processing/tests.rs
@@ -0,0 +1,21 @@
+#![cfg(test)]
+use crate::per_epoch_processing;
+use env_logger::{Builder, Env};
+use types::beacon_state::BeaconStateBuilder;
+use types::*;
+
+#[test]
+fn runs_without_error() {
+    Builder::from_env(Env::default().default_filter_or("error")).init();
+
+    let mut builder = BeaconStateBuilder::new(8);
+    builder.spec = ChainSpec::few_validators();
+
+    builder.build().unwrap();
+    builder.teleport_to_end_of_epoch(builder.spec.genesis_epoch + 4);
+
+    let mut state = builder.cloned_state();
+
+    let spec = &builder.spec;
+    per_epoch_processing(&mut state, spec).unwrap();
+}
diff --git a/eth2/state_processing/src/per_epoch_processing/winning_root.rs b/eth2/state_processing/src/per_epoch_processing/winning_root.rs
new file mode 100644
index 000000000..07678f93b
--- /dev/null
+++ b/eth2/state_processing/src/per_epoch_processing/winning_root.rs
@@ -0,0 +1,118 @@
+use std::collections::HashSet;
+use std::iter::FromIterator;
+use types::*;
+
+#[derive(Clone)]
+pub struct WinningRoot {
+    pub crosslink_data_root: Hash256,
+    pub attesting_validator_indices: Vec<usize>,
+    pub total_attesting_balance: u64,
+}
+
+impl WinningRoot {
+    /// Returns `true` if `self` is a "better" candidate than `other`.
+    ///
+    /// A winning root is "better" than another if it has a higher `total_attesting_balance`. Ties
+    /// are broken by favouring the lower `crosslink_data_root` value.
+    ///
+    /// Spec v0.4.0
+    pub fn is_better_than(&self, other: &Self) -> bool {
+        if self.total_attesting_balance > other.total_attesting_balance {
+            true
+        } else if self.total_attesting_balance == other.total_attesting_balance {
+            self.crosslink_data_root < other.crosslink_data_root
+        } else {
+            false
+        }
+    }
+}
+
+/// Returns the `crosslink_data_root` with the highest total attesting balance for the given shard.
+/// Breaks ties by favouring the smaller `crosslink_data_root` hash.
+///
+/// The `WinningRoot` object also contains additional fields that are useful in later stages of
+/// per-epoch processing.
+///
+/// Spec v0.4.0
+pub fn winning_root(
+    state: &BeaconState,
+    shard: u64,
+    current_epoch_attestations: &[&PendingAttestation],
+    previous_epoch_attestations: &[&PendingAttestation],
+    spec: &ChainSpec,
+) -> Result<Option<WinningRoot>, BeaconStateError> {
+    let mut winning_root: Option<WinningRoot> = None;
+
+    let crosslink_data_roots: HashSet<Hash256> = HashSet::from_iter(
+        previous_epoch_attestations
+            .iter()
+            .chain(current_epoch_attestations.iter())
+            .filter_map(|a| {
+                if a.data.shard == shard {
+                    Some(a.data.crosslink_data_root)
+                } else {
+                    None
+                }
+            }),
+    );
+
+    for crosslink_data_root in crosslink_data_roots {
+        let attesting_validator_indices = get_attesting_validator_indices(
+            state,
+            shard,
+            current_epoch_attestations,
+            previous_epoch_attestations,
+            &crosslink_data_root,
+            spec,
+        )?;
+
+        let total_attesting_balance: u64 = attesting_validator_indices
+            .iter()
+            .fold(0, |acc, i| acc + state.get_effective_balance(*i, spec));
+
+        let candidate = WinningRoot {
+            crosslink_data_root,
+            attesting_validator_indices,
+            total_attesting_balance,
+        };
+
+        if let Some(ref winner) = winning_root {
+            if candidate.is_better_than(&winner) {
+                winning_root = Some(candidate);
+            }
+        } else {
+            winning_root = Some(candidate);
+        }
+    }
+
+    Ok(winning_root)
+}
+
+/// Returns all indices which voted for a given crosslink. May contain duplicates.
+///
+/// Spec v0.4.0
+fn get_attesting_validator_indices(
+    state: &BeaconState,
+    shard: u64,
+    current_epoch_attestations: &[&PendingAttestation],
+    previous_epoch_attestations: &[&PendingAttestation],
+    crosslink_data_root: &Hash256,
+    spec: &ChainSpec,
+) -> Result<Vec<usize>, BeaconStateError> {
+    let mut indices = vec![];
+
+    for a in current_epoch_attestations
+        .iter()
+        .chain(previous_epoch_attestations.iter())
+    {
+        if (a.data.shard == shard) && (a.data.crosslink_data_root == *crosslink_data_root) {
+            indices.append(&mut state.get_attestation_participants(
+                &a.data,
+                &a.aggregation_bitfield,
+                spec,
+            )?);
+        }
+    }
+
+    Ok(indices)
+}
diff --git a/eth2/state_processing/src/per_slot_processing.rs b/eth2/state_processing/src/per_slot_processing.rs
new file mode 100644
index 000000000..0bb405c98
--- /dev/null
+++ b/eth2/state_processing/src/per_slot_processing.rs
@@ -0,0 +1,58 @@
+use crate::*;
+use types::{BeaconState, BeaconStateError, ChainSpec, Hash256};
+
+#[derive(Debug, PartialEq)]
+pub enum Error {
+    BeaconStateError(BeaconStateError),
+    EpochProcessingError(EpochProcessingError),
+}
+
+/// Advances a state forward by one slot, performing per-epoch processing if required.
+///
+/// Spec v0.4.0
+pub fn per_slot_processing(
+    state: &mut BeaconState,
+    previous_block_root: Hash256,
+    spec: &ChainSpec,
+) -> Result<(), Error> {
+    if (state.slot + 1) % spec.slots_per_epoch == 0 {
+        per_epoch_processing(state, spec)?;
+        state.advance_caches();
+    }
+
+    state.slot += 1;
+
+    update_block_roots(state, previous_block_root, spec);
+
+    Ok(())
+}
+
+/// Updates the state's block roots as per-slot processing is performed.
+///
+/// Spec v0.4.0
+pub fn update_block_roots(state: &mut BeaconState, previous_block_root: Hash256, spec: &ChainSpec) {
+    state.latest_block_roots[(state.slot.as_usize() - 1) % spec.latest_block_roots_length] =
+        previous_block_root;
+
+    if state.slot.as_usize() % spec.latest_block_roots_length == 0 {
+        let root = merkle_root(&state.latest_block_roots[..]);
+        state.batched_block_roots.push(root);
+    }
+}
+
+fn merkle_root(_input: &[Hash256]) -> Hash256 {
+    // TODO: implement correctly.
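+    //
+    // A rough sketch of one possible implementation (not verified against the spec): hash
+    // adjacent pairs of roots with `hashing::hash` until a single 32-byte root remains, e.g.
+    //
+    //     let mut layer: Vec<Vec<u8>> = input.iter().map(|h| h[..].to_vec()).collect();
+    //     while layer.len() > 1 {
+    //         layer = layer
+    //             .chunks(2)
+    //             .map(|pair| hash(&[&pair[0][..], &pair[1][..]].concat()))
+    //             .collect();
+    //     }
+    //     Hash256::from_slice(&layer[0])
+    //
+    // This assumes the input length is a power of two (here it is `latest_block_roots_length`),
+    // so no padding is required. Until then, return the zero hash.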
+ Hash256::zero() +} + +impl From for Error { + fn from(e: BeaconStateError) -> Error { + Error::BeaconStateError(e) + } +} + +impl From for Error { + fn from(e: EpochProcessingError) -> Error { + Error::EpochProcessingError(e) + } +} diff --git a/eth2/state_processing/src/slot_processable.rs b/eth2/state_processing/src/slot_processable.rs deleted file mode 100644 index 9e3b611fd..000000000 --- a/eth2/state_processing/src/slot_processable.rs +++ /dev/null @@ -1,70 +0,0 @@ -use crate::{EpochProcessable, EpochProcessingError}; -use types::{beacon_state::BeaconStateError, BeaconState, ChainSpec, Hash256}; - -#[derive(Debug, PartialEq)] -pub enum Error { - BeaconStateError(BeaconStateError), - EpochProcessingError(EpochProcessingError), -} - -pub trait SlotProcessable { - fn per_slot_processing( - &mut self, - previous_block_root: Hash256, - spec: &ChainSpec, - ) -> Result<(), Error>; -} - -impl SlotProcessable for BeaconState -where - BeaconState: EpochProcessable, -{ - fn per_slot_processing( - &mut self, - previous_block_root: Hash256, - spec: &ChainSpec, - ) -> Result<(), Error> { - if (self.slot + 1) % spec.epoch_length == 0 { - self.per_epoch_processing(spec)?; - } - - self.slot += 1; - - self.latest_randao_mixes[self.slot.as_usize() % spec.latest_randao_mixes_length] = - self.latest_randao_mixes[(self.slot.as_usize() - 1) % spec.latest_randao_mixes_length]; - - // Block roots. - self.latest_block_roots[(self.slot.as_usize() - 1) % spec.latest_block_roots_length] = - previous_block_root; - - if self.slot.as_usize() % spec.latest_block_roots_length == 0 { - let root = merkle_root(&self.latest_block_roots[..]); - self.batched_block_roots.push(root); - } - Ok(()) - } -} - -fn merkle_root(_input: &[Hash256]) -> Hash256 { - Hash256::zero() -} - -impl From for Error { - fn from(e: BeaconStateError) -> Error { - Error::BeaconStateError(e) - } -} - -impl From for Error { - fn from(e: EpochProcessingError) -> Error { - Error::EpochProcessingError(e) - } -} - -#[cfg(test)] -mod tests { - #[test] - fn it_works() { - assert_eq!(2 + 2, 4); - } -} diff --git a/eth2/types/Cargo.toml b/eth2/types/Cargo.toml index f51e20236..ea1343dba 100644 --- a/eth2/types/Cargo.toml +++ b/eth2/types/Cargo.toml @@ -7,9 +7,10 @@ edition = "2018" [dependencies] bls = { path = "../utils/bls" } boolean-bitfield = { path = "../utils/boolean-bitfield" } -ethereum-types = "0.4.0" +ethereum-types = "0.5" hashing = { path = "../utils/hashing" } honey-badger-split = { path = "../utils/honey-badger-split" } +int_to_bytes = { path = "../utils/int_to_bytes" } log = "0.4" rayon = "1.0" rand = "0.5.5" @@ -20,6 +21,7 @@ slog = "^2.2.3" ssz = { path = "../utils/ssz" } ssz_derive = { path = "../utils/ssz_derive" } swap_or_not_shuffle = { path = "../utils/swap_or_not_shuffle" } +test_random_derive = { path = "../utils/test_random_derive" } [dev-dependencies] env_logger = "0.6.0" diff --git a/eth2/types/src/attestation.rs b/eth2/types/src/attestation.rs index 7388a8e49..03ef8ce48 100644 --- a/eth2/types/src/attestation.rs +++ b/eth2/types/src/attestation.rs @@ -1,11 +1,15 @@ -use super::{AggregatePublicKey, AggregateSignature, AttestationData, Bitfield, Hash256}; +use super::{AggregateSignature, AttestationData, Bitfield}; use crate::test_utils::TestRandom; use rand::RngCore; use serde_derive::Serialize; -use ssz::{hash, TreeHash}; -use ssz_derive::{Decode, Encode}; +use ssz::TreeHash; +use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; +use test_random_derive::TestRandom; -#[derive(Debug, Clone, PartialEq, Serialize, Encode, 
Decode)] +/// Details an attestation that can be slashable. +/// +/// Spec v0.4.0 +#[derive(Debug, Clone, PartialEq, Serialize, Encode, Decode, TreeHash, TestRandom, SignedRoot)] pub struct Attestation { pub aggregation_bitfield: Bitfield, pub data: AttestationData, @@ -13,54 +17,11 @@ pub struct Attestation { pub aggregate_signature: AggregateSignature, } -impl Attestation { - pub fn canonical_root(&self) -> Hash256 { - Hash256::from(&self.hash_tree_root()[..]) - } - - pub fn signable_message(&self, custody_bit: bool) -> Vec { - self.data.signable_message(custody_bit) - } - - pub fn verify_signature( - &self, - group_public_key: &AggregatePublicKey, - custody_bit: bool, - // TODO: use domain. - _domain: u64, - ) -> bool { - self.aggregate_signature - .verify(&self.signable_message(custody_bit), group_public_key) - } -} - -impl TreeHash for Attestation { - fn hash_tree_root_internal(&self) -> Vec { - let mut result: Vec = vec![]; - result.append(&mut self.aggregation_bitfield.hash_tree_root_internal()); - result.append(&mut self.data.hash_tree_root_internal()); - result.append(&mut self.custody_bitfield.hash_tree_root_internal()); - result.append(&mut self.aggregate_signature.hash_tree_root_internal()); - hash(&result) - } -} - -impl TestRandom for Attestation { - fn random_for_test(rng: &mut T) -> Self { - Self { - data: <_>::random_for_test(rng), - aggregation_bitfield: <_>::random_for_test(rng), - custody_bitfield: <_>::random_for_test(rng), - aggregate_signature: <_>::random_for_test(rng), - } - } -} - #[cfg(test)] mod tests { use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable}; + use ssz::{ssz_encode, Decodable, TreeHash}; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/attestation_data.rs b/eth2/types/src/attestation_data.rs index 7edb0b72b..1dfadfb1d 100644 --- a/eth2/types/src/attestation_data.rs +++ b/eth2/types/src/attestation_data.rs @@ -1,28 +1,33 @@ use crate::test_utils::TestRandom; -use crate::{AttestationDataAndCustodyBit, Crosslink, Epoch, Hash256, Slot}; +use crate::{Crosslink, Epoch, Hash256, Slot}; use rand::RngCore; use serde_derive::Serialize; -use ssz::{hash, TreeHash}; -use ssz_derive::{Decode, Encode}; +use ssz::TreeHash; +use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; +use test_random_derive::TestRandom; -pub const SSZ_ATTESTION_DATA_LENGTH: usize = { - 8 + // slot - 8 + // shard - 32 + // beacon_block_hash - 32 + // epoch_boundary_root - 32 + // shard_block_hash - 32 + // latest_crosslink_hash - 8 + // justified_epoch - 32 // justified_block_root -}; - -#[derive(Debug, Clone, PartialEq, Default, Serialize, Hash, Encode, Decode)] +/// The data upon which an attestation is based. 
+/// +/// Spec v0.4.0 +#[derive( + Debug, + Clone, + PartialEq, + Default, + Serialize, + Hash, + Encode, + Decode, + TreeHash, + TestRandom, + SignedRoot, +)] pub struct AttestationData { pub slot: Slot, pub shard: u64, pub beacon_block_root: Hash256, pub epoch_boundary_root: Hash256, - pub shard_block_root: Hash256, + pub crosslink_data_root: Hash256, pub latest_crosslink: Crosslink, pub justified_epoch: Epoch, pub justified_block_root: Hash256, @@ -30,55 +35,11 @@ pub struct AttestationData { impl Eq for AttestationData {} -impl AttestationData { - pub fn canonical_root(&self) -> Hash256 { - Hash256::from(&self.hash_tree_root()[..]) - } - - pub fn signable_message(&self, custody_bit: bool) -> Vec { - let attestation_data_and_custody_bit = AttestationDataAndCustodyBit { - data: self.clone(), - custody_bit, - }; - attestation_data_and_custody_bit.hash_tree_root() - } -} - -impl TreeHash for AttestationData { - fn hash_tree_root_internal(&self) -> Vec { - let mut result: Vec = vec![]; - result.append(&mut self.slot.hash_tree_root_internal()); - result.append(&mut self.shard.hash_tree_root_internal()); - result.append(&mut self.beacon_block_root.hash_tree_root_internal()); - result.append(&mut self.epoch_boundary_root.hash_tree_root_internal()); - result.append(&mut self.shard_block_root.hash_tree_root_internal()); - result.append(&mut self.latest_crosslink.hash_tree_root_internal()); - result.append(&mut self.justified_epoch.hash_tree_root_internal()); - result.append(&mut self.justified_block_root.hash_tree_root_internal()); - hash(&result) - } -} - -impl TestRandom for AttestationData { - fn random_for_test(rng: &mut T) -> Self { - Self { - slot: <_>::random_for_test(rng), - shard: <_>::random_for_test(rng), - beacon_block_root: <_>::random_for_test(rng), - epoch_boundary_root: <_>::random_for_test(rng), - shard_block_root: <_>::random_for_test(rng), - latest_crosslink: <_>::random_for_test(rng), - justified_epoch: <_>::random_for_test(rng), - justified_block_root: <_>::random_for_test(rng), - } - } -} - #[cfg(test)] mod tests { use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable}; + use ssz::{ssz_encode, Decodable, TreeHash}; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/attestation_data_and_custody_bit.rs b/eth2/types/src/attestation_data_and_custody_bit.rs index 3f107be82..83018c194 100644 --- a/eth2/types/src/attestation_data_and_custody_bit.rs +++ b/eth2/types/src/attestation_data_and_custody_bit.rs @@ -2,31 +2,22 @@ use super::AttestationData; use crate::test_utils::TestRandom; use rand::RngCore; use serde_derive::Serialize; -use ssz::TreeHash; -use ssz_derive::{Decode, Encode}; +use ssz_derive::{Decode, Encode, TreeHash}; -#[derive(Debug, Clone, PartialEq, Default, Serialize, Encode, Decode)] +/// Used for pairing an attestation with a proof-of-custody. 
+/// +/// Spec v0.4.0 +#[derive(Debug, Clone, PartialEq, Default, Serialize, Encode, Decode, TreeHash)] pub struct AttestationDataAndCustodyBit { pub data: AttestationData, pub custody_bit: bool, } -impl TreeHash for AttestationDataAndCustodyBit { - fn hash_tree_root_internal(&self) -> Vec { - let mut result: Vec = vec![]; - result.append(&mut self.data.hash_tree_root_internal()); - // TODO: add bool ssz - // result.append(custody_bit.hash_tree_root_internal()); - ssz::hash(&result) - } -} - impl TestRandom for AttestationDataAndCustodyBit { fn random_for_test(rng: &mut T) -> Self { Self { data: <_>::random_for_test(rng), - // TODO: deal with bools - custody_bit: false, + custody_bit: <_>::random_for_test(rng), } } } @@ -35,7 +26,7 @@ impl TestRandom for AttestationDataAndCustodyBit { mod test { use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable}; + use ssz::{ssz_encode, Decodable, TreeHash}; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/attester_slashing.rs b/eth2/types/src/attester_slashing.rs index f84998324..1cb671960 100644 --- a/eth2/types/src/attester_slashing.rs +++ b/eth2/types/src/attester_slashing.rs @@ -1,38 +1,27 @@ use crate::{test_utils::TestRandom, SlashableAttestation}; use rand::RngCore; use serde_derive::Serialize; -use ssz::{hash, TreeHash}; -use ssz_derive::{Decode, Encode}; +use ssz_derive::{Decode, Encode, TreeHash}; +use test_random_derive::TestRandom; -#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)] +mod builder; + +pub use builder::AttesterSlashingBuilder; + +/// Two conflicting attestations. +/// +/// Spec v0.4.0 +#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] pub struct AttesterSlashing { pub slashable_attestation_1: SlashableAttestation, pub slashable_attestation_2: SlashableAttestation, } -impl TreeHash for AttesterSlashing { - fn hash_tree_root_internal(&self) -> Vec { - let mut result: Vec = vec![]; - result.append(&mut self.slashable_attestation_1.hash_tree_root_internal()); - result.append(&mut self.slashable_attestation_2.hash_tree_root_internal()); - hash(&result) - } -} - -impl TestRandom for AttesterSlashing { - fn random_for_test(rng: &mut T) -> Self { - Self { - slashable_attestation_1: <_>::random_for_test(rng), - slashable_attestation_2: <_>::random_for_test(rng), - } - } -} - #[cfg(test)] mod tests { use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable}; + use ssz::{ssz_encode, Decodable, TreeHash}; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/attester_slashing/builder.rs b/eth2/types/src/attester_slashing/builder.rs new file mode 100644 index 000000000..05301f30b --- /dev/null +++ b/eth2/types/src/attester_slashing/builder.rs @@ -0,0 +1,87 @@ +use crate::*; +use ssz::TreeHash; + +/// Builds an `AttesterSlashing`. +pub struct AttesterSlashingBuilder(); + +impl AttesterSlashingBuilder { + /// Builds an `AttesterSlashing` that is a double vote. + /// + /// The `signer` function is used to sign the double-vote and accepts: + /// + /// - `validator_index: u64` + /// - `message: &[u8]` + /// - `epoch: Epoch` + /// - `domain: Domain` + /// + /// Where domain is a domain "constant" (e.g., `spec.domain_attestation`). 
+ pub fn double_vote(validator_indices: &[u64], signer: F) -> AttesterSlashing + where + F: Fn(u64, &[u8], Epoch, Domain) -> Signature, + { + let double_voted_slot = Slot::new(0); + let shard = 0; + let justified_epoch = Epoch::new(0); + let epoch = Epoch::new(0); + let hash_1 = Hash256::from_low_u64_le(1); + let hash_2 = Hash256::from_low_u64_le(2); + + let mut slashable_attestation_1 = SlashableAttestation { + validator_indices: validator_indices.to_vec(), + data: AttestationData { + slot: double_voted_slot, + shard, + beacon_block_root: hash_1, + epoch_boundary_root: hash_1, + crosslink_data_root: hash_1, + latest_crosslink: Crosslink { + epoch, + crosslink_data_root: hash_1, + }, + justified_epoch, + justified_block_root: hash_1, + }, + custody_bitfield: Bitfield::new(), + aggregate_signature: AggregateSignature::new(), + }; + + let mut slashable_attestation_2 = SlashableAttestation { + validator_indices: validator_indices.to_vec(), + data: AttestationData { + slot: double_voted_slot, + shard, + beacon_block_root: hash_2, + epoch_boundary_root: hash_2, + crosslink_data_root: hash_2, + latest_crosslink: Crosslink { + epoch, + crosslink_data_root: hash_2, + }, + justified_epoch, + justified_block_root: hash_2, + }, + custody_bitfield: Bitfield::new(), + aggregate_signature: AggregateSignature::new(), + }; + + let add_signatures = |attestation: &mut SlashableAttestation| { + for (i, validator_index) in validator_indices.iter().enumerate() { + let attestation_data_and_custody_bit = AttestationDataAndCustodyBit { + data: attestation.data.clone(), + custody_bit: attestation.custody_bitfield.get(i).unwrap(), + }; + let message = attestation_data_and_custody_bit.hash_tree_root(); + let signature = signer(*validator_index, &message[..], epoch, Domain::Attestation); + attestation.aggregate_signature.add(&signature); + } + }; + + add_signatures(&mut slashable_attestation_1); + add_signatures(&mut slashable_attestation_2); + + AttesterSlashing { + slashable_attestation_1, + slashable_attestation_2, + } + } +} diff --git a/eth2/types/src/beacon_block.rs b/eth2/types/src/beacon_block.rs index c252d03f7..2e1e24ef7 100644 --- a/eth2/types/src/beacon_block.rs +++ b/eth2/types/src/beacon_block.rs @@ -1,20 +1,24 @@ use crate::test_utils::TestRandom; -use crate::{BeaconBlockBody, ChainSpec, Eth1Data, Hash256, ProposalSignedData, Slot}; +use crate::{BeaconBlockBody, ChainSpec, Eth1Data, Hash256, Slot}; use bls::Signature; use rand::RngCore; use serde_derive::Serialize; -use ssz::{hash, TreeHash}; -use ssz_derive::{Decode, Encode}; +use ssz::TreeHash; +use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; +use test_random_derive::TestRandom; -#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)] +/// A block of the `BeaconChain`. +/// +/// Spec v0.4.0 +#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom, SignedRoot)] pub struct BeaconBlock { pub slot: Slot, pub parent_root: Hash256, pub state_root: Hash256, pub randao_reveal: Signature, pub eth1_data: Eth1Data, - pub signature: Signature, pub body: BeaconBlockBody, + pub signature: Signature, } impl BeaconBlock { @@ -35,56 +39,15 @@ impl BeaconBlock { attester_slashings: vec![], attestations: vec![], deposits: vec![], - exits: vec![], + voluntary_exits: vec![], + transfers: vec![], }, } } + /// Returns the `hash_tree_root` of the block. 
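A hypothetical usage sketch for the new builder, assuming the `types` crate items from this diff are in scope. The closure matches the `Fn(u64, &[u8], Epoch, Domain) -> Signature` bound; a real caller would sign `message` with the validator's BLS key, whereas here an empty signature (as used elsewhere in this diff) keeps the example minimal:

```rust
use types::*;

// Builds a double vote for validators 1, 2 and 3 with placeholder signatures.
fn dummy_double_vote() -> AttesterSlashing {
    let validator_indices = vec![1, 2, 3];

    AttesterSlashingBuilder::double_vote(
        &validator_indices,
        |_validator_index: u64, _message: &[u8], _epoch: Epoch, _domain: Domain| {
            // A real signer would produce a BLS signature over `message` for
            // the attestation domain; this placeholder returns an empty one.
            Signature::empty_signature()
        },
    )
}
```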
pub fn canonical_root(&self) -> Hash256 { - Hash256::from(&self.hash_tree_root()[..]) - } - - pub fn proposal_root(&self, spec: &ChainSpec) -> Hash256 { - let block_without_signature_root = { - let mut block_without_signature = self.clone(); - block_without_signature.signature = spec.empty_signature.clone(); - block_without_signature.canonical_root() - }; - - let proposal = ProposalSignedData { - slot: self.slot, - shard: spec.beacon_chain_shard_number, - block_root: block_without_signature_root, - }; - Hash256::from(&proposal.hash_tree_root()[..]) - } -} - -impl TreeHash for BeaconBlock { - fn hash_tree_root_internal(&self) -> Vec { - let mut result: Vec = vec![]; - result.append(&mut self.slot.hash_tree_root_internal()); - result.append(&mut self.parent_root.hash_tree_root_internal()); - result.append(&mut self.state_root.hash_tree_root_internal()); - result.append(&mut self.randao_reveal.hash_tree_root_internal()); - result.append(&mut self.eth1_data.hash_tree_root_internal()); - result.append(&mut self.signature.hash_tree_root_internal()); - result.append(&mut self.body.hash_tree_root_internal()); - hash(&result) - } -} - -impl TestRandom for BeaconBlock { - fn random_for_test(rng: &mut T) -> Self { - Self { - slot: <_>::random_for_test(rng), - parent_root: <_>::random_for_test(rng), - state_root: <_>::random_for_test(rng), - randao_reveal: <_>::random_for_test(rng), - eth1_data: <_>::random_for_test(rng), - signature: <_>::random_for_test(rng), - body: <_>::random_for_test(rng), - } + Hash256::from_slice(&self.hash_tree_root()[..]) } } @@ -92,7 +55,7 @@ impl TestRandom for BeaconBlock { mod tests { use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable}; + use ssz::{ssz_encode, Decodable, TreeHash}; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/beacon_block_body.rs b/eth2/types/src/beacon_block_body.rs index e051f5940..e7dec2e4b 100644 --- a/eth2/types/src/beacon_block_body.rs +++ b/eth2/types/src/beacon_block_body.rs @@ -1,48 +1,28 @@ -use super::{Attestation, AttesterSlashing, Deposit, Exit, ProposerSlashing}; +use super::{Attestation, AttesterSlashing, Deposit, ProposerSlashing, Transfer, VoluntaryExit}; use crate::test_utils::TestRandom; use rand::RngCore; use serde_derive::Serialize; -use ssz::{hash, TreeHash}; -use ssz_derive::{Decode, Encode}; +use ssz_derive::{Decode, Encode, TreeHash}; +use test_random_derive::TestRandom; -#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode)] +/// The body of a `BeaconChain` block, containing operations. 
+/// +/// Spec v0.4.0 +#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode, TreeHash, TestRandom)] pub struct BeaconBlockBody { pub proposer_slashings: Vec, pub attester_slashings: Vec, pub attestations: Vec, pub deposits: Vec, - pub exits: Vec, -} - -impl TreeHash for BeaconBlockBody { - fn hash_tree_root_internal(&self) -> Vec { - let mut result: Vec = vec![]; - result.append(&mut self.proposer_slashings.hash_tree_root_internal()); - result.append(&mut self.attester_slashings.hash_tree_root_internal()); - result.append(&mut self.attestations.hash_tree_root_internal()); - result.append(&mut self.deposits.hash_tree_root_internal()); - result.append(&mut self.exits.hash_tree_root_internal()); - hash(&result) - } -} - -impl TestRandom for BeaconBlockBody { - fn random_for_test(rng: &mut T) -> Self { - Self { - proposer_slashings: <_>::random_for_test(rng), - attester_slashings: <_>::random_for_test(rng), - attestations: <_>::random_for_test(rng), - deposits: <_>::random_for_test(rng), - exits: <_>::random_for_test(rng), - } - } + pub voluntary_exits: Vec, + pub transfers: Vec, } #[cfg(test)] mod tests { use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable}; + use ssz::{ssz_encode, Decodable, TreeHash}; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 21deb6fe7..809408b32 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -1,44 +1,59 @@ +use self::epoch_cache::EpochCache; use crate::test_utils::TestRandom; -use crate::{ - validator::StatusFlags, validator_registry::get_active_validator_indices, AttestationData, - Bitfield, ChainSpec, Crosslink, Deposit, Epoch, Eth1Data, Eth1DataVote, Fork, Hash256, - PendingAttestation, PublicKey, Signature, Slot, Validator, -}; +use crate::{validator_registry::get_active_validator_indices, *}; use bls::verify_proof_of_possession; +use helpers::*; use honey_badger_split::SplitExt; -use log::trace; +use int_to_bytes::int_to_bytes32; +use log::{debug, error, trace}; use rand::RngCore; +use rayon::prelude::*; use serde_derive::Serialize; -use ssz::{hash, TreeHash}; -use ssz_derive::{Decode, Encode}; -use swap_or_not_shuffle::get_permutated_index; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; +use std::collections::HashMap; +use swap_or_not_shuffle::shuffle_list; +pub use builder::BeaconStateBuilder; + +mod builder; +mod epoch_cache; +pub mod helpers; mod tests; +pub type Committee = Vec; +pub type CrosslinkCommittees = Vec<(Committee, u64)>; +pub type Shard = u64; +pub type CommitteeIndex = u64; +pub type AttestationDuty = (Slot, Shard, CommitteeIndex); +pub type AttestationDutyMap = HashMap; +pub type ShardCommitteeIndexMap = HashMap; + +pub const CACHED_EPOCHS: usize = 3; + +#[derive(Debug, PartialEq, Clone, Copy)] +pub enum RelativeEpoch { + Previous, + Current, + Next, +} + #[derive(Debug, PartialEq)] -pub enum BeaconStateError { +pub enum Error { EpochOutOfBounds, + /// The supplied shard is unknown. It may be larger than the maximum shard count, or not in a + /// committee for the given slot. 
+ SlotOutOfBounds, + ShardOutOfBounds, UnableToShuffle, + UnknownValidator, + InvalidBitfield, InsufficientRandaoMixes, InsufficientValidators, InsufficientBlockRoots, InsufficientIndexRoots, InsufficientAttestations, InsufficientCommittees, -} - -#[derive(Debug, PartialEq)] -pub enum InclusionError { - /// The validator did not participate in an attestation in this period. - NoAttestationsForValidator, - AttestationParticipantsError(AttestationParticipantsError), -} - -#[derive(Debug, PartialEq)] -pub enum AttestationParticipantsError { - /// There is no committee for the given shard in the given epoch. - NoCommitteeForShard, - BeaconStateError(BeaconStateError), + EpochCacheUninitialized(RelativeEpoch), } macro_rules! safe_add_assign { @@ -52,7 +67,7 @@ macro_rules! safe_sub_assign { }; } -#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode)] +#[derive(Debug, PartialEq, Clone, Default, Serialize)] pub struct BeaconState { // Misc pub slot: Slot, @@ -66,12 +81,12 @@ pub struct BeaconState { // Randomness and committees pub latest_randao_mixes: Vec, - pub previous_epoch_start_shard: u64, - pub current_epoch_start_shard: u64, - pub previous_calculation_epoch: Epoch, - pub current_calculation_epoch: Epoch, - pub previous_epoch_seed: Hash256, - pub current_epoch_seed: Hash256, + pub previous_shuffling_start_shard: u64, + pub current_shuffling_start_shard: u64, + pub previous_shuffling_epoch: Epoch, + pub current_shuffling_epoch: Epoch, + pub previous_shuffling_seed: Hash256, + pub current_shuffling_seed: Hash256, // Finality pub previous_justified_epoch: Epoch, @@ -82,30 +97,35 @@ pub struct BeaconState { // Recent state pub latest_crosslinks: Vec, pub latest_block_roots: Vec, - pub latest_index_roots: Vec, - pub latest_penalized_balances: Vec, + pub latest_active_index_roots: Vec, + pub latest_slashed_balances: Vec, pub latest_attestations: Vec, pub batched_block_roots: Vec, // Ethereum 1.0 chain data pub latest_eth1_data: Eth1Data, pub eth1_data_votes: Vec, + pub deposit_index: u64, + + // Caching (not in the spec) + pub cache_index_offset: usize, + pub caches: Vec, } impl BeaconState { /// Produce the first state of the Beacon Chain. 
- pub fn genesis( + pub fn genesis_without_validators( genesis_time: u64, - initial_validator_deposits: Vec, latest_eth1_data: Eth1Data, spec: &ChainSpec, - ) -> Result { + ) -> Result { + debug!("Creating genesis state (without validator processing)."); let initial_crosslink = Crosslink { epoch: spec.genesis_epoch, - shard_block_root: spec.zero_hash, + crosslink_data_root: spec.zero_hash, }; - let mut genesis_state = BeaconState { + Ok(BeaconState { /* * Misc */ @@ -128,12 +148,12 @@ impl BeaconState { * Randomness and committees */ latest_randao_mixes: vec![spec.zero_hash; spec.latest_randao_mixes_length as usize], - previous_epoch_start_shard: spec.genesis_start_shard, - current_epoch_start_shard: spec.genesis_start_shard, - previous_calculation_epoch: spec.genesis_epoch, - current_calculation_epoch: spec.genesis_epoch, - previous_epoch_seed: spec.zero_hash, - current_epoch_seed: spec.zero_hash, + previous_shuffling_start_shard: spec.genesis_start_shard, + current_shuffling_start_shard: spec.genesis_start_shard, + previous_shuffling_epoch: spec.genesis_epoch, + current_shuffling_epoch: spec.genesis_epoch, + previous_shuffling_seed: spec.zero_hash, + current_shuffling_seed: spec.zero_hash, /* * Finality @@ -148,8 +168,11 @@ impl BeaconState { */ latest_crosslinks: vec![initial_crosslink; spec.shard_count as usize], latest_block_roots: vec![spec.zero_hash; spec.latest_block_roots_length as usize], - latest_index_roots: vec![spec.zero_hash; spec.latest_index_roots_length as usize], - latest_penalized_balances: vec![0; spec.latest_penalized_exit_length as usize], + latest_active_index_roots: vec![ + spec.zero_hash; + spec.latest_active_index_roots_length as usize + ], + latest_slashed_balances: vec![0; spec.latest_slashed_exit_length as usize], latest_attestations: vec![], batched_block_roots: vec![], @@ -158,17 +181,36 @@ impl BeaconState { */ latest_eth1_data, eth1_data_votes: vec![], - }; + deposit_index: 0, - for deposit in initial_validator_deposits { - let _index = genesis_state.process_deposit( - deposit.deposit_data.deposit_input.pubkey, - deposit.deposit_data.amount, - deposit.deposit_data.deposit_input.proof_of_possession, - deposit.deposit_data.deposit_input.withdrawal_credentials, - spec, - ); - } + /* + * Caching (not in spec) + */ + cache_index_offset: 0, + caches: vec![EpochCache::empty(); CACHED_EPOCHS], + }) + } + + /// Produce the first state of the Beacon Chain. 
+ pub fn genesis( + genesis_time: u64, + initial_validator_deposits: Vec, + latest_eth1_data: Eth1Data, + spec: &ChainSpec, + ) -> Result { + let mut genesis_state = + BeaconState::genesis_without_validators(genesis_time, latest_eth1_data, spec)?; + + debug!("Processing genesis deposits..."); + + let deposit_data = initial_validator_deposits + .par_iter() + .map(|deposit| &deposit.deposit_data) + .collect(); + + genesis_state.process_deposits(deposit_data, spec); + + trace!("Processed genesis deposits."); for validator_index in 0..genesis_state.validator_registry.len() { if genesis_state.get_effective_balance(validator_index, spec) >= spec.max_deposit_amount @@ -177,108 +219,593 @@ impl BeaconState { } } + genesis_state.deposit_index = initial_validator_deposits.len() as u64; + let genesis_active_index_root = hash_tree_root(get_active_validator_indices( &genesis_state.validator_registry, spec.genesis_epoch, )); - genesis_state.latest_index_roots = - vec![genesis_active_index_root; spec.latest_index_roots_length]; - genesis_state.current_epoch_seed = genesis_state.generate_seed(spec.genesis_epoch, spec)?; + genesis_state.latest_active_index_roots = + vec![genesis_active_index_root; spec.latest_active_index_roots_length]; + genesis_state.current_shuffling_seed = + genesis_state.generate_seed(spec.genesis_epoch, spec)?; Ok(genesis_state) } - /// Return the tree hash root for this `BeaconState`. - /// - /// Spec v0.2.0 + /// Returns the `hash_tree_root` of the state. pub fn canonical_root(&self) -> Hash256 { - Hash256::from(&self.hash_tree_root()[..]) + Hash256::from_slice(&self.hash_tree_root()[..]) + } + + /// Build an epoch cache, unless it is has already been built. + pub fn build_epoch_cache( + &mut self, + relative_epoch: RelativeEpoch, + spec: &ChainSpec, + ) -> Result<(), Error> { + let cache_index = self.cache_index(relative_epoch); + + if self.caches[cache_index].initialized { + Ok(()) + } else { + self.force_build_epoch_cache(relative_epoch, spec) + } + } + + /// Always builds an epoch cache, even if it is already initialized. + pub fn force_build_epoch_cache( + &mut self, + relative_epoch: RelativeEpoch, + spec: &ChainSpec, + ) -> Result<(), Error> { + let epoch = self.absolute_epoch(relative_epoch, spec); + let cache_index = self.cache_index(relative_epoch); + + self.caches[cache_index] = EpochCache::initialized(&self, epoch, spec)?; + + Ok(()) + } + + /// Converts a `RelativeEpoch` into an `Epoch` with respect to the epoch of this state. + fn absolute_epoch(&self, relative_epoch: RelativeEpoch, spec: &ChainSpec) -> Epoch { + match relative_epoch { + RelativeEpoch::Previous => self.previous_epoch(spec), + RelativeEpoch::Current => self.current_epoch(spec), + RelativeEpoch::Next => self.next_epoch(spec), + } + } + + /// Converts an `Epoch` into a `RelativeEpoch` with respect to the epoch of this state. + /// + /// Returns an error if the given `epoch` not "previous", "current" or "next" compared to the + /// epoch of this tate. + fn relative_epoch(&self, epoch: Epoch, spec: &ChainSpec) -> Result { + match epoch { + e if e == self.current_epoch(spec) => Ok(RelativeEpoch::Current), + e if e == self.previous_epoch(spec) => Ok(RelativeEpoch::Previous), + e if e == self.next_epoch(spec) => Ok(RelativeEpoch::Next), + _ => Err(Error::EpochOutOfBounds), + } + } + + /// Advances the cache for this state into the next epoch. + /// + /// This should be used if the `slot` of this state is advanced beyond an epoch boundary. 
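Since the committee accessors read from these per-epoch caches (and return `Error::EpochCacheUninitialized(..)` otherwise), callers are expected to build the relevant cache first. A hedged usage sketch, assuming the `types` crate items from this diff are in scope:

```rust
use types::*;

fn print_committees(state: &mut BeaconState, slot: Slot, spec: &ChainSpec) {
    // Build the cache for the current epoch (a no-op if already initialized).
    state
        .build_epoch_cache(RelativeEpoch::Current, spec)
        .expect("cache should build for a well-formed state");

    // With the cache in place, the cached lookup succeeds.
    let committees = state
        .get_crosslink_committees_at_slot(slot, spec)
        .expect("slot should fall in a cached epoch");

    for (committee, shard) in committees {
        println!("shard {}: {} validators", shard, committee.len());
    }
}
```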
+ /// + /// The `Next` cache becomes the `Current` and the `Current` cache becomes the `Previous`. The + /// `Previous` cache is abandoned. + /// + /// Care should be taken to update the `Current` epoch in case a registry update is performed + /// -- `Next` epoch is always _without_ a registry change. If you perform a registry update, + /// you should rebuild the `Current` cache so it uses the new seed. + pub fn advance_caches(&mut self) { + self.drop_cache(RelativeEpoch::Previous); + + self.cache_index_offset += 1; + self.cache_index_offset %= CACHED_EPOCHS; + } + + /// Removes the specified cache and sets it to uninitialized. + pub fn drop_cache(&mut self, relative_epoch: RelativeEpoch) { + let previous_cache_index = self.cache_index(relative_epoch); + self.caches[previous_cache_index] = EpochCache::empty(); + } + + /// Returns the index of `self.caches` for some `RelativeEpoch`. + fn cache_index(&self, relative_epoch: RelativeEpoch) -> usize { + let base_index = match relative_epoch { + RelativeEpoch::Current => 1, + RelativeEpoch::Previous => 0, + RelativeEpoch::Next => 2, + }; + + (base_index + self.cache_index_offset) % CACHED_EPOCHS + } + + /// Returns the cache for some `RelativeEpoch`. Returns an error if the cache has not been + /// initialized. + fn cache(&self, relative_epoch: RelativeEpoch) -> Result<&EpochCache, Error> { + let cache = &self.caches[self.cache_index(relative_epoch)]; + + if cache.initialized { + Ok(cache) + } else { + Err(Error::EpochCacheUninitialized(relative_epoch)) + } } /// The epoch corresponding to `self.slot`. /// - /// Spec v0.2.0 + /// Spec v0.4.0 pub fn current_epoch(&self, spec: &ChainSpec) -> Epoch { - self.slot.epoch(spec.epoch_length) + self.slot.epoch(spec.slots_per_epoch) } /// The epoch prior to `self.current_epoch()`. /// - /// Spec v0.2.0 + /// If the current epoch is the genesis epoch, the genesis_epoch is returned. + /// + /// Spec v0.4.0 pub fn previous_epoch(&self, spec: &ChainSpec) -> Epoch { let current_epoch = self.current_epoch(&spec); - if current_epoch == spec.genesis_epoch { - current_epoch - } else { - current_epoch - 1 - } + std::cmp::max(current_epoch - 1, spec.genesis_epoch) } /// The epoch following `self.current_epoch()`. /// - /// Spec v0.2.0 + /// Spec v0.4.0 pub fn next_epoch(&self, spec: &ChainSpec) -> Epoch { self.current_epoch(spec).saturating_add(1_u64) } /// The first slot of the epoch corresponding to `self.slot`. /// - /// Spec v0.2.0 + /// Spec v0.4.0 pub fn current_epoch_start_slot(&self, spec: &ChainSpec) -> Slot { - self.current_epoch(spec).start_slot(spec.epoch_length) + self.current_epoch(spec).start_slot(spec.slots_per_epoch) } - /// The first slot of the epoch preceeding the one corresponding to `self.slot`. + /// The first slot of the epoch preceding the one corresponding to `self.slot`. /// - /// Spec v0.2.0 + /// Spec v0.4.0 pub fn previous_epoch_start_slot(&self, spec: &ChainSpec) -> Slot { - self.previous_epoch(spec).start_slot(spec.epoch_length) + self.previous_epoch(spec).start_slot(spec.slots_per_epoch) } - /// Return the number of committees in one epoch. + /// Return the number of committees in the previous epoch. /// - /// TODO: this should probably be a method on `ChainSpec`. 
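The rotation is pure index arithmetic: `advance_caches` bumps `cache_index_offset` so the array slot that held the `Next` cache is subsequently read as `Current`, and so on. A small standalone model of `cache_index` (plain types only, mirroring the code above rather than calling it):

```rust
const CACHED_EPOCHS: usize = 3;

#[derive(Debug, Clone, Copy)]
enum RelativeEpoch {
    Previous,
    Current,
    Next,
}

// Mirrors the base-index-plus-offset arithmetic in `cache_index`.
fn cache_index(relative_epoch: RelativeEpoch, offset: usize) -> usize {
    let base_index = match relative_epoch {
        RelativeEpoch::Previous => 0,
        RelativeEpoch::Current => 1,
        RelativeEpoch::Next => 2,
    };
    (base_index + offset) % CACHED_EPOCHS
}

fn main() {
    let mut offset = 0;
    // Before advancing: Previous/Current/Next map to array slots 0/1/2.
    assert_eq!(cache_index(RelativeEpoch::Current, offset), 1);

    // `advance_caches` drops the old Previous cache and bumps the offset...
    offset = (offset + 1) % CACHED_EPOCHS;

    // ...so the slot that held Next (2) is now read as Current, and the slot
    // that held Current (1) is now read as Previous.
    assert_eq!(cache_index(RelativeEpoch::Current, offset), 2);
    assert_eq!(cache_index(RelativeEpoch::Previous, offset), 1);
}
```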
+ /// Spec v0.4.0 + fn get_previous_epoch_committee_count(&self, spec: &ChainSpec) -> u64 { + let previous_active_validators = + get_active_validator_indices(&self.validator_registry, self.previous_shuffling_epoch); + spec.get_epoch_committee_count(previous_active_validators.len()) + } + + /// Return the number of committees in the current epoch. /// - /// Spec v0.2.0 - pub fn get_epoch_committee_count( + /// Spec v0.4.0 + pub fn get_current_epoch_committee_count(&self, spec: &ChainSpec) -> u64 { + let current_active_validators = + get_active_validator_indices(&self.validator_registry, self.current_shuffling_epoch); + spec.get_epoch_committee_count(current_active_validators.len()) + } + + /// Return the number of committees in the next epoch. + /// + /// Spec v0.4.0 + pub fn get_next_epoch_committee_count(&self, spec: &ChainSpec) -> u64 { + let next_active_validators = + get_active_validator_indices(&self.validator_registry, self.next_epoch(spec)); + spec.get_epoch_committee_count(next_active_validators.len()) + } + + /// Returns the crosslink committees for some slot. + /// + /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. + /// + /// Spec v0.4.0 + pub fn get_crosslink_committees_at_slot( &self, - active_validator_count: usize, + slot: Slot, spec: &ChainSpec, - ) -> u64 { - std::cmp::max( - 1, - std::cmp::min( - spec.shard_count / spec.epoch_length, - active_validator_count as u64 / spec.epoch_length / spec.target_committee_size, - ), - ) * spec.epoch_length + ) -> Result<&CrosslinkCommittees, Error> { + let epoch = slot.epoch(spec.slots_per_epoch); + let relative_epoch = self.relative_epoch(epoch, spec)?; + let cache = self.cache(relative_epoch)?; + + let slot_offset = slot - epoch.start_slot(spec.slots_per_epoch); + + Ok(&cache.committees[slot_offset.as_usize()]) + } + + /// Return the block root at a recent `slot`. + /// + /// Spec v0.4.0 + pub fn get_block_root(&self, slot: Slot, spec: &ChainSpec) -> Option<&Hash256> { + if (self.slot <= slot + spec.latest_block_roots_length as u64) && (slot < self.slot) { + self.latest_block_roots + .get(slot.as_usize() % spec.latest_block_roots_length) + } else { + None + } + } + + /// Return the randao mix at a recent ``epoch``. + /// + /// Spec v0.4.0 + pub fn get_randao_mix(&self, epoch: Epoch, spec: &ChainSpec) -> Option<&Hash256> { + let current_epoch = self.current_epoch(spec); + + if (current_epoch - (spec.latest_randao_mixes_length as u64) < epoch) + & (epoch <= current_epoch) + { + self.latest_randao_mixes + .get(epoch.as_usize() % spec.latest_randao_mixes_length) + } else { + None + } + } + + /// Return the index root at a recent `epoch`. + /// + /// Spec v0.4.0 + pub fn get_active_index_root(&self, epoch: Epoch, spec: &ChainSpec) -> Option { + let current_epoch = self.current_epoch(spec); + + if (current_epoch - spec.latest_active_index_roots_length as u64 + + spec.activation_exit_delay + < epoch) + & (epoch <= current_epoch + spec.activation_exit_delay) + { + Some( + self.latest_active_index_roots + [epoch.as_usize() % spec.latest_active_index_roots_length], + ) + } else { + None + } + } + + /// Generate a seed for the given `epoch`. + /// + /// Spec v0.4.0 + pub fn generate_seed(&self, epoch: Epoch, spec: &ChainSpec) -> Result { + let mut input = self + .get_randao_mix(epoch - spec.min_seed_lookahead, spec) + .ok_or_else(|| Error::InsufficientRandaoMixes)? + .as_bytes() + .to_vec(); + + input.append( + &mut self + .get_active_index_root(epoch, spec) + .ok_or_else(|| Error::InsufficientIndexRoots)? 
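For `get_block_root` above, the new guard means a root is only returned for slots strictly before the state's slot and no further back than the ring buffer's length. A standalone model of just that bounds check:

```rust
// Mirrors the guard added to `get_block_root` in this diff.
fn block_root_available(state_slot: u64, slot: u64, latest_block_roots_length: u64) -> bool {
    (state_slot <= slot + latest_block_roots_length) && (slot < state_slot)
}

fn main() {
    let len = 8192;
    assert!(block_root_available(100, 99, len)); // most recent stored slot
    assert!(!block_root_available(100, 100, len)); // the current slot itself
    assert!(!block_root_available(10_000, 100, len)); // older than the ring buffer
}
```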
+ .as_bytes() + .to_vec(), + ); + + input.append(&mut int_to_bytes32(epoch.as_u64())); + + Ok(Hash256::from_slice(&hash(&input[..])[..])) + } + + /// Returns the beacon proposer index for the `slot`. + /// + /// If the state does not contain an index for a beacon proposer at the requested `slot`, then `None` is returned. + /// + /// Spec v0.4.0 + pub fn get_beacon_proposer_index(&self, slot: Slot, spec: &ChainSpec) -> Result { + let committees = self.get_crosslink_committees_at_slot(slot, spec)?; + trace!( + "get_beacon_proposer_index: slot: {}, committees_count: {}", + slot, + committees.len() + ); + committees + .first() + .ok_or(Error::InsufficientValidators) + .and_then(|(first_committee, _)| { + let index = slot + .as_usize() + .checked_rem(first_committee.len()) + .ok_or(Error::InsufficientValidators)?; + Ok(first_committee[index]) + }) + } + + /// Returns the list of validator indices which participiated in the attestation. + /// + /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. + /// + /// Spec v0.4.0 + pub fn get_attestation_participants( + &self, + attestation_data: &AttestationData, + bitfield: &Bitfield, + spec: &ChainSpec, + ) -> Result, Error> { + let epoch = attestation_data.slot.epoch(spec.slots_per_epoch); + let relative_epoch = self.relative_epoch(epoch, spec)?; + let cache = self.cache(relative_epoch)?; + + let (committee_slot_index, committee_index) = cache + .shard_committee_index_map + .get(&attestation_data.shard) + .ok_or_else(|| Error::ShardOutOfBounds)?; + let (committee, shard) = &cache.committees[*committee_slot_index][*committee_index]; + + assert_eq!(*shard, attestation_data.shard, "Bad epoch cache build."); + + if !verify_bitfield_length(&bitfield, committee.len()) { + return Err(Error::InvalidBitfield); + } + + let mut participants = vec![]; + for (i, validator_index) in committee.iter().enumerate() { + if bitfield.get(i).unwrap() { + participants.push(*validator_index); + } + } + + Ok(participants) + } + + /// Return the effective balance (also known as "balance at stake") for a validator with the given ``index``. + /// + /// Spec v0.4.0 + pub fn get_effective_balance(&self, validator_index: usize, spec: &ChainSpec) -> u64 { + std::cmp::min( + self.validator_balances[validator_index], + spec.max_deposit_amount, + ) + } + + /// Return the combined effective balance of an array of validators. + /// + /// Spec v0.4.0 + pub fn get_total_balance(&self, validator_indices: &[usize], spec: &ChainSpec) -> u64 { + validator_indices + .iter() + .fold(0, |acc, i| acc + self.get_effective_balance(*i, spec)) + } + + /// Return the epoch at which an activation or exit triggered in ``epoch`` takes effect. + /// + /// Spec v0.4.0 + pub fn get_delayed_activation_exit_epoch(&self, epoch: Epoch, spec: &ChainSpec) -> Epoch { + epoch + 1 + spec.activation_exit_delay + } + + /// Process multiple deposits in sequence. + /// + /// Builds a hashmap of validator pubkeys to validator index and passes it to each successive + /// call to `process_deposit(..)`. This requires much less computation than successive calls to + /// `process_deposits(..)` without the hashmap. 
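The proposer selection in `get_beacon_proposer_index` reduces to indexing the slot's first committee by the slot modulo the committee length. A standalone model of that selection:

```rust
// Mirrors the selection logic: first committee of the slot, indexed by
// `slot % committee_len`; an empty committee yields no proposer.
fn proposer_index(first_committee: &[usize], slot: u64) -> Option<usize> {
    let index = (slot as usize).checked_rem(first_committee.len())?;
    Some(first_committee[index])
}

fn main() {
    let committee = vec![11, 22, 33, 44];
    assert_eq!(proposer_index(&committee, 6), Some(33)); // 6 % 4 == 2
    assert_eq!(proposer_index(&[], 6), None);
}
```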
+ /// + /// Spec v0.4.0 + pub fn process_deposits( + &mut self, + deposits: Vec<&DepositData>, + spec: &ChainSpec, + ) -> Vec { + let mut added_indices = vec![]; + let mut pubkey_map: HashMap = HashMap::new(); + + for (i, validator) in self.validator_registry.iter().enumerate() { + pubkey_map.insert(validator.pubkey.clone(), i); + } + + for deposit_data in deposits { + let result = self.process_deposit( + deposit_data.deposit_input.pubkey.clone(), + deposit_data.amount, + deposit_data.deposit_input.proof_of_possession.clone(), + deposit_data.deposit_input.withdrawal_credentials, + Some(&pubkey_map), + spec, + ); + if let Ok(index) = result { + added_indices.push(index); + } + } + added_indices + } + + /// Process a validator deposit, returning the validator index if the deposit is valid. + /// + /// Optionally accepts a hashmap of all validator pubkeys to their validator index. Without + /// this hashmap, each call to `process_deposits` requires an iteration though + /// `self.validator_registry`. This becomes highly inefficient at scale. + /// + /// Spec v0.4.0 + pub fn process_deposit( + &mut self, + pubkey: PublicKey, + amount: u64, + proof_of_possession: Signature, + withdrawal_credentials: Hash256, + pubkey_map: Option<&HashMap>, + spec: &ChainSpec, + ) -> Result { + // TODO: update proof of possession to function written above ( + // requires bls::create_proof_of_possession to be updated + // + // https://github.com/sigp/lighthouse/issues/239 + if !verify_proof_of_possession(&proof_of_possession, &pubkey) { + return Err(()); + } + + let validator_index = if let Some(pubkey_map) = pubkey_map { + pubkey_map.get(&pubkey).and_then(|i| Some(*i)) + } else { + self.validator_registry + .iter() + .position(|v| v.pubkey == pubkey) + }; + + if let Some(index) = validator_index { + if self.validator_registry[index].withdrawal_credentials == withdrawal_credentials { + safe_add_assign!(self.validator_balances[index], amount); + Ok(index) + } else { + Err(()) + } + } else { + let validator = Validator { + pubkey, + withdrawal_credentials, + activation_epoch: spec.far_future_epoch, + exit_epoch: spec.far_future_epoch, + withdrawable_epoch: spec.far_future_epoch, + initiated_exit: false, + slashed: false, + }; + self.validator_registry.push(validator); + self.validator_balances.push(amount); + Ok(self.validator_registry.len() - 1) + } + } + + /// Activate the validator of the given ``index``. + /// + /// Spec v0.4.0 + pub fn activate_validator( + &mut self, + validator_index: usize, + is_genesis: bool, + spec: &ChainSpec, + ) { + let current_epoch = self.current_epoch(spec); + + self.validator_registry[validator_index].activation_epoch = if is_genesis { + spec.genesis_epoch + } else { + self.get_delayed_activation_exit_epoch(current_epoch, spec) + } + } + + /// Initiate an exit for the validator of the given `index`. + /// + /// Spec v0.4.0 + pub fn initiate_validator_exit(&mut self, validator_index: usize) { + self.validator_registry[validator_index].initiated_exit = true; + } + + /// Exit the validator of the given `index`. + /// + /// Spec v0.4.0 + fn exit_validator(&mut self, validator_index: usize, spec: &ChainSpec) { + let current_epoch = self.current_epoch(spec); + let delayed_epoch = self.get_delayed_activation_exit_epoch(current_epoch, spec); + + if self.validator_registry[validator_index].exit_epoch <= delayed_epoch { + return; + } + + self.validator_registry[validator_index].exit_epoch = delayed_epoch; + } + + /// Slash the validator with index ``index``. 
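The hashmap optimisation in `process_deposits` can be modelled with plain types: build the pubkey-to-index map once, then each deposit is an O(1) lookup rather than a scan of the registry (pubkeys are strings and balances plain integers here purely for illustration):

```rust
use std::collections::HashMap;

// Registry entries are (pubkey, balance) pairs in this toy model.
fn process_deposits(registry: &mut Vec<(String, u64)>, deposits: &[(String, u64)]) {
    // Built once per batch, as in the diff.
    let mut pubkey_map: HashMap<String, usize> = HashMap::new();
    for (i, (pubkey, _)) in registry.iter().enumerate() {
        pubkey_map.insert(pubkey.clone(), i);
    }

    for (pubkey, amount) in deposits {
        match pubkey_map.get(pubkey) {
            // Known validator: top up its balance.
            Some(&i) => registry[i].1 += *amount,
            // Unknown pubkey: append a new entry, mirroring the registry push.
            None => registry.push((pubkey.clone(), *amount)),
        }
    }
}

fn main() {
    let mut registry = vec![("alice".to_string(), 32), ("bob".to_string(), 32)];
    let deposits = vec![("bob".to_string(), 1), ("carol".to_string(), 32)];
    process_deposits(&mut registry, &deposits);
    assert_eq!(registry[1].1, 33);
    assert_eq!(registry.len(), 3);
}
```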
+ /// + /// Spec v0.4.0 + pub fn slash_validator( + &mut self, + validator_index: usize, + spec: &ChainSpec, + ) -> Result<(), Error> { + let current_epoch = self.current_epoch(spec); + + let validator = &self + .validator_registry + .get(validator_index) + .ok_or_else(|| Error::UnknownValidator)?; + + if self.slot + >= validator + .withdrawable_epoch + .start_slot(spec.slots_per_epoch) + { + return Err(Error::SlotOutOfBounds); + } + + self.exit_validator(validator_index, spec); + + self.latest_slashed_balances[current_epoch.as_usize() % spec.latest_slashed_exit_length] += + self.get_effective_balance(validator_index, spec); + + let whistleblower_index = self.get_beacon_proposer_index(self.slot, spec)?; + let whistleblower_reward = self.get_effective_balance(validator_index, spec); + safe_add_assign!( + self.validator_balances[whistleblower_index as usize], + whistleblower_reward + ); + safe_sub_assign!( + self.validator_balances[validator_index], + whistleblower_reward + ); + self.validator_registry[validator_index].slashed = true; + self.validator_registry[validator_index].withdrawable_epoch = + current_epoch + Epoch::from(spec.latest_slashed_exit_length); + + debug!( + "Whistleblower {} penalized validator {}.", + whistleblower_index, validator_index + ); + + Ok(()) + } + + /// Initiate an exit for the validator of the given `index`. + /// + /// Spec v0.4.0 + pub fn prepare_validator_for_withdrawal(&mut self, validator_index: usize, spec: &ChainSpec) { + //TODO: we're not ANDing here, we're setting. Potentially wrong. + self.validator_registry[validator_index].withdrawable_epoch = + self.current_epoch(spec) + spec.min_validator_withdrawability_delay; + } + + /// Returns the crosslink committees for some slot. + /// + /// Utilizes the cache and will fail if the appropriate cache is not initialized. + /// + /// Spec v0.4.0 + pub(crate) fn get_shuffling_for_slot( + &self, + slot: Slot, + registry_change: bool, + + spec: &ChainSpec, + ) -> Result>, Error> { + let (_committees_per_epoch, seed, shuffling_epoch, _shuffling_start_shard) = + self.get_committee_params_at_slot(slot, registry_change, spec)?; + + self.get_shuffling(seed, shuffling_epoch, spec) } /// Shuffle ``validators`` into crosslink committees seeded by ``seed`` and ``epoch``. + /// /// Return a list of ``committees_per_epoch`` committees where each /// committee is itself a list of validator indices. 
/// - /// Spec v0.1 - pub fn get_shuffling( + /// Spec v0.4.0 + pub(crate) fn get_shuffling( &self, seed: Hash256, epoch: Epoch, spec: &ChainSpec, - ) -> Option>> { + ) -> Result>, Error> { let active_validator_indices = get_active_validator_indices(&self.validator_registry, epoch); if active_validator_indices.is_empty() { - return None; + error!("get_shuffling: no validators."); + return Err(Error::InsufficientValidators); } - trace!( - "get_shuffling: active_validator_indices.len() == {}", - active_validator_indices.len() - ); + debug!("Shuffling {} validators...", active_validator_indices.len()); - let committees_per_epoch = - self.get_epoch_committee_count(active_validator_indices.len(), spec); + let committees_per_epoch = spec.get_epoch_committee_count(active_validator_indices.len()); trace!( "get_shuffling: active_validator_indices.len() == {}, committees_per_epoch: {}", @@ -286,99 +813,90 @@ impl BeaconState { committees_per_epoch ); - let mut shuffled_active_validator_indices = vec![0; active_validator_indices.len()]; - for &i in &active_validator_indices { - let shuffled_i = get_permutated_index( - i, - active_validator_indices.len(), - &seed[..], - spec.shuffle_round_count, - )?; - shuffled_active_validator_indices[i] = active_validator_indices[shuffled_i] - } + let active_validator_indices: Vec = active_validator_indices.to_vec(); - Some( - shuffled_active_validator_indices - .honey_badger_split(committees_per_epoch as usize) - .map(|slice: &[usize]| slice.to_vec()) - .collect(), + let shuffled_active_validator_indices = shuffle_list( + active_validator_indices, + spec.shuffle_round_count, + &seed[..], + true, ) + .ok_or_else(|| Error::UnableToShuffle)?; + + Ok(shuffled_active_validator_indices + .honey_badger_split(committees_per_epoch as usize) + .map(|slice: &[usize]| slice.to_vec()) + .collect()) } - /// Return the number of committees in the previous epoch. + /// Returns the following params for the given slot: /// - /// Spec v0.2.0 - fn get_previous_epoch_committee_count(&self, spec: &ChainSpec) -> u64 { - let previous_active_validators = - get_active_validator_indices(&self.validator_registry, self.previous_calculation_epoch); - self.get_epoch_committee_count(previous_active_validators.len(), spec) - } - - /// Return the number of committees in the current epoch. + /// - epoch committee count + /// - epoch seed + /// - calculation epoch + /// - start shard /// - /// Spec v0.2.0 - pub fn get_current_epoch_committee_count(&self, spec: &ChainSpec) -> u64 { - let current_active_validators = - get_active_validator_indices(&self.validator_registry, self.current_calculation_epoch); - self.get_epoch_committee_count(current_active_validators.len(), spec) - } - - /// Return the number of committees in the next epoch. + /// In the spec, this functionality is included in the `get_crosslink_committees_at_slot(..)` + /// function. It is separated here to allow the division of shuffling and committee building, + /// as is required for efficient operations. 
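After `shuffle_list` produces the permuted validator indices, `honey_badger_split` cuts them into `committees_per_epoch` near-equal chunks, one committee each. A standalone model of the split step, assuming it follows the spec's `split` helper (the shuffle itself is omitted):

```rust
// Cuts an already-shuffled index list into `committees_per_epoch` chunks.
fn split_into_committees(shuffled: &[usize], committees_per_epoch: usize) -> Vec<Vec<usize>> {
    let len = shuffled.len();
    (0..committees_per_epoch)
        .map(|i| {
            let start = (len * i) / committees_per_epoch;
            let end = (len * (i + 1)) / committees_per_epoch;
            shuffled[start..end].to_vec()
        })
        .collect()
}

fn main() {
    // Ten (already shuffled) validator indices into four committees.
    let shuffled: Vec<usize> = (0..10).collect();
    let committees = split_into_committees(&shuffled, 4);
    let sizes: Vec<usize> = committees.iter().map(|c| c.len()).collect();
    assert_eq!(sizes, vec![2, 3, 2, 3]);
}
```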
/// - /// Spec v0.2.0 - pub fn get_next_epoch_committee_count(&self, spec: &ChainSpec) -> u64 { - let current_active_validators = - get_active_validator_indices(&self.validator_registry, self.next_epoch(spec)); - self.get_epoch_committee_count(current_active_validators.len(), spec) - } - - pub fn get_active_index_root(&self, epoch: Epoch, spec: &ChainSpec) -> Option { - let current_epoch = self.current_epoch(spec); - - let earliest_index_root = current_epoch - Epoch::from(spec.latest_index_roots_length) - + Epoch::from(spec.entry_exit_delay) - + 1; - let latest_index_root = current_epoch + spec.entry_exit_delay; - - trace!( - "get_active_index_root: epoch: {}, earliest: {}, latest: {}", - epoch, - earliest_index_root, - latest_index_root - ); - - if (epoch >= earliest_index_root) & (epoch <= latest_index_root) { - Some(self.latest_index_roots[epoch.as_usize() % spec.latest_index_roots_length]) - } else { - trace!("get_active_index_root: epoch out of range."); - None - } - } - - /// Generate a seed for the given ``epoch``. - /// - /// Spec v0.2.0 - pub fn generate_seed( + /// Spec v0.4.0 + pub(crate) fn get_committee_params_at_slot( &self, - epoch: Epoch, + slot: Slot, + registry_change: bool, spec: &ChainSpec, - ) -> Result { - let mut input = self - .get_randao_mix(epoch, spec) - .ok_or_else(|| BeaconStateError::InsufficientRandaoMixes)? - .to_vec(); + ) -> Result<(u64, Hash256, Epoch, u64), Error> { + let epoch = slot.epoch(spec.slots_per_epoch); + let current_epoch = self.current_epoch(spec); + let previous_epoch = self.previous_epoch(spec); + let next_epoch = self.next_epoch(spec); - input.append( - &mut self - .get_active_index_root(epoch, spec) - .ok_or_else(|| BeaconStateError::InsufficientIndexRoots)? - .to_vec(), - ); - - // TODO: ensure `Hash256::from(u64)` == `int_to_bytes32`. - input.append(&mut Hash256::from(epoch.as_u64()).to_vec()); - - Ok(Hash256::from(&hash(&input[..])[..])) + if epoch == current_epoch { + Ok(( + self.get_current_epoch_committee_count(spec), + self.current_shuffling_seed, + self.current_shuffling_epoch, + self.current_shuffling_start_shard, + )) + } else if epoch == previous_epoch { + Ok(( + self.get_previous_epoch_committee_count(spec), + self.previous_shuffling_seed, + self.previous_shuffling_epoch, + self.previous_shuffling_start_shard, + )) + } else if epoch == next_epoch { + let current_committees_per_epoch = self.get_current_epoch_committee_count(spec); + let epochs_since_last_registry_update = + current_epoch - self.validator_registry_update_epoch; + let (seed, shuffling_start_shard) = if registry_change { + let next_seed = self.generate_seed(next_epoch, spec)?; + ( + next_seed, + (self.current_shuffling_start_shard + current_committees_per_epoch) + % spec.shard_count, + ) + } else if (epochs_since_last_registry_update > 1) + & epochs_since_last_registry_update.is_power_of_two() + { + let next_seed = self.generate_seed(next_epoch, spec)?; + (next_seed, self.current_shuffling_start_shard) + } else { + ( + self.current_shuffling_seed, + self.current_shuffling_start_shard, + ) + }; + Ok(( + self.get_next_epoch_committee_count(spec), + seed, + next_epoch, + shuffling_start_shard, + )) + } else { + Err(Error::EpochOutOfBounds) + } } /// Return the list of ``(committee, shard)`` tuples for the ``slot``. 
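The next-epoch branch above boils down to: re-seed and advance the start shard on a registry change, re-seed (but keep the shard) at power-of-two intervals since the last registry update, and otherwise reuse the current seed. A standalone model of that decision, where the returned `bool` stands in for "call `generate_seed(next_epoch, ..)`":

```rust
// Returns (needs_fresh_seed, shuffling_start_shard) for the next epoch.
fn next_epoch_params(
    registry_change: bool,
    epochs_since_last_registry_update: u64,
    current_start_shard: u64,
    current_committees_per_epoch: u64,
    shard_count: u64,
) -> (bool, u64) {
    if registry_change {
        // New registry: new seed, and the start shard advances.
        (
            true,
            (current_start_shard + current_committees_per_epoch) % shard_count,
        )
    } else if (epochs_since_last_registry_update > 1)
        && epochs_since_last_registry_update.is_power_of_two()
    {
        // Stale registry at a power-of-two interval: re-seed, same start shard.
        (true, current_start_shard)
    } else {
        // Otherwise keep the current seed and start shard.
        (false, current_start_shard)
    }
}

fn main() {
    assert_eq!(next_epoch_params(true, 0, 60, 8, 64), (true, 4)); // (60 + 8) % 64
    assert_eq!(next_epoch_params(false, 4, 60, 8, 64), (true, 60));
    assert_eq!(next_epoch_params(false, 1, 60, 8, 64), (false, 60));
}
```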
@@ -386,80 +904,25 @@ impl BeaconState { /// Note: There are two possible shufflings for crosslink committees for a /// `slot` in the next epoch: with and without a `registry_change` /// - /// Spec v0.2.0 - pub fn get_crosslink_committees_at_slot( + /// Note: does not utilize the cache, `get_crosslink_committees_at_slot` is an equivalent + /// function which uses the cache. + /// + /// Spec v0.4.0 + pub(crate) fn calculate_crosslink_committees_at_slot( &self, slot: Slot, registry_change: bool, + shuffling: Vec>, spec: &ChainSpec, - ) -> Result, u64)>, BeaconStateError> { - let epoch = slot.epoch(spec.epoch_length); - let current_epoch = self.current_epoch(spec); - let previous_epoch = self.previous_epoch(spec); - let next_epoch = self.next_epoch(spec); + ) -> Result, u64)>, Error> { + let (committees_per_epoch, _seed, _shuffling_epoch, shuffling_start_shard) = + self.get_committee_params_at_slot(slot, registry_change, spec)?; - let (committees_per_epoch, seed, shuffling_epoch, shuffling_start_shard) = - if epoch == current_epoch { - trace!("get_crosslink_committees_at_slot: current_epoch"); - ( - self.get_current_epoch_committee_count(spec), - self.current_epoch_seed, - self.current_calculation_epoch, - self.current_epoch_start_shard, - ) - } else if epoch == previous_epoch { - trace!("get_crosslink_committees_at_slot: previous_epoch"); - ( - self.get_previous_epoch_committee_count(spec), - self.previous_epoch_seed, - self.previous_calculation_epoch, - self.previous_epoch_start_shard, - ) - } else if epoch == next_epoch { - trace!("get_crosslink_committees_at_slot: next_epoch"); - let current_committees_per_epoch = self.get_current_epoch_committee_count(spec); - let epochs_since_last_registry_update = - current_epoch - self.validator_registry_update_epoch; - let (seed, shuffling_start_shard) = if registry_change { - let next_seed = self.generate_seed(next_epoch, spec)?; - ( - next_seed, - (self.current_epoch_start_shard + current_committees_per_epoch) - % spec.shard_count, - ) - } else if (epochs_since_last_registry_update > 1) - & epochs_since_last_registry_update.is_power_of_two() - { - let next_seed = self.generate_seed(next_epoch, spec)?; - (next_seed, self.current_epoch_start_shard) - } else { - (self.current_epoch_seed, self.current_epoch_start_shard) - }; - ( - self.get_next_epoch_committee_count(spec), - seed, - next_epoch, - shuffling_start_shard, - ) - } else { - return Err(BeaconStateError::EpochOutOfBounds); - }; - - let shuffling = self - .get_shuffling(seed, shuffling_epoch, spec) - .ok_or_else(|| BeaconStateError::UnableToShuffle)?; - let offset = slot.as_u64() % spec.epoch_length; - let committees_per_slot = committees_per_epoch / spec.epoch_length; + let offset = slot.as_u64() % spec.slots_per_epoch; + let committees_per_slot = committees_per_epoch / spec.slots_per_epoch; let slot_start_shard = (shuffling_start_shard + committees_per_slot * offset) % spec.shard_count; - trace!( - "get_crosslink_committees_at_slot: committees_per_slot: {}, slot_start_shard: {}, seed: {}", - committees_per_slot, - slot_start_shard, - seed - ); - let mut crosslinks_at_slot = vec![]; for i in 0..committees_per_slot { let tuple = ( @@ -474,96 +937,70 @@ impl BeaconState { /// Returns the `slot`, `shard` and `committee_index` for which a validator must produce an /// attestation. /// - /// Spec v0.2.0 + /// Only reads the current epoch. + /// + /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. 
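The committee-to-shard assignment in `calculate_crosslink_committees_at_slot` is plain modular arithmetic over the shuffling chunks. A standalone model returning `(shuffling_index, shard)` pairs for a slot:

```rust
// Which shuffling chunks serve a slot, and which shard each one crosslinks.
fn committees_at_slot(
    slot: u64,
    slots_per_epoch: u64,
    committees_per_epoch: u64,
    shuffling_start_shard: u64,
    shard_count: u64,
) -> Vec<(u64, u64)> {
    let offset = slot % slots_per_epoch;
    let committees_per_slot = committees_per_epoch / slots_per_epoch;
    let slot_start_shard =
        (shuffling_start_shard + committees_per_slot * offset) % shard_count;

    (0..committees_per_slot)
        .map(|i| {
            let shuffling_index = committees_per_slot * offset + i;
            let shard = (slot_start_shard + i) % shard_count;
            (shuffling_index, shard)
        })
        .collect()
}

fn main() {
    // 64 committees over 8 slots gives 8 committees per slot; slot 3 of the
    // epoch is served by shuffling chunks 24..32 and shards 24..32.
    let pairs = committees_at_slot(3, 8, 64, 0, 1024);
    assert_eq!(pairs.first(), Some(&(24, 24)));
    assert_eq!(pairs.len(), 8);
}
```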
+ /// + /// Spec v0.4.0 pub fn attestation_slot_and_shard_for_validator( &self, validator_index: usize, - spec: &ChainSpec, - ) -> Result, BeaconStateError> { - let mut result = None; - for slot in self.current_epoch(spec).slot_iter(spec.epoch_length) { - for (committee, shard) in self.get_crosslink_committees_at_slot(slot, false, spec)? { - if let Some(committee_index) = committee.iter().position(|&i| i == validator_index) - { - result = Some((slot, shard, committee_index as u64)); - } - } - } - Ok(result) + _spec: &ChainSpec, + ) -> Result, Error> { + let cache = self.cache(RelativeEpoch::Current)?; + + Ok(cache + .attestation_duty_map + .get(&(validator_index as u64)) + .and_then(|tuple| Some(*tuple))) } - /// An entry or exit triggered in the ``epoch`` given by the input takes effect at - /// the epoch given by the output. + /// Process the slashings. /// - /// Spec v0.2.0 - pub fn get_entry_exit_effect_epoch(&self, epoch: Epoch, spec: &ChainSpec) -> Epoch { - epoch + 1 + spec.entry_exit_delay - } - - /// Returns the beacon proposer index for the `slot`. - /// - /// If the state does not contain an index for a beacon proposer at the requested `slot`, then `None` is returned. - /// - /// Spec v0.2.0 - pub fn get_beacon_proposer_index( - &self, - slot: Slot, - spec: &ChainSpec, - ) -> Result { - let committees = self.get_crosslink_committees_at_slot(slot, false, spec)?; - trace!( - "get_beacon_proposer_index: slot: {}, committees_count: {}", - slot, - committees.len() - ); - committees - .first() - .ok_or(BeaconStateError::InsufficientValidators) - .and_then(|(first_committee, _)| { - let index = (slot.as_usize()) - .checked_rem(first_committee.len()) - .ok_or(BeaconStateError::InsufficientValidators)?; - Ok(first_committee[index]) - }) - } - - /// Process the penalties and prepare the validators who are eligible to withdrawal. 
- /// - /// Spec v0.2.0 - pub fn process_penalties_and_exits(&mut self, spec: &ChainSpec) { + /// Spec v0.4.0 + pub fn process_slashings(&mut self, spec: &ChainSpec) { let current_epoch = self.current_epoch(spec); let active_validator_indices = get_active_validator_indices(&self.validator_registry, current_epoch); let total_balance = self.get_total_balance(&active_validator_indices[..], spec); - for index in 0..self.validator_balances.len() { - let validator = &self.validator_registry[index]; - - if current_epoch - == validator.penalized_epoch + Epoch::from(spec.latest_penalized_exit_length / 2) + for (index, validator) in self.validator_registry.iter().enumerate() { + if validator.slashed + && (current_epoch + == validator.withdrawable_epoch + - Epoch::from(spec.latest_slashed_exit_length / 2)) { - let epoch_index: usize = - current_epoch.as_usize() % spec.latest_penalized_exit_length; + let epoch_index: usize = current_epoch.as_usize() % spec.latest_slashed_exit_length; - let total_at_start = self.latest_penalized_balances - [(epoch_index + 1) % spec.latest_penalized_exit_length]; - let total_at_end = self.latest_penalized_balances[epoch_index]; + let total_at_start = self.latest_slashed_balances + [(epoch_index + 1) % spec.latest_slashed_exit_length]; + let total_at_end = self.latest_slashed_balances[epoch_index]; let total_penalities = total_at_end.saturating_sub(total_at_start); - let penalty = self.get_effective_balance(index, spec) - * std::cmp::min(total_penalities * 3, total_balance) - / total_balance; + let penalty = std::cmp::max( + self.get_effective_balance(index, spec) + * std::cmp::min(total_penalities * 3, total_balance) + / total_balance, + self.get_effective_balance(index, spec) / spec.min_penalty_quotient, + ); + safe_sub_assign!(self.validator_balances[index], penalty); } } + } + + /// Process the exit queue. + /// + /// Spec v0.4.0 + pub fn process_exit_queue(&mut self, spec: &ChainSpec) { + let current_epoch = self.current_epoch(spec); let eligible = |index: usize| { let validator = &self.validator_registry[index]; - if validator.penalized_epoch <= current_epoch { - let penalized_withdrawal_epochs = spec.latest_penalized_exit_length / 2; - current_epoch >= validator.penalized_epoch + penalized_withdrawal_epochs as u64 + if validator.withdrawable_epoch != spec.far_future_epoch { + false } else { - current_epoch >= validator.exit_epoch + spec.min_validator_withdrawal_epochs + current_epoch >= validator.exit_epoch + spec.min_validator_withdrawability_delay } }; @@ -571,27 +1008,18 @@ impl BeaconState { .filter(|i| eligible(*i)) .collect(); eligable_indices.sort_by_key(|i| self.validator_registry[*i].exit_epoch); + for (withdrawn_so_far, index) in eligable_indices.iter().enumerate() { - self.prepare_validator_for_withdrawal(*index); - if withdrawn_so_far as u64 >= spec.max_withdrawals_per_epoch { + if withdrawn_so_far as u64 >= spec.max_exit_dequeues_per_epoch { break; } + self.prepare_validator_for_withdrawal(*index, spec); } } - /// Return the randao mix at a recent ``epoch``. - /// - /// Returns `None` if the epoch is out-of-bounds of `self.latest_randao_mixes`. - /// - /// Spec v0.2.0 - pub fn get_randao_mix(&self, epoch: Epoch, spec: &ChainSpec) -> Option<&Hash256> { - self.latest_randao_mixes - .get(epoch.as_usize() % spec.latest_randao_mixes_length) - } - /// Update validator registry, activating/exiting validators if possible. 
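The new penalty calculation in `process_slashings` scales with the total slashed balance but is floored at `effective_balance / min_penalty_quotient`. A standalone model of the formula (toy balances rather than Gwei):

```rust
// Mirrors the penalty expression added in this diff.
fn slashing_penalty(
    effective_balance: u64,
    total_penalties: u64,
    total_balance: u64,
    min_penalty_quotient: u64,
) -> u64 {
    std::cmp::max(
        effective_balance * std::cmp::min(total_penalties * 3, total_balance) / total_balance,
        effective_balance / min_penalty_quotient,
    )
}

fn main() {
    // Few slashings: the floor (32 / 32 = 1 unit) dominates.
    assert_eq!(slashing_penalty(32, 0, 3_200, 32), 1);
    // Mass slashing (a third of the total balance): the whole balance is lost.
    assert_eq!(slashing_penalty(32, 1_100, 3_200, 32), 32);
}
```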
/// - /// Spec v0.2.0 + /// Spec v0.4.0 pub fn update_validator_registry(&mut self, spec: &ChainSpec) { let current_epoch = self.current_epoch(spec); let active_validator_indices = @@ -607,8 +1035,8 @@ impl BeaconState { for index in 0..self.validator_registry.len() { let validator = &self.validator_registry[index]; - if (validator.activation_epoch > self.get_entry_exit_effect_epoch(current_epoch, spec)) - && self.validator_balances[index] >= spec.max_deposit_amount + if (validator.activation_epoch == spec.far_future_epoch) + & (self.validator_balances[index] == spec.max_deposit_amount) { balance_churn += self.get_effective_balance(index, spec); if balance_churn > max_balance_churn { @@ -622,9 +1050,7 @@ impl BeaconState { for index in 0..self.validator_registry.len() { let validator = &self.validator_registry[index]; - if (validator.exit_epoch > self.get_entry_exit_effect_epoch(current_epoch, spec)) - && validator.status_flags == Some(StatusFlags::InitiatedExit) - { + if (validator.exit_epoch == spec.far_future_epoch) & (validator.initiated_exit) { balance_churn += self.get_effective_balance(index, spec); if balance_churn > max_balance_churn { break; @@ -636,135 +1062,38 @@ impl BeaconState { self.validator_registry_update_epoch = current_epoch; } - /// Process a validator deposit, returning the validator index if the deposit is valid. + + /// Confirm validator owns PublicKey /// - /// Spec v0.2.0 - pub fn process_deposit( - &mut self, + /// Spec v0.4.0 + pub fn validate_proof_of_possession( + &self, pubkey: PublicKey, - amount: u64, proof_of_possession: Signature, withdrawal_credentials: Hash256, spec: &ChainSpec, - ) -> Result { - // TODO: ensure verify proof-of-possession represents the spec accurately. - if !verify_proof_of_possession(&proof_of_possession, &pubkey) { - return Err(()); - } + ) -> bool { + let proof_of_possession_data = DepositInput { + pubkey: pubkey.clone(), + withdrawal_credentials, + proof_of_possession: Signature::empty_signature(), + }; - if let Some(index) = self - .validator_registry - .iter() - .position(|v| v.pubkey == pubkey) - { - if self.validator_registry[index].withdrawal_credentials == withdrawal_credentials { - safe_add_assign!(self.validator_balances[index], amount); - Ok(index) - } else { - Err(()) - } - } else { - let validator = Validator { - pubkey, - withdrawal_credentials, - activation_epoch: spec.far_future_epoch, - exit_epoch: spec.far_future_epoch, - withdrawal_epoch: spec.far_future_epoch, - penalized_epoch: spec.far_future_epoch, - status_flags: None, - }; - self.validator_registry.push(validator); - self.validator_balances.push(amount); - Ok(self.validator_registry.len() - 1) - } - } - - /// Activate the validator of the given ``index``. - /// - /// Spec v0.2.0 - pub fn activate_validator( - &mut self, - validator_index: usize, - is_genesis: bool, - spec: &ChainSpec, - ) { - let current_epoch = self.current_epoch(spec); - - self.validator_registry[validator_index].activation_epoch = if is_genesis { - spec.genesis_epoch - } else { - self.get_entry_exit_effect_epoch(current_epoch, spec) - } - } - - /// Initiate an exit for the validator of the given `index`. - /// - /// Spec v0.2.0 - pub fn initiate_validator_exit(&mut self, validator_index: usize) { - // TODO: the spec does an `|=` here, ensure this isn't buggy. - self.validator_registry[validator_index].status_flags = Some(StatusFlags::InitiatedExit); - } - - /// Exit the validator of the given `index`. 
- /// - /// Spec v0.2.0 - fn exit_validator(&mut self, validator_index: usize, spec: &ChainSpec) { - let current_epoch = self.current_epoch(spec); - - if self.validator_registry[validator_index].exit_epoch - <= self.get_entry_exit_effect_epoch(current_epoch, spec) - { - return; - } - - self.validator_registry[validator_index].exit_epoch = - self.get_entry_exit_effect_epoch(current_epoch, spec); - } - - /// Penalize the validator of the given ``index``. - /// - /// Exits the validator and assigns its effective balance to the block producer for this - /// state. - /// - /// Spec v0.2.0 - pub fn penalize_validator( - &mut self, - validator_index: usize, - spec: &ChainSpec, - ) -> Result<(), BeaconStateError> { - self.exit_validator(validator_index, spec); - let current_epoch = self.current_epoch(spec); - - self.latest_penalized_balances - [current_epoch.as_usize() % spec.latest_penalized_exit_length] += - self.get_effective_balance(validator_index, spec); - - let whistleblower_index = self.get_beacon_proposer_index(self.slot, spec)?; - let whistleblower_reward = self.get_effective_balance(validator_index, spec); - safe_add_assign!( - self.validator_balances[whistleblower_index as usize], - whistleblower_reward - ); - safe_sub_assign!( - self.validator_balances[validator_index], - whistleblower_reward - ); - self.validator_registry[validator_index].penalized_epoch = current_epoch; - Ok(()) - } - - /// Initiate an exit for the validator of the given `index`. - /// - /// Spec v0.2.0 - pub fn prepare_validator_for_withdrawal(&mut self, validator_index: usize) { - //TODO: we're not ANDing here, we're setting. Potentially wrong. - self.validator_registry[validator_index].status_flags = Some(StatusFlags::Withdrawable); + proof_of_possession.verify( + &proof_of_possession_data.hash_tree_root(), + spec.get_domain( + self.slot.epoch(spec.slots_per_epoch), + Domain::Deposit, + &self.fork, + ), + &pubkey, + ) } /// Iterate through the validator registry and eject active validators with balance below /// ``EJECTION_BALANCE``. /// - /// Spec v0.2.0 + /// Spec v0.4.0 pub fn process_ejections(&mut self, spec: &ChainSpec) { for validator_index in get_active_validator_indices(&self.validator_registry, self.current_epoch(spec)) @@ -779,7 +1108,7 @@ impl BeaconState { /// /// Note: this is defined "inline" in the spec, not as a helper function. /// - /// Spec v0.2.0 + /// Spec v0.4.0 pub fn inactivity_penalty( &self, validator_index: usize, @@ -794,72 +1123,11 @@ impl BeaconState { / 2 } - /// Returns the distance between the first included attestation for some validator and this - /// slot. - /// - /// Note: In the spec this is defined "inline", not as a helper function. - /// - /// Spec v0.2.0 - pub fn inclusion_distance( - &self, - attestations: &[&PendingAttestation], - validator_index: usize, - spec: &ChainSpec, - ) -> Result { - let attestation = - self.earliest_included_attestation(attestations, validator_index, spec)?; - Ok((attestation.inclusion_slot - attestation.data.slot).as_u64()) - } - - /// Returns the slot of the earliest included attestation for some validator. - /// - /// Note: In the spec this is defined "inline", not as a helper function. - /// - /// Spec v0.2.0 - pub fn inclusion_slot( - &self, - attestations: &[&PendingAttestation], - validator_index: usize, - spec: &ChainSpec, - ) -> Result { - let attestation = - self.earliest_included_attestation(attestations, validator_index, spec)?; - Ok(attestation.inclusion_slot) - } - - /// Finds the earliest included attestation for some validator. 
- /// - /// Note: In the spec this is defined "inline", not as a helper function. - /// - /// Spec v0.2.0 - fn earliest_included_attestation( - &self, - attestations: &[&PendingAttestation], - validator_index: usize, - spec: &ChainSpec, - ) -> Result { - let mut included_attestations = vec![]; - - for (i, a) in attestations.iter().enumerate() { - let participants = - self.get_attestation_participants(&a.data, &a.aggregation_bitfield, spec)?; - if participants.iter().any(|i| *i == validator_index) { - included_attestations.push(i); - } - } - - let earliest_attestation_index = included_attestations - .iter() - .min_by_key(|i| attestations[**i].inclusion_slot) - .ok_or_else(|| InclusionError::NoAttestationsForValidator)?; - Ok(attestations[*earliest_attestation_index].clone()) - } - /// Returns the base reward for some validator. /// /// Note: In the spec this is defined "inline", not as a helper function. /// - /// Spec v0.2.0 + /// Spec v0.4.0 pub fn base_reward( &self, validator_index: usize, @@ -869,102 +1137,127 @@ impl BeaconState { self.get_effective_balance(validator_index, spec) / base_reward_quotient / 5 } - /// Return the combined effective balance of an array of validators. + /// Returns the union of all participants in the provided attestations /// - /// Spec v0.2.0 - pub fn get_total_balance(&self, validator_indices: &[usize], spec: &ChainSpec) -> u64 { - validator_indices - .iter() - .fold(0, |acc, i| acc + self.get_effective_balance(*i, spec)) - } - - /// Return the effective balance (also known as "balance at stake") for a validator with the given ``index``. - /// - /// Spec v0.2.0 - pub fn get_effective_balance(&self, validator_index: usize, spec: &ChainSpec) -> u64 { - std::cmp::min( - self.validator_balances[validator_index], - spec.max_deposit_amount, - ) - } - - /// Return the block root at a recent `slot`. - /// - /// Spec v0.2.0 - pub fn get_block_root(&self, slot: Slot, spec: &ChainSpec) -> Option<&Hash256> { - self.latest_block_roots - .get(slot.as_usize() % spec.latest_block_roots_length) - } - + /// Spec v0.4.0 pub fn get_attestation_participants_union( &self, attestations: &[&PendingAttestation], spec: &ChainSpec, - ) -> Result, AttestationParticipantsError> { - let mut all_participants = attestations.iter().try_fold::<_, _, Result< - Vec, - AttestationParticipantsError, - >>(vec![], |mut acc, a| { - acc.append(&mut self.get_attestation_participants( - &a.data, - &a.aggregation_bitfield, - spec, - )?); - Ok(acc) - })?; + ) -> Result, Error> { + let mut all_participants = attestations + .iter() + .try_fold::<_, _, Result, Error>>(vec![], |mut acc, a| { + acc.append(&mut self.get_attestation_participants( + &a.data, + &a.aggregation_bitfield, + spec, + )?); + Ok(acc) + })?; all_participants.sort_unstable(); all_participants.dedup(); Ok(all_participants) } - - /// Return the participant indices at for the ``attestation_data`` and ``bitfield``. - /// - /// In effect, this converts the "committee indices" on the bitfield into "validator indices" - /// for self.validator_registy. 
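As a toy illustration of the union semantics in `get_attestation_participants_union` above (participant lists are concatenated, then sorted and de-duplicated), using plain index vectors rather than real attestations:

fn participants_union_example() {
    let mut all_participants: Vec<usize> = vec![];
    all_participants.extend(vec![3, 1, 7]); // participants of one attestation
    all_participants.extend(vec![7, 2]); // participants of another
    all_participants.sort_unstable();
    all_participants.dedup();
    assert_eq!(all_participants, vec![1, 2, 3, 7]);
}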
- /// - /// Spec v0.2.0 - pub fn get_attestation_participants( - &self, - attestation_data: &AttestationData, - bitfield: &Bitfield, - spec: &ChainSpec, - ) -> Result, AttestationParticipantsError> { - let crosslink_committees = - self.get_crosslink_committees_at_slot(attestation_data.slot, false, spec)?; - - let committee_index: usize = crosslink_committees - .iter() - .position(|(_committee, shard)| *shard == attestation_data.shard) - .ok_or_else(|| AttestationParticipantsError::NoCommitteeForShard)?; - let (crosslink_committee, _shard) = &crosslink_committees[committee_index]; - - /* - * TODO: verify bitfield length is valid. - */ - - let mut participants = vec![]; - for (i, validator_index) in crosslink_committee.iter().enumerate() { - if bitfield.get(i).unwrap() { - participants.push(*validator_index); - } - } - Ok(participants) - } } fn hash_tree_root(input: Vec) -> Hash256 { - Hash256::from(&input.hash_tree_root()[..]) + Hash256::from_slice(&input.hash_tree_root()[..]) } -impl From for AttestationParticipantsError { - fn from(e: BeaconStateError) -> AttestationParticipantsError { - AttestationParticipantsError::BeaconStateError(e) +impl Encodable for BeaconState { + fn ssz_append(&self, s: &mut SszStream) { + s.append(&self.slot); + s.append(&self.genesis_time); + s.append(&self.fork); + s.append(&self.validator_registry); + s.append(&self.validator_balances); + s.append(&self.validator_registry_update_epoch); + s.append(&self.latest_randao_mixes); + s.append(&self.previous_shuffling_start_shard); + s.append(&self.current_shuffling_start_shard); + s.append(&self.previous_shuffling_epoch); + s.append(&self.current_shuffling_epoch); + s.append(&self.previous_shuffling_seed); + s.append(&self.current_shuffling_seed); + s.append(&self.previous_justified_epoch); + s.append(&self.justified_epoch); + s.append(&self.justification_bitfield); + s.append(&self.finalized_epoch); + s.append(&self.latest_crosslinks); + s.append(&self.latest_block_roots); + s.append(&self.latest_active_index_roots); + s.append(&self.latest_slashed_balances); + s.append(&self.latest_attestations); + s.append(&self.batched_block_roots); + s.append(&self.latest_eth1_data); + s.append(&self.eth1_data_votes); + s.append(&self.deposit_index); } } -impl From for InclusionError { - fn from(e: AttestationParticipantsError) -> InclusionError { - InclusionError::AttestationParticipantsError(e) +impl Decodable for BeaconState { + fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { + let (slot, i) = <_>::ssz_decode(bytes, i)?; + let (genesis_time, i) = <_>::ssz_decode(bytes, i)?; + let (fork, i) = <_>::ssz_decode(bytes, i)?; + let (validator_registry, i) = <_>::ssz_decode(bytes, i)?; + let (validator_balances, i) = <_>::ssz_decode(bytes, i)?; + let (validator_registry_update_epoch, i) = <_>::ssz_decode(bytes, i)?; + let (latest_randao_mixes, i) = <_>::ssz_decode(bytes, i)?; + let (previous_shuffling_start_shard, i) = <_>::ssz_decode(bytes, i)?; + let (current_shuffling_start_shard, i) = <_>::ssz_decode(bytes, i)?; + let (previous_shuffling_epoch, i) = <_>::ssz_decode(bytes, i)?; + let (current_shuffling_epoch, i) = <_>::ssz_decode(bytes, i)?; + let (previous_shuffling_seed, i) = <_>::ssz_decode(bytes, i)?; + let (current_shuffling_seed, i) = <_>::ssz_decode(bytes, i)?; + let (previous_justified_epoch, i) = <_>::ssz_decode(bytes, i)?; + let (justified_epoch, i) = <_>::ssz_decode(bytes, i)?; + let (justification_bitfield, i) = <_>::ssz_decode(bytes, i)?; + let (finalized_epoch, i) = 
<_>::ssz_decode(bytes, i)?; + let (latest_crosslinks, i) = <_>::ssz_decode(bytes, i)?; + let (latest_block_roots, i) = <_>::ssz_decode(bytes, i)?; + let (latest_active_index_roots, i) = <_>::ssz_decode(bytes, i)?; + let (latest_slashed_balances, i) = <_>::ssz_decode(bytes, i)?; + let (latest_attestations, i) = <_>::ssz_decode(bytes, i)?; + let (batched_block_roots, i) = <_>::ssz_decode(bytes, i)?; + let (latest_eth1_data, i) = <_>::ssz_decode(bytes, i)?; + let (eth1_data_votes, i) = <_>::ssz_decode(bytes, i)?; + let (deposit_index, i) = <_>::ssz_decode(bytes, i)?; + + Ok(( + Self { + slot, + genesis_time, + fork, + validator_registry, + validator_balances, + validator_registry_update_epoch, + latest_randao_mixes, + previous_shuffling_start_shard, + current_shuffling_start_shard, + previous_shuffling_epoch, + current_shuffling_epoch, + previous_shuffling_seed, + current_shuffling_seed, + previous_justified_epoch, + justified_epoch, + justification_bitfield, + finalized_epoch, + latest_crosslinks, + latest_block_roots, + latest_active_index_roots, + latest_slashed_balances, + latest_attestations, + batched_block_roots, + latest_eth1_data, + eth1_data_votes, + deposit_index, + cache_index_offset: 0, + caches: vec![EpochCache::empty(); CACHED_EPOCHS], + }, + i, + )) } } @@ -982,24 +1275,29 @@ impl TreeHash for BeaconState { .hash_tree_root_internal(), ); result.append(&mut self.latest_randao_mixes.hash_tree_root_internal()); - result.append(&mut self.previous_epoch_start_shard.hash_tree_root_internal()); - result.append(&mut self.current_epoch_start_shard.hash_tree_root_internal()); - result.append(&mut self.previous_calculation_epoch.hash_tree_root_internal()); - result.append(&mut self.current_calculation_epoch.hash_tree_root_internal()); - result.append(&mut self.previous_epoch_seed.hash_tree_root_internal()); - result.append(&mut self.current_epoch_seed.hash_tree_root_internal()); + result.append( + &mut self + .previous_shuffling_start_shard + .hash_tree_root_internal(), + ); + result.append(&mut self.current_shuffling_start_shard.hash_tree_root_internal()); + result.append(&mut self.previous_shuffling_epoch.hash_tree_root_internal()); + result.append(&mut self.current_shuffling_epoch.hash_tree_root_internal()); + result.append(&mut self.previous_shuffling_seed.hash_tree_root_internal()); + result.append(&mut self.current_shuffling_seed.hash_tree_root_internal()); result.append(&mut self.previous_justified_epoch.hash_tree_root_internal()); result.append(&mut self.justified_epoch.hash_tree_root_internal()); result.append(&mut self.justification_bitfield.hash_tree_root_internal()); result.append(&mut self.finalized_epoch.hash_tree_root_internal()); result.append(&mut self.latest_crosslinks.hash_tree_root_internal()); result.append(&mut self.latest_block_roots.hash_tree_root_internal()); - result.append(&mut self.latest_index_roots.hash_tree_root_internal()); - result.append(&mut self.latest_penalized_balances.hash_tree_root_internal()); + result.append(&mut self.latest_active_index_roots.hash_tree_root_internal()); + result.append(&mut self.latest_slashed_balances.hash_tree_root_internal()); result.append(&mut self.latest_attestations.hash_tree_root_internal()); result.append(&mut self.batched_block_roots.hash_tree_root_internal()); result.append(&mut self.latest_eth1_data.hash_tree_root_internal()); result.append(&mut self.eth1_data_votes.hash_tree_root_internal()); + result.append(&mut self.deposit_index.hash_tree_root_internal()); hash(&result) } } @@ -1014,24 +1312,27 @@ impl TestRandom 
for BeaconState { validator_balances: <_>::random_for_test(rng), validator_registry_update_epoch: <_>::random_for_test(rng), latest_randao_mixes: <_>::random_for_test(rng), - previous_epoch_start_shard: <_>::random_for_test(rng), - current_epoch_start_shard: <_>::random_for_test(rng), - previous_calculation_epoch: <_>::random_for_test(rng), - current_calculation_epoch: <_>::random_for_test(rng), - previous_epoch_seed: <_>::random_for_test(rng), - current_epoch_seed: <_>::random_for_test(rng), + previous_shuffling_start_shard: <_>::random_for_test(rng), + current_shuffling_start_shard: <_>::random_for_test(rng), + previous_shuffling_epoch: <_>::random_for_test(rng), + current_shuffling_epoch: <_>::random_for_test(rng), + previous_shuffling_seed: <_>::random_for_test(rng), + current_shuffling_seed: <_>::random_for_test(rng), previous_justified_epoch: <_>::random_for_test(rng), justified_epoch: <_>::random_for_test(rng), justification_bitfield: <_>::random_for_test(rng), finalized_epoch: <_>::random_for_test(rng), latest_crosslinks: <_>::random_for_test(rng), latest_block_roots: <_>::random_for_test(rng), - latest_index_roots: <_>::random_for_test(rng), - latest_penalized_balances: <_>::random_for_test(rng), + latest_active_index_roots: <_>::random_for_test(rng), + latest_slashed_balances: <_>::random_for_test(rng), latest_attestations: <_>::random_for_test(rng), batched_block_roots: <_>::random_for_test(rng), latest_eth1_data: <_>::random_for_test(rng), eth1_data_votes: <_>::random_for_test(rng), + deposit_index: <_>::random_for_test(rng), + cache_index_offset: 0, + caches: vec![EpochCache::empty(); CACHED_EPOCHS], } } } diff --git a/eth2/types/src/beacon_state/builder.rs b/eth2/types/src/beacon_state/builder.rs new file mode 100644 index 000000000..0be297db7 --- /dev/null +++ b/eth2/types/src/beacon_state/builder.rs @@ -0,0 +1,263 @@ +use crate::*; +use bls::create_proof_of_possession; + +/// Builds a `BeaconState` for use in testing or benchmarking. +/// +/// Building the `BeaconState` is a three step processes: +/// +/// 1. Create a new `BeaconStateBuilder`. +/// 2. Call `Self::build()` or `Self::build_fast()` generate a `BeaconState`. +/// 3. (Optional) Use builder functions to modify the `BeaconState`. +/// 4. Call `Self::cloned_state()` to obtain a `BeaconState` cloned from this struct. +/// +/// Step (2) happens prior to step (3) because some functionality requires an existing +/// `BeaconState`. +/// +/// Step (4) produces a clone of the BeaconState and doesn't consume the `BeaconStateBuilder` to +/// allow access to `self.keypairs` and `self.spec`. +pub struct BeaconStateBuilder { + pub validator_count: usize, + pub state: Option, + pub genesis_time: u64, + pub latest_eth1_data: Eth1Data, + pub spec: ChainSpec, + pub keypairs: Vec, +} + +impl BeaconStateBuilder { + /// Create a new builder with the given number of validators. + pub fn new(validator_count: usize) -> Self { + let genesis_time = 10_000_000; + + let latest_eth1_data = Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + }; + + let spec = ChainSpec::foundation(); + + Self { + validator_count, + state: None, + genesis_time, + latest_eth1_data, + spec, + keypairs: vec![], + } + } + + /// Builds a `BeaconState` using the `BeaconState::genesis(..)` function. + /// + /// Each validator is assigned a unique, randomly-generated keypair and all + /// proof-of-possessions are verified during genesis. 
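A hypothetical end-to-end use of the builder documented above, following its numbered steps; `build_fast()`, `teleport_to_end_of_epoch(..)` and `insert_attestations()` are defined further down in this file.

fn builder_usage_sketch() -> Result<(), BeaconStateError> {
    // 1. Create a builder for 8 validators.
    let mut builder = BeaconStateBuilder::new(8);

    // 2. Generate a genesis `BeaconState`. `build()` creates unique, verified
    //    keypairs; `build_fast()` skips the expensive BLS work.
    builder.build_fast()?;

    // 3. (Optional) mutate the state via the helpers below, e.g.
    //    `teleport_to_end_of_epoch(..)` or `insert_attestations()`.

    // 4. Clone the finished state out of the builder; `builder.keypairs` and
    //    `builder.spec` remain accessible afterwards.
    let _state: BeaconState = builder.cloned_state();

    Ok(())
}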
+ pub fn build(&mut self) -> Result<(), BeaconStateError> { + self.keypairs = (0..self.validator_count) + .collect::>() + .iter() + .map(|_| Keypair::random()) + .collect(); + + let initial_validator_deposits = self + .keypairs + .iter() + .map(|keypair| Deposit { + branch: vec![], // branch verification is not specified. + index: 0, // index verification is not specified. + deposit_data: DepositData { + amount: 32_000_000_000, // 32 ETH (in Gwei) + timestamp: self.genesis_time - 1, + deposit_input: DepositInput { + pubkey: keypair.pk.clone(), + withdrawal_credentials: Hash256::zero(), // Withdrawal not possible. + proof_of_possession: create_proof_of_possession(&keypair), + }, + }, + }) + .collect(); + + let state = BeaconState::genesis( + self.genesis_time, + initial_validator_deposits, + self.latest_eth1_data.clone(), + &self.spec, + )?; + + self.state = Some(state); + + Ok(()) + } + + /// Builds a `BeaconState` using the `BeaconState::genesis(..)` function, without supplying any + /// validators. Instead validators are added to the state post-genesis. + /// + /// One keypair is randomly generated and all validators are assigned this same keypair. + /// Proof-of-possessions are not created (or validated). + /// + /// This function runs orders of magnitude faster than `Self::build()`, however it will be + /// erroneous for functions which use a validators public key as an identifier (e.g., + /// deposits). + pub fn build_fast(&mut self) -> Result<(), BeaconStateError> { + let common_keypair = Keypair::random(); + + let mut validator_registry = Vec::with_capacity(self.validator_count); + let mut validator_balances = Vec::with_capacity(self.validator_count); + self.keypairs = Vec::with_capacity(self.validator_count); + + for _ in 0..self.validator_count { + self.keypairs.push(common_keypair.clone()); + validator_balances.push(32_000_000_000); + validator_registry.push(Validator { + pubkey: common_keypair.pk.clone(), + withdrawal_credentials: Hash256::zero(), + activation_epoch: self.spec.genesis_epoch, + ..Validator::default() + }) + } + + let state = BeaconState { + validator_registry, + validator_balances, + ..BeaconState::genesis( + self.genesis_time, + vec![], + self.latest_eth1_data.clone(), + &self.spec, + )? + }; + + self.state = Some(state); + + Ok(()) + } + + /// Sets the `BeaconState` to be in the last slot of the given epoch. + /// + /// Sets all justification/finalization parameters to be be as "perfect" as possible (i.e., + /// highest justified and finalized slots, full justification bitfield, etc). + pub fn teleport_to_end_of_epoch(&mut self, epoch: Epoch) { + let state = self.state.as_mut().expect("Genesis required"); + + let slot = epoch.end_slot(self.spec.slots_per_epoch); + + state.slot = slot; + state.validator_registry_update_epoch = epoch - 1; + + state.previous_shuffling_epoch = epoch - 1; + state.current_shuffling_epoch = epoch; + + state.previous_shuffling_seed = Hash256::from_low_u64_le(0); + state.current_shuffling_seed = Hash256::from_low_u64_le(1); + + state.previous_justified_epoch = epoch - 2; + state.justified_epoch = epoch - 1; + state.justification_bitfield = u64::max_value(); + state.finalized_epoch = epoch - 1; + } + + /// Creates a full set of attestations for the `BeaconState`. Each attestation has full + /// participation from its committee and references the expected beacon_block hashes. + /// + /// These attestations should be fully conducive to justification and finalization. 
+ pub fn insert_attestations(&mut self) { + let state = self.state.as_mut().expect("Genesis required"); + + state + .build_epoch_cache(RelativeEpoch::Previous, &self.spec) + .unwrap(); + state + .build_epoch_cache(RelativeEpoch::Current, &self.spec) + .unwrap(); + + let current_epoch = state.current_epoch(&self.spec); + let previous_epoch = state.previous_epoch(&self.spec); + let current_epoch_depth = + (state.slot - current_epoch.end_slot(self.spec.slots_per_epoch)).as_usize(); + + let previous_epoch_slots = previous_epoch.slot_iter(self.spec.slots_per_epoch); + let current_epoch_slots = current_epoch + .slot_iter(self.spec.slots_per_epoch) + .take(current_epoch_depth); + + for slot in previous_epoch_slots.chain(current_epoch_slots) { + let committees = state + .get_crosslink_committees_at_slot(slot, &self.spec) + .unwrap() + .clone(); + + for (committee, shard) in committees { + state + .latest_attestations + .push(committee_to_pending_attestation( + state, &committee, shard, slot, &self.spec, + )) + } + } + } + + /// Returns a cloned `BeaconState`. + pub fn cloned_state(&self) -> BeaconState { + self.state.as_ref().expect("Genesis required").clone() + } +} + +/// Builds a valid PendingAttestation with full participation for some committee. +fn committee_to_pending_attestation( + state: &BeaconState, + committee: &[usize], + shard: u64, + slot: Slot, + spec: &ChainSpec, +) -> PendingAttestation { + let current_epoch = state.current_epoch(spec); + let previous_epoch = state.previous_epoch(spec); + + let mut aggregation_bitfield = Bitfield::new(); + let mut custody_bitfield = Bitfield::new(); + + for (i, _) in committee.iter().enumerate() { + aggregation_bitfield.set(i, true); + custody_bitfield.set(i, true); + } + + let is_previous_epoch = + state.slot.epoch(spec.slots_per_epoch) != slot.epoch(spec.slots_per_epoch); + + let justified_epoch = if is_previous_epoch { + state.previous_justified_epoch + } else { + state.justified_epoch + }; + + let epoch_boundary_root = if is_previous_epoch { + *state + .get_block_root(previous_epoch.start_slot(spec.slots_per_epoch), spec) + .unwrap() + } else { + *state + .get_block_root(current_epoch.start_slot(spec.slots_per_epoch), spec) + .unwrap() + }; + + let justified_block_root = *state + .get_block_root(justified_epoch.start_slot(spec.slots_per_epoch), &spec) + .unwrap(); + + PendingAttestation { + aggregation_bitfield, + data: AttestationData { + slot, + shard, + beacon_block_root: *state.get_block_root(slot, spec).unwrap(), + epoch_boundary_root, + crosslink_data_root: Hash256::zero(), + latest_crosslink: Crosslink { + epoch: slot.epoch(spec.slots_per_epoch), + crosslink_data_root: Hash256::zero(), + }, + justified_epoch, + justified_block_root, + }, + custody_bitfield, + inclusion_slot: slot, + } +} diff --git a/eth2/types/src/beacon_state/epoch_cache.rs b/eth2/types/src/beacon_state/epoch_cache.rs new file mode 100644 index 000000000..bbc991646 --- /dev/null +++ b/eth2/types/src/beacon_state/epoch_cache.rs @@ -0,0 +1,84 @@ +use super::{AttestationDutyMap, BeaconState, CrosslinkCommittees, Error, ShardCommitteeIndexMap}; +use crate::{ChainSpec, Epoch}; +use log::trace; +use serde_derive::Serialize; +use std::collections::HashMap; + +#[derive(Debug, PartialEq, Clone, Serialize)] +pub struct EpochCache { + /// True if this cache has been initialized. + pub initialized: bool, + /// The crosslink committees for an epoch. + pub committees: Vec, + /// Maps validator index to a slot, shard and committee index for attestation. 
+ pub attestation_duty_map: AttestationDutyMap, + /// Maps a shard to an index of `self.committees`. + pub shard_committee_index_map: ShardCommitteeIndexMap, +} + +impl EpochCache { + pub fn empty() -> EpochCache { + EpochCache { + initialized: false, + committees: vec![], + attestation_duty_map: AttestationDutyMap::new(), + shard_committee_index_map: ShardCommitteeIndexMap::new(), + } + } + + pub fn initialized( + state: &BeaconState, + epoch: Epoch, + spec: &ChainSpec, + ) -> Result { + let mut epoch_committees: Vec = + Vec::with_capacity(spec.slots_per_epoch as usize); + let mut attestation_duty_map: AttestationDutyMap = HashMap::new(); + let mut shard_committee_index_map: ShardCommitteeIndexMap = HashMap::new(); + + let shuffling = + state.get_shuffling_for_slot(epoch.start_slot(spec.slots_per_epoch), false, spec)?; + + for (epoch_committeess_index, slot) in epoch.slot_iter(spec.slots_per_epoch).enumerate() { + let slot_committees = state.calculate_crosslink_committees_at_slot( + slot, + false, + shuffling.clone(), + spec, + )?; + + for (slot_committees_index, (committee, shard)) in slot_committees.iter().enumerate() { + // Empty committees are not permitted. + if committee.is_empty() { + return Err(Error::InsufficientValidators); + } + + trace!( + "shard: {}, epoch_i: {}, slot_i: {}", + shard, + epoch_committeess_index, + slot_committees_index + ); + + shard_committee_index_map + .insert(*shard, (epoch_committeess_index, slot_committees_index)); + + for (committee_index, validator_index) in committee.iter().enumerate() { + attestation_duty_map.insert( + *validator_index as u64, + (slot, *shard, committee_index as u64), + ); + } + } + + epoch_committees.push(slot_committees) + } + + Ok(EpochCache { + initialized: true, + committees: epoch_committees, + attestation_duty_map, + shard_committee_index_map, + }) + } +} diff --git a/eth2/types/src/beacon_state/helpers.rs b/eth2/types/src/beacon_state/helpers.rs new file mode 100644 index 000000000..c93b16f76 --- /dev/null +++ b/eth2/types/src/beacon_state/helpers.rs @@ -0,0 +1,20 @@ +use crate::*; + +/// Verify ``bitfield`` against the ``committee_size``. +/// +/// Is title `verify_bitfield` in spec. 
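A sketch of how a consumer might read a validator's duty out of an initialized cache. It assumes `AttestationDutyMap` is a `HashMap<u64, (Slot, u64, u64)>` keyed by validator index (matching the `insert` call in `EpochCache::initialized` above) and that `Slot` is in scope; the helper name is made up.

fn attestation_duty_sketch(
    cache: &EpochCache,
    validator_index: u64,
) -> Option<&(Slot, u64, u64)> {
    // (slot, shard, committee_index), as stored by `initialized(..)`.
    cache.attestation_duty_map.get(&validator_index)
}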
+/// +/// Spec v0.4.0 +pub fn verify_bitfield_length(bitfield: &Bitfield, committee_size: usize) -> bool { + if bitfield.num_bytes() != ((committee_size + 7) / 8) { + return false; + } + + for i in committee_size..(bitfield.num_bytes() * 8) { + if bitfield.get(i).expect("Impossible due to previous check.") { + return false; + } + } + + true +} diff --git a/eth2/types/src/beacon_state/tests.rs b/eth2/types/src/beacon_state/tests.rs index 7e25d4dba..40bfd146c 100644 --- a/eth2/types/src/beacon_state/tests.rs +++ b/eth2/types/src/beacon_state/tests.rs @@ -2,75 +2,62 @@ use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; -use crate::{ - beacon_state::BeaconStateError, BeaconState, ChainSpec, Deposit, DepositData, DepositInput, - Eth1Data, Hash256, Keypair, -}; -use bls::create_proof_of_possession; +use crate::{BeaconState, ChainSpec}; use ssz::{ssz_encode, Decodable}; -struct BeaconStateTestBuilder { - pub genesis_time: u64, - pub initial_validator_deposits: Vec, - pub latest_eth1_data: Eth1Data, - pub spec: ChainSpec, - pub keypairs: Vec, -} - -impl BeaconStateTestBuilder { - pub fn with_random_validators(validator_count: usize) -> Self { - let genesis_time = 10_000_000; - let keypairs: Vec = (0..validator_count) - .collect::>() - .iter() - .map(|_| Keypair::random()) - .collect(); - let initial_validator_deposits = keypairs - .iter() - .map(|keypair| Deposit { - branch: vec![], // branch verification is not specified. - index: 0, // index verification is not specified. - deposit_data: DepositData { - amount: 32_000_000_000, // 32 ETH (in Gwei) - timestamp: genesis_time - 1, - deposit_input: DepositInput { - pubkey: keypair.pk.clone(), - withdrawal_credentials: Hash256::zero(), // Withdrawal not possible. - proof_of_possession: create_proof_of_possession(&keypair), - }, - }, - }) - .collect(); - let latest_eth1_data = Eth1Data { - deposit_root: Hash256::zero(), - block_hash: Hash256::zero(), - }; - let spec = ChainSpec::foundation(); - - Self { - genesis_time, - initial_validator_deposits, - latest_eth1_data, - spec, - keypairs, - } - } - - pub fn build(&self) -> Result { - BeaconState::genesis( - self.genesis_time, - self.initial_validator_deposits.clone(), - self.latest_eth1_data.clone(), - &self.spec, - ) - } -} - #[test] pub fn can_produce_genesis_block() { - let builder = BeaconStateTestBuilder::with_random_validators(2); + let mut builder = BeaconStateBuilder::new(2); + builder.build().unwrap(); +} + +/// Tests that `get_attestation_participants` is consistent with the result of +/// get_crosslink_committees_at_slot` with a full bitfield. 
+#[test] +pub fn get_attestation_participants_consistency() { + let mut rng = XorShiftRng::from_seed([42; 16]); + + let mut builder = BeaconStateBuilder::new(8); + builder.spec = ChainSpec::few_validators(); builder.build().unwrap(); + + let mut state = builder.cloned_state(); + let spec = builder.spec.clone(); + + state + .build_epoch_cache(RelativeEpoch::Previous, &spec) + .unwrap(); + state + .build_epoch_cache(RelativeEpoch::Current, &spec) + .unwrap(); + state.build_epoch_cache(RelativeEpoch::Next, &spec).unwrap(); + + for slot in state + .slot + .epoch(spec.slots_per_epoch) + .slot_iter(spec.slots_per_epoch) + { + let committees = state.get_crosslink_committees_at_slot(slot, &spec).unwrap(); + + for (committee, shard) in committees { + let mut attestation_data = AttestationData::random_for_test(&mut rng); + attestation_data.slot = slot; + attestation_data.shard = *shard; + + let mut bitfield = Bitfield::new(); + for (i, _) in committee.iter().enumerate() { + bitfield.set(i, true); + } + + assert_eq!( + state + .get_attestation_participants(&attestation_data, &bitfield, &spec) + .unwrap(), + *committee + ); + } + } } #[test] diff --git a/eth2/types/src/casper_slashing.rs b/eth2/types/src/casper_slashing.rs deleted file mode 100644 index 6346db65c..000000000 --- a/eth2/types/src/casper_slashing.rs +++ /dev/null @@ -1,60 +0,0 @@ -use super::SlashableVoteData; -use crate::test_utils::TestRandom; -use rand::RngCore; -use serde_derive::Serialize; -use ssz::{hash, TreeHash}; -use ssz_derive::{Decode, Encode}; - -#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)] -pub struct CasperSlashing { - pub slashable_vote_data_1: SlashableVoteData, - pub slashable_vote_data_2: SlashableVoteData, -} - -impl TreeHash for CasperSlashing { - fn hash_tree_root_internal(&self) -> Vec { - let mut result: Vec = vec![]; - result.append(&mut self.slashable_vote_data_1.hash_tree_root_internal()); - result.append(&mut self.slashable_vote_data_2.hash_tree_root_internal()); - hash(&result) - } -} - -impl TestRandom for CasperSlashing { - fn random_for_test(rng: &mut T) -> Self { - Self { - slashable_vote_data_1: <_>::random_for_test(rng), - slashable_vote_data_2: <_>::random_for_test(rng), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable}; - - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = CasperSlashing::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = CasperSlashing::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } -} diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index b5d5689e3..789bb6c0c 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -1,11 +1,20 @@ -use crate::{Address, Epoch, Hash256, Slot}; +use crate::{Address, Epoch, Fork, Hash256, Slot}; use bls::Signature; const GWEI: u64 = 1_000_000_000; +pub enum Domain { + Deposit, + Attestation, + Proposal, + Exit, + Randao, + Transfer, +} + /// Holds all the "constants" for a BeaconChain. 
/// -/// Spec v0.2.0 +/// Spec v0.4.0 #[derive(PartialEq, Debug, Clone)] pub struct ChainSpec { /* @@ -16,7 +25,7 @@ pub struct ChainSpec { pub max_balance_churn_quotient: u64, pub beacon_chain_shard_number: u64, pub max_indices_per_slashable_vote: u64, - pub max_withdrawals_per_epoch: u64, + pub max_exit_dequeues_per_epoch: u64, pub shuffle_round_count: u8, /* @@ -48,29 +57,30 @@ pub struct ChainSpec { /* * Time parameters */ - pub slot_duration: u64, + pub seconds_per_slot: u64, pub min_attestation_inclusion_delay: u64, - pub epoch_length: u64, - pub seed_lookahead: Epoch, - pub entry_exit_delay: u64, - pub eth1_data_voting_period: u64, - pub min_validator_withdrawal_epochs: Epoch, + pub slots_per_epoch: u64, + pub min_seed_lookahead: Epoch, + pub activation_exit_delay: u64, + pub epochs_per_eth1_voting_period: u64, + pub min_validator_withdrawability_delay: Epoch, /* * State list lengths */ pub latest_block_roots_length: usize, pub latest_randao_mixes_length: usize, - pub latest_index_roots_length: usize, - pub latest_penalized_exit_length: usize, + pub latest_active_index_roots_length: usize, + pub latest_slashed_exit_length: usize, /* * Reward and penalty quotients */ pub base_reward_quotient: u64, pub whistleblower_reward_quotient: u64, - pub includer_reward_quotient: u64, + pub attestation_inclusion_reward_quotient: u64, pub inactivity_penalty_quotient: u64, + pub min_penalty_quotient: u64, /* * Max operations per block @@ -79,29 +89,63 @@ pub struct ChainSpec { pub max_attester_slashings: u64, pub max_attestations: u64, pub max_deposits: u64, - pub max_exits: u64, + pub max_voluntary_exits: u64, + pub max_transfers: u64, /* * Signature domains + * + * Fields should be private to prevent accessing a domain that hasn't been modified to suit + * some `Fork`. + * + * Use `ChainSpec::get_domain(..)` to access these values. */ - pub domain_deposit: u64, - pub domain_attestation: u64, - pub domain_proposal: u64, - pub domain_exit: u64, - pub domain_randao: u64, + domain_deposit: u64, + domain_attestation: u64, + domain_proposal: u64, + domain_exit: u64, + domain_randao: u64, + domain_transfer: u64, } impl ChainSpec { - /// Returns a `ChainSpec` compatible with the specification from Ethereum Foundation. + /// Return the number of committees in one epoch. /// - /// Of course, the actual foundation specs are unknown at this point so these are just a rough - /// estimate. + /// Spec v0.4.0 + pub fn get_epoch_committee_count(&self, active_validator_count: usize) -> u64 { + std::cmp::max( + 1, + std::cmp::min( + self.shard_count / self.slots_per_epoch, + active_validator_count as u64 / self.slots_per_epoch / self.target_committee_size, + ), + ) * self.slots_per_epoch + } + + /// Get the domain number that represents the fork meta and signature domain. /// - /// Spec v0.2.0 + /// Spec v0.4.0 + pub fn get_domain(&self, epoch: Epoch, domain: Domain, fork: &Fork) -> u64 { + let domain_constant = match domain { + Domain::Deposit => self.domain_deposit, + Domain::Attestation => self.domain_attestation, + Domain::Proposal => self.domain_proposal, + Domain::Exit => self.domain_exit, + Domain::Randao => self.domain_randao, + Domain::Transfer => self.domain_transfer, + }; + + let fork_version = fork.get_fork_version(epoch); + fork_version * u64::pow(2, 32) + domain_constant + } + + /// Returns a `ChainSpec` compatible with the Ethereum Foundation specification. 
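Two small worked examples for the helpers above, using only values visible in this diff (`few_validators()` sets shard_count = 8, slots_per_epoch = 8, target_committee_size = 1; `foundation()` sets domain_exit = 3). They are illustrative and could live in the crate's tests.

fn chain_spec_examples() {
    // 8 active validators: max(1, min(8 / 8, 8 / 8 / 1)) * 8 == 8 committees.
    let spec = ChainSpec::few_validators();
    assert_eq!(spec.get_epoch_committee_count(8), 8);

    // Domain packing: the high 32 bits carry the fork version, the low 32 bits
    // the domain constant.
    let spec = ChainSpec::foundation();
    let fork = Fork {
        previous_version: 3,
        current_version: 3,
        epoch: Epoch::new(0),
    };
    let domain = spec.get_domain(Epoch::new(5), Domain::Exit, &fork);
    assert_eq!(domain >> 32, 3);
    assert_eq!(domain & 0xffff_ffff, 3);
}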
+ /// + /// Spec v0.4.0 pub fn foundation() -> Self { - let genesis_slot = Slot::new(2_u64.pow(19)); - let epoch_length = 64; - let genesis_epoch = genesis_slot.epoch(epoch_length); + let genesis_slot = Slot::new(2_u64.pow(32)); + let slots_per_epoch = 64; + let genesis_epoch = genesis_slot.epoch(slots_per_epoch); Self { /* @@ -112,7 +156,7 @@ impl ChainSpec { max_balance_churn_quotient: 32, beacon_chain_shard_number: u64::max_value(), max_indices_per_slashable_vote: 4_096, - max_withdrawals_per_epoch: 4, + max_exit_dequeues_per_epoch: 4, shuffle_round_count: 90, /* @@ -133,7 +177,7 @@ impl ChainSpec { * Initial Values */ genesis_fork_version: 0, - genesis_slot: Slot::new(2_u64.pow(19)), + genesis_slot, genesis_epoch, genesis_start_shard: 0, far_future_epoch: Epoch::new(u64::max_value()), @@ -144,29 +188,30 @@ impl ChainSpec { /* * Time parameters */ - slot_duration: 6, + seconds_per_slot: 6, min_attestation_inclusion_delay: 4, - epoch_length, - seed_lookahead: Epoch::new(1), - entry_exit_delay: 4, - eth1_data_voting_period: 16, - min_validator_withdrawal_epochs: Epoch::new(256), + slots_per_epoch, + min_seed_lookahead: Epoch::new(1), + activation_exit_delay: 4, + epochs_per_eth1_voting_period: 16, + min_validator_withdrawability_delay: Epoch::new(256), /* * State list lengths */ latest_block_roots_length: 8_192, latest_randao_mixes_length: 8_192, - latest_index_roots_length: 8_192, - latest_penalized_exit_length: 8_192, + latest_active_index_roots_length: 8_192, + latest_slashed_exit_length: 8_192, /* * Reward and penalty quotients */ base_reward_quotient: 32, whistleblower_reward_quotient: 512, - includer_reward_quotient: 8, + attestation_inclusion_reward_quotient: 8, inactivity_penalty_quotient: 16_777_216, + min_penalty_quotient: 32, /* * Max operations per block @@ -175,7 +220,8 @@ impl ChainSpec { max_attester_slashings: 1, max_attestations: 128, max_deposits: 16, - max_exits: 16, + max_voluntary_exits: 16, + max_transfers: 16, /* * Signature domains @@ -185,25 +231,24 @@ impl ChainSpec { domain_proposal: 2, domain_exit: 3, domain_randao: 4, + domain_transfer: 5, } } -} -impl ChainSpec { /// Returns a `ChainSpec` compatible with the specification suitable for 8 validators. /// - /// Spec v0.2.0 + /// Spec v0.4.0 pub fn few_validators() -> Self { - let genesis_slot = Slot::new(2_u64.pow(19)); - let epoch_length = 8; - let genesis_epoch = genesis_slot.epoch(epoch_length); + let genesis_slot = Slot::new(2_u64.pow(32)); + let slots_per_epoch = 8; + let genesis_epoch = genesis_slot.epoch(slots_per_epoch); Self { - shard_count: 1, + shard_count: 8, target_committee_size: 1, genesis_slot, genesis_epoch, - epoch_length, + slots_per_epoch, ..ChainSpec::foundation() } } diff --git a/eth2/types/src/crosslink.rs b/eth2/types/src/crosslink.rs index 19c71f604..f49195a75 100644 --- a/eth2/types/src/crosslink.rs +++ b/eth2/types/src/crosslink.rs @@ -2,48 +2,25 @@ use crate::test_utils::TestRandom; use crate::{Epoch, Hash256}; use rand::RngCore; use serde_derive::Serialize; -use ssz::{hash, TreeHash}; -use ssz_derive::{Decode, Encode}; +use ssz_derive::{Decode, Encode, TreeHash}; +use test_random_derive::TestRandom; -#[derive(Debug, Clone, PartialEq, Default, Serialize, Hash, Encode, Decode)] +/// Specifies the block hash for a shard at an epoch. 
+/// +/// Spec v0.4.0 +#[derive( + Debug, Clone, PartialEq, Default, Serialize, Hash, Encode, Decode, TreeHash, TestRandom, +)] pub struct Crosslink { pub epoch: Epoch, - pub shard_block_root: Hash256, -} - -impl Crosslink { - /// Generates a new instance where `dynasty` and `hash` are both zero. - pub fn zero() -> Self { - Self { - epoch: Epoch::new(0), - shard_block_root: Hash256::zero(), - } - } -} - -impl TreeHash for Crosslink { - fn hash_tree_root_internal(&self) -> Vec { - let mut result: Vec = vec![]; - result.append(&mut self.epoch.hash_tree_root_internal()); - result.append(&mut self.shard_block_root.hash_tree_root_internal()); - hash(&result) - } -} - -impl TestRandom for Crosslink { - fn random_for_test(rng: &mut T) -> Self { - Self { - epoch: <_>::random_for_test(rng), - shard_block_root: <_>::random_for_test(rng), - } - } + pub crosslink_data_root: Hash256, } #[cfg(test)] mod tests { use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable}; + use ssz::{ssz_encode, Decodable, TreeHash}; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/deposit.rs b/eth2/types/src/deposit.rs index 78f43532a..2e69ea599 100644 --- a/eth2/types/src/deposit.rs +++ b/eth2/types/src/deposit.rs @@ -2,41 +2,24 @@ use super::{DepositData, Hash256}; use crate::test_utils::TestRandom; use rand::RngCore; use serde_derive::Serialize; -use ssz::{hash, TreeHash}; -use ssz_derive::{Decode, Encode}; +use ssz_derive::{Decode, Encode, TreeHash}; +use test_random_derive::TestRandom; -#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)] +/// A deposit to potentially become a beacon chain validator. +/// +/// Spec v0.4.0 +#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] pub struct Deposit { pub branch: Vec, pub index: u64, pub deposit_data: DepositData, } -impl TreeHash for Deposit { - fn hash_tree_root_internal(&self) -> Vec { - let mut result: Vec = vec![]; - result.append(&mut self.branch.hash_tree_root_internal()); - result.append(&mut self.index.hash_tree_root_internal()); - result.append(&mut self.deposit_data.hash_tree_root_internal()); - hash(&result) - } -} - -impl TestRandom for Deposit { - fn random_for_test(rng: &mut T) -> Self { - Self { - branch: <_>::random_for_test(rng), - index: <_>::random_for_test(rng), - deposit_data: <_>::random_for_test(rng), - } - } -} - #[cfg(test)] mod tests { use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable}; + use ssz::{ssz_encode, Decodable, TreeHash}; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/deposit_data.rs b/eth2/types/src/deposit_data.rs index 8f49deb3c..1eb2722a9 100644 --- a/eth2/types/src/deposit_data.rs +++ b/eth2/types/src/deposit_data.rs @@ -2,41 +2,24 @@ use super::DepositInput; use crate::test_utils::TestRandom; use rand::RngCore; use serde_derive::Serialize; -use ssz::{hash, TreeHash}; -use ssz_derive::{Decode, Encode}; +use ssz_derive::{Decode, Encode, TreeHash}; +use test_random_derive::TestRandom; -#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)] +/// Data generated by the deposit contract. 
+/// +/// Spec v0.4.0 +#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] pub struct DepositData { pub amount: u64, pub timestamp: u64, pub deposit_input: DepositInput, } -impl TreeHash for DepositData { - fn hash_tree_root_internal(&self) -> Vec { - let mut result: Vec = vec![]; - result.append(&mut self.amount.hash_tree_root_internal()); - result.append(&mut self.timestamp.hash_tree_root_internal()); - result.append(&mut self.deposit_input.hash_tree_root_internal()); - hash(&result) - } -} - -impl TestRandom for DepositData { - fn random_for_test(rng: &mut T) -> Self { - Self { - amount: <_>::random_for_test(rng), - timestamp: <_>::random_for_test(rng), - deposit_input: <_>::random_for_test(rng), - } - } -} - #[cfg(test)] mod tests { use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable}; + use ssz::{ssz_encode, Decodable, TreeHash}; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/deposit_input.rs b/eth2/types/src/deposit_input.rs index 7556fc2ca..c4c79c3d1 100644 --- a/eth2/types/src/deposit_input.rs +++ b/eth2/types/src/deposit_input.rs @@ -3,41 +3,24 @@ use crate::test_utils::TestRandom; use bls::{PublicKey, Signature}; use rand::RngCore; use serde_derive::Serialize; -use ssz::{hash, TreeHash}; -use ssz_derive::{Decode, Encode}; +use ssz_derive::{Decode, Encode, TreeHash}; +use test_random_derive::TestRandom; -#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)] +/// The data supplied by the user to the deposit contract. +/// +/// Spec v0.4.0 +#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] pub struct DepositInput { pub pubkey: PublicKey, pub withdrawal_credentials: Hash256, pub proof_of_possession: Signature, } -impl TreeHash for DepositInput { - fn hash_tree_root_internal(&self) -> Vec { - let mut result: Vec = vec![]; - result.append(&mut self.pubkey.hash_tree_root_internal()); - result.append(&mut self.withdrawal_credentials.hash_tree_root_internal()); - result.append(&mut self.proof_of_possession.hash_tree_root_internal()); - hash(&result) - } -} - -impl TestRandom for DepositInput { - fn random_for_test(rng: &mut T) -> Self { - Self { - pubkey: <_>::random_for_test(rng), - withdrawal_credentials: <_>::random_for_test(rng), - proof_of_possession: <_>::random_for_test(rng), - } - } -} - #[cfg(test)] mod tests { use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable}; + use ssz::{ssz_encode, Decodable, TreeHash}; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/eth1_data.rs b/eth2/types/src/eth1_data.rs index b0dc14e7a..2c817ca38 100644 --- a/eth2/types/src/eth1_data.rs +++ b/eth2/types/src/eth1_data.rs @@ -2,39 +2,23 @@ use super::Hash256; use crate::test_utils::TestRandom; use rand::RngCore; use serde_derive::Serialize; -use ssz::{hash, TreeHash}; -use ssz_derive::{Decode, Encode}; +use ssz_derive::{Decode, Encode, TreeHash}; +use test_random_derive::TestRandom; -// Note: this is refer to as DepositRootVote in specs -#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode)] +/// Contains data obtained from the Eth1 chain. 
+/// +/// Spec v0.4.0 +#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode, TreeHash, TestRandom)] pub struct Eth1Data { pub deposit_root: Hash256, pub block_hash: Hash256, } -impl TreeHash for Eth1Data { - fn hash_tree_root_internal(&self) -> Vec { - let mut result: Vec = vec![]; - result.append(&mut self.deposit_root.hash_tree_root_internal()); - result.append(&mut self.block_hash.hash_tree_root_internal()); - hash(&result) - } -} - -impl TestRandom for Eth1Data { - fn random_for_test(rng: &mut T) -> Self { - Self { - deposit_root: <_>::random_for_test(rng), - block_hash: <_>::random_for_test(rng), - } - } -} - #[cfg(test)] mod tests { use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable}; + use ssz::{ssz_encode, Decodable, TreeHash}; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/eth1_data_vote.rs b/eth2/types/src/eth1_data_vote.rs index eda6e6a6a..898145575 100644 --- a/eth2/types/src/eth1_data_vote.rs +++ b/eth2/types/src/eth1_data_vote.rs @@ -2,39 +2,23 @@ use super::Eth1Data; use crate::test_utils::TestRandom; use rand::RngCore; use serde_derive::Serialize; -use ssz::{hash, TreeHash}; -use ssz_derive::{Decode, Encode}; +use ssz_derive::{Decode, Encode, TreeHash}; +use test_random_derive::TestRandom; -// Note: this is refer to as DepositRootVote in specs -#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode)] +/// A summation of votes for some `Eth1Data`. +/// +/// Spec v0.4.0 +#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode, TreeHash, TestRandom)] pub struct Eth1DataVote { pub eth1_data: Eth1Data, pub vote_count: u64, } -impl TreeHash for Eth1DataVote { - fn hash_tree_root_internal(&self) -> Vec { - let mut result: Vec = vec![]; - result.append(&mut self.eth1_data.hash_tree_root_internal()); - result.append(&mut self.vote_count.hash_tree_root_internal()); - hash(&result) - } -} - -impl TestRandom for Eth1DataVote { - fn random_for_test(rng: &mut T) -> Self { - Self { - eth1_data: <_>::random_for_test(rng), - vote_count: <_>::random_for_test(rng), - } - } -} - #[cfg(test)] mod tests { use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable}; + use ssz::{ssz_encode, Decodable, TreeHash}; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/exit.rs b/eth2/types/src/exit.rs deleted file mode 100644 index 18d743b83..000000000 --- a/eth2/types/src/exit.rs +++ /dev/null @@ -1,63 +0,0 @@ -use crate::{test_utils::TestRandom, Epoch}; -use bls::Signature; -use rand::RngCore; -use serde_derive::Serialize; -use ssz::{hash, TreeHash}; -use ssz_derive::{Decode, Encode}; - -#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)] -pub struct Exit { - pub epoch: Epoch, - pub validator_index: u64, - pub signature: Signature, -} - -impl TreeHash for Exit { - fn hash_tree_root_internal(&self) -> Vec { - let mut result: Vec = vec![]; - result.append(&mut self.epoch.hash_tree_root_internal()); - result.append(&mut self.validator_index.hash_tree_root_internal()); - result.append(&mut self.signature.hash_tree_root_internal()); - hash(&result) - } -} - -impl TestRandom for Exit { - fn random_for_test(rng: &mut T) -> Self { - Self { - epoch: <_>::random_for_test(rng), - validator_index: <_>::random_for_test(rng), - signature: <_>::random_for_test(rng), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable}; 
- - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Exit::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Exit::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } -} diff --git a/eth2/types/src/fork.rs b/eth2/types/src/fork.rs index 85d530e19..0acd6da90 100644 --- a/eth2/types/src/fork.rs +++ b/eth2/types/src/fork.rs @@ -1,33 +1,28 @@ use crate::{test_utils::TestRandom, Epoch}; use rand::RngCore; use serde_derive::Serialize; -use ssz::{hash, TreeHash}; -use ssz_derive::{Decode, Encode}; +use ssz_derive::{Decode, Encode, TreeHash}; +use test_random_derive::TestRandom; -#[derive(Debug, Clone, PartialEq, Default, Serialize, Encode, Decode)] +/// Specifies a fork of the `BeaconChain`, to prevent replay attacks. +/// +/// Spec v0.4.0 +#[derive(Debug, Clone, PartialEq, Default, Serialize, Encode, Decode, TreeHash, TestRandom)] pub struct Fork { pub previous_version: u64, pub current_version: u64, pub epoch: Epoch, } -impl TreeHash for Fork { - fn hash_tree_root_internal(&self) -> Vec { - let mut result: Vec = vec![]; - result.append(&mut self.previous_version.hash_tree_root_internal()); - result.append(&mut self.current_version.hash_tree_root_internal()); - result.append(&mut self.epoch.hash_tree_root_internal()); - hash(&result) - } -} - -impl TestRandom for Fork { - fn random_for_test(rng: &mut T) -> Self { - Self { - previous_version: <_>::random_for_test(rng), - current_version: <_>::random_for_test(rng), - epoch: <_>::random_for_test(rng), +impl Fork { + /// Return the fork version of the given ``epoch``. 
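A minimal illustration of the epoch-based version selection documented above (the accessor body follows in the next hunk): epochs before `fork.epoch` resolve to the previous version, while the fork epoch and later resolve to the current one.

fn fork_version_example() {
    let fork = Fork {
        previous_version: 0,
        current_version: 1,
        epoch: Epoch::new(10),
    };
    assert_eq!(fork.get_fork_version(Epoch::new(9)), 0);
    assert_eq!(fork.get_fork_version(Epoch::new(10)), 1);
    assert_eq!(fork.get_fork_version(Epoch::new(11)), 1);
}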
+ /// + /// Spec v0.4.0 + pub fn get_fork_version(&self, epoch: Epoch) -> u64 { + if epoch < self.epoch { + return self.previous_version; } + self.current_version } } @@ -35,7 +30,7 @@ impl TestRandom for Fork { mod tests { use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable}; + use ssz::{ssz_encode, Decodable, TreeHash}; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index f2c128440..9bf60f2c9 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -7,7 +7,6 @@ pub mod attester_slashing; pub mod beacon_block; pub mod beacon_block_body; pub mod beacon_state; -pub mod casper_slashing; pub mod chain_spec; pub mod crosslink; pub mod deposit; @@ -15,23 +14,22 @@ pub mod deposit_data; pub mod deposit_input; pub mod eth1_data; pub mod eth1_data_vote; -pub mod exit; pub mod fork; pub mod free_attestation; pub mod pending_attestation; -pub mod proposal_signed_data; +pub mod proposal; pub mod proposer_slashing; pub mod readers; pub mod shard_reassignment_record; pub mod slashable_attestation; -pub mod slashable_vote_data; +pub mod transfer; +pub mod voluntary_exit; #[macro_use] pub mod slot_epoch_macros; pub mod slot_epoch; pub mod slot_height; pub mod validator; pub mod validator_registry; -pub mod validator_registry_delta_block; use ethereum_types::{H160, H256, U256}; use std::collections::HashMap; @@ -42,27 +40,25 @@ pub use crate::attestation_data_and_custody_bit::AttestationDataAndCustodyBit; pub use crate::attester_slashing::AttesterSlashing; pub use crate::beacon_block::BeaconBlock; pub use crate::beacon_block_body::BeaconBlockBody; -pub use crate::beacon_state::BeaconState; -pub use crate::casper_slashing::CasperSlashing; -pub use crate::chain_spec::ChainSpec; +pub use crate::beacon_state::{BeaconState, Error as BeaconStateError, RelativeEpoch}; +pub use crate::chain_spec::{ChainSpec, Domain}; pub use crate::crosslink::Crosslink; pub use crate::deposit::Deposit; pub use crate::deposit_data::DepositData; pub use crate::deposit_input::DepositInput; pub use crate::eth1_data::Eth1Data; pub use crate::eth1_data_vote::Eth1DataVote; -pub use crate::exit::Exit; pub use crate::fork::Fork; pub use crate::free_attestation::FreeAttestation; pub use crate::pending_attestation::PendingAttestation; -pub use crate::proposal_signed_data::ProposalSignedData; +pub use crate::proposal::Proposal; pub use crate::proposer_slashing::ProposerSlashing; pub use crate::slashable_attestation::SlashableAttestation; -pub use crate::slashable_vote_data::SlashableVoteData; pub use crate::slot_epoch::{Epoch, Slot}; pub use crate::slot_height::SlotHeight; -pub use crate::validator::{StatusFlags as ValidatorStatusFlags, Validator}; -pub use crate::validator_registry_delta_block::ValidatorRegistryDeltaBlock; +pub use crate::transfer::Transfer; +pub use crate::validator::Validator; +pub use crate::voluntary_exit::VoluntaryExit; pub type Hash256 = H256; pub type Address = H160; diff --git a/eth2/types/src/pending_attestation.rs b/eth2/types/src/pending_attestation.rs index 42f990210..0430d18ba 100644 --- a/eth2/types/src/pending_attestation.rs +++ b/eth2/types/src/pending_attestation.rs @@ -2,10 +2,13 @@ use crate::test_utils::TestRandom; use crate::{AttestationData, Bitfield, Slot}; use rand::RngCore; use serde_derive::Serialize; -use ssz::{hash, TreeHash}; -use ssz_derive::{Decode, Encode}; +use ssz_derive::{Decode, Encode, TreeHash}; +use test_random_derive::TestRandom; -#[derive(Debug, Clone, PartialEq, 
Serialize, Encode, Decode)] +/// An attestation that has been included in the state but not yet fully processed. +/// +/// Spec v0.4.0 +#[derive(Debug, Clone, PartialEq, Serialize, Encode, Decode, TreeHash, TestRandom)] pub struct PendingAttestation { pub aggregation_bitfield: Bitfield, pub data: AttestationData, @@ -13,33 +16,11 @@ pub struct PendingAttestation { pub inclusion_slot: Slot, } -impl TreeHash for PendingAttestation { - fn hash_tree_root_internal(&self) -> Vec { - let mut result: Vec = vec![]; - result.append(&mut self.aggregation_bitfield.hash_tree_root_internal()); - result.append(&mut self.data.hash_tree_root_internal()); - result.append(&mut self.custody_bitfield.hash_tree_root_internal()); - result.append(&mut self.inclusion_slot.hash_tree_root_internal()); - hash(&result) - } -} - -impl TestRandom for PendingAttestation { - fn random_for_test(rng: &mut T) -> Self { - Self { - data: <_>::random_for_test(rng), - aggregation_bitfield: <_>::random_for_test(rng), - custody_bitfield: <_>::random_for_test(rng), - inclusion_slot: <_>::random_for_test(rng), - } - } -} - #[cfg(test)] mod tests { use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable}; + use ssz::{ssz_encode, Decodable, TreeHash}; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/proposal.rs b/eth2/types/src/proposal.rs new file mode 100644 index 000000000..b1fd737a0 --- /dev/null +++ b/eth2/types/src/proposal.rs @@ -0,0 +1,78 @@ +use crate::test_utils::TestRandom; +use crate::{Hash256, Slot}; +use bls::Signature; +use rand::RngCore; +use serde_derive::Serialize; +use ssz::TreeHash; +use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; +use test_random_derive::TestRandom; + +/// A proposal for some shard or beacon block. 
+/// +/// Spec v0.4.0 +#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom, SignedRoot)] +pub struct Proposal { + pub slot: Slot, + /// Shard number (spec.beacon_chain_shard_number for beacon chain) + pub shard: u64, + pub block_root: Hash256, + pub signature: Signature, +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::{ssz_encode, Decodable, SignedRoot, TreeHash}; + + #[test] + pub fn test_ssz_round_trip() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = Proposal::random_for_test(&mut rng); + + let bytes = ssz_encode(&original); + let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); + + assert_eq!(original, decoded); + } + + #[test] + pub fn test_hash_tree_root_internal() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = Proposal::random_for_test(&mut rng); + + let result = original.hash_tree_root_internal(); + + assert_eq!(result.len(), 32); + // TODO: Add further tests + // https://github.com/sigp/lighthouse/issues/170 + } + + #[derive(TreeHash)] + struct SignedProposal { + pub slot: Slot, + pub shard: u64, + pub block_root: Hash256, + } + + impl Into for Proposal { + fn into(self) -> SignedProposal { + SignedProposal { + slot: self.slot, + shard: self.shard, + block_root: self.block_root, + } + } + } + + #[test] + pub fn test_signed_root() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = Proposal::random_for_test(&mut rng); + + let other: SignedProposal = original.clone().into(); + + assert_eq!(original.signed_root(), other.hash_tree_root()); + } + +} diff --git a/eth2/types/src/proposal_signed_data.rs b/eth2/types/src/proposal_signed_data.rs index 63c0f1ce6..58f45a41d 100644 --- a/eth2/types/src/proposal_signed_data.rs +++ b/eth2/types/src/proposal_signed_data.rs @@ -2,62 +2,19 @@ use crate::test_utils::TestRandom; use crate::{Hash256, Slot}; use rand::RngCore; use serde_derive::Serialize; -use ssz::{hash, TreeHash}; -use ssz_derive::{Decode, Encode}; +use ssz_derive::{Decode, Encode, TreeHash}; +use test_random_derive::TestRandom; -#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode)] +#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode, TreeHash, TestRandom)] pub struct ProposalSignedData { pub slot: Slot, pub shard: u64, pub block_root: Hash256, } -impl TreeHash for ProposalSignedData { - fn hash_tree_root_internal(&self) -> Vec { - let mut result: Vec = vec![]; - result.append(&mut self.slot.hash_tree_root_internal()); - result.append(&mut self.shard.hash_tree_root_internal()); - result.append(&mut self.block_root.hash_tree_root_internal()); - hash(&result) - } -} - -impl TestRandom for ProposalSignedData { - fn random_for_test(rng: &mut T) -> Self { - Self { - slot: <_>::random_for_test(rng), - shard: <_>::random_for_test(rng), - block_root: <_>::random_for_test(rng), - } - } -} - #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = ProposalSignedData::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = ProposalSignedData::random_for_test(&mut rng); - - let 
result = original.hash_tree_root_internal(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(ProposalSignedData); } diff --git a/eth2/types/src/proposer_slashing.rs b/eth2/types/src/proposer_slashing.rs index b3a819a7f..f86e7f3a8 100644 --- a/eth2/types/src/proposer_slashing.rs +++ b/eth2/types/src/proposer_slashing.rs @@ -1,49 +1,29 @@ -use super::ProposalSignedData; +use super::Proposal; use crate::test_utils::TestRandom; -use bls::Signature; use rand::RngCore; use serde_derive::Serialize; -use ssz::{hash, TreeHash}; -use ssz_derive::{Decode, Encode}; +use ssz_derive::{Decode, Encode, TreeHash}; +use test_random_derive::TestRandom; -#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)] +mod builder; + +pub use builder::ProposerSlashingBuilder; + +/// Two conflicting proposals from the same proposer (validator). +/// +/// Spec v0.4.0 +#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] pub struct ProposerSlashing { pub proposer_index: u64, - pub proposal_data_1: ProposalSignedData, - pub proposal_signature_1: Signature, - pub proposal_data_2: ProposalSignedData, - pub proposal_signature_2: Signature, -} - -impl TreeHash for ProposerSlashing { - fn hash_tree_root_internal(&self) -> Vec { - let mut result: Vec = vec![]; - result.append(&mut self.proposer_index.hash_tree_root_internal()); - result.append(&mut self.proposal_data_1.hash_tree_root_internal()); - result.append(&mut self.proposal_signature_1.hash_tree_root_internal()); - result.append(&mut self.proposal_data_2.hash_tree_root_internal()); - result.append(&mut self.proposal_signature_2.hash_tree_root_internal()); - hash(&result) - } -} - -impl TestRandom for ProposerSlashing { - fn random_for_test(rng: &mut T) -> Self { - Self { - proposer_index: <_>::random_for_test(rng), - proposal_data_1: <_>::random_for_test(rng), - proposal_signature_1: <_>::random_for_test(rng), - proposal_data_2: <_>::random_for_test(rng), - proposal_signature_2: <_>::random_for_test(rng), - } - } + pub proposal_1: Proposal, + pub proposal_2: Proposal, } #[cfg(test)] mod tests { use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable}; + use ssz::{ssz_encode, Decodable, TreeHash}; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/proposer_slashing/builder.rs b/eth2/types/src/proposer_slashing/builder.rs new file mode 100644 index 000000000..472a76ec1 --- /dev/null +++ b/eth2/types/src/proposer_slashing/builder.rs @@ -0,0 +1,57 @@ +use crate::*; +use ssz::SignedRoot; + +/// Builds a `ProposerSlashing`. +pub struct ProposerSlashingBuilder(); + +impl ProposerSlashingBuilder { + /// Builds a `ProposerSlashing` that is a double vote. + /// + /// The `signer` function is used to sign the double-vote and accepts: + /// + /// - `validator_index: u64` + /// - `message: &[u8]` + /// - `epoch: Epoch` + /// - `domain: Domain` + /// + /// Where domain is a domain "constant" (e.g., `spec.domain_attestation`). 
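A hypothetical caller of `ProposerSlashingBuilder::double_vote` (defined just below), showing the shape of the `signer` closure. The closure returns an empty signature purely for illustration; a real signer would BLS-sign `message` for the given epoch and domain with the proposer's secret key. The proposer index 42 is arbitrary.

fn proposer_slashing_sketch(spec: &ChainSpec) -> ProposerSlashing {
    let signer = |_proposer_index: u64, _message: &[u8], _epoch: Epoch, _domain: Domain| {
        Signature::empty_signature()
    };

    ProposerSlashingBuilder::double_vote(42, signer, spec)
}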
+ pub fn double_vote<F>(proposer_index: u64, signer: F, spec: &ChainSpec) -> ProposerSlashing + where + F: Fn(u64, &[u8], Epoch, Domain) -> Signature, + { + let slot = Slot::new(0); + let shard = 0; + + let mut proposal_1 = Proposal { + slot, + shard, + block_root: Hash256::from_low_u64_le(1), + signature: Signature::empty_signature(), + }; + + let mut proposal_2 = Proposal { + slot, + shard, + block_root: Hash256::from_low_u64_le(2), + signature: Signature::empty_signature(), + }; + + proposal_1.signature = { + let message = proposal_1.signed_root(); + let epoch = slot.epoch(spec.slots_per_epoch); + signer(proposer_index, &message[..], epoch, Domain::Proposal) + }; + + proposal_2.signature = { + let message = proposal_2.signed_root(); + let epoch = slot.epoch(spec.slots_per_epoch); + signer(proposer_index, &message[..], epoch, Domain::Proposal) + }; + + ProposerSlashing { + proposer_index, + proposal_1, + proposal_2, + } + } +} diff --git a/eth2/types/src/readers/block_reader.rs b/eth2/types/src/readers/block_reader.rs index bcb2d0e63..93157a1a3 100644 --- a/eth2/types/src/readers/block_reader.rs +++ b/eth2/types/src/readers/block_reader.rs @@ -13,7 +13,6 @@ pub trait BeaconBlockReader: Debug + PartialEq { fn slot(&self) -> Slot; fn parent_root(&self) -> Hash256; fn state_root(&self) -> Hash256; - fn canonical_root(&self) -> Hash256; fn into_beacon_block(self) -> Option<BeaconBlock>; } @@ -30,10 +29,6 @@ impl BeaconBlockReader for BeaconBlock { self.state_root } - fn canonical_root(&self) -> Hash256 { - self.canonical_root() - } - fn into_beacon_block(self) -> Option<BeaconBlock> { Some(self) } diff --git a/eth2/types/src/readers/state_reader.rs b/eth2/types/src/readers/state_reader.rs index 92a870855..e469bee57 100644 --- a/eth2/types/src/readers/state_reader.rs +++ b/eth2/types/src/readers/state_reader.rs @@ -1,4 +1,4 @@ -use crate::{BeaconState, Hash256, Slot}; +use crate::{BeaconState, Slot}; use std::fmt::Debug; /// The `BeaconStateReader` provides interfaces for reading a subset of fields of a `BeaconState`. @@ -11,7 +11,6 @@ use std::fmt::Debug; /// "future proofing".
pub trait BeaconStateReader: Debug + PartialEq { fn slot(&self) -> Slot; - fn canonical_root(&self) -> Hash256; fn into_beacon_state(self) -> Option; } @@ -20,10 +19,6 @@ impl BeaconStateReader for BeaconState { self.slot } - fn canonical_root(&self) -> Hash256 { - self.canonical_root() - } - fn into_beacon_state(self) -> Option { Some(self) } diff --git a/eth2/types/src/shard_reassignment_record.rs b/eth2/types/src/shard_reassignment_record.rs index 511fe13ca..f5dfa8676 100644 --- a/eth2/types/src/shard_reassignment_record.rs +++ b/eth2/types/src/shard_reassignment_record.rs @@ -1,41 +1,21 @@ use crate::{test_utils::TestRandom, Slot}; use rand::RngCore; use serde_derive::Serialize; -use ssz::{hash, TreeHash}; -use ssz_derive::{Decode, Encode}; +use ssz_derive::{Decode, Encode, TreeHash}; +use test_random_derive::TestRandom; -#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)] +#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] pub struct ShardReassignmentRecord { pub validator_index: u64, pub shard: u64, pub slot: Slot, } -impl TreeHash for ShardReassignmentRecord { - fn hash_tree_root_internal(&self) -> Vec { - let mut result: Vec = vec![]; - result.append(&mut self.validator_index.hash_tree_root_internal()); - result.append(&mut self.shard.hash_tree_root_internal()); - result.append(&mut self.slot.hash_tree_root_internal()); - hash(&result) - } -} - -impl TestRandom for ShardReassignmentRecord { - fn random_for_test(rng: &mut T) -> Self { - Self { - validator_index: <_>::random_for_test(rng), - shard: <_>::random_for_test(rng), - slot: <_>::random_for_test(rng), - } - } -} - #[cfg(test)] mod tests { use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable}; + use ssz::{ssz_encode, Decodable, TreeHash}; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/slashable_attestation.rs b/eth2/types/src/slashable_attestation.rs index 676954ec2..20ba76cdb 100644 --- a/eth2/types/src/slashable_attestation.rs +++ b/eth2/types/src/slashable_attestation.rs @@ -1,44 +1,124 @@ -use crate::{test_utils::TestRandom, AggregateSignature, AttestationData, Bitfield}; +use crate::{test_utils::TestRandom, AggregateSignature, AttestationData, Bitfield, ChainSpec}; use rand::RngCore; use serde_derive::Serialize; -use ssz::{hash, TreeHash}; -use ssz_derive::{Decode, Encode}; +use ssz::TreeHash; +use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; +use test_random_derive::TestRandom; -#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)] +/// Details an attestation that can be slashable. +/// +/// To be included in an `AttesterSlashing`. +/// +/// Spec v0.4.0 +#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom, SignedRoot)] pub struct SlashableAttestation { + /// Lists validator registry indices, not committee indices. 
pub validator_indices: Vec, pub data: AttestationData, pub custody_bitfield: Bitfield, pub aggregate_signature: AggregateSignature, } -impl TreeHash for SlashableAttestation { - fn hash_tree_root_internal(&self) -> Vec { - let mut result: Vec = vec![]; - result.append(&mut self.validator_indices.hash_tree_root_internal()); - result.append(&mut self.data.hash_tree_root_internal()); - result.append(&mut self.custody_bitfield.hash_tree_root_internal()); - result.append(&mut self.aggregate_signature.hash_tree_root_internal()); - hash(&result) +impl SlashableAttestation { + /// Check if ``attestation_data_1`` and ``attestation_data_2`` have the same target. + /// + /// Spec v0.4.0 + pub fn is_double_vote(&self, other: &SlashableAttestation, spec: &ChainSpec) -> bool { + self.data.slot.epoch(spec.slots_per_epoch) == other.data.slot.epoch(spec.slots_per_epoch) } -} -impl TestRandom for SlashableAttestation { - fn random_for_test(rng: &mut T) -> Self { - Self { - validator_indices: <_>::random_for_test(rng), - data: <_>::random_for_test(rng), - custody_bitfield: <_>::random_for_test(rng), - aggregate_signature: <_>::random_for_test(rng), - } + /// Check if ``attestation_data_1`` surrounds ``attestation_data_2``. + /// + /// Spec v0.4.0 + pub fn is_surround_vote(&self, other: &SlashableAttestation, spec: &ChainSpec) -> bool { + let source_epoch_1 = self.data.justified_epoch; + let source_epoch_2 = other.data.justified_epoch; + let target_epoch_1 = self.data.slot.epoch(spec.slots_per_epoch); + let target_epoch_2 = other.data.slot.epoch(spec.slots_per_epoch); + + (source_epoch_1 < source_epoch_2) & (target_epoch_2 < target_epoch_1) } } #[cfg(test)] mod tests { use super::*; + use crate::chain_spec::ChainSpec; + use crate::slot_epoch::{Epoch, Slot}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable}; + use ssz::{ssz_encode, Decodable, TreeHash}; + + #[test] + pub fn test_is_double_vote_true() { + let spec = ChainSpec::foundation(); + let slashable_vote_first = create_slashable_attestation(1, 1, &spec); + let slashable_vote_second = create_slashable_attestation(1, 1, &spec); + + assert_eq!( + slashable_vote_first.is_double_vote(&slashable_vote_second, &spec), + true + ) + } + + #[test] + pub fn test_is_double_vote_false() { + let spec = ChainSpec::foundation(); + let slashable_vote_first = create_slashable_attestation(1, 1, &spec); + let slashable_vote_second = create_slashable_attestation(2, 1, &spec); + + assert_eq!( + slashable_vote_first.is_double_vote(&slashable_vote_second, &spec), + false + ); + } + + #[test] + pub fn test_is_surround_vote_true() { + let spec = ChainSpec::foundation(); + let slashable_vote_first = create_slashable_attestation(2, 1, &spec); + let slashable_vote_second = create_slashable_attestation(1, 2, &spec); + + assert_eq!( + slashable_vote_first.is_surround_vote(&slashable_vote_second, &spec), + true + ); + } + + #[test] + pub fn test_is_surround_vote_true_realistic() { + let spec = ChainSpec::foundation(); + let slashable_vote_first = create_slashable_attestation(4, 1, &spec); + let slashable_vote_second = create_slashable_attestation(3, 2, &spec); + + assert_eq!( + slashable_vote_first.is_surround_vote(&slashable_vote_second, &spec), + true + ); + } + + #[test] + pub fn test_is_surround_vote_false_source_epoch_fails() { + let spec = ChainSpec::foundation(); + let slashable_vote_first = create_slashable_attestation(2, 2, &spec); + let slashable_vote_second = create_slashable_attestation(1, 1, &spec); + + assert_eq!( + 
slashable_vote_first.is_surround_vote(&slashable_vote_second, &spec), + false + ); + } + + #[test] + pub fn test_is_surround_vote_false_target_epoch_fails() { + let spec = ChainSpec::foundation(); + let slashable_vote_first = create_slashable_attestation(1, 1, &spec); + let slashable_vote_second = create_slashable_attestation(2, 2, &spec); + + assert_eq!( + slashable_vote_first.is_surround_vote(&slashable_vote_second, &spec), + false + ); + } #[test] pub fn test_ssz_round_trip() { @@ -62,4 +142,17 @@ mod tests { // TODO: Add further tests // https://github.com/sigp/lighthouse/issues/170 } + + fn create_slashable_attestation( + slot_factor: u64, + justified_epoch: u64, + spec: &ChainSpec, + ) -> SlashableAttestation { + let mut rng = XorShiftRng::from_seed([42; 16]); + let mut slashable_vote = SlashableAttestation::random_for_test(&mut rng); + + slashable_vote.data.slot = Slot::new(slot_factor * spec.slots_per_epoch); + slashable_vote.data.justified_epoch = Epoch::new(justified_epoch); + slashable_vote + } } diff --git a/eth2/types/src/slashable_vote_data.rs b/eth2/types/src/slashable_vote_data.rs index bdd1d0619..73cf91c61 100644 --- a/eth2/types/src/slashable_vote_data.rs +++ b/eth2/types/src/slashable_vote_data.rs @@ -4,10 +4,10 @@ use crate::test_utils::TestRandom; use bls::AggregateSignature; use rand::RngCore; use serde_derive::Serialize; -use ssz::{hash, TreeHash}; -use ssz_derive::{Decode, Encode}; +use ssz_derive::{Decode, Encode, TreeHash}; +use test_random_derive::TestRandom; -#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)] +#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] pub struct SlashableVoteData { pub custody_bit_0_indices: Vec, pub custody_bit_1_indices: Vec, @@ -36,35 +36,12 @@ impl SlashableVoteData { } } -impl TreeHash for SlashableVoteData { - fn hash_tree_root_internal(&self) -> Vec { - let mut result: Vec = vec![]; - result.append(&mut self.custody_bit_0_indices.hash_tree_root_internal()); - result.append(&mut self.custody_bit_1_indices.hash_tree_root_internal()); - result.append(&mut self.data.hash_tree_root_internal()); - result.append(&mut self.aggregate_signature.hash_tree_root_internal()); - hash(&result) - } -} - -impl TestRandom for SlashableVoteData { - fn random_for_test(rng: &mut T) -> Self { - Self { - custody_bit_0_indices: <_>::random_for_test(rng), - custody_bit_1_indices: <_>::random_for_test(rng), - data: <_>::random_for_test(rng), - aggregate_signature: <_>::random_for_test(rng), - } - } -} - #[cfg(test)] mod tests { use super::*; use crate::chain_spec::ChainSpec; use crate::slot_epoch::{Epoch, Slot}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable}; #[test] pub fn test_is_double_vote_true() { @@ -138,28 +115,7 @@ mod tests { ); } - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = SlashableVoteData::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = SlashableVoteData::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(SlashableVoteData); fn create_slashable_vote_data( slot_factor: u64, diff --git 
a/eth2/types/src/slot_epoch.rs b/eth2/types/src/slot_epoch.rs index eb5a8dced..7753027a6 100644 --- a/eth2/types/src/slot_epoch.rs +++ b/eth2/types/src/slot_epoch.rs @@ -35,8 +35,8 @@ impl Slot { Slot(slot) } - pub fn epoch(self, epoch_length: u64) -> Epoch { - Epoch::from(self.0 / epoch_length) + pub fn epoch(self, slots_per_epoch: u64) -> Epoch { + Epoch::from(self.0 / slots_per_epoch) } pub fn height(self, genesis_slot: Slot) -> SlotHeight { @@ -57,44 +57,45 @@ impl Epoch { Epoch(u64::max_value()) } - pub fn start_slot(self, epoch_length: u64) -> Slot { - Slot::from(self.0.saturating_mul(epoch_length)) + pub fn start_slot(self, slots_per_epoch: u64) -> Slot { + Slot::from(self.0.saturating_mul(slots_per_epoch)) } - pub fn end_slot(self, epoch_length: u64) -> Slot { + pub fn end_slot(self, slots_per_epoch: u64) -> Slot { Slot::from( self.0 .saturating_add(1) - .saturating_mul(epoch_length) + .saturating_mul(slots_per_epoch) .saturating_sub(1), ) } - pub fn slot_iter(&self, epoch_length: u64) -> SlotIter { + pub fn slot_iter(&self, slots_per_epoch: u64) -> SlotIter { SlotIter { - current: self.start_slot(epoch_length), + current_iteration: 0, epoch: self, - epoch_length, + slots_per_epoch, } } } pub struct SlotIter<'a> { - current: Slot, + current_iteration: u64, epoch: &'a Epoch, - epoch_length: u64, + slots_per_epoch: u64, } impl<'a> Iterator for SlotIter<'a> { type Item = Slot; fn next(&mut self) -> Option { - if self.current == self.epoch.end_slot(self.epoch_length) { + if self.current_iteration >= self.slots_per_epoch { None } else { - let previous = self.current; - self.current += 1; - Some(previous) + let start_slot = self.epoch.start_slot(self.slots_per_epoch); + let previous = self.current_iteration; + self.current_iteration += 1; + Some(start_slot + previous) } } } @@ -115,4 +116,22 @@ mod epoch_tests { use ssz::ssz_encode; all_tests!(Epoch); + + #[test] + fn slot_iter() { + let slots_per_epoch = 8; + + let epoch = Epoch::new(0); + + let mut slots = vec![]; + for slot in epoch.slot_iter(slots_per_epoch) { + slots.push(slot); + } + + assert_eq!(slots.len(), slots_per_epoch as usize); + + for i in 0..slots_per_epoch { + assert_eq!(Slot::from(i), slots[i as usize]) + } + } } diff --git a/eth2/types/src/slot_epoch_macros.rs b/eth2/types/src/slot_epoch_macros.rs index b0550f2f8..2148b6cc2 100644 --- a/eth2/types/src/slot_epoch_macros.rs +++ b/eth2/types/src/slot_epoch_macros.rs @@ -20,25 +20,6 @@ macro_rules! impl_from_into_u64 { }; } -// need to truncate for some fork-choice algorithms -macro_rules! impl_into_u32 { - ($main: ident) => { - impl Into for $main { - fn into(self) -> u32 { - assert!(self.0 < u64::from(std::u32::MAX), "Lossy conversion to u32"); - self.0 as u32 - } - } - - impl $main { - pub fn as_u32(&self) -> u32 { - assert!(self.0 < u64::from(std::u32::MAX), "Lossy conversion to u32"); - self.0 as u32 - } - } - }; -} - macro_rules! 
impl_from_into_usize { ($main: ident) => { impl From for $main { diff --git a/eth2/types/src/slot_height.rs b/eth2/types/src/slot_height.rs index afa0ff775..1739227a4 100644 --- a/eth2/types/src/slot_height.rs +++ b/eth2/types/src/slot_height.rs @@ -13,7 +13,6 @@ use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssi pub struct SlotHeight(u64); impl_common!(SlotHeight); -impl_into_u32!(SlotHeight); // SlotHeight can be converted to u32 impl SlotHeight { pub fn new(slot: u64) -> SlotHeight { @@ -24,8 +23,8 @@ impl SlotHeight { Slot::from(self.0.saturating_add(genesis_slot.as_u64())) } - pub fn epoch(self, genesis_slot: u64, epoch_length: u64) -> Epoch { - Epoch::from(self.0.saturating_add(genesis_slot) / epoch_length) + pub fn epoch(self, genesis_slot: u64, slots_per_epoch: u64) -> Epoch { + Epoch::from(self.0.saturating_add(genesis_slot) / slots_per_epoch) } pub fn max_value() -> SlotHeight { diff --git a/eth2/types/src/test_utils/address.rs b/eth2/types/src/test_utils/address.rs index 2d60b72da..13de2dec9 100644 --- a/eth2/types/src/test_utils/address.rs +++ b/eth2/types/src/test_utils/address.rs @@ -6,6 +6,6 @@ impl TestRandom for Address { fn random_for_test(rng: &mut T) -> Self { let mut key_bytes = vec![0; 20]; rng.fill_bytes(&mut key_bytes); - Address::from(&key_bytes[..]) + Address::from_slice(&key_bytes[..]) } } diff --git a/eth2/types/src/test_utils/hash256.rs b/eth2/types/src/test_utils/hash256.rs index 98f5e7899..a227679da 100644 --- a/eth2/types/src/test_utils/hash256.rs +++ b/eth2/types/src/test_utils/hash256.rs @@ -6,6 +6,6 @@ impl TestRandom for Hash256 { fn random_for_test(rng: &mut T) -> Self { let mut key_bytes = vec![0; 32]; rng.fill_bytes(&mut key_bytes); - Hash256::from(&key_bytes[..]) + Hash256::from_slice(&key_bytes[..]) } } diff --git a/eth2/types/src/test_utils/macros.rs b/eth2/types/src/test_utils/macros.rs new file mode 100644 index 000000000..b7c0a6522 --- /dev/null +++ b/eth2/types/src/test_utils/macros.rs @@ -0,0 +1,34 @@ +#[cfg(test)] +#[macro_export] +macro_rules! 
ssz_tests { + ($type: ident) => { + #[test] + pub fn test_ssz_round_trip() { + use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::{ssz_encode, Decodable}; + + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = $type::random_for_test(&mut rng); + + let bytes = ssz_encode(&original); + let (decoded, _) = $type::ssz_decode(&bytes, 0).unwrap(); + + assert_eq!(original, decoded); + } + + #[test] + pub fn test_hash_tree_root_internal() { + use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::TreeHash; + + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = $type::random_for_test(&mut rng); + + let result = original.hash_tree_root_internal(); + + assert_eq!(result.len(), 32); + // TODO: Add further tests + // https://github.com/sigp/lighthouse/issues/170 + } + }; +} diff --git a/eth2/types/src/test_utils/mod.rs b/eth2/types/src/test_utils/mod.rs index eb54f2a53..82e060fca 100644 --- a/eth2/types/src/test_utils/mod.rs +++ b/eth2/types/src/test_utils/mod.rs @@ -17,6 +17,12 @@ where fn random_for_test(rng: &mut T) -> Self; } +impl TestRandom for bool { + fn random_for_test(rng: &mut T) -> Self { + (rng.next_u32() % 2) == 1 + } +} + impl TestRandom for u64 { fn random_for_test(rng: &mut T) -> Self { rng.next_u64() diff --git a/eth2/types/src/test_utils/signature.rs b/eth2/types/src/test_utils/signature.rs index 9ec7aec60..d9995835a 100644 --- a/eth2/types/src/test_utils/signature.rs +++ b/eth2/types/src/test_utils/signature.rs @@ -8,6 +8,6 @@ impl TestRandom for Signature { let mut message = vec![0; 32]; rng.fill_bytes(&mut message); - Signature::new(&message, &secret_key) + Signature::new(&message, 0, &secret_key) } } diff --git a/eth2/types/src/transfer.rs b/eth2/types/src/transfer.rs new file mode 100644 index 000000000..0382dee11 --- /dev/null +++ b/eth2/types/src/transfer.rs @@ -0,0 +1,52 @@ +use super::Slot; +use crate::test_utils::TestRandom; +use bls::{PublicKey, Signature}; +use rand::RngCore; +use serde_derive::Serialize; +use ssz::TreeHash; +use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; +use test_random_derive::TestRandom; + +/// The data submitted to the deposit contract. 
+/// +/// Spec v0.4.0 +#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom, SignedRoot)] +pub struct Transfer { + pub from: u64, + pub to: u64, + pub amount: u64, + pub fee: u64, + pub slot: Slot, + pub pubkey: PublicKey, + pub signature: Signature, +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::{ssz_encode, Decodable, TreeHash}; + + #[test] + pub fn test_ssz_round_trip() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = Transfer::random_for_test(&mut rng); + + let bytes = ssz_encode(&original); + let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); + + assert_eq!(original, decoded); + } + + #[test] + pub fn test_hash_tree_root_internal() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = Transfer::random_for_test(&mut rng); + + let result = original.hash_tree_root_internal(); + + assert_eq!(result.len(), 32); + // TODO: Add further tests + // https://github.com/sigp/lighthouse/issues/170 + } +} diff --git a/eth2/types/src/validator.rs b/eth2/types/src/validator.rs index b832283a0..43701ca05 100644 --- a/eth2/types/src/validator.rs +++ b/eth2/types/src/validator.rs @@ -1,62 +1,37 @@ use crate::{test_utils::TestRandom, Epoch, Hash256, PublicKey}; use rand::RngCore; use serde_derive::Serialize; -use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; +use ssz_derive::{Decode, Encode, TreeHash}; +use test_random_derive::TestRandom; -const STATUS_FLAG_INITIATED_EXIT: u8 = 1; -const STATUS_FLAG_WITHDRAWABLE: u8 = 2; - -#[derive(Debug, PartialEq, Clone, Copy, Serialize)] -pub enum StatusFlags { - InitiatedExit, - Withdrawable, -} - -struct StatusFlagsDecodeError; - -impl From for DecodeError { - fn from(_: StatusFlagsDecodeError) -> DecodeError { - DecodeError::Invalid - } -} - -/// Handles the serialization logic for the `status_flags` field of the `Validator`. -fn status_flag_to_byte(flag: Option) -> u8 { - if let Some(flag) = flag { - match flag { - StatusFlags::InitiatedExit => STATUS_FLAG_INITIATED_EXIT, - StatusFlags::Withdrawable => STATUS_FLAG_WITHDRAWABLE, - } - } else { - 0 - } -} - -/// Handles the deserialization logic for the `status_flags` field of the `Validator`. -fn status_flag_from_byte(flag: u8) -> Result, StatusFlagsDecodeError> { - match flag { - 0 => Ok(None), - 1 => Ok(Some(StatusFlags::InitiatedExit)), - 2 => Ok(Some(StatusFlags::Withdrawable)), - _ => Err(StatusFlagsDecodeError), - } -} - -#[derive(Debug, Clone, PartialEq, Serialize)] +/// Information about a `BeaconChain` validator. +/// +/// Spec v0.4.0 +#[derive(Debug, Clone, PartialEq, Serialize, Encode, Decode, TestRandom, TreeHash)] pub struct Validator { pub pubkey: PublicKey, pub withdrawal_credentials: Hash256, pub activation_epoch: Epoch, pub exit_epoch: Epoch, - pub withdrawal_epoch: Epoch, - pub penalized_epoch: Epoch, - pub status_flags: Option, + pub withdrawable_epoch: Epoch, + pub initiated_exit: bool, + pub slashed: bool, } impl Validator { - /// This predicate indicates if the validator represented by this record is considered "active" at `slot`. - pub fn is_active_at(&self, slot: Epoch) -> bool { - self.activation_epoch <= slot && slot < self.exit_epoch + /// Returns `true` if the validator is considered active at some epoch. + pub fn is_active_at(&self, epoch: Epoch) -> bool { + self.activation_epoch <= epoch && epoch < self.exit_epoch + } + + /// Returns `true` if the validator is considered exited at some epoch. 
+ pub fn is_exited_at(&self, epoch: Epoch) -> bool { + self.exit_epoch <= epoch + } + + /// Returns `true` if the validator is able to withdraw at some epoch. + pub fn is_withdrawable_at(&self, epoch: Epoch) -> bool { + self.withdrawable_epoch <= epoch } } @@ -68,85 +43,9 @@ impl Default for Validator { withdrawal_credentials: Hash256::default(), activation_epoch: Epoch::from(std::u64::MAX), exit_epoch: Epoch::from(std::u64::MAX), - withdrawal_epoch: Epoch::from(std::u64::MAX), - penalized_epoch: Epoch::from(std::u64::MAX), - status_flags: None, - } - } -} - -impl TestRandom for StatusFlags { - fn random_for_test(rng: &mut T) -> Self { - let options = vec![StatusFlags::InitiatedExit, StatusFlags::Withdrawable]; - options[(rng.next_u32() as usize) % options.len()] - } -} - -impl Encodable for Validator { - fn ssz_append(&self, s: &mut SszStream) { - s.append(&self.pubkey); - s.append(&self.withdrawal_credentials); - s.append(&self.activation_epoch); - s.append(&self.exit_epoch); - s.append(&self.withdrawal_epoch); - s.append(&self.penalized_epoch); - s.append(&status_flag_to_byte(self.status_flags)); - } -} - -impl Decodable for Validator { - fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { - let (pubkey, i) = <_>::ssz_decode(bytes, i)?; - let (withdrawal_credentials, i) = <_>::ssz_decode(bytes, i)?; - let (activation_epoch, i) = <_>::ssz_decode(bytes, i)?; - let (exit_epoch, i) = <_>::ssz_decode(bytes, i)?; - let (withdrawal_epoch, i) = <_>::ssz_decode(bytes, i)?; - let (penalized_epoch, i) = <_>::ssz_decode(bytes, i)?; - let (status_flags_byte, i): (u8, usize) = <_>::ssz_decode(bytes, i)?; - - let status_flags = status_flag_from_byte(status_flags_byte)?; - - Ok(( - Self { - pubkey, - withdrawal_credentials, - activation_epoch, - exit_epoch, - withdrawal_epoch, - penalized_epoch, - status_flags, - }, - i, - )) - } -} - -impl TreeHash for Validator { - fn hash_tree_root_internal(&self) -> Vec { - let mut result: Vec = vec![]; - result.append(&mut self.pubkey.hash_tree_root_internal()); - result.append(&mut self.withdrawal_credentials.hash_tree_root_internal()); - result.append(&mut self.activation_epoch.hash_tree_root_internal()); - result.append(&mut self.exit_epoch.hash_tree_root_internal()); - result.append(&mut self.withdrawal_epoch.hash_tree_root_internal()); - result.append(&mut self.penalized_epoch.hash_tree_root_internal()); - result.append( - &mut u64::from(status_flag_to_byte(self.status_flags)).hash_tree_root_internal(), - ); - hash(&result) - } -} - -impl TestRandom for Validator { - fn random_for_test(rng: &mut T) -> Self { - Self { - pubkey: <_>::random_for_test(rng), - withdrawal_credentials: <_>::random_for_test(rng), - activation_epoch: <_>::random_for_test(rng), - exit_epoch: <_>::random_for_test(rng), - withdrawal_epoch: <_>::random_for_test(rng), - penalized_epoch: <_>::random_for_test(rng), - status_flags: Some(<_>::random_for_test(rng)), + withdrawable_epoch: Epoch::from(std::u64::MAX), + initiated_exit: false, + slashed: false, } } } @@ -155,7 +54,7 @@ impl TestRandom for Validator { mod tests { use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::ssz_encode; + use ssz::{ssz_encode, Decodable, TreeHash}; #[test] pub fn test_ssz_round_trip() { diff --git a/eth2/types/src/validator_registry.rs b/eth2/types/src/validator_registry.rs index 20863dd72..7b55e78cb 100644 --- a/eth2/types/src/validator_registry.rs +++ b/eth2/types/src/validator_registry.rs @@ -4,6 +4,8 @@ use super::validator::*; use crate::Epoch; 
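// Editor's note: an illustrative sketch, not part of the diff, tying the new `Validator`
// lifecycle predicates to `get_active_validator_indices` below. `Validator::default()` sets the
// activation, exit and withdrawable epochs to u64::MAX, so such a validator is never active,
// exited or withdrawable, and contributes no index to the active set.
fn example_active_set() {
    let validators = vec![Validator::default(), Validator::default()];
    let epoch = Epoch::new(0);

    assert!(!validators[0].is_active_at(epoch));
    assert!(!validators[0].is_exited_at(epoch));
    assert!(!validators[0].is_withdrawable_at(epoch));
    assert!(get_active_validator_indices(&validators, epoch).is_empty());
}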
/// Given an indexed sequence of `validators`, return the indices corresponding to validators that are active at `epoch`. +/// +/// Spec v0.4.0 pub fn get_active_validator_indices(validators: &[Validator], epoch: Epoch) -> Vec { validators .iter() diff --git a/eth2/types/src/validator_registry_delta_block.rs b/eth2/types/src/validator_registry_delta_block.rs index 14f9c6ce5..e9a075052 100644 --- a/eth2/types/src/validator_registry_delta_block.rs +++ b/eth2/types/src/validator_registry_delta_block.rs @@ -2,11 +2,11 @@ use crate::{test_utils::TestRandom, Hash256, Slot}; use bls::PublicKey; use rand::RngCore; use serde_derive::Serialize; -use ssz::{hash, TreeHash}; -use ssz_derive::{Decode, Encode}; +use ssz_derive::{Decode, Encode, TreeHash}; +use test_random_derive::TestRandom; // The information gathered from the PoW chain validator registration function. -#[derive(Debug, Clone, PartialEq, Serialize, Encode, Decode)] +#[derive(Debug, Clone, PartialEq, Serialize, Encode, Decode, TreeHash, TestRandom)] pub struct ValidatorRegistryDeltaBlock { pub latest_registry_delta_root: Hash256, pub validator_index: u32, @@ -28,56 +28,9 @@ impl Default for ValidatorRegistryDeltaBlock { } } -impl TreeHash for ValidatorRegistryDeltaBlock { - fn hash_tree_root_internal(&self) -> Vec { - let mut result: Vec = vec![]; - result.append(&mut self.latest_registry_delta_root.hash_tree_root_internal()); - result.append(&mut self.validator_index.hash_tree_root_internal()); - result.append(&mut self.pubkey.hash_tree_root_internal()); - result.append(&mut self.slot.hash_tree_root_internal()); - result.append(&mut self.flag.hash_tree_root_internal()); - hash(&result) - } -} - -impl TestRandom for ValidatorRegistryDeltaBlock { - fn random_for_test(rng: &mut T) -> Self { - Self { - latest_registry_delta_root: <_>::random_for_test(rng), - validator_index: <_>::random_for_test(rng), - pubkey: <_>::random_for_test(rng), - slot: <_>::random_for_test(rng), - flag: <_>::random_for_test(rng), - } - } -} - #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = ValidatorRegistryDeltaBlock::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = ValidatorRegistryDeltaBlock::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(ValidatorRegistryDeltaBlock); } diff --git a/eth2/types/src/voluntary_exit.rs b/eth2/types/src/voluntary_exit.rs new file mode 100644 index 000000000..58c3ae4c2 --- /dev/null +++ b/eth2/types/src/voluntary_exit.rs @@ -0,0 +1,47 @@ +use crate::{test_utils::TestRandom, Epoch}; +use bls::Signature; +use rand::RngCore; +use serde_derive::Serialize; +use ssz::TreeHash; +use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; +use test_random_derive::TestRandom; + +/// An exit voluntarily submitted a validator who wishes to withdraw. 
+/// +/// Spec v0.4.0 +#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom, SignedRoot)] +pub struct VoluntaryExit { + pub epoch: Epoch, + pub validator_index: u64, + pub signature: Signature, +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::{ssz_encode, Decodable, TreeHash}; + + #[test] + pub fn test_ssz_round_trip() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = VoluntaryExit::random_for_test(&mut rng); + + let bytes = ssz_encode(&original); + let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); + + assert_eq!(original, decoded); + } + + #[test] + pub fn test_hash_tree_root_internal() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = VoluntaryExit::random_for_test(&mut rng); + + let result = original.hash_tree_root_internal(); + + assert_eq!(result.len(), 32); + // TODO: Add further tests + // https://github.com/sigp/lighthouse/issues/170 + } +} diff --git a/eth2/utils/bls/Cargo.toml b/eth2/utils/bls/Cargo.toml index 465510c59..7a436307b 100644 --- a/eth2/utils/bls/Cargo.toml +++ b/eth2/utils/bls/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Paul Hauner "] edition = "2018" [dependencies] -bls-aggregates = { git = "https://github.com/sigp/signature-schemes", tag = "v0.3.0" } +bls-aggregates = { git = "https://github.com/sigp/signature-schemes", tag = "0.5.2" } hashing = { path = "../hashing" } hex = "0.3" serde = "1.0" diff --git a/eth2/utils/bls/src/aggregate_public_key.rs b/eth2/utils/bls/src/aggregate_public_key.rs new file mode 100644 index 000000000..2174a43cb --- /dev/null +++ b/eth2/utils/bls/src/aggregate_public_key.rs @@ -0,0 +1,24 @@ +use super::PublicKey; +use bls_aggregates::AggregatePublicKey as RawAggregatePublicKey; + +/// A single BLS signature. +/// +/// This struct is a wrapper upon a base type and provides helper functions (e.g., SSZ +/// serialization). +#[derive(Debug, Clone, Default)] +pub struct AggregatePublicKey(RawAggregatePublicKey); + +impl AggregatePublicKey { + pub fn new() -> Self { + AggregatePublicKey(RawAggregatePublicKey::new()) + } + + pub fn add(&mut self, public_key: &PublicKey) { + self.0.add(public_key.as_raw()) + } + + /// Returns the underlying signature. + pub fn as_raw(&self) -> &RawAggregatePublicKey { + &self.0 + } +} diff --git a/eth2/utils/bls/src/aggregate_signature.rs b/eth2/utils/bls/src/aggregate_signature.rs index b606a5ebd..2d8776353 100644 --- a/eth2/utils/bls/src/aggregate_signature.rs +++ b/eth2/utils/bls/src/aggregate_signature.rs @@ -1,5 +1,7 @@ use super::{AggregatePublicKey, Signature}; -use bls_aggregates::AggregateSignature as RawAggregateSignature; +use bls_aggregates::{ + AggregatePublicKey as RawAggregatePublicKey, AggregateSignature as RawAggregateSignature, +}; use serde::ser::{Serialize, Serializer}; use ssz::{ decode_ssz_list, hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash, @@ -27,8 +29,44 @@ impl AggregateSignature { /// /// Only returns `true` if the set of keys in the `AggregatePublicKey` match the set of keys /// that signed the `AggregateSignature`. - pub fn verify(&self, msg: &[u8], aggregate_public_key: &AggregatePublicKey) -> bool { - self.0.verify(msg, aggregate_public_key) + pub fn verify( + &self, + msg: &[u8], + domain: u64, + aggregate_public_key: &AggregatePublicKey, + ) -> bool { + self.0.verify(msg, domain, aggregate_public_key.as_raw()) + } + + /// Verify this AggregateSignature against multiple AggregatePublickeys with multiple Messages. 
+ /// + /// All PublicKeys related to a Message should be aggregated into one AggregatePublicKey. + /// Each AggregatePublicKey has a 1:1 ratio with a 32 byte Message. + pub fn verify_multiple( + &self, + messages: &[&[u8]], + domain: u64, + aggregate_public_keys: &[&AggregatePublicKey], + ) -> bool { + // TODO: the API for `RawAggregatePublicKey` shoudn't need to take an owned + // `AggregatePublicKey`. There is an issue to fix this, but in the meantime we need to + // clone. + // + // https://github.com/sigp/signature-schemes/issues/10 + let aggregate_public_keys: Vec = aggregate_public_keys + .iter() + .map(|pk| pk.as_raw()) + .cloned() + .collect(); + + // Messages are concatenated into one long message. + let mut msg: Vec = vec![]; + for message in messages { + msg.extend_from_slice(message); + } + + self.0 + .verify_multiple(&msg[..], domain, &aggregate_public_keys[..]) } } @@ -73,7 +111,7 @@ mod tests { let keypair = Keypair::random(); let mut original = AggregateSignature::new(); - original.add(&Signature::new(&[42, 42], &keypair.sk)); + original.add(&Signature::new(&[42, 42], 0, &keypair.sk)); let bytes = ssz_encode(&original); let (decoded, _) = AggregateSignature::ssz_decode(&bytes, 0).unwrap(); diff --git a/eth2/utils/bls/src/lib.rs b/eth2/utils/bls/src/lib.rs index 646047d18..bb109b0a1 100644 --- a/eth2/utils/bls/src/lib.rs +++ b/eth2/utils/bls/src/lib.rs @@ -1,52 +1,53 @@ extern crate bls_aggregates; -extern crate hashing; extern crate ssz; +mod aggregate_public_key; mod aggregate_signature; mod keypair; mod public_key; mod secret_key; mod signature; +pub use crate::aggregate_public_key::AggregatePublicKey; pub use crate::aggregate_signature::AggregateSignature; pub use crate::keypair::Keypair; pub use crate::public_key::PublicKey; pub use crate::secret_key::SecretKey; pub use crate::signature::Signature; -pub use self::bls_aggregates::AggregatePublicKey; - -pub const BLS_AGG_SIG_BYTE_SIZE: usize = 97; +pub const BLS_AGG_SIG_BYTE_SIZE: usize = 96; use hashing::hash; use ssz::ssz_encode; -use std::default::Default; - -fn extend_if_needed(hash: &mut Vec) { - // NOTE: bls_aggregates crate demands 48 bytes, this may be removed as we get closer to production - hash.resize(48, Default::default()) -} /// For some signature and public key, ensure that the signature message was the public key and it /// was signed by the secret key that corresponds to that public key. pub fn verify_proof_of_possession(sig: &Signature, pubkey: &PublicKey) -> bool { - let mut hash = hash(&ssz_encode(pubkey)); - extend_if_needed(&mut hash); - sig.verify_hashed(&hash, &pubkey) + // TODO: replace this function with state.validate_proof_of_possession + // https://github.com/sigp/lighthouse/issues/239 + sig.verify(&ssz_encode(pubkey), 0, &pubkey) } +// TODO: Update this method +// https://github.com/sigp/lighthouse/issues/239 pub fn create_proof_of_possession(keypair: &Keypair) -> Signature { - let mut hash = hash(&ssz_encode(&keypair.pk)); - extend_if_needed(&mut hash); - Signature::new_hashed(&hash, &keypair.sk) + Signature::new(&ssz_encode(&keypair.pk), 0, &keypair.sk) +} + +/// Returns the withdrawal credentials for a given public key. 
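// Editor's note: an illustrative sketch, not part of the diff, of what the function below
// produces: the supplied prefix byte followed by the last 31 bytes of hash(ssz_encode(pubkey)),
// i.e. always 32 bytes. The prefix value `0` merely stands in for the spec's BLS withdrawal
// prefix constant.
fn example_withdrawal_credentials() {
    let keypair = Keypair::random();

    let credentials = get_withdrawal_credentials(&keypair.pk, 0);

    assert_eq!(credentials.len(), 32);
    assert_eq!(credentials[0], 0);
}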
+pub fn get_withdrawal_credentials(pubkey: &PublicKey, prefix_byte: u8) -> Vec { + let hashed = hash(&ssz_encode(pubkey)); + let mut prefixed = vec![prefix_byte]; + prefixed.extend_from_slice(&hashed[1..]); + + prefixed } pub fn bls_verify_aggregate( pubkey: &AggregatePublicKey, message: &[u8], signature: &AggregateSignature, - _domain: u64, + domain: u64, ) -> bool { - // TODO: add domain - signature.verify(message, pubkey) + signature.verify(message, domain, pubkey) } diff --git a/eth2/utils/bls/src/signature.rs b/eth2/utils/bls/src/signature.rs index 511681957..c0c31ef27 100644 --- a/eth2/utils/bls/src/signature.rs +++ b/eth2/utils/bls/src/signature.rs @@ -14,24 +14,34 @@ pub struct Signature(RawSignature); impl Signature { /// Instantiate a new Signature from a message and a SecretKey. - pub fn new(msg: &[u8], sk: &SecretKey) -> Self { - Signature(RawSignature::new(msg, sk.as_raw())) + pub fn new(msg: &[u8], domain: u64, sk: &SecretKey) -> Self { + Signature(RawSignature::new(msg, domain, sk.as_raw())) } /// Instantiate a new Signature from a message and a SecretKey, where the message has already /// been hashed. - pub fn new_hashed(msg_hashed: &[u8], sk: &SecretKey) -> Self { - Signature(RawSignature::new_hashed(msg_hashed, sk.as_raw())) + pub fn new_hashed(x_real_hashed: &[u8], x_imaginary_hashed: &[u8], sk: &SecretKey) -> Self { + Signature(RawSignature::new_hashed( + x_real_hashed, + x_imaginary_hashed, + sk.as_raw(), + )) } /// Verify the Signature against a PublicKey. - pub fn verify(&self, msg: &[u8], pk: &PublicKey) -> bool { - self.0.verify(msg, pk.as_raw()) + pub fn verify(&self, msg: &[u8], domain: u64, pk: &PublicKey) -> bool { + self.0.verify(msg, domain, pk.as_raw()) } /// Verify the Signature against a PublicKey, where the message has already been hashed. - pub fn verify_hashed(&self, msg_hash: &[u8], pk: &PublicKey) -> bool { - self.0.verify_hashed(msg_hash, pk.as_raw()) + pub fn verify_hashed( + &self, + x_real_hashed: &[u8], + x_imaginary_hashed: &[u8], + pk: &PublicKey, + ) -> bool { + self.0 + .verify_hashed(x_real_hashed, x_imaginary_hashed, pk.as_raw()) } /// Returns the underlying signature. @@ -41,7 +51,9 @@ impl Signature { /// Returns a new empty signature. 
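// Editor's note: an illustrative sketch, not part of the diff, of the domain-aware API
// introduced above: a signature created under one domain should not verify under a different
// domain, which is the point of threading `domain: u64` through `new` and `verify`.
fn example_domain_separation() {
    let keypair = Keypair::random();
    let message: &[u8] = &[42, 42];

    let signature = Signature::new(message, 0, &keypair.sk);

    assert!(signature.verify(message, 0, &keypair.pk));
    assert!(!signature.verify(message, 1, &keypair.pk));
}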
pub fn empty_signature() -> Self { - let empty: Vec = vec![0; 97]; + let mut empty: Vec = vec![0; 96]; + // TODO: Modify the way flags are used (b_flag should not be used for empty_signature in the future) + empty[0] += u8::pow(2, 6); Signature(RawSignature::from_bytes(&empty).unwrap()) } } @@ -85,7 +97,7 @@ mod tests { pub fn test_ssz_round_trip() { let keypair = Keypair::random(); - let original = Signature::new(&[42, 42], &keypair.sk); + let original = Signature::new(&[42, 42], 0, &keypair.sk); let bytes = ssz_encode(&original); let (decoded, _) = Signature::ssz_decode(&bytes, 0).unwrap(); @@ -99,9 +111,13 @@ mod tests { let sig_as_bytes: Vec = sig.as_raw().as_bytes(); - assert_eq!(sig_as_bytes.len(), 97); - for one_byte in sig_as_bytes.iter() { - assert_eq!(*one_byte, 0); + assert_eq!(sig_as_bytes.len(), 96); + for (i, one_byte) in sig_as_bytes.iter().enumerate() { + if i == 0 { + assert_eq!(*one_byte, u8::pow(2, 6)); + } else { + assert_eq!(*one_byte, 0); + } } } } diff --git a/eth2/utils/fisher_yates_shuffle/Cargo.toml b/eth2/utils/fisher_yates_shuffle/Cargo.toml index 7d33c2e91..ff1f64608 100644 --- a/eth2/utils/fisher_yates_shuffle/Cargo.toml +++ b/eth2/utils/fisher_yates_shuffle/Cargo.toml @@ -4,8 +4,13 @@ version = "0.1.0" authors = ["Paul Hauner "] edition = "2018" -[dependencies] -hashing = { path = "../hashing" } +[[bench]] +name = "benches" +harness = false [dev-dependencies] +criterion = "0.2" yaml-rust = "0.4.2" + +[dependencies] +hashing = { path = "../hashing" } diff --git a/eth2/utils/fisher_yates_shuffle/benches/benches.rs b/eth2/utils/fisher_yates_shuffle/benches/benches.rs new file mode 100644 index 000000000..9aa1885ab --- /dev/null +++ b/eth2/utils/fisher_yates_shuffle/benches/benches.rs @@ -0,0 +1,55 @@ +use criterion::Criterion; +use criterion::{black_box, criterion_group, criterion_main, Benchmark}; +use fisher_yates_shuffle::shuffle; + +fn get_list(n: usize) -> Vec { + let mut list = Vec::with_capacity(n); + for i in 0..n { + list.push(i) + } + assert_eq!(list.len(), n); + list +} + +fn shuffles(c: &mut Criterion) { + c.bench( + "whole list shuffle", + Benchmark::new("8 elements", move |b| { + let seed = vec![42; 32]; + let list = get_list(8); + b.iter_with_setup(|| list.clone(), |list| black_box(shuffle(&seed, list))) + }), + ); + + c.bench( + "whole list shuffle", + Benchmark::new("16 elements", move |b| { + let seed = vec![42; 32]; + let list = get_list(16); + b.iter_with_setup(|| list.clone(), |list| black_box(shuffle(&seed, list))) + }), + ); + + c.bench( + "whole list shuffle", + Benchmark::new("512 elements", move |b| { + let seed = vec![42; 32]; + let list = get_list(512); + b.iter_with_setup(|| list.clone(), |list| black_box(shuffle(&seed, list))) + }) + .sample_size(10), + ); + + c.bench( + "whole list shuffle", + Benchmark::new("16384 elements", move |b| { + let seed = vec![42; 32]; + let list = get_list(16_384); + b.iter_with_setup(|| list.clone(), |list| black_box(shuffle(&seed, list))) + }) + .sample_size(10), + ); +} + +criterion_group!(benches, shuffles); +criterion_main!(benches); diff --git a/eth2/utils/merkle_proof/Cargo.toml b/eth2/utils/merkle_proof/Cargo.toml new file mode 100644 index 000000000..b7cd81216 --- /dev/null +++ b/eth2/utils/merkle_proof/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "merkle_proof" +version = "0.1.0" +authors = ["Michael Sproul "] +edition = "2018" + +[dependencies] +ethereum-types = "0.5" +hashing = { path = "../hashing" } diff --git a/eth2/utils/merkle_proof/src/lib.rs b/eth2/utils/merkle_proof/src/lib.rs new 
file mode 100644 index 000000000..5ff8f79e6 --- /dev/null +++ b/eth2/utils/merkle_proof/src/lib.rs @@ -0,0 +1,148 @@ +use ethereum_types::H256; +use hashing::hash; + +/// Verify a proof that `leaf` exists at `index` in a Merkle tree rooted at `root`. +/// +/// The `branch` argument is the main component of the proof: it should be a list of internal +/// node hashes such that the root can be reconstructed (in bottom-up order). +pub fn verify_merkle_proof( + leaf: H256, + branch: &[H256], + depth: usize, + index: usize, + root: H256, +) -> bool { + if branch.len() == depth { + merkle_root_from_branch(leaf, branch, depth, index) == root + } else { + false + } +} + +/// Compute a root hash from a leaf and a Merkle proof. +fn merkle_root_from_branch(leaf: H256, branch: &[H256], depth: usize, index: usize) -> H256 { + assert_eq!(branch.len(), depth, "proof length should equal depth"); + + let mut merkle_root = leaf.as_bytes().to_vec(); + + for (i, leaf) in branch.iter().enumerate().take(depth) { + let ith_bit = (index >> i) & 0x01; + if ith_bit == 1 { + let input = concat(leaf.as_bytes().to_vec(), merkle_root); + merkle_root = hash(&input); + } else { + let mut input = merkle_root; + input.extend_from_slice(leaf.as_bytes()); + merkle_root = hash(&input); + } + } + + H256::from_slice(&merkle_root) +} + +/// Concatenate two vectors. +fn concat(mut vec1: Vec, mut vec2: Vec) -> Vec { + vec1.append(&mut vec2); + vec1 +} + +#[cfg(test)] +mod tests { + use super::*; + + fn hash_concat(h1: H256, h2: H256) -> H256 { + H256::from_slice(&hash(&concat( + h1.as_bytes().to_vec(), + h2.as_bytes().to_vec(), + ))) + } + + #[test] + fn verify_small_example() { + // Construct a small merkle tree manually + let leaf_b00 = H256::from([0xAA; 32]); + let leaf_b01 = H256::from([0xBB; 32]); + let leaf_b10 = H256::from([0xCC; 32]); + let leaf_b11 = H256::from([0xDD; 32]); + + let node_b0x = hash_concat(leaf_b00, leaf_b01); + let node_b1x = hash_concat(leaf_b10, leaf_b11); + + let root = hash_concat(node_b0x, node_b1x); + + // Run some proofs + assert!(verify_merkle_proof( + leaf_b00, + &[leaf_b01, node_b1x], + 2, + 0b00, + root + )); + assert!(verify_merkle_proof( + leaf_b01, + &[leaf_b00, node_b1x], + 2, + 0b01, + root + )); + assert!(verify_merkle_proof( + leaf_b10, + &[leaf_b11, node_b0x], + 2, + 0b10, + root + )); + assert!(verify_merkle_proof( + leaf_b11, + &[leaf_b10, node_b0x], + 2, + 0b11, + root + )); + assert!(verify_merkle_proof( + leaf_b11, + &[leaf_b10], + 1, + 0b11, + node_b1x + )); + + // Ensure that incorrect proofs fail + // Zero-length proof + assert!(!verify_merkle_proof(leaf_b01, &[], 2, 0b01, root)); + // Proof in reverse order + assert!(!verify_merkle_proof( + leaf_b01, + &[node_b1x, leaf_b00], + 2, + 0b01, + root + )); + // Proof too short + assert!(!verify_merkle_proof(leaf_b01, &[leaf_b00], 2, 0b01, root)); + // Wrong index + assert!(!verify_merkle_proof( + leaf_b01, + &[leaf_b00, node_b1x], + 2, + 0b10, + root + )); + // Wrong root + assert!(!verify_merkle_proof( + leaf_b01, + &[leaf_b00, node_b1x], + 2, + 0b01, + node_b1x + )); + } + + #[test] + fn verify_zero_depth() { + let leaf = H256::from([0xD6; 32]); + let junk = H256::from([0xD7; 32]); + assert!(verify_merkle_proof(leaf, &[], 0, 0, leaf)); + assert!(!verify_merkle_proof(leaf, &[], 0, 7, junk)); + } +} diff --git a/eth2/utils/ssz/Cargo.toml b/eth2/utils/ssz/Cargo.toml index 25326cb5b..f13db5def 100644 --- a/eth2/utils/ssz/Cargo.toml +++ b/eth2/utils/ssz/Cargo.toml @@ -6,5 +6,5 @@ edition = "2018" [dependencies] bytes = "0.4.9" 
-ethereum-types = "0.4.0" +ethereum-types = "0.5" hashing = { path = "../hashing" } diff --git a/eth2/utils/ssz/src/impl_decode.rs b/eth2/utils/ssz/src/impl_decode.rs index 845381484..7af6a7ab2 100644 --- a/eth2/utils/ssz/src/impl_decode.rs +++ b/eth2/utils/ssz/src/impl_decode.rs @@ -59,7 +59,7 @@ impl Decodable for H256 { if bytes.len() < 32 || bytes.len() - 32 < index { Err(DecodeError::TooShort) } else { - Ok((H256::from(&bytes[index..(index + 32)]), index + 32)) + Ok((H256::from_slice(&bytes[index..(index + 32)]), index + 32)) } } } @@ -69,7 +69,7 @@ impl Decodable for Address { if bytes.len() < 20 || bytes.len() - 20 < index { Err(DecodeError::TooShort) } else { - Ok((Address::from(&bytes[index..(index + 20)]), index + 20)) + Ok((Address::from_slice(&bytes[index..(index + 20)]), index + 20)) } } } @@ -95,7 +95,7 @@ mod tests { */ let input = vec![42_u8; 32]; let (decoded, i) = H256::ssz_decode(&input, 0).unwrap(); - assert_eq!(decoded.to_vec(), input); + assert_eq!(decoded.as_bytes(), &input[..]); assert_eq!(i, 32); /* @@ -104,7 +104,7 @@ mod tests { let mut input = vec![42_u8; 32]; input.push(12); let (decoded, i) = H256::ssz_decode(&input, 0).unwrap(); - assert_eq!(decoded.to_vec()[..], input[0..32]); + assert_eq!(decoded.as_bytes(), &input[0..32]); assert_eq!(i, 32); /* diff --git a/eth2/utils/ssz/src/impl_encode.rs b/eth2/utils/ssz/src/impl_encode.rs index 7e7d7cecb..33332ecea 100644 --- a/eth2/utils/ssz/src/impl_encode.rs +++ b/eth2/utils/ssz/src/impl_encode.rs @@ -55,13 +55,13 @@ impl Encodable for bool { impl Encodable for H256 { fn ssz_append(&self, s: &mut SszStream) { - s.append_encoded_raw(&self.to_vec()); + s.append_encoded_raw(self.as_bytes()); } } impl Encodable for Address { fn ssz_append(&self, s: &mut SszStream) { - s.append_encoded_raw(&self.to_vec()); + s.append_encoded_raw(self.as_bytes()); } } diff --git a/eth2/utils/ssz/src/impl_tree_hash.rs b/eth2/utils/ssz/src/impl_tree_hash.rs index 7c3dae596..54bd7c139 100644 --- a/eth2/utils/ssz/src/impl_tree_hash.rs +++ b/eth2/utils/ssz/src/impl_tree_hash.rs @@ -32,6 +32,12 @@ impl TreeHash for usize { } } +impl TreeHash for bool { + fn hash_tree_root_internal(&self) -> Vec { + ssz_encode(self) + } +} + impl TreeHash for Address { fn hash_tree_root_internal(&self) -> Vec { ssz_encode(self) diff --git a/eth2/utils/ssz/src/lib.rs b/eth2/utils/ssz/src/lib.rs index a6baa35a7..7c29667af 100644 --- a/eth2/utils/ssz/src/lib.rs +++ b/eth2/utils/ssz/src/lib.rs @@ -12,6 +12,7 @@ extern crate ethereum_types; pub mod decode; pub mod encode; +mod signed_root; pub mod tree_hash; mod impl_decode; @@ -20,6 +21,7 @@ mod impl_tree_hash; pub use crate::decode::{decode_ssz, decode_ssz_list, Decodable, DecodeError}; pub use crate::encode::{Encodable, SszStream}; +pub use crate::signed_root::SignedRoot; pub use crate::tree_hash::{merkle_hash, TreeHash}; pub use hashing::hash; diff --git a/eth2/utils/ssz/src/signed_root.rs b/eth2/utils/ssz/src/signed_root.rs new file mode 100644 index 000000000..f7aeca4af --- /dev/null +++ b/eth2/utils/ssz/src/signed_root.rs @@ -0,0 +1,5 @@ +use crate::TreeHash; + +pub trait SignedRoot: TreeHash { + fn signed_root(&self) -> Vec; +} diff --git a/eth2/utils/ssz/src/tree_hash.rs b/eth2/utils/ssz/src/tree_hash.rs index bb05f01db..7c1ab35e9 100644 --- a/eth2/utils/ssz/src/tree_hash.rs +++ b/eth2/utils/ssz/src/tree_hash.rs @@ -7,9 +7,7 @@ pub trait TreeHash { fn hash_tree_root_internal(&self) -> Vec; fn hash_tree_root(&self) -> Vec { let mut result = self.hash_tree_root_internal(); - if result.len() < HASHSIZE { - 
zpad(&mut result, HASHSIZE); - } + zpad(&mut result, HASHSIZE); result } } diff --git a/eth2/utils/ssz_derive/src/lib.rs b/eth2/utils/ssz_derive/src/lib.rs index 1bc5caef1..0d2e17f76 100644 --- a/eth2/utils/ssz_derive/src/lib.rs +++ b/eth2/utils/ssz_derive/src/lib.rs @@ -2,6 +2,7 @@ //! //! - `#[derive(Encode)]` //! - `#[derive(Decode)]` +//! - `#[derive(TreeHash)]` //! //! These macros provide SSZ encoding/decoding for a `struct`. Fields are encoded/decoded in the //! order they are defined. @@ -126,3 +127,109 @@ pub fn ssz_decode_derive(input: TokenStream) -> TokenStream { }; output.into() } + +/// Implements `ssz::TreeHash` for some `struct`. +/// +/// Fields are processed in the order they are defined. +#[proc_macro_derive(TreeHash)] +pub fn ssz_tree_hash_derive(input: TokenStream) -> TokenStream { + let item = parse_macro_input!(input as DeriveInput); + + let name = &item.ident; + + let struct_data = match &item.data { + syn::Data::Struct(s) => s, + _ => panic!("ssz_derive only supports structs."), + }; + + let field_idents = get_named_field_idents(&struct_data); + + let output = quote! { + impl ssz::TreeHash for #name { + fn hash_tree_root_internal(&self) -> Vec { + let mut list: Vec> = Vec::new(); + #( + list.push(self.#field_idents.hash_tree_root_internal()); + )* + + ssz::merkle_hash(&mut list) + } + } + }; + output.into() +} + +/// Returns `true` if some `Ident` should be considered to be a signature type. +fn type_ident_is_signature(ident: &syn::Ident) -> bool { + match ident.to_string().as_ref() { + "Signature" => true, + "AggregateSignature" => true, + _ => false, + } +} + +/// Takes a `Field` where the type (`ty`) portion is a path (e.g., `types::Signature`) and returns +/// the final `Ident` in that path. +/// +/// E.g., for `types::Signature` returns `Signature`. +fn final_type_ident(field: &syn::Field) -> &syn::Ident { + match &field.ty { + syn::Type::Path(path) => &path.path.segments.last().unwrap().value().ident, + _ => panic!("ssz_derive only supports Path types."), + } +} + +/// Implements `ssz::TreeHash` for some `struct`, whilst excluding any fields following and +/// including a field that is of type "Signature" or "AggregateSignature". +/// +/// See: +/// https://github.com/ethereum/eth2.0-specs/blob/master/specs/simple-serialize.md#signed-roots +/// +/// This is a rather horrendous macro, it will read the type of the object as a string and decide +/// if it's a signature by matching that string against "Signature" or "AggregateSignature". So, +/// it's important that you use those exact words as your type -- don't alias it to something else. +/// +/// If you can think of a better way to do this, please make an issue! +/// +/// Fields are processed in the order they are defined. +#[proc_macro_derive(SignedRoot)] +pub fn ssz_signed_root_derive(input: TokenStream) -> TokenStream { + let item = parse_macro_input!(input as DeriveInput); + + let name = &item.ident; + + let struct_data = match &item.data { + syn::Data::Struct(s) => s, + _ => panic!("ssz_derive only supports structs."), + }; + + let mut field_idents: Vec<&syn::Ident> = vec![]; + + for field in struct_data.fields.iter() { + let final_type_ident = final_type_ident(&field); + + if type_ident_is_signature(final_type_ident) { + break; + } else { + let ident = field + .ident + .as_ref() + .expect("ssz_derive only supports named_struct fields."); + field_idents.push(ident); + } + } + + let output = quote! 
{ + impl ssz::SignedRoot for #name { + fn signed_root(&self) -> Vec { + let mut list: Vec> = Vec::new(); + #( + list.push(self.#field_idents.hash_tree_root_internal()); + )* + + ssz::merkle_hash(&mut list) + } + } + }; + output.into() +} diff --git a/eth2/utils/swap_or_not_shuffle/Cargo.toml b/eth2/utils/swap_or_not_shuffle/Cargo.toml index 272abf608..3a866da92 100644 --- a/eth2/utils/swap_or_not_shuffle/Cargo.toml +++ b/eth2/utils/swap_or_not_shuffle/Cargo.toml @@ -4,12 +4,17 @@ version = "0.1.0" authors = ["Paul Hauner "] edition = "2018" +[[bench]] +name = "benches" +harness = false + +[dev-dependencies] +criterion = "0.2" +yaml-rust = "0.4.2" +hex = "0.3" +ethereum-types = "0.5" + [dependencies] bytes = "0.4" hashing = { path = "../hashing" } int_to_bytes = { path = "../int_to_bytes" } - -[dev-dependencies] -yaml-rust = "0.4.2" -hex = "0.3" -ethereum-types = "0.5" diff --git a/eth2/utils/swap_or_not_shuffle/benches/benches.rs b/eth2/utils/swap_or_not_shuffle/benches/benches.rs new file mode 100644 index 000000000..0502e6fc4 --- /dev/null +++ b/eth2/utils/swap_or_not_shuffle/benches/benches.rs @@ -0,0 +1,92 @@ +use criterion::Criterion; +use criterion::{black_box, criterion_group, criterion_main, Benchmark}; +use swap_or_not_shuffle::{get_permutated_index, shuffle_list as fast_shuffle}; + +const SHUFFLE_ROUND_COUNT: u8 = 90; + +fn shuffle_list(seed: &[u8], list_size: usize) -> Vec { + let mut output = Vec::with_capacity(list_size); + for i in 0..list_size { + output.push(get_permutated_index(i, list_size, seed, SHUFFLE_ROUND_COUNT).unwrap()); + } + output +} + +fn shuffles(c: &mut Criterion) { + c.bench_function("single swap", move |b| { + let seed = vec![42; 32]; + b.iter(|| black_box(get_permutated_index(0, 10, &seed, SHUFFLE_ROUND_COUNT))) + }); + + c.bench_function("whole list of size 8", move |b| { + let seed = vec![42; 32]; + b.iter(|| black_box(shuffle_list(&seed, 8))) + }); + + c.bench( + "whole list shuffle", + Benchmark::new("8 elements", move |b| { + let seed = vec![42; 32]; + b.iter(|| black_box(shuffle_list(&seed, 8))) + }), + ); + + c.bench( + "whole list shuffle", + Benchmark::new("16 elements", move |b| { + let seed = vec![42; 32]; + b.iter(|| black_box(shuffle_list(&seed, 16))) + }), + ); + + c.bench( + "whole list shuffle", + Benchmark::new("512 elements", move |b| { + let seed = vec![42; 32]; + b.iter(|| black_box(shuffle_list(&seed, 512))) + }) + .sample_size(10), + ); + + c.bench( + "_fast_ whole list shuffle", + Benchmark::new("512 elements", move |b| { + let seed = vec![42; 32]; + let list: Vec = (0..512).collect(); + b.iter(|| black_box(fast_shuffle(list.clone(), SHUFFLE_ROUND_COUNT, &seed, true))) + }) + .sample_size(10), + ); + + c.bench( + "whole list shuffle", + Benchmark::new("16384 elements", move |b| { + let seed = vec![42; 32]; + b.iter(|| black_box(shuffle_list(&seed, 16_384))) + }) + .sample_size(10), + ); + + c.bench( + "_fast_ whole list shuffle", + Benchmark::new("16384 elements", move |b| { + let seed = vec![42; 32]; + let list: Vec = (0..16384).collect(); + b.iter(|| black_box(fast_shuffle(list.clone(), SHUFFLE_ROUND_COUNT, &seed, true))) + }) + .sample_size(10), + ); + + c.bench( + "_fast_ whole list shuffle", + Benchmark::new("4m elements", move |b| { + let seed = vec![42; 32]; + let list: Vec = (0..4_000_000).collect(); + b.iter(|| black_box(fast_shuffle(list.clone(), SHUFFLE_ROUND_COUNT, &seed, true))) + }) + .sample_size(10), + ); +} + +criterion_group!(benches, shuffles,); +criterion_main!(benches); diff --git 
a/eth2/utils/swap_or_not_shuffle/src/get_permutated_index.rs b/eth2/utils/swap_or_not_shuffle/src/get_permutated_index.rs new file mode 100644 index 000000000..37a82341e --- /dev/null +++ b/eth2/utils/swap_or_not_shuffle/src/get_permutated_index.rs @@ -0,0 +1,187 @@ +use bytes::Buf; +use hashing::hash; +use int_to_bytes::{int_to_bytes1, int_to_bytes4}; +use std::cmp::max; +use std::io::Cursor; + +/// Return `p(index)` in a pseudorandom permutation `p` of `0...list_size-1` with ``seed`` as entropy. +/// +/// Utilizes 'swap or not' shuffling found in +/// https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf +/// See the 'generalized domain' algorithm on page 3. +/// +/// Note: this function is significantly slower than the `shuffle_list` function in this crate. +/// Using `get_permutated_list` to shuffle an entire list, index by index, has been observed to be +/// 250x slower than `shuffle_list`. Therefore, this function is only useful when shuffling a small +/// portion of a much larger list. +/// +/// Returns `None` under any of the following conditions: +/// - `list_size == 0` +/// - `index >= list_size` +/// - `list_size > 2**24` +/// - `list_size > usize::max_value() / 2` +pub fn get_permutated_index( + index: usize, + list_size: usize, + seed: &[u8], + shuffle_round_count: u8, +) -> Option { + if list_size == 0 + || index >= list_size + || list_size > usize::max_value() / 2 + || list_size > 2_usize.pow(24) + { + return None; + } + + let mut index = index; + for round in 0..shuffle_round_count { + let pivot = bytes_to_int64(&hash_with_round(seed, round)[..]) as usize % list_size; + index = do_round(seed, index, pivot, round, list_size)?; + } + Some(index) +} + +fn do_round(seed: &[u8], index: usize, pivot: usize, round: u8, list_size: usize) -> Option { + let flip = (pivot + list_size - index) % list_size; + let position = max(index, flip); + let source = hash_with_round_and_position(seed, round, position)?; + let byte = source[(position % 256) / 8]; + let bit = (byte >> (position % 8)) % 2; + Some(if bit == 1 { flip } else { index }) +} + +fn hash_with_round_and_position(seed: &[u8], round: u8, position: usize) -> Option> { + let mut seed = seed.to_vec(); + seed.append(&mut int_to_bytes1(round)); + /* + * Note: the specification has an implicit assertion in `int_to_bytes4` that `position / 256 < + * 2**24`. For efficiency, we do not check for that here as it is checked in `get_permutated_index`. + */ + seed.append(&mut int_to_bytes4((position / 256) as u32)); + Some(hash(&seed[..])) +} + +fn hash_with_round(seed: &[u8], round: u8) -> Vec { + let mut seed = seed.to_vec(); + seed.append(&mut int_to_bytes1(round)); + hash(&seed[..]) +} + +fn bytes_to_int64(bytes: &[u8]) -> u64 { + let mut cursor = Cursor::new(bytes); + cursor.get_u64_le() +} + +#[cfg(test)] +mod tests { + use super::*; + use ethereum_types::H256 as Hash256; + use hex; + use std::{fs::File, io::prelude::*, path::PathBuf}; + use yaml_rust::yaml; + + #[test] + #[ignore] + fn fuzz_test() { + let max_list_size = 2_usize.pow(24); + let test_runs = 1000; + + // Test at max list_size with the end index. + for _ in 0..test_runs { + let index = max_list_size - 1; + let list_size = max_list_size; + let seed = Hash256::random(); + let shuffle_rounds = 90; + + assert!(get_permutated_index(index, list_size, &seed[..], shuffle_rounds).is_some()); + } + + // Test at max list_size low indices. 
+ for i in 0..test_runs { + let index = i; + let list_size = max_list_size; + let seed = Hash256::random(); + let shuffle_rounds = 90; + + assert!(get_permutated_index(index, list_size, &seed[..], shuffle_rounds).is_some()); + } + + // Test at max list_size high indices. + for i in 0..test_runs { + let index = max_list_size - 1 - i; + let list_size = max_list_size; + let seed = Hash256::random(); + let shuffle_rounds = 90; + + assert!(get_permutated_index(index, list_size, &seed[..], shuffle_rounds).is_some()); + } + } + + #[test] + fn returns_none_for_zero_length_list() { + assert_eq!(None, get_permutated_index(100, 0, &[42, 42], 90)); + } + + #[test] + fn returns_none_for_out_of_bounds_index() { + assert_eq!(None, get_permutated_index(100, 100, &[42, 42], 90)); + } + + #[test] + fn returns_none_for_too_large_list() { + assert_eq!( + None, + get_permutated_index(100, usize::max_value() / 2, &[42, 42], 90) + ); + } + + #[test] + fn test_vectors() { + /* + * Test vectors are generated here: + * + * https://github.com/ethereum/eth2.0-test-generators + */ + let mut file = { + let mut file_path_buf = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + file_path_buf.push("src/specs/test_vector_permutated_index.yml"); + + File::open(file_path_buf).unwrap() + }; + + let mut yaml_str = String::new(); + + file.read_to_string(&mut yaml_str).unwrap(); + + let docs = yaml::YamlLoader::load_from_str(&yaml_str).unwrap(); + let doc = &docs[0]; + let test_cases = doc["test_cases"].as_vec().unwrap(); + + for (i, test_case) in test_cases.iter().enumerate() { + let index = test_case["index"].as_i64().unwrap() as usize; + let list_size = test_case["list_size"].as_i64().unwrap() as usize; + let permutated_index = test_case["permutated_index"].as_i64().unwrap() as usize; + let shuffle_round_count = test_case["shuffle_round_count"].as_i64().unwrap(); + let seed_string = test_case["seed"].clone().into_string().unwrap(); + let seed = hex::decode(seed_string.replace("0x", "")).unwrap(); + + let shuffle_round_count = if shuffle_round_count < (u8::max_value() as i64) { + shuffle_round_count as u8 + } else { + panic!("shuffle_round_count must be a u8") + }; + + assert_eq!( + Some(permutated_index), + get_permutated_index(index, list_size, &seed[..], shuffle_round_count), + "Failure on case #{} index: {}, list_size: {}, round_count: {}, seed: {}", + i, + index, + list_size, + shuffle_round_count, + seed_string, + ); + } + } +} diff --git a/eth2/utils/swap_or_not_shuffle/src/lib.rs b/eth2/utils/swap_or_not_shuffle/src/lib.rs index 753265f3e..57049fbdf 100644 --- a/eth2/utils/swap_or_not_shuffle/src/lib.rs +++ b/eth2/utils/swap_or_not_shuffle/src/lib.rs @@ -1,178 +1,21 @@ -use bytes::Buf; -use hashing::hash; -use int_to_bytes::{int_to_bytes1, int_to_bytes4}; -use std::cmp::max; -use std::io::Cursor; +//! Provides list-shuffling functions matching the Ethereum 2.0 specification. +//! +//! See +//! [get_permutated_index](https://github.com/ethereum/eth2.0-specs/blob/0.4.0/specs/core/0_beacon-chain.md#get_permuted_index) +//! for specifications. +//! +//! There are two functions exported by this crate: +//! +//! - `get_permutated_index`: given a single index, computes the index resulting from a shuffle. +//! Runs in less time than it takes to run `shuffle_list`. +//! - `shuffle_list`: shuffles an entire list in-place. Runs in less time than it takes to run +//! `get_permutated_index` on each index. +//! +//! In general, use `get_permutated_list` to calculate the shuffling of a small subset of a much +//! 
larger list (~250x larger is a good guide, but solid figures yet to be calculated). -/// Return `p(index)` in a pseudorandom permutation `p` of `0...list_size-1` with ``seed`` as entropy. -/// -/// Utilizes 'swap or not' shuffling found in -/// https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf -/// See the 'generalized domain' algorithm on page 3. -/// -/// Returns `None` under any of the following conditions: -/// - `list_size == 0` -/// - `index >= list_size` -/// - `list_size > 2**24` -/// - `list_size > usize::max_value() / 2` -pub fn get_permutated_index( - index: usize, - list_size: usize, - seed: &[u8], - shuffle_round_count: u8, -) -> Option { - if list_size == 0 - || index >= list_size - || list_size > usize::max_value() / 2 - || list_size > 2_usize.pow(24) - { - return None; - } +mod get_permutated_index; +mod shuffle_list; - let mut index = index; - for round in 0..shuffle_round_count { - let pivot = bytes_to_int64(&hash_with_round(seed, round)[..]) as usize % list_size; - let flip = (pivot + list_size - index) % list_size; - let position = max(index, flip); - let source = hash_with_round_and_position(seed, round, position)?; - let byte = source[(position % 256) / 8]; - let bit = (byte >> (position % 8)) % 2; - index = if bit == 1 { flip } else { index } - } - Some(index) -} - -fn hash_with_round_and_position(seed: &[u8], round: u8, position: usize) -> Option> { - let mut seed = seed.to_vec(); - seed.append(&mut int_to_bytes1(round)); - /* - * Note: the specification has an implicit assertion in `int_to_bytes4` that `position / 256 < - * 2**24`. For efficiency, we do not check for that here as it is checked in `get_permutated_index`. - */ - seed.append(&mut int_to_bytes4((position / 256) as u32)); - Some(hash(&seed[..])) -} - -fn hash_with_round(seed: &[u8], round: u8) -> Vec { - let mut seed = seed.to_vec(); - seed.append(&mut int_to_bytes1(round)); - hash(&seed[..]) -} - -fn bytes_to_int64(bytes: &[u8]) -> u64 { - let mut cursor = Cursor::new(bytes); - cursor.get_u64_le() -} - -#[cfg(test)] -mod tests { - use super::*; - use ethereum_types::H256 as Hash256; - use hex; - use std::{fs::File, io::prelude::*, path::PathBuf}; - use yaml_rust::yaml; - - #[test] - #[ignore] - fn fuzz_test() { - let max_list_size = 2_usize.pow(24); - let test_runs = 1000; - - // Test at max list_size with the end index. - for _ in 0..test_runs { - let index = max_list_size - 1; - let list_size = max_list_size; - let seed = Hash256::random(); - let shuffle_rounds = 90; - - assert!(get_permutated_index(index, list_size, &seed[..], shuffle_rounds).is_some()); - } - - // Test at max list_size low indices. - for i in 0..test_runs { - let index = i; - let list_size = max_list_size; - let seed = Hash256::random(); - let shuffle_rounds = 90; - - assert!(get_permutated_index(index, list_size, &seed[..], shuffle_rounds).is_some()); - } - - // Test at max list_size high indices. 
- for i in 0..test_runs { - let index = max_list_size - 1 - i; - let list_size = max_list_size; - let seed = Hash256::random(); - let shuffle_rounds = 90; - - assert!(get_permutated_index(index, list_size, &seed[..], shuffle_rounds).is_some()); - } - } - - #[test] - fn returns_none_for_zero_length_list() { - assert_eq!(None, get_permutated_index(100, 0, &[42, 42], 90)); - } - - #[test] - fn returns_none_for_out_of_bounds_index() { - assert_eq!(None, get_permutated_index(100, 100, &[42, 42], 90)); - } - - #[test] - fn returns_none_for_too_large_list() { - assert_eq!( - None, - get_permutated_index(100, usize::max_value() / 2, &[42, 42], 90) - ); - } - - #[test] - fn test_vectors() { - /* - * Test vectors are generated here: - * - * https://github.com/ethereum/eth2.0-test-generators - */ - let mut file = { - let mut file_path_buf = PathBuf::from(env!("CARGO_MANIFEST_DIR")); - file_path_buf.push("src/specs/test_vector_permutated_index.yml"); - - File::open(file_path_buf).unwrap() - }; - - let mut yaml_str = String::new(); - - file.read_to_string(&mut yaml_str).unwrap(); - - let docs = yaml::YamlLoader::load_from_str(&yaml_str).unwrap(); - let doc = &docs[0]; - let test_cases = doc["test_cases"].as_vec().unwrap(); - - for (i, test_case) in test_cases.iter().enumerate() { - let index = test_case["index"].as_i64().unwrap() as usize; - let list_size = test_case["list_size"].as_i64().unwrap() as usize; - let permutated_index = test_case["permutated_index"].as_i64().unwrap() as usize; - let shuffle_round_count = test_case["shuffle_round_count"].as_i64().unwrap(); - let seed_string = test_case["seed"].clone().into_string().unwrap(); - let seed = hex::decode(seed_string.replace("0x", "")).unwrap(); - - let shuffle_round_count = if shuffle_round_count < (u8::max_value() as i64) { - shuffle_round_count as u8 - } else { - panic!("shuffle_round_count must be a u8") - }; - - assert_eq!( - Some(permutated_index), - get_permutated_index(index, list_size, &seed[..], shuffle_round_count), - "Failure on case #{} index: {}, list_size: {}, round_count: {}, seed: {}", - i, - index, - list_size, - shuffle_round_count, - seed_string, - ); - } - } -} +pub use get_permutated_index::get_permutated_index; +pub use shuffle_list::shuffle_list; diff --git a/eth2/utils/swap_or_not_shuffle/src/shuffle_list.rs b/eth2/utils/swap_or_not_shuffle/src/shuffle_list.rs new file mode 100644 index 000000000..e7e1e18e6 --- /dev/null +++ b/eth2/utils/swap_or_not_shuffle/src/shuffle_list.rs @@ -0,0 +1,178 @@ +use bytes::Buf; +use hashing::hash; +use int_to_bytes::int_to_bytes4; +use std::io::Cursor; + +const SEED_SIZE: usize = 32; +const ROUND_SIZE: usize = 1; +const POSITION_WINDOW_SIZE: usize = 4; +const PIVOT_VIEW_SIZE: usize = SEED_SIZE + ROUND_SIZE; +const TOTAL_SIZE: usize = SEED_SIZE + ROUND_SIZE + POSITION_WINDOW_SIZE; + +/// Shuffles an entire list in-place. +/// +/// Note: this is equivalent to the `get_permutated_index` function, except it shuffles an entire +/// list not just a single index. With large lists this function has been observed to be 250x +/// faster than running `get_permutated_index` across an entire list. +/// +/// Credits to [@protolambda](https://github.com/protolambda) for defining this algorithm. +/// +/// Shuffles if `forwards == true`, otherwise un-shuffles. 
+/// +/// Returns `None` under any of the following conditions: +/// - `list_size == 0` +/// - `list_size > 2**24` +/// - `list_size > usize::max_value() / 2` +/// - `rounds == 0` +pub fn shuffle_list( + mut input: Vec<usize>, + rounds: u8, + seed: &[u8], + forwards: bool, +) -> Option<Vec<usize>> { + let list_size = input.len(); + + if input.is_empty() + || list_size > usize::max_value() / 2 + || list_size > 2_usize.pow(24) + || rounds == 0 + { + return None; + } + + let mut buf: Vec<u8> = Vec::with_capacity(TOTAL_SIZE); + + let mut r = if forwards { 0 } else { rounds - 1 }; + + buf.extend_from_slice(seed); + + loop { + buf.splice(SEED_SIZE.., vec![r]); + + let pivot = bytes_to_int64(&hash(&buf[0..PIVOT_VIEW_SIZE])[0..8]) as usize % list_size; + + let mirror = (pivot + 1) >> 1; + + buf.splice(PIVOT_VIEW_SIZE.., int_to_bytes4((pivot >> 8) as u32)); + let mut source = hash(&buf[..]); + let mut byte_v = source[(pivot & 0xff) >> 3]; + + for i in 0..mirror { + let j = pivot - i; + + if j & 0xff == 0xff { + buf.splice(PIVOT_VIEW_SIZE.., int_to_bytes4((j >> 8) as u32)); + source = hash(&buf[..]); + } + + if j & 0x07 == 0x07 { + byte_v = source[(j & 0xff) >> 3]; + } + let bit_v = (byte_v >> (j & 0x07)) & 0x01; + + if bit_v == 1 { + input.swap(i, j); + } + } + + let mirror = (pivot + list_size + 1) >> 1; + let end = list_size - 1; + + buf.splice(PIVOT_VIEW_SIZE.., int_to_bytes4((end >> 8) as u32)); + let mut source = hash(&buf[..]); + let mut byte_v = source[(end & 0xff) >> 3]; + + for (loop_iter, i) in ((pivot + 1)..mirror).enumerate() { + let j = end - loop_iter; + + if j & 0xff == 0xff { + buf.splice(PIVOT_VIEW_SIZE.., int_to_bytes4((j >> 8) as u32)); + source = hash(&buf[..]); + } + + if j & 0x07 == 0x07 { + byte_v = source[(j & 0xff) >> 3]; + } + let bit_v = (byte_v >> (j & 0x07)) & 0x01; + + if bit_v == 1 { + input.swap(i, j); + } + } + + if forwards { + r += 1; + if r == rounds { + break; + } + } else { + if r == 0 { + break; + } + r -= 1; + } + } + + Some(input) +} + +fn bytes_to_int64(bytes: &[u8]) -> u64 { + let mut cursor = Cursor::new(bytes); + cursor.get_u64_le() +}
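// Illustrative sketch only, not part of this file's diff: each swap-or-not round is its own
// inverse, and `forwards == false` simply applies the rounds in reverse order, so a shuffle
// followed by an un-shuffle with the same seed and round count should return the original
// list. A minimal property check, assuming the `shuffle_list` introduced above:
use swap_or_not_shuffle::shuffle_list;

fn shuffle_roundtrip_example() {
    let seed = vec![42; 32];
    let list: Vec<usize> = (0..1024).collect();

    // Shuffle forwards, then apply the inverse permutation.
    let shuffled = shuffle_list(list.clone(), 90, &seed, true).expect("valid parameters");
    let unshuffled = shuffle_list(shuffled, 90, &seed, false).expect("valid parameters");

    assert_eq!(list, unshuffled);
}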
+ +#[cfg(test)] +mod tests { + use super::*; + use hex; + use std::{fs::File, io::prelude::*, path::PathBuf}; + use yaml_rust::yaml; + + #[test] + fn returns_none_for_zero_length_list() { + assert_eq!(None, shuffle_list(vec![], 90, &[42, 42], true)); + } + + #[test] + fn test_vectors() { + let mut file = { + let mut file_path_buf = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + file_path_buf.push("src/specs/test_vector_permutated_index.yml"); + + File::open(file_path_buf).unwrap() + }; + + let mut yaml_str = String::new(); + + file.read_to_string(&mut yaml_str).unwrap(); + + let docs = yaml::YamlLoader::load_from_str(&yaml_str).unwrap(); + let doc = &docs[0]; + let test_cases = doc["test_cases"].as_vec().unwrap(); + + for (i, test_case) in test_cases.iter().enumerate() { + let index = test_case["index"].as_i64().unwrap() as usize; + let list_size = test_case["list_size"].as_i64().unwrap() as usize; + let permutated_index = test_case["permutated_index"].as_i64().unwrap() as usize; + let shuffle_round_count = test_case["shuffle_round_count"].as_i64().unwrap(); + let seed_string = test_case["seed"].clone().into_string().unwrap(); + let seed = hex::decode(seed_string.replace("0x", "")).unwrap(); + + let shuffle_round_count = if shuffle_round_count < (u8::max_value() as i64) { + shuffle_round_count as u8 + } else { + panic!("shuffle_round_count must be a u8") + }; + + let list: Vec<usize> = (0..list_size).collect(); + + let shuffled = + shuffle_list(list.clone(), shuffle_round_count, &seed[..], true).unwrap(); + + assert_eq!( + list[index], shuffled[permutated_index], + "Failure on case #{} index: {}, list_size: {}, round_count: {}, seed: {}", + i, index, list_size, shuffle_round_count, seed_string + ); + } + } +} diff --git a/eth2/utils/test_random_derive/Cargo.toml b/eth2/utils/test_random_derive/Cargo.toml new file mode 100644 index 000000000..4559befaf --- /dev/null +++ b/eth2/utils/test_random_derive/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "test_random_derive" +version = "0.1.0" +authors = ["thojest "] +edition = "2018" +description = "Procedural derive macros for implementation of TestRandom trait" + +[lib] +proc-macro = true + +[dependencies] +syn = "0.15" +quote = "0.6" diff --git a/eth2/utils/test_random_derive/src/lib.rs b/eth2/utils/test_random_derive/src/lib.rs new file mode 100644 index 000000000..9a456606c --- /dev/null +++ b/eth2/utils/test_random_derive/src/lib.rs @@ -0,0 +1,43 @@ +extern crate proc_macro; + +use crate::proc_macro::TokenStream; +use quote::quote; +use syn::{parse_macro_input, DeriveInput}; + +#[proc_macro_derive(TestRandom)] +pub fn test_random_derive(input: TokenStream) -> TokenStream { + let derived_input = parse_macro_input!(input as DeriveInput); + let name = &derived_input.ident; + + let struct_data = match &derived_input.data { + syn::Data::Struct(s) => s, + _ => panic!("test_random_derive only supports structs."), + }; + + let field_names = get_named_field_idents(&struct_data); + + let output = quote! { + impl<T: RngCore> TestRandom<T> for #name { + fn random_for_test(rng: &mut T) -> Self { + Self { + #( + #field_names: <_>::random_for_test(rng), + )* + } + } + } + }; + + output.into() +} + +fn get_named_field_idents(struct_data: &syn::DataStruct) -> Vec<(&syn::Ident)> { + struct_data + .fields + .iter() + .map(|f| match &f.ident { + Some(ref ident) => ident, + _ => panic!("test_random_derive only supports named struct fields."), + }) + .collect() +}
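// Illustrative sketch only, not part of the diff: roughly what `#[derive(TestRandom)]` expands
// to for a small struct with named fields. The `TestRandom` trait below is a stand-in shaped
// like the one the generated impl targets; the struct, its fields and the `u64` impl are
// example assumptions for illustration only.
use rand::RngCore;

pub trait TestRandom<T: RngCore> {
    fn random_for_test(rng: &mut T) -> Self;
}

// Stand-in impl so the field types used in the example satisfy the trait.
impl<T: RngCore> TestRandom<T> for u64 {
    fn random_for_test(rng: &mut T) -> Self {
        rng.next_u64()
    }
}

struct Dummy {
    a: u64,
    b: u64,
}

// Hand-written equivalent of the code the derive macro above would generate for `Dummy`:
// each field is filled by deferring to that field type's own `random_for_test`.
impl<T: RngCore> TestRandom<T> for Dummy {
    fn random_for_test(rng: &mut T) -> Self {
        Self {
            a: <_>::random_for_test(rng),
            b: <_>::random_for_test(rng),
        }
    }
}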
diff --git a/validator_client/src/block_producer_service/beacon_block_grpc_client.rs b/validator_client/src/block_producer_service/beacon_block_grpc_client.rs index 6bf3005d4..6ce5c0fa0 100644 --- a/validator_client/src/block_producer_service/beacon_block_grpc_client.rs +++ b/validator_client/src/block_producer_service/beacon_block_grpc_client.rs @@ -63,7 +63,8 @@ impl BeaconNode for BeaconBlockGrpcClient { attester_slashings: vec![], attestations: vec![], deposits: vec![], - exits: vec![], + voluntary_exits: vec![], + transfers: vec![], }, })) } else { diff --git a/validator_client/src/block_producer_service/mod.rs b/validator_client/src/block_producer_service/mod.rs index bd1e691cb..91e7606a7 100644 --- a/validator_client/src/block_producer_service/mod.rs +++ b/validator_client/src/block_producer_service/mod.rs @@ -50,6 +50,9 @@ impl BlockProducerServi Ok(BlockProducerPollOutcome::ValidatorIsUnknown(slot)) => { error!(self.log, "The Beacon Node does not recognise the validator"; "slot" => slot) } + Ok(BlockProducerPollOutcome::UnableToGetFork(slot)) => { + error!(self.log, "Unable to get a `Fork` struct to generate signature domains"; "slot" => slot) + } }; std::thread::sleep(Duration::from_millis(self.poll_interval_millis)); diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 104a4bbe6..68405ed2f 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -1,11 +1,13 @@ use std::fs; use std::path::PathBuf; +use types::ChainSpec; /// Stores the core configuration for this validator instance. #[derive(Clone)] pub struct ClientConfig { pub data_dir: PathBuf, pub server: String, + pub spec: ChainSpec, } const DEFAULT_LIGHTHOUSE_DIR: &str = ".lighthouse-validators"; @@ -20,6 +22,11 @@ impl ClientConfig { fs::create_dir_all(&data_dir) .unwrap_or_else(|_| panic!("Unable to create {:?}", &data_dir)); let server = "localhost:50051".to_string(); - Self { data_dir, server } + let spec = ChainSpec::foundation(); + Self { + data_dir, + server, + spec, + } } } diff --git a/validator_client/src/duties/epoch_duties.rs b/validator_client/src/duties/epoch_duties.rs index 54a882f8d..35668b4a9 100644 --- a/validator_client/src/duties/epoch_duties.rs +++ b/validator_client/src/duties/epoch_duties.rs @@ -1,7 +1,7 @@ use block_proposer::{DutiesReader, DutiesReaderError}; use std::collections::HashMap; use std::sync::RwLock; -use types::{Epoch, Slot}; +use types::{Epoch, Fork, Slot}; /// The information required for a validator to propose and attest during some epoch. /// @@ -32,14 +32,14 @@ pub enum EpochDutiesMapError { /// Maps an `epoch` to some `EpochDuties` for a single validator. pub struct EpochDutiesMap { - pub epoch_length: u64, + pub slots_per_epoch: u64, pub map: RwLock<HashMap<Epoch, EpochDuties>>, } impl EpochDutiesMap { - pub fn new(epoch_length: u64) -> Self { + pub fn new(slots_per_epoch: u64) -> Self { Self { - epoch_length, + slots_per_epoch, map: RwLock::new(HashMap::new()), } } @@ -67,7 +67,7 @@ impl EpochDutiesMap { impl DutiesReader for EpochDutiesMap { fn is_block_production_slot(&self, slot: Slot) -> Result<bool, DutiesReaderError> { - let epoch = slot.epoch(self.epoch_length); + let epoch = slot.epoch(self.slots_per_epoch); let map = self.map.read().map_err(|_| DutiesReaderError::Poisoned)?; let duties = map @@ -75,6 +75,17 @@ impl DutiesReader for EpochDutiesMap { .ok_or_else(|| DutiesReaderError::UnknownEpoch)?; Ok(duties.is_block_production_slot(slot)) } + + fn fork(&self) -> Result<Fork, DutiesReaderError> { + // TODO: this is garbage data. + // + // It will almost certainly cause signatures to fail verification. + Ok(Fork { + previous_version: 0, + current_version: 0, + epoch: Epoch::new(0), + }) + } } // TODO: add tests. diff --git a/validator_client/src/duties/mod.rs b/validator_client/src/duties/mod.rs index febab4755..29bd81d0a 100644 --- a/validator_client/src/duties/mod.rs +++ b/validator_client/src/duties/mod.rs @@ -61,7 +61,7 @@ impl DutiesManager { .map_err(|_| Error::SlotClockError)? .ok_or(Error::SlotUnknowable)?; - let epoch = slot.epoch(self.spec.epoch_length); + let epoch = slot.epoch(self.spec.slots_per_epoch); if let Some(duties) = self.beacon_node.request_shuffling(epoch, &self.pubkey)? { // If these duties were known, check to see if they're updates or identical.
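// Illustrative sketch only, not part of the diff: the `epoch_length` -> `slots_per_epoch`
// rename above is purely a naming change; the slot-to-epoch conversion is still integer
// division. The value 64 below is the foundation-spec `slots_per_epoch` assumed for
// illustration.
fn epoch_of(slot: u64, slots_per_epoch: u64) -> u64 {
    // An epoch is the slot divided (integer division) by the number of slots per epoch.
    slot / slots_per_epoch
}

fn epoch_of_example() {
    assert_eq!(epoch_of(63, 64), 0);
    assert_eq!(epoch_of(64, 64), 1);
    assert_eq!(epoch_of(130, 64), 2);
}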
@@ -112,7 +112,7 @@ mod tests { #[test] pub fn polling() { let spec = Arc::new(ChainSpec::foundation()); - let duties_map = Arc::new(EpochDutiesMap::new(spec.epoch_length)); + let duties_map = Arc::new(EpochDutiesMap::new(spec.slots_per_epoch)); let keypair = Keypair::random(); let slot_clock = Arc::new(TestingSlotClock::new(0)); let beacon_node = Arc::new(TestBeaconNode::default()); diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index c835300b5..ebab8538c 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -43,6 +43,16 @@ fn main() { .help("Address to connect to BeaconNode.") .takes_value(true), ) + .arg( + Arg::with_name("spec") + .long("spec") + .value_name("spec") + .short("s") + .help("Configuration of Beacon Chain") + .takes_value(true) + .possible_values(&["foundation", "few_validators"]) + .default_value("foundation"), + ) .get_matches(); let mut config = ClientConfig::default(); @@ -62,6 +72,17 @@ fn main() { } } + // TODO: Permit loading a custom spec from file. + // Custom spec + if let Some(spec_str) = matches.value_of("spec") { + match spec_str { + "foundation" => config.spec = ChainSpec::foundation(), + "few_validators" => config.spec = ChainSpec::few_validators(), + // Should be impossible due to clap's `possible_values(..)` function. + _ => unreachable!(), + }; + } + // Log configuration info!(log, ""; "data_dir" => &config.data_dir.to_str(), @@ -81,11 +102,8 @@ fn main() { Arc::new(ValidatorServiceClient::new(ch)) }; - // Ethereum - // - // TODO: Permit loading a custom spec from file. - // https://github.com/sigp/lighthouse/issues/160 - let spec = Arc::new(ChainSpec::foundation()); + // Spec + let spec = Arc::new(config.spec.clone()); // Clock for determining the present slot. // TODO: this shouldn't be a static time, instead it should be pulled from the beacon node. @@ -93,13 +111,13 @@ fn main() { let genesis_time = 1_549_935_547; let slot_clock = { info!(log, "Genesis time"; "unix_epoch_seconds" => genesis_time); - let clock = SystemTimeSlotClock::new(genesis_time, spec.slot_duration) + let clock = SystemTimeSlotClock::new(genesis_time, spec.seconds_per_slot) .expect("Unable to instantiate SystemTimeSlotClock."); Arc::new(clock) }; - let poll_interval_millis = spec.slot_duration * 1000 / 10; // 10% epoch time precision. - info!(log, "Starting block producer service"; "polls_per_epoch" => spec.slot_duration * 1000 / poll_interval_millis); + let poll_interval_millis = spec.seconds_per_slot * 1000 / 10; // 10% epoch time precision. + info!(log, "Starting block producer service"; "polls_per_epoch" => spec.seconds_per_slot * 1000 / poll_interval_millis); /* * Start threads. @@ -111,7 +129,7 @@ fn main() { for keypair in keypairs { info!(log, "Starting validator services"; "validator" => keypair.pk.concatenated_hex_id()); - let duties_map = Arc::new(EpochDutiesMap::new(spec.epoch_length)); + let duties_map = Arc::new(EpochDutiesMap::new(spec.slots_per_epoch)); // Spawn a new thread to maintain the validator's `EpochDuties`. let duties_manager_thread = {