Merge pull request #283 from sigp/v0.4.0-types

v0.4.0 spec update
Authored by Age Manning, 2019-03-08 11:13:05 +11:00; committed via GitHub
commit 3cf2359244
97 changed files with 4565 additions and 2746 deletions

View File

@@ -1,10 +1,8 @@
 use log::trace;
-use state_processing::validate_attestation_without_signature;
+use ssz::TreeHash;
+use state_processing::per_block_processing::validate_attestation_without_signature;
 use std::collections::{HashMap, HashSet};
-use types::{
-    AggregateSignature, Attestation, AttestationData, BeaconState, BeaconStateError, Bitfield,
-    ChainSpec, FreeAttestation, Signature,
-};
+use types::*;
 
 const PHASE_0_CUSTODY_BIT: bool = false;
@@ -84,11 +82,11 @@ impl AttestationAggregator {
     /// - The signature is verified against that of the validator at `validator_index`.
     pub fn process_free_attestation(
         &mut self,
-        cached_state: &BeaconState,
+        state: &BeaconState,
         free_attestation: &FreeAttestation,
         spec: &ChainSpec,
     ) -> Result<Outcome, BeaconStateError> {
-        let attestation_duties = match cached_state.attestation_slot_and_shard_for_validator(
+        let attestation_duties = match state.attestation_slot_and_shard_for_validator(
             free_attestation.validator_index as usize,
             spec,
         ) {
@@ -119,9 +117,13 @@ impl AttestationAggregator {
             invalid_outcome!(Message::BadShard);
         }
 
-        let signable_message = free_attestation.data.signable_message(PHASE_0_CUSTODY_BIT);
+        let signable_message = AttestationDataAndCustodyBit {
+            data: free_attestation.data.clone(),
+            custody_bit: PHASE_0_CUSTODY_BIT,
+        }
+        .hash_tree_root();
 
-        let validator_record = match cached_state
+        let validator_record = match state
             .validator_registry
             .get(free_attestation.validator_index as usize)
         {
@@ -131,9 +133,7 @@
         if !free_attestation.signature.verify(
             &signable_message,
-            cached_state
-                .fork
-                .get_domain(cached_state.current_epoch(spec), spec.domain_attestation),
+            spec.get_domain(state.current_epoch(spec), Domain::Attestation, &state.fork),
             &validator_record.pubkey,
         ) {
             invalid_outcome!(Message::BadSignature);
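Note on the signature check above: the domain is now derived via `spec.get_domain(epoch, Domain::Attestation, &state.fork)` rather than asking the `Fork` object directly. As a rough illustration only (simplified stand-in types, assuming the eth2 v0.4.0 rule that the domain combines the fork version active at the given epoch with the numeric domain type), the lookup presumably behaves like this:

// Illustrative sketch, not Lighthouse's implementation: select the fork version in force at
// `epoch`, then fold in the domain type (v0.4.0: domain = fork_version * 2**32 + domain_type).
struct Fork {
    previous_version: u64,
    current_version: u64,
    epoch: u64,
}

fn get_fork_version(fork: &Fork, epoch: u64) -> u64 {
    if epoch < fork.epoch {
        fork.previous_version
    } else {
        fork.current_version
    }
}

fn get_domain(fork: &Fork, epoch: u64, domain_type: u64) -> u64 {
    get_fork_version(fork, epoch) * (1u64 << 32) + domain_type
}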

View File

@ -1,5 +1,6 @@
use crate::attestation_aggregator::{AttestationAggregator, Outcome as AggregationOutcome}; use crate::attestation_aggregator::{AttestationAggregator, Outcome as AggregationOutcome};
use crate::checkpoint::CheckPoint; use crate::checkpoint::CheckPoint;
use crate::errors::{BeaconChainError as Error, BlockProductionError};
use db::{ use db::{
stores::{BeaconBlockStore, BeaconStateStore}, stores::{BeaconBlockStore, BeaconStateStore},
ClientDB, DBError, ClientDB, DBError,
@ -10,7 +11,8 @@ use parking_lot::{RwLock, RwLockReadGuard};
use slot_clock::SlotClock; use slot_clock::SlotClock;
use ssz::ssz_encode; use ssz::ssz_encode;
use state_processing::{ use state_processing::{
BlockProcessable, BlockProcessingError, SlotProcessable, SlotProcessingError, per_block_processing, per_block_processing_without_verifying_block_signature,
per_slot_processing, BlockProcessingError, SlotProcessingError,
}; };
use std::sync::Arc; use std::sync::Arc;
use types::{ use types::{
@ -18,18 +20,6 @@ use types::{
*, *,
}; };
#[derive(Debug, PartialEq)]
pub enum Error {
InsufficientValidators,
BadRecentBlockRoots,
BeaconStateError(BeaconStateError),
DBInconsistent(String),
DBError(String),
ForkChoiceError(ForkChoiceError),
MissingBeaconBlock(Hash256),
MissingBeaconState(Hash256),
}
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub enum ValidBlock { pub enum ValidBlock {
/// The block was successfully processed. /// The block was successfully processed.
@ -65,7 +55,8 @@ pub struct BeaconChain<T: ClientDB + Sized, U: SlotClock, F: ForkChoice> {
pub slot_clock: U, pub slot_clock: U,
pub attestation_aggregator: RwLock<AttestationAggregator>, pub attestation_aggregator: RwLock<AttestationAggregator>,
pub deposits_for_inclusion: RwLock<Vec<Deposit>>, pub deposits_for_inclusion: RwLock<Vec<Deposit>>,
pub exits_for_inclusion: RwLock<Vec<Exit>>, pub exits_for_inclusion: RwLock<Vec<VoluntaryExit>>,
pub transfers_for_inclusion: RwLock<Vec<Transfer>>,
pub proposer_slashings_for_inclusion: RwLock<Vec<ProposerSlashing>>, pub proposer_slashings_for_inclusion: RwLock<Vec<ProposerSlashing>>,
pub attester_slashings_for_inclusion: RwLock<Vec<AttesterSlashing>>, pub attester_slashings_for_inclusion: RwLock<Vec<AttesterSlashing>>,
canonical_head: RwLock<CheckPoint>, canonical_head: RwLock<CheckPoint>,
@ -82,6 +73,7 @@ where
F: ForkChoice, F: ForkChoice,
{ {
/// Instantiate a new Beacon Chain, from genesis. /// Instantiate a new Beacon Chain, from genesis.
#[allow(clippy::too_many_arguments)] // Will be re-factored in the coming weeks.
pub fn genesis( pub fn genesis(
state_store: Arc<BeaconStateStore<T>>, state_store: Arc<BeaconStateStore<T>>,
block_store: Arc<BeaconBlockStore<T>>, block_store: Arc<BeaconBlockStore<T>>,
@ -136,6 +128,7 @@ where
attestation_aggregator, attestation_aggregator,
deposits_for_inclusion: RwLock::new(vec![]), deposits_for_inclusion: RwLock::new(vec![]),
exits_for_inclusion: RwLock::new(vec![]), exits_for_inclusion: RwLock::new(vec![]),
transfers_for_inclusion: RwLock::new(vec![]),
proposer_slashings_for_inclusion: RwLock::new(vec![]), proposer_slashings_for_inclusion: RwLock::new(vec![]),
attester_slashings_for_inclusion: RwLock::new(vec![]), attester_slashings_for_inclusion: RwLock::new(vec![]),
state: RwLock::new(genesis_state), state: RwLock::new(genesis_state),
@ -214,9 +207,7 @@ where
let state_slot = self.state.read().slot; let state_slot = self.state.read().slot;
let head_block_root = self.head().beacon_block_root; let head_block_root = self.head().beacon_block_root;
for _ in state_slot.as_u64()..slot.as_u64() { for _ in state_slot.as_u64()..slot.as_u64() {
self.state per_slot_processing(&mut *self.state.write(), head_block_root, &self.spec)?;
.write()
.per_slot_processing(head_block_root, &self.spec)?;
} }
Ok(()) Ok(())
} }
@ -314,7 +305,7 @@ where
.state .state
.read() .read()
.get_block_root( .get_block_root(
justified_epoch.start_slot(self.spec.epoch_length), justified_epoch.start_slot(self.spec.slots_per_epoch),
&self.spec, &self.spec,
) )
.ok_or_else(|| Error::BadRecentBlockRoots)?; .ok_or_else(|| Error::BadRecentBlockRoots)?;
@ -333,10 +324,10 @@ where
shard, shard,
beacon_block_root: self.head().beacon_block_root, beacon_block_root: self.head().beacon_block_root,
epoch_boundary_root, epoch_boundary_root,
shard_block_root: Hash256::zero(), crosslink_data_root: Hash256::zero(),
latest_crosslink: Crosslink { latest_crosslink: Crosslink {
epoch: self.state.read().slot.epoch(self.spec.epoch_length), epoch: self.state.read().slot.epoch(self.spec.slots_per_epoch),
shard_block_root: Hash256::zero(), crosslink_data_root: Hash256::zero(),
}, },
justified_epoch, justified_epoch,
justified_block_root, justified_block_root,
@ -411,7 +402,7 @@ where
} }
/// Accept some exit and queue it for inclusion in an appropriate block. /// Accept some exit and queue it for inclusion in an appropriate block.
pub fn receive_exit_for_inclusion(&self, exit: Exit) { pub fn receive_exit_for_inclusion(&self, exit: VoluntaryExit) {
// TODO: exits are not checked for validity; check them. // TODO: exits are not checked for validity; check them.
// //
// https://github.com/sigp/lighthouse/issues/276 // https://github.com/sigp/lighthouse/issues/276
@ -419,7 +410,7 @@ where
} }
/// Return a vec of exits suitable for inclusion in some block. /// Return a vec of exits suitable for inclusion in some block.
pub fn get_exits_for_block(&self) -> Vec<Exit> { pub fn get_exits_for_block(&self) -> Vec<VoluntaryExit> {
// TODO: exits are indiscriminately included; check them for validity. // TODO: exits are indiscriminately included; check them for validity.
// //
// https://github.com/sigp/lighthouse/issues/275 // https://github.com/sigp/lighthouse/issues/275
@ -430,7 +421,7 @@ where
/// inclusion queue. /// inclusion queue.
/// ///
/// This ensures that `Deposits` are not included twice in successive blocks. /// This ensures that `Deposits` are not included twice in successive blocks.
pub fn set_exits_as_included(&self, included_exits: &[Exit]) { pub fn set_exits_as_included(&self, included_exits: &[VoluntaryExit]) {
// TODO: method does not take forks into account; consider this. // TODO: method does not take forks into account; consider this.
let mut indices_to_delete = vec![]; let mut indices_to_delete = vec![];
@ -448,6 +439,44 @@ where
} }
} }
/// Accept some transfer and queue it for inclusion in an appropriate block.
pub fn receive_transfer_for_inclusion(&self, transfer: Transfer) {
// TODO: transfers are not checked for validity; check them.
//
// https://github.com/sigp/lighthouse/issues/276
self.transfers_for_inclusion.write().push(transfer);
}
/// Return a vec of transfers suitable for inclusion in some block.
pub fn get_transfers_for_block(&self) -> Vec<Transfer> {
// TODO: transfers are indiscriminately included; check them for validity.
//
// https://github.com/sigp/lighthouse/issues/275
self.transfers_for_inclusion.read().clone()
}
/// Takes a list of `Deposits` that were included in recent blocks and removes them from the
/// inclusion queue.
///
/// This ensures that `Deposits` are not included twice in successive blocks.
pub fn set_transfers_as_included(&self, included_transfers: &[Transfer]) {
// TODO: method does not take forks into account; consider this.
let mut indices_to_delete = vec![];
for included in included_transfers {
for (i, for_inclusion) in self.transfers_for_inclusion.read().iter().enumerate() {
if included == for_inclusion {
indices_to_delete.push(i);
}
}
}
let transfers_for_inclusion = &mut self.transfers_for_inclusion.write();
for i in indices_to_delete {
transfers_for_inclusion.remove(i);
}
}
/// Accept some proposer slashing and queue it for inclusion in an appropriate block. /// Accept some proposer slashing and queue it for inclusion in an appropriate block.
pub fn receive_proposer_slashing_for_inclusion(&self, proposer_slashing: ProposerSlashing) { pub fn receive_proposer_slashing_for_inclusion(&self, proposer_slashing: ProposerSlashing) {
// TODO: proposer_slashings are not checked for validity; check them. // TODO: proposer_slashings are not checked for validity; check them.
@ -647,7 +676,7 @@ where
// Transition the parent state to the present slot. // Transition the parent state to the present slot.
let mut state = parent_state; let mut state = parent_state;
for _ in state.slot.as_u64()..present_slot.as_u64() { for _ in state.slot.as_u64()..present_slot.as_u64() {
if let Err(e) = state.per_slot_processing(parent_block_root, &self.spec) { if let Err(e) = per_slot_processing(&mut state, parent_block_root, &self.spec) {
return Ok(BlockProcessingOutcome::InvalidBlock( return Ok(BlockProcessingOutcome::InvalidBlock(
InvalidBlock::SlotProcessingError(e), InvalidBlock::SlotProcessingError(e),
)); ));
@ -656,7 +685,7 @@ where
// Apply the received block to its parent state (which has been transitioned into this // Apply the received block to its parent state (which has been transitioned into this
// slot). // slot).
if let Err(e) = state.per_block_processing(&block, &self.spec) { if let Err(e) = per_block_processing(&mut state, &block, &self.spec) {
return Ok(BlockProcessingOutcome::InvalidBlock( return Ok(BlockProcessingOutcome::InvalidBlock(
InvalidBlock::PerBlockProcessingError(e), InvalidBlock::PerBlockProcessingError(e),
)); ));
@ -676,6 +705,8 @@ where
// Update the inclusion queues so they aren't re-submitted. // Update the inclusion queues so they aren't re-submitted.
self.set_deposits_as_included(&block.body.deposits[..]); self.set_deposits_as_included(&block.body.deposits[..]);
self.set_transfers_as_included(&block.body.transfers[..]);
self.set_exits_as_included(&block.body.voluntary_exits[..]);
self.set_proposer_slashings_as_included(&block.body.proposer_slashings[..]); self.set_proposer_slashings_as_included(&block.body.proposer_slashings[..]);
self.set_attester_slashings_as_included(&block.body.attester_slashings[..]); self.set_attester_slashings_as_included(&block.body.attester_slashings[..]);
@ -701,7 +732,10 @@ where
/// ///
/// The produced block will not be inherently valid, it must be signed by a block producer. /// The produced block will not be inherently valid, it must be signed by a block producer.
/// Block signing is out of the scope of this function and should be done by a separate program. /// Block signing is out of the scope of this function and should be done by a separate program.
pub fn produce_block(&self, randao_reveal: Signature) -> Option<(BeaconBlock, BeaconState)> { pub fn produce_block(
&self,
randao_reveal: Signature,
) -> Result<(BeaconBlock, BeaconState), BlockProductionError> {
debug!("Producing block at slot {}...", self.state.read().slot); debug!("Producing block at slot {}...", self.state.read().slot);
let mut state = self.state.read().clone(); let mut state = self.state.read().clone();
@ -718,7 +752,9 @@ where
attestations.len() attestations.len()
); );
let parent_root = *state.get_block_root(state.slot.saturating_sub(1_u64), &self.spec)?; let parent_root = *state
.get_block_root(state.slot.saturating_sub(1_u64), &self.spec)
.ok_or_else(|| BlockProductionError::UnableToGetBlockRootFromState)?;
let mut block = BeaconBlock { let mut block = BeaconBlock {
slot: state.slot, slot: state.slot,
@ -736,27 +772,20 @@ where
attester_slashings: self.get_attester_slashings_for_block(), attester_slashings: self.get_attester_slashings_for_block(),
attestations, attestations,
deposits: self.get_deposits_for_block(), deposits: self.get_deposits_for_block(),
exits: self.get_exits_for_block(), voluntary_exits: self.get_exits_for_block(),
transfers: self.get_transfers_for_block(),
}, },
}; };
trace!("BeaconChain::produce_block: updating state for new block.",); trace!("BeaconChain::produce_block: updating state for new block.",);
let result = per_block_processing_without_verifying_block_signature(&mut state, &block, &self.spec)?;
state.per_block_processing_without_verifying_block_signature(&block, &self.spec);
debug!(
"BeaconNode::produce_block: state processing result: {:?}",
result
);
result.ok()?;
let state_root = state.canonical_root(); let state_root = state.canonical_root();
block.state_root = state_root; block.state_root = state_root;
trace!("Block produced."); Ok((block, state))
Some((block, state))
} }
// TODO: Left this as is, modify later // TODO: Left this as is, modify later

View File

@@ -0,0 +1,33 @@
+use fork_choice::ForkChoiceError;
+use state_processing::BlockProcessingError;
+use types::*;
+
+macro_rules! easy_from_to {
+    ($from: ident, $to: ident) => {
+        impl From<$from> for $to {
+            fn from(e: $from) -> $to {
+                $to::$from(e)
+            }
+        }
+    };
+}
+
+#[derive(Debug, PartialEq)]
+pub enum BeaconChainError {
+    InsufficientValidators,
+    BadRecentBlockRoots,
+    BeaconStateError(BeaconStateError),
+    DBInconsistent(String),
+    DBError(String),
+    ForkChoiceError(ForkChoiceError),
+    MissingBeaconBlock(Hash256),
+    MissingBeaconState(Hash256),
+}
+
+#[derive(Debug, PartialEq)]
+pub enum BlockProductionError {
+    UnableToGetBlockRootFromState,
+    BlockProcessingError(BlockProcessingError),
+}
+
+easy_from_to!(BlockProcessingError, BlockProductionError);
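The `easy_from_to!` macro in this new file simply stamps out a `From` impl so the `?` operator can convert one error enum into the other. A minimal, self-contained sketch of the expansion and of how `produce_block`-style code benefits from it (the error variants below are illustrative, not the full set):

// Roughly what `easy_from_to!(BlockProcessingError, BlockProductionError)` expands to:
#[derive(Debug, PartialEq)]
pub enum BlockProcessingError {
    BadBlockSignature, // illustrative variant
}

#[derive(Debug, PartialEq)]
pub enum BlockProductionError {
    UnableToGetBlockRootFromState,
    BlockProcessingError(BlockProcessingError),
}

impl From<BlockProcessingError> for BlockProductionError {
    fn from(e: BlockProcessingError) -> BlockProductionError {
        BlockProductionError::BlockProcessingError(e)
    }
}

// With the `From` impl in place, a producer can use `?` on block-processing results:
fn process_block() -> Result<(), BlockProcessingError> {
    Err(BlockProcessingError::BadBlockSignature)
}

fn produce_block() -> Result<(), BlockProductionError> {
    process_block()?; // the error converts via `From` automatically
    Ok(())
}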

View File

@@ -1,9 +1,9 @@
 mod attestation_aggregator;
 mod beacon_chain;
 mod checkpoint;
+mod errors;
 
-pub use self::beacon_chain::{
-    BeaconChain, BlockProcessingOutcome, Error, InvalidBlock, ValidBlock,
-};
+pub use self::beacon_chain::{BeaconChain, BlockProcessingOutcome, InvalidBlock, ValidBlock};
 pub use self::checkpoint::CheckPoint;
+pub use self::errors::BeaconChainError;
 pub use fork_choice::{ForkChoice, ForkChoiceAlgorithm, ForkChoiceError};

View File

@@ -27,7 +27,7 @@ fork: tchaikovsky
 version: 1.0
 test_cases:
   - config:
-      epoch_length: 64
+      slots_per_epoch: 64
       deposits_for_chain_start: 1000
       num_slots: 64
       skip_slots: [2, 3]

View File

@@ -11,7 +11,7 @@ fn mid_epoch_state_transition(c: &mut Criterion) {
     let validator_count = 1000;
     let mut rig = BeaconChainHarness::new(ChainSpec::foundation(), validator_count);
 
-    let epoch_depth = (rig.spec.epoch_length * 2) + (rig.spec.epoch_length / 2);
+    let epoch_depth = (rig.spec.slots_per_epoch * 2) + (rig.spec.slots_per_epoch / 2);
 
     for _ in 0..epoch_depth {
         rig.advance_chain_with_block();
@@ -19,7 +19,7 @@ fn mid_epoch_state_transition(c: &mut Criterion) {
     let state = rig.beacon_chain.state.read().clone();
 
-    assert!((state.slot + 1) % rig.spec.epoch_length != 0);
+    assert!((state.slot + 1) % rig.spec.slots_per_epoch != 0);
 
     c.bench_function("mid-epoch state transition 10k validators", move |b| {
         let state = state.clone();
@@ -36,7 +36,7 @@ fn epoch_boundary_state_transition(c: &mut Criterion) {
     let validator_count = 10000;
     let mut rig = BeaconChainHarness::new(ChainSpec::foundation(), validator_count);
 
-    let epoch_depth = rig.spec.epoch_length * 2;
+    let epoch_depth = rig.spec.slots_per_epoch * 2;
 
     for _ in 0..(epoch_depth - 1) {
         rig.advance_chain_with_block();
@@ -44,7 +44,7 @@ fn epoch_boundary_state_transition(c: &mut Criterion) {
     let state = rig.beacon_chain.state.read().clone();
 
-    assert_eq!((state.slot + 1) % rig.spec.epoch_length, 0);
+    assert_eq!((state.slot + 1) % rig.spec.slots_per_epoch, 0);
 
     c.bench(
         "routines",

View File

@@ -5,23 +5,28 @@ fork: tchaikovsky
 version: 1.0
 test_cases:
   - config:
-      epoch_length: 64
+      slots_per_epoch: 64
       deposits_for_chain_start: 1000
       num_slots: 64
       skip_slots: [2, 3]
       deposits:
-        # At slot 1, create a new validator deposit of 32 ETH.
+        # At slot 1, create a new validator deposit of 5 ETH.
         - slot: 1
-          amount: 32
+          amount: 5000000000
         # Trigger more deposits...
         - slot: 3
-          amount: 32
+          amount: 5000000000
         - slot: 5
-          amount: 32
+          amount: 32000000000
       exits:
         # At slot 10, submit an exit for validator #50.
         - slot: 10
           validator_index: 50
+      transfers:
+        - slot: 6
+          from: 1000
+          to: 1001
+          amount: 5000000000
       proposer_slashings:
         # At slot 2, trigger a proposer slashing for validator #42.
         - slot: 2
@@ -44,4 +49,11 @@ test_cases:
       slashed_validators: [11, 12, 13, 14, 42]
       exited_validators: []
       exit_initiated_validators: [50]
+      balances:
+        - validator_index: 1000
+          comparison: "eq"
+          balance: 0
+        - validator_index: 1001
+          comparison: "eq"
+          balance: 10000000000
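The new `balances` checks appear to encode the effect of the slot-6 transfer on the two YAML-created depositors (presumably assigned validator indices 1000 and 1001, since the chain starts with 1000 genesis validators): index 1000 sends its entire 5 ETH deposit to index 1001, which already holds 5 ETH. A quick sanity check of the arithmetic, in Gwei, using the zero fee the test harness applies:

// Hypothetical arithmetic behind the `balances` assertions above (all values in Gwei).
fn main() {
    let deposit_1000 = 5_000_000_000u64; // slot-1 deposit, presumed validator index 1000
    let deposit_1001 = 5_000_000_000u64; // slot-3 deposit, presumed validator index 1001
    let transfer_amount = 5_000_000_000u64; // slot-6 transfer from 1000 to 1001
    let fee = 0u64; // the harness builds transfers with a zero fee

    assert_eq!(deposit_1000 - transfer_amount - fee, 0);
    assert_eq!(deposit_1001 + transfer_amount, 10_000_000_000);
}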

View File

@@ -1,7 +1,7 @@
 use super::ValidatorHarness;
 use beacon_chain::{BeaconChain, BlockProcessingOutcome};
-pub use beacon_chain::{CheckPoint, Error as BeaconChainError};
-use bls::create_proof_of_possession;
+pub use beacon_chain::{BeaconChainError, CheckPoint};
+use bls::{create_proof_of_possession, get_withdrawal_credentials};
 use db::{
     stores::{BeaconBlockStore, BeaconStateStore},
     MemoryDB,
@@ -67,7 +67,13 @@ impl BeaconChainHarness {
                     timestamp: genesis_time - 1,
                     deposit_input: DepositInput {
                         pubkey: keypair.pk.clone(),
-                        withdrawal_credentials: Hash256::zero(), // Withdrawal not possible.
+                        // Validator can withdraw using their main keypair.
+                        withdrawal_credentials: Hash256::from_slice(
+                            &get_withdrawal_credentials(
+                                &keypair.pk,
+                                spec.bls_withdrawal_prefix_byte,
+                            )[..],
+                        ),
                         proof_of_possession: create_proof_of_possession(&keypair),
                     },
                 },
@@ -125,13 +131,13 @@ impl BeaconChainHarness {
         let nth_slot = slot
             - slot
-                .epoch(self.spec.epoch_length)
-                .start_slot(self.spec.epoch_length);
-        let nth_epoch = slot.epoch(self.spec.epoch_length) - self.spec.genesis_epoch;
+                .epoch(self.spec.slots_per_epoch)
+                .start_slot(self.spec.slots_per_epoch);
+        let nth_epoch = slot.epoch(self.spec.slots_per_epoch) - self.spec.genesis_epoch;
         debug!(
             "Advancing BeaconChain to slot {}, epoch {} (epoch height: {}, slot {} in epoch.).",
             slot,
-            slot.epoch(self.spec.epoch_length),
+            slot.epoch(self.spec.slots_per_epoch),
             nth_epoch,
             nth_slot
         );
@@ -250,16 +256,13 @@
         validator_index: usize,
         message: &[u8],
         epoch: Epoch,
-        domain_type: u64,
+        domain_type: Domain,
     ) -> Option<Signature> {
         let validator = self.validators.get(validator_index)?;
 
         let domain = self
-            .beacon_chain
-            .state
-            .read()
-            .fork
-            .get_domain(epoch, domain_type);
+            .spec
+            .get_domain(epoch, domain_type, &self.beacon_chain.state.read().fork);
 
         Some(Signature::new(message, domain, &validator.keypair.sk))
     }
@@ -285,10 +288,15 @@ impl BeaconChainHarness {
     /// Note: the `ValidatorHarness` for this validator continues to exist. Once it is exited it
     /// will stop receiving duties from the beacon chain and just do nothing when prompted to
     /// produce/attest.
-    pub fn add_exit(&mut self, exit: Exit) {
+    pub fn add_exit(&mut self, exit: VoluntaryExit) {
         self.beacon_chain.receive_exit_for_inclusion(exit);
     }
 
+    /// Submit an transfer to the `BeaconChain` for inclusion in some block.
+    pub fn add_transfer(&mut self, transfer: Transfer) {
+        self.beacon_chain.receive_transfer_for_inclusion(transfer);
+    }
+
     /// Submit a proposer slashing to the `BeaconChain` for inclusion in some block.
     pub fn add_proposer_slashing(&mut self, proposer_slashing: ProposerSlashing) {
         self.beacon_chain
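The harness now derives withdrawal credentials from the validator's own public key via `get_withdrawal_credentials` rather than zeroing them out. A hedged sketch of the rule that helper presumably applies (the eth2 v0.4.0 convention of a prefix byte followed by the tail of the hashed pubkey); the `hash` parameter below is a stand-in for the repository's hashing routine:

// Illustrative only: credentials = bls_withdrawal_prefix_byte || hash(pubkey)[1..]
fn withdrawal_credentials(
    pubkey_bytes: &[u8],
    prefix_byte: u8,
    hash: impl Fn(&[u8]) -> [u8; 32],
) -> [u8; 32] {
    let digest = hash(pubkey_bytes);
    let mut credentials = [0u8; 32];
    credentials[0] = prefix_byte; // e.g. spec.bls_withdrawal_prefix_byte
    credentials[1..].copy_from_slice(&digest[1..]); // drop the first byte of the digest
    credentials
}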

View File

@@ -3,9 +3,11 @@
 use crate::beacon_chain_harness::BeaconChainHarness;
 use beacon_chain::CheckPoint;
+use bls::{create_proof_of_possession, get_withdrawal_credentials};
 use log::{info, warn};
-use ssz::TreeHash;
+use ssz::SignedRoot;
 use types::*;
 use types::{
     attester_slashing::AttesterSlashingBuilder, proposer_slashing::ProposerSlashingBuilder,
 };
@@ -55,18 +57,19 @@ impl TestCase {
     /// Return a `ChainSpec::foundation()`.
     ///
-    /// If specified in `config`, returns it with a modified `epoch_length`.
+    /// If specified in `config`, returns it with a modified `slots_per_epoch`.
     fn spec(&self) -> ChainSpec {
         let mut spec = ChainSpec::foundation();
 
-        if let Some(n) = self.config.epoch_length {
-            spec.epoch_length = n;
+        if let Some(n) = self.config.slots_per_epoch {
+            spec.slots_per_epoch = n;
         }
 
         spec
     }
 
     /// Executes the test case, returning an `ExecutionResult`.
+    #[allow(clippy::cyclomatic_complexity)]
     pub fn execute(&self) -> ExecutionResult {
         let spec = self.spec();
         let validator_count = self.config.deposits_for_chain_start;
@@ -81,14 +84,20 @@
         info!("Starting simulation across {} slots...", slots);
 
-        // -1 slots because genesis counts as a slot.
-        for slot_height in 0..slots - 1 {
+        // Start at 1 because genesis counts as a slot.
+        for slot_height in 1..slots {
+            // Used to ensure that deposits in the same slot have incremental deposit indices.
+            let mut deposit_index_offset = 0;
+
             // Feed deposits to the BeaconChain.
             if let Some(ref deposits) = self.config.deposits {
-                for (slot, deposit, keypair) in deposits {
+                for (slot, amount) in deposits {
                     if *slot == slot_height {
                         info!("Including deposit at slot height {}.", slot_height);
-                        harness.add_deposit(deposit.clone(), Some(keypair.clone()));
+                        let (deposit, keypair) =
+                            build_deposit(&harness, *amount, deposit_index_offset);
+                        harness.add_deposit(deposit, Some(keypair.clone()));
+                        deposit_index_offset += 1;
                     }
                 }
             }
@@ -136,6 +145,20 @@
                 }
             }
 
+            // Feed transfers to the BeaconChain.
+            if let Some(ref transfers) = self.config.transfers {
+                for (slot, from, to, amount) in transfers {
+                    if *slot == slot_height {
+                        info!(
+                            "Including transfer at slot height {} from validator {}.",
+                            slot_height, from
+                        );
+                        let transfer = build_transfer(&harness, *from, *to, *amount);
+                        harness.add_transfer(transfer);
+                    }
+                }
+            }
+
             // Build a block or skip a slot.
             match self.config.skip_slots {
                 Some(ref skip_slots) if skip_slots.contains(&slot_height) => {
@@ -189,7 +212,7 @@
                 for state_check in state_checks {
                     let adjusted_state_slot =
-                        state.slot - spec.genesis_epoch.start_slot(spec.epoch_length);
+                        state.slot - spec.genesis_epoch.start_slot(spec.slots_per_epoch);
 
                     if state_check.slot == adjusted_state_slot {
                         state_check.assert_valid(state, spec);
@@ -200,29 +223,84 @@
     }
 }
 
-fn build_exit(harness: &BeaconChainHarness, validator_index: u64) -> Exit {
+/// Builds a `Deposit` this is valid for the given `BeaconChainHarness` at its next slot.
+fn build_transfer(harness: &BeaconChainHarness, from: u64, to: u64, amount: u64) -> Transfer {
+    let slot = harness.beacon_chain.state.read().slot + 1;
+
+    let mut transfer = Transfer {
+        from,
+        to,
+        amount,
+        fee: 0,
+        slot,
+        pubkey: harness.validators[from as usize].keypair.pk.clone(),
+        signature: Signature::empty_signature(),
+    };
+
+    let message = transfer.signed_root();
+    let epoch = slot.epoch(harness.spec.slots_per_epoch);
+
+    transfer.signature = harness
+        .validator_sign(from as usize, &message[..], epoch, Domain::Transfer)
+        .expect("Unable to sign Transfer");
+
+    transfer
+}
+
+/// Builds a `Deposit` this is valid for the given `BeaconChainHarness`.
+///
+/// `index_offset` is used to ensure that `deposit.index == state.index` when adding multiple
+/// deposits.
+fn build_deposit(
+    harness: &BeaconChainHarness,
+    amount: u64,
+    index_offset: u64,
+) -> (Deposit, Keypair) {
+    let keypair = Keypair::random();
+    let proof_of_possession = create_proof_of_possession(&keypair);
+    let index = harness.beacon_chain.state.read().deposit_index + index_offset;
+    let withdrawal_credentials = Hash256::from_slice(
+        &get_withdrawal_credentials(&keypair.pk, harness.spec.bls_withdrawal_prefix_byte)[..],
+    );
+
+    let deposit = Deposit {
+        // Note: `branch` and `index` will need to be updated once the spec defines their
+        // validity.
+        branch: vec![],
+        index,
+        deposit_data: DepositData {
+            amount,
+            timestamp: 1,
+            deposit_input: DepositInput {
+                pubkey: keypair.pk.clone(),
+                withdrawal_credentials,
+                proof_of_possession,
+            },
+        },
+    };
+
+    (deposit, keypair)
+}
+
+/// Builds a `VoluntaryExit` this is valid for the given `BeaconChainHarness`.
+fn build_exit(harness: &BeaconChainHarness, validator_index: u64) -> VoluntaryExit {
     let epoch = harness
         .beacon_chain
         .state
         .read()
         .current_epoch(&harness.spec);
 
-    let mut exit = Exit {
+    let mut exit = VoluntaryExit {
         epoch,
         validator_index,
         signature: Signature::empty_signature(),
     };
 
-    let message = exit.hash_tree_root();
+    let message = exit.signed_root();
 
     exit.signature = harness
-        .validator_sign(
-            validator_index as usize,
-            &message[..],
-            epoch,
-            harness.spec.domain_exit,
-        )
-        .expect("Unable to sign Exit");
+        .validator_sign(validator_index as usize, &message[..], epoch, Domain::Exit)
+        .expect("Unable to sign VoluntaryExit");
 
     exit
 }
@@ -234,20 +312,20 @@ fn build_double_vote_attester_slashing(
     harness: &BeaconChainHarness,
     validator_indices: &[u64],
 ) -> AttesterSlashing {
-    let signer = |validator_index: u64, message: &[u8], epoch: Epoch, domain: u64| {
+    let signer = |validator_index: u64, message: &[u8], epoch: Epoch, domain: Domain| {
         harness
             .validator_sign(validator_index as usize, message, epoch, domain)
            .expect("Unable to sign AttesterSlashing")
     };
 
-    AttesterSlashingBuilder::double_vote(validator_indices, signer, &harness.spec)
+    AttesterSlashingBuilder::double_vote(validator_indices, signer)
 }
 
 /// Builds an `ProposerSlashing` for some `validator_index`.
 ///
 /// Signs the message using a `BeaconChainHarness`.
 fn build_proposer_slashing(harness: &BeaconChainHarness, validator_index: u64) -> ProposerSlashing {
-    let signer = |validator_index: u64, message: &[u8], epoch: Epoch, domain: u64| {
+    let signer = |validator_index: u64, message: &[u8], epoch: Epoch, domain: Domain| {
         harness
             .validator_sign(validator_index as usize, message, epoch, domain)
             .expect("Unable to sign AttesterSlashing")

View File

@@ -1,15 +1,17 @@
 use super::yaml_helpers::{as_u64, as_usize, as_vec_u64};
-use bls::create_proof_of_possession;
 use types::*;
 use yaml_rust::Yaml;
 
 pub type ValidatorIndex = u64;
 pub type ValidatorIndices = Vec<u64>;
+pub type GweiAmount = u64;
 
-pub type DepositTuple = (SlotHeight, Deposit, Keypair);
+pub type DepositTuple = (SlotHeight, GweiAmount);
 pub type ExitTuple = (SlotHeight, ValidatorIndex);
 pub type ProposerSlashingTuple = (SlotHeight, ValidatorIndex);
 pub type AttesterSlashingTuple = (SlotHeight, ValidatorIndices);
+/// (slot_height, from, to, amount)
+pub type TransferTuple = (SlotHeight, ValidatorIndex, ValidatorIndex, GweiAmount);
 
 /// Defines the execution of a `BeaconStateHarness` across a series of slots.
 #[derive(Debug)]
@@ -17,7 +19,7 @@ pub struct Config {
     /// Initial validators.
     pub deposits_for_chain_start: usize,
     /// Number of slots in an epoch.
-    pub epoch_length: Option<u64>,
+    pub slots_per_epoch: Option<u64>,
     /// Number of slots to build before ending execution.
     pub num_slots: u64,
     /// Number of slots that should be skipped due to inactive validator.
@@ -30,6 +32,8 @@ pub struct Config {
     pub attester_slashings: Option<Vec<AttesterSlashingTuple>>,
     /// Exits to be including during execution.
     pub exits: Option<Vec<ExitTuple>>,
+    /// Transfers to be including during execution.
+    pub transfers: Option<Vec<TransferTuple>>,
 }
 
 impl Config {
@@ -40,17 +44,34 @@ impl Config {
         Self {
             deposits_for_chain_start: as_usize(&yaml, "deposits_for_chain_start")
                 .expect("Must specify validator count"),
-            epoch_length: as_u64(&yaml, "epoch_length"),
+            slots_per_epoch: as_u64(&yaml, "slots_per_epoch"),
             num_slots: as_u64(&yaml, "num_slots").expect("Must specify `config.num_slots`"),
             skip_slots: as_vec_u64(yaml, "skip_slots"),
             deposits: parse_deposits(&yaml),
             proposer_slashings: parse_proposer_slashings(&yaml),
             attester_slashings: parse_attester_slashings(&yaml),
             exits: parse_exits(&yaml),
+            transfers: parse_transfers(&yaml),
         }
     }
 }
 
+/// Parse the `transfers` section of the YAML document.
+fn parse_transfers(yaml: &Yaml) -> Option<Vec<TransferTuple>> {
+    let mut tuples = vec![];
+
+    for exit in yaml["transfers"].as_vec()? {
+        let slot = as_u64(exit, "slot").expect("Incomplete transfer (slot)");
+        let from = as_u64(exit, "from").expect("Incomplete transfer (from)");
+        let to = as_u64(exit, "to").expect("Incomplete transfer (to)");
+        let amount = as_u64(exit, "amount").expect("Incomplete transfer (amount)");
+
+        tuples.push((SlotHeight::from(slot), from, to, amount));
+    }
+
+    Some(tuples)
+}
+
 /// Parse the `attester_slashings` section of the YAML document.
 fn parse_exits(yaml: &Yaml) -> Option<Vec<ExitTuple>> {
     let mut tuples = vec![];
@@ -101,30 +122,10 @@ fn parse_deposits(yaml: &Yaml) -> Option<Vec<DepositTuple>> {
     let mut deposits = vec![];
 
     for deposit in yaml["deposits"].as_vec()? {
-        let keypair = Keypair::random();
-        let proof_of_possession = create_proof_of_possession(&keypair);
-
         let slot = as_u64(deposit, "slot").expect("Incomplete deposit (slot)");
-        let amount =
-            as_u64(deposit, "amount").expect("Incomplete deposit (amount)") * 1_000_000_000;
-
-        let deposit = Deposit {
-            // Note: `branch` and `index` will need to be updated once the spec defines their
-            // validity.
-            branch: vec![],
-            index: 0,
-            deposit_data: DepositData {
-                amount,
-                timestamp: 1,
-                deposit_input: DepositInput {
-                    pubkey: keypair.pk.clone(),
-                    withdrawal_credentials: Hash256::zero(),
-                    proof_of_possession,
-                },
-            },
-        };
+        let amount = as_u64(deposit, "amount").expect("Incomplete deposit (amount)");
 
-        deposits.push((SlotHeight::from(slot), deposit, keypair));
+        deposits.push((SlotHeight::from(slot), amount))
     }
 
     Some(deposits)

View File

@@ -3,6 +3,11 @@ use log::info;
 use types::*;
 use yaml_rust::Yaml;
 
+type ValidatorIndex = u64;
+type BalanceGwei = u64;
+
+type BalanceCheckTuple = (ValidatorIndex, String, BalanceGwei);
+
 /// Tests to be conducted upon a `BeaconState` object generated during the execution of a
 /// `TestCase`.
 #[derive(Debug)]
@@ -17,6 +22,8 @@ pub struct StateCheck {
     pub exited_validators: Option<Vec<u64>>,
     /// A list of validator indices which have had an exit initiated. Must be in ascending order.
     pub exit_initiated_validators: Option<Vec<u64>>,
+    /// A list of balances to check.
+    pub balances: Option<Vec<BalanceCheckTuple>>,
 }
 
 impl StateCheck {
@@ -30,6 +37,7 @@ impl StateCheck {
             slashed_validators: as_vec_u64(&yaml, "slashed_validators"),
             exited_validators: as_vec_u64(&yaml, "exited_validators"),
             exit_initiated_validators: as_vec_u64(&yaml, "exit_initiated_validators"),
+            balances: parse_balances(&yaml),
         }
     }
@@ -39,14 +47,14 @@ impl StateCheck {
     ///
     /// Panics with an error message if any test fails.
     pub fn assert_valid(&self, state: &BeaconState, spec: &ChainSpec) {
-        let state_epoch = state.slot.epoch(spec.epoch_length);
+        let state_epoch = state.slot.epoch(spec.slots_per_epoch);
 
         info!("Running state check for slot height {}.", self.slot);
 
         // Check the state slot.
         assert_eq!(
             self.slot,
-            state.slot - spec.genesis_epoch.start_slot(spec.epoch_length),
+            state.slot - spec.genesis_epoch.start_slot(spec.slots_per_epoch),
             "State slot is invalid."
         );
@@ -66,7 +74,7 @@
             .iter()
             .enumerate()
             .filter_map(|(i, validator)| {
-                if validator.is_penalized_at(state_epoch) {
+                if validator.slashed {
                     Some(i as u64)
                 } else {
                     None
@@ -108,7 +116,7 @@
             .iter()
             .enumerate()
             .filter_map(|(i, validator)| {
-                if validator.has_initiated_exit() {
+                if validator.initiated_exit {
                     Some(i as u64)
                 } else {
                     None
@@ -124,5 +132,47 @@
                 exit_initiated_validators
             );
         }
+
+        // Check validator balances.
+        if let Some(ref balances) = self.balances {
+            for (index, comparison, expected) in balances {
+                let actual = *state
+                    .validator_balances
+                    .get(*index as usize)
+                    .expect("Balance check specifies unknown validator");
+
+                let result = match comparison.as_ref() {
+                    "eq" => actual == *expected,
+                    _ => panic!("Unknown balance comparison (use `eq`)"),
+                };
+                assert!(
+                    result,
+                    format!(
+                        "Validator balance for {}: {} !{} {}.",
+                        index, actual, comparison, expected
+                    )
+                );
+
+                info!("OK: validator balance for {:?}.", index);
+            }
+        }
     }
 }
+
+/// Parse the `transfers` section of the YAML document.
+fn parse_balances(yaml: &Yaml) -> Option<Vec<BalanceCheckTuple>> {
+    let mut tuples = vec![];
+
+    for exit in yaml["balances"].as_vec()? {
+        let from =
+            as_u64(exit, "validator_index").expect("Incomplete balance check (validator_index)");
+        let comparison = exit["comparison"]
+            .clone()
+            .into_string()
+            .expect("Incomplete balance check (amount)");
+        let balance = as_u64(exit, "balance").expect("Incomplete balance check (balance)");
+
+        tuples.push((from, comparison, balance));
+    }
+
+    Some(tuples)
+}

View File

@@ -80,8 +80,8 @@ impl<T: ClientDB, U: SlotClock, F: ForkChoice> BeaconBlockNode for DirectBeaconN
         let (block, _state) = self
             .beacon_chain
             .produce_block(randao_reveal.clone())
-            .ok_or_else(|| {
-                BeaconBlockNodeError::RemoteFailure("Did not produce block.".to_string())
+            .map_err(|e| {
+                BeaconBlockNodeError::RemoteFailure(format!("Did not produce block: {:?}", e))
             })?;
 
         if block.slot == slot {

View File

@@ -9,7 +9,7 @@ use db::ClientDB;
 use fork_choice::ForkChoice;
 use slot_clock::SlotClock;
 use std::sync::Arc;
-use types::{PublicKey, Slot};
+use types::{Fork, PublicKey, Slot};
 
 /// Connects directly to a borrowed `BeaconChain` and reads attester/proposer duties directly from
 /// it.
@@ -40,6 +40,10 @@ impl<T: ClientDB, U: SlotClock, F: ForkChoice> ProducerDutiesReader for DirectDu
             Err(_) => Err(ProducerDutiesReaderError::UnknownEpoch),
         }
     }
+
+    fn fork(&self) -> Result<Fork, ProducerDutiesReaderError> {
+        Ok(self.beacon_chain.state.read().fork.clone())
+    }
 }
 
 impl<T: ClientDB, U: SlotClock, F: ForkChoice> AttesterDutiesReader for DirectDuties<T, U, F> {

View File

@@ -28,24 +28,28 @@ pub enum AttestationProduceError {
     PollError(AttestationPollError),
 }
 
+type TestingBlockProducer = BlockProducer<
+    TestingSlotClock,
+    DirectBeaconNode<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
+    DirectDuties<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
+    LocalSigner,
+>;
+
+type TestingAttester = Attester<
+    TestingSlotClock,
+    DirectBeaconNode<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
+    DirectDuties<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
+    LocalSigner,
+>;
+
 /// A `BlockProducer` and `Attester` which sign using a common keypair.
 ///
 /// The test validator connects directly to a borrowed `BeaconChain` struct. It is useful for
 /// testing that the core proposer and attester logic is functioning. Also for supporting beacon
 /// chain tests.
 pub struct ValidatorHarness {
-    pub block_producer: BlockProducer<
-        TestingSlotClock,
-        DirectBeaconNode<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
-        DirectDuties<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
-        LocalSigner,
-    >,
-    pub attester: Attester<
-        TestingSlotClock,
-        DirectBeaconNode<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
-        DirectDuties<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
-        LocalSigner,
-    >,
+    pub block_producer: TestingBlockProducer,
+    pub attester: TestingAttester,
     pub spec: Arc<ChainSpec>,
     pub epoch_map: Arc<DirectDuties<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>>,
     pub keypair: Keypair,

View File

@@ -29,7 +29,7 @@ fn it_can_produce_past_first_epoch_boundary() {
     debug!("Harness built, tests starting..");
 
-    let blocks = harness.spec.epoch_length * 2 + 1;
+    let blocks = harness.spec.slots_per_epoch * 2 + 1;
 
     for i in 0..blocks {
         harness.advance_chain_with_block();

View File

@@ -78,7 +78,7 @@ fn main() {
     // Slot clock
     let genesis_time = 1_549_935_547; // 12th Feb 2018 (arbitrary value in the past).
-    let slot_clock = SystemTimeSlotClock::new(genesis_time, spec.slot_duration)
+    let slot_clock = SystemTimeSlotClock::new(genesis_time, spec.seconds_per_slot)
         .expect("Unable to load SystemTimeSlotClock");
     // Choose the fork choice
     let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone());

View File

@@ -2,8 +2,9 @@ pub mod test_utils;
 mod traits;
 
 use slot_clock::SlotClock;
+use ssz::TreeHash;
 use std::sync::Arc;
-use types::{AttestationData, FreeAttestation, Signature, Slot};
+use types::{AttestationData, AttestationDataAndCustodyBit, FreeAttestation, Signature, Slot};
 
 pub use self::traits::{
     BeaconNode, BeaconNodeError, DutiesReader, DutiesReaderError, PublishOutcome, Signer,
@@ -137,10 +138,14 @@ impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> Attester<T, U, V,
     fn sign_attestation_data(&mut self, attestation_data: &AttestationData) -> Option<Signature> {
         self.store_produce(attestation_data);
 
-        self.signer.sign_attestation_message(
-            &attestation_data.signable_message(PHASE_0_CUSTODY_BIT)[..],
-            DOMAIN_ATTESTATION,
-        )
+        let message = AttestationDataAndCustodyBit {
+            data: attestation_data.clone(),
+            custody_bit: PHASE_0_CUSTODY_BIT,
+        }
+        .hash_tree_root();
+
+        self.signer
+            .sign_attestation_message(&message[..], DOMAIN_ATTESTATION)
     }
 
     /// Returns `true` if signing some attestation_data is safe (non-slashable).
@@ -195,9 +200,9 @@ mod tests {
         let beacon_node = Arc::new(SimulatedBeaconNode::default());
         let signer = Arc::new(LocalSigner::new(Keypair::random()));
 
-        let mut duties = EpochMap::new(spec.epoch_length);
+        let mut duties = EpochMap::new(spec.slots_per_epoch);
         let attest_slot = Slot::new(100);
-        let attest_epoch = attest_slot / spec.epoch_length;
+        let attest_epoch = attest_slot / spec.slots_per_epoch;
         let attest_shard = 12;
         duties.insert_attestation_shard(attest_slot, attest_shard);
         duties.set_validator_index(Some(2));
@@ -243,7 +248,7 @@
         );
 
         // In an epoch without known duties...
-        let slot = (attest_epoch + 1) * spec.epoch_length;
+        let slot = (attest_epoch + 1) * spec.slots_per_epoch;
         slot_clock.set_slot(slot.into());
         assert_eq!(
             attester.poll(),

View File

@@ -3,22 +3,22 @@ use std::collections::HashMap;
 use types::{Epoch, Slot};
 
 pub struct EpochMap {
-    epoch_length: u64,
+    slots_per_epoch: u64,
     validator_index: Option<u64>,
     map: HashMap<Epoch, (Slot, u64)>,
 }
 
 impl EpochMap {
-    pub fn new(epoch_length: u64) -> Self {
+    pub fn new(slots_per_epoch: u64) -> Self {
         Self {
-            epoch_length,
+            slots_per_epoch,
             validator_index: None,
             map: HashMap::new(),
         }
     }
 
     pub fn insert_attestation_shard(&mut self, slot: Slot, shard: u64) {
-        let epoch = slot.epoch(self.epoch_length);
+        let epoch = slot.epoch(self.slots_per_epoch);
         self.map.insert(epoch, (slot, shard));
     }
@@ -29,7 +29,7 @@ impl EpochMap {
 impl DutiesReader for EpochMap {
     fn attestation_shard(&self, slot: Slot) -> Result<Option<u64>, DutiesReaderError> {
-        let epoch = slot.epoch(self.epoch_length);
+        let epoch = slot.epoch(self.slots_per_epoch);
         match self.map.get(&epoch) {
             Some((attest_slot, attest_shard)) if *attest_slot == slot => Ok(Some(*attest_shard)),

View File

@@ -1,10 +1,10 @@
 pub mod test_utils;
 mod traits;
 
-use int_to_bytes::int_to_bytes32;
 use slot_clock::SlotClock;
+use ssz::{SignedRoot, TreeHash};
 use std::sync::Arc;
-use types::{BeaconBlock, ChainSpec, Slot};
+use types::{BeaconBlock, ChainSpec, Domain, Hash256, Proposal, Slot};
 
 pub use self::traits::{
     BeaconNode, BeaconNodeError, DutiesReader, DutiesReaderError, PublishOutcome, Signer,
@@ -28,6 +28,8 @@ pub enum PollOutcome {
     SignerRejection(Slot),
     /// The public key for this validator is not an active validator.
     ValidatorIsUnknown(Slot),
+    /// Unable to determine a `Fork` for signature domain generation.
+    UnableToGetFork(Slot),
 }
 
 #[derive(Debug, PartialEq)]
@@ -130,14 +132,20 @@ impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> BlockProducer<T, U
     /// The slash-protection code is not yet implemented. There is zero protection against
     /// slashing.
     fn produce_block(&mut self, slot: Slot) -> Result<PollOutcome, Error> {
+        let fork = match self.epoch_map.fork() {
+            Ok(fork) => fork,
+            Err(_) => return Ok(PollOutcome::UnableToGetFork(slot)),
+        };
+
         let randao_reveal = {
             // TODO: add domain, etc to this message. Also ensure result matches `into_to_bytes32`.
-            let message = int_to_bytes32(slot.epoch(self.spec.epoch_length).as_u64());
+            let message = slot.epoch(self.spec.slots_per_epoch).hash_tree_root();
 
-            match self
-                .signer
-                .sign_randao_reveal(&message, self.spec.domain_randao)
-            {
+            match self.signer.sign_randao_reveal(
+                &message,
+                self.spec
+                    .get_domain(slot.epoch(self.spec.slots_per_epoch), Domain::Randao, &fork),
+            ) {
                 None => return Ok(PollOutcome::SignerRejection(slot)),
                 Some(signature) => signature,
             }
@@ -148,7 +156,12 @@
                 .produce_beacon_block(slot, &randao_reveal)?
             {
                 if self.safe_to_produce(&block) {
-                    if let Some(block) = self.sign_block(block) {
+                    let domain = self.spec.get_domain(
+                        slot.epoch(self.spec.slots_per_epoch),
+                        Domain::Proposal,
+                        &fork,
+                    );
+                    if let Some(block) = self.sign_block(block, domain) {
                         self.beacon_node.publish_beacon_block(block)?;
                         Ok(PollOutcome::BlockProduced(slot))
                     } else {
@@ -166,13 +179,20 @@
     ///
     /// Important: this function will not check to ensure the block is not slashable. This must be
     /// done upstream.
-    fn sign_block(&mut self, mut block: BeaconBlock) -> Option<BeaconBlock> {
+    fn sign_block(&mut self, mut block: BeaconBlock, domain: u64) -> Option<BeaconBlock> {
         self.store_produce(&block);
 
-        match self.signer.sign_block_proposal(
-            &block.proposal_root(&self.spec)[..],
-            self.spec.domain_proposal,
-        ) {
+        let proposal = Proposal {
+            slot: block.slot,
+            shard: self.spec.beacon_chain_shard_number,
+            block_root: Hash256::from_slice(&block.signed_root()[..]),
+            signature: block.signature.clone(),
+        };
+
+        match self
+            .signer
+            .sign_block_proposal(&proposal.signed_root()[..], domain)
+        {
            None => None,
            Some(signature) => {
                block.signature = signature;
@@ -233,9 +253,9 @@ mod tests {
         let beacon_node = Arc::new(SimulatedBeaconNode::default());
         let signer = Arc::new(LocalSigner::new(Keypair::random()));
 
-        let mut epoch_map = EpochMap::new(spec.epoch_length);
+        let mut epoch_map = EpochMap::new(spec.slots_per_epoch);
         let produce_slot = Slot::new(100);
-        let produce_epoch = produce_slot.epoch(spec.epoch_length);
+        let produce_epoch = produce_slot.epoch(spec.slots_per_epoch);
         epoch_map.map.insert(produce_epoch, produce_slot);
         let epoch_map = Arc::new(epoch_map);
@@ -280,7 +300,7 @@
         );
 
         // In an epoch without known duties...
-        let slot = (produce_epoch.as_u64() + 1) * spec.epoch_length;
+        let slot = (produce_epoch.as_u64() + 1) * spec.slots_per_epoch;
         slot_clock.set_slot(slot);
         assert_eq!(
             block_proposer.poll(),

View File

@@ -1,16 +1,16 @@
 use crate::{DutiesReader, DutiesReaderError};
 use std::collections::HashMap;
-use types::{Epoch, Slot};
+use types::{Epoch, Fork, Slot};
 
 pub struct EpochMap {
-    epoch_length: u64,
+    slots_per_epoch: u64,
     pub map: HashMap<Epoch, Slot>,
 }
 
 impl EpochMap {
-    pub fn new(epoch_length: u64) -> Self {
+    pub fn new(slots_per_epoch: u64) -> Self {
         Self {
-            epoch_length,
+            slots_per_epoch,
             map: HashMap::new(),
         }
     }
@@ -18,11 +18,19 @@ impl EpochMap {
 impl DutiesReader for EpochMap {
     fn is_block_production_slot(&self, slot: Slot) -> Result<bool, DutiesReaderError> {
-        let epoch = slot.epoch(self.epoch_length);
+        let epoch = slot.epoch(self.slots_per_epoch);
         match self.map.get(&epoch) {
             Some(s) if *s == slot => Ok(true),
             Some(s) if *s != slot => Ok(false),
             _ => Err(DutiesReaderError::UnknownEpoch),
         }
     }
+
+    fn fork(&self) -> Result<Fork, DutiesReaderError> {
+        Ok(Fork {
+            previous_version: 0,
+            current_version: 0,
+            epoch: Epoch::new(0),
+        })
+    }
 }

View File

@@ -1,4 +1,4 @@
-use types::{BeaconBlock, Signature, Slot};
+use types::{BeaconBlock, Fork, Signature, Slot};
 
 #[derive(Debug, PartialEq, Clone)]
 pub enum BeaconNodeError {
@@ -40,6 +40,7 @@ pub enum DutiesReaderError {
 /// Informs a validator of their duties (e.g., block production).
 pub trait DutiesReader: Send + Sync {
     fn is_block_production_slot(&self, slot: Slot) -> Result<bool, DutiesReaderError>;
+    fn fork(&self) -> Result<Fork, DutiesReaderError>;
 }
 
 /// Signs message using an internally-maintained private key.

View File

@@ -95,7 +95,7 @@ where
         let active_validator_indices = get_active_validator_indices(
             &current_state.validator_registry[..],
-            block_slot.epoch(spec.epoch_length),
+            block_slot.epoch(spec.slots_per_epoch),
         );
 
         for index in active_validator_indices {
@@ -379,7 +379,7 @@ impl<T: ClientDB + Sized> ForkChoice for BitwiseLMDGhost<T> {
             trace!("Current Step: {}", step);
             if let Some(clear_winner) = self.get_clear_winner(
                 &latest_votes,
-                block_height - (block_height % u64::from(step)) + u64::from(step),
+                block_height - (block_height % step) + step,
                 spec,
             ) {
                 current_head = clear_winner;

View File

@ -64,7 +64,7 @@ where
let active_validator_indices = get_active_validator_indices( let active_validator_indices = get_active_validator_indices(
&current_state.validator_registry[..], &current_state.validator_registry[..],
block_slot.epoch(spec.epoch_length), block_slot.epoch(spec.slots_per_epoch),
); );
for index in active_validator_indices { for index in active_validator_indices {

View File

@ -81,7 +81,8 @@ fn test_yaml_vectors(
attester_slashings: vec![], attester_slashings: vec![],
attestations: vec![], attestations: vec![],
deposits: vec![], deposits: vec![],
exits: vec![], voluntary_exits: vec![],
transfers: vec![],
}; };
// process the tests // process the tests
@ -249,9 +250,9 @@ fn setup_inital_state(
withdrawal_credentials: zero_hash, withdrawal_credentials: zero_hash,
activation_epoch: Epoch::from(0u64), activation_epoch: Epoch::from(0u64),
exit_epoch: spec.far_future_epoch, exit_epoch: spec.far_future_epoch,
withdrawal_epoch: spec.far_future_epoch, withdrawable_epoch: spec.far_future_epoch,
penalized_epoch: spec.far_future_epoch, initiated_exit: false,
status_flags: None, slashed: false,
}; };
// activate the validators // activate the validators
for _ in 0..no_validators { for _ in 0..no_validators {

View File

@ -10,14 +10,16 @@ harness = false
[dev-dependencies] [dev-dependencies]
criterion = "0.2" criterion = "0.2"
test_harness = { path = "../../beacon_node/beacon_chain/test_harness" }
env_logger = "0.6.0" env_logger = "0.6.0"
[dependencies] [dependencies]
bls = { path = "../utils/bls" }
hashing = { path = "../utils/hashing" } hashing = { path = "../utils/hashing" }
int_to_bytes = { path = "../utils/int_to_bytes" } int_to_bytes = { path = "../utils/int_to_bytes" }
integer-sqrt = "0.1" integer-sqrt = "0.1"
log = "0.4" log = "0.4"
merkle_proof = { path = "../utils/merkle_proof" }
ssz = { path = "../utils/ssz" } ssz = { path = "../utils/ssz" }
ssz_derive = { path = "../utils/ssz_derive" }
types = { path = "../types" } types = { path = "../types" }
rayon = "1.0" rayon = "1.0"

View File

@ -1,455 +0,0 @@
use self::verify_slashable_attestation::verify_slashable_attestation;
use crate::SlotProcessingError;
use hashing::hash;
use int_to_bytes::int_to_bytes32;
use log::{debug, trace};
use ssz::{ssz_encode, TreeHash};
use types::*;
mod verify_slashable_attestation;
const PHASE_0_CUSTODY_BIT: bool = false;
#[derive(Debug, PartialEq)]
pub enum Error {
DBError(String),
StateAlreadyTransitioned,
PresentSlotIsNone,
UnableToDecodeBlock,
MissingParentState(Hash256),
InvalidParentState(Hash256),
MissingBeaconBlock(Hash256),
InvalidBeaconBlock(Hash256),
MissingParentBlock(Hash256),
StateSlotMismatch,
BadBlockSignature,
BadRandaoSignature,
MaxProposerSlashingsExceeded,
BadProposerSlashing,
MaxAttesterSlashingsExceed,
MaxAttestationsExceeded,
BadAttesterSlashing,
InvalidAttestation(AttestationValidationError),
NoBlockRoot,
MaxDepositsExceeded,
BadDeposit,
MaxExitsExceeded,
BadExit,
BadCustodyReseeds,
BadCustodyChallenges,
BadCustodyResponses,
BeaconStateError(BeaconStateError),
SlotProcessingError(SlotProcessingError),
}
#[derive(Debug, PartialEq)]
pub enum AttestationValidationError {
IncludedTooEarly,
IncludedTooLate,
WrongJustifiedSlot,
WrongJustifiedRoot,
BadLatestCrosslinkRoot,
BadSignature,
ShardBlockRootNotZero,
NoBlockRoot,
BeaconStateError(BeaconStateError),
}
macro_rules! ensure {
($condition: expr, $result: expr) => {
if !$condition {
return Err($result);
}
};
}
pub trait BlockProcessable {
fn per_block_processing(&mut self, block: &BeaconBlock, spec: &ChainSpec) -> Result<(), Error>;
fn per_block_processing_without_verifying_block_signature(
&mut self,
block: &BeaconBlock,
spec: &ChainSpec,
) -> Result<(), Error>;
}
impl BlockProcessable for BeaconState {
fn per_block_processing(&mut self, block: &BeaconBlock, spec: &ChainSpec) -> Result<(), Error> {
per_block_processing_signature_optional(self, block, true, spec)
}
fn per_block_processing_without_verifying_block_signature(
&mut self,
block: &BeaconBlock,
spec: &ChainSpec,
) -> Result<(), Error> {
per_block_processing_signature_optional(self, block, false, spec)
}
}
fn per_block_processing_signature_optional(
mut state: &mut BeaconState,
block: &BeaconBlock,
verify_block_signature: bool,
spec: &ChainSpec,
) -> Result<(), Error> {
ensure!(block.slot == state.slot, Error::StateSlotMismatch);
// Building the previous epoch cache could be delayed until an attestation from a previous epoch is
// included. This is left for future optimisation.
state.build_epoch_cache(RelativeEpoch::Previous, spec)?;
state.build_epoch_cache(RelativeEpoch::Current, spec)?;
/*
* Proposer Signature
*/
let block_proposer_index = state.get_beacon_proposer_index(block.slot, spec)?;
let block_proposer = &state.validator_registry[block_proposer_index];
if verify_block_signature {
ensure!(
bls_verify(
&block_proposer.pubkey,
&block.proposal_root(spec)[..],
&block.signature,
get_domain(&state.fork, state.current_epoch(spec), spec.domain_proposal)
),
Error::BadBlockSignature
);
}
/*
* RANDAO
*/
ensure!(
bls_verify(
&block_proposer.pubkey,
&int_to_bytes32(state.current_epoch(spec).as_u64()),
&block.randao_reveal,
get_domain(&state.fork, state.current_epoch(spec), spec.domain_randao)
),
Error::BadRandaoSignature
);
// TODO: check this is correct.
let new_mix = {
let mut mix = state.latest_randao_mixes
[state.slot.as_usize() % spec.latest_randao_mixes_length]
.as_bytes()
.to_vec();
mix.append(&mut ssz_encode(&block.randao_reveal));
Hash256::from_slice(&hash(&mix)[..])
};
state.latest_randao_mixes[state.slot.as_usize() % spec.latest_randao_mixes_length] = new_mix;
/*
* Eth1 data
*/
// TODO: Eth1 data processing.
/*
* Proposer slashings
*/
ensure!(
block.body.proposer_slashings.len() as u64 <= spec.max_proposer_slashings,
Error::MaxProposerSlashingsExceeded
);
for proposer_slashing in &block.body.proposer_slashings {
let proposer = state
.validator_registry
.get(proposer_slashing.proposer_index as usize)
.ok_or(Error::BadProposerSlashing)?;
ensure!(
proposer_slashing.proposal_data_1.slot == proposer_slashing.proposal_data_2.slot,
Error::BadProposerSlashing
);
ensure!(
proposer_slashing.proposal_data_1.shard == proposer_slashing.proposal_data_2.shard,
Error::BadProposerSlashing
);
ensure!(
proposer_slashing.proposal_data_1.block_root
!= proposer_slashing.proposal_data_2.block_root,
Error::BadProposerSlashing
);
ensure!(
proposer.penalized_epoch > state.current_epoch(spec),
Error::BadProposerSlashing
);
ensure!(
bls_verify(
&proposer.pubkey,
&proposer_slashing.proposal_data_1.hash_tree_root(),
&proposer_slashing.proposal_signature_1,
get_domain(
&state.fork,
proposer_slashing
.proposal_data_1
.slot
.epoch(spec.epoch_length),
spec.domain_proposal
)
),
Error::BadProposerSlashing
);
ensure!(
bls_verify(
&proposer.pubkey,
&proposer_slashing.proposal_data_2.hash_tree_root(),
&proposer_slashing.proposal_signature_2,
get_domain(
&state.fork,
proposer_slashing
.proposal_data_2
.slot
.epoch(spec.epoch_length),
spec.domain_proposal
)
),
Error::BadProposerSlashing
);
state.penalize_validator(proposer_slashing.proposer_index as usize, spec)?;
}
/*
* Attester slashings
*/
ensure!(
block.body.attester_slashings.len() as u64 <= spec.max_attester_slashings,
Error::MaxAttesterSlashingsExceed
);
for attester_slashing in &block.body.attester_slashings {
verify_slashable_attestation(&mut state, &attester_slashing, spec)?;
}
/*
* Attestations
*/
ensure!(
block.body.attestations.len() as u64 <= spec.max_attestations,
Error::MaxAttestationsExceeded
);
debug!("Verifying {} attestations.", block.body.attestations.len());
for attestation in &block.body.attestations {
validate_attestation(&state, attestation, spec)?;
let pending_attestation = PendingAttestation {
data: attestation.data.clone(),
aggregation_bitfield: attestation.aggregation_bitfield.clone(),
custody_bitfield: attestation.custody_bitfield.clone(),
inclusion_slot: state.slot,
};
state.latest_attestations.push(pending_attestation);
}
/*
* Deposits
*/
ensure!(
block.body.deposits.len() as u64 <= spec.max_deposits,
Error::MaxDepositsExceeded
);
// TODO: verify deposit merkle branches.
for deposit in &block.body.deposits {
debug!(
"Processing deposit for pubkey {:?}",
deposit.deposit_data.deposit_input.pubkey
);
state
.process_deposit(
deposit.deposit_data.deposit_input.pubkey.clone(),
deposit.deposit_data.amount,
deposit
.deposit_data
.deposit_input
.proof_of_possession
.clone(),
deposit.deposit_data.deposit_input.withdrawal_credentials,
None,
spec,
)
.map_err(|_| Error::BadDeposit)?;
}
/*
* Exits
*/
ensure!(
block.body.exits.len() as u64 <= spec.max_exits,
Error::MaxExitsExceeded
);
for exit in &block.body.exits {
let validator = state
.validator_registry
.get(exit.validator_index as usize)
.ok_or(Error::BadExit)?;
ensure!(
validator.exit_epoch
> state.get_entry_exit_effect_epoch(state.current_epoch(spec), spec),
Error::BadExit
);
ensure!(state.current_epoch(spec) >= exit.epoch, Error::BadExit);
let exit_message = {
let exit_struct = Exit {
epoch: exit.epoch,
validator_index: exit.validator_index,
signature: spec.empty_signature.clone(),
};
exit_struct.hash_tree_root()
};
ensure!(
bls_verify(
&validator.pubkey,
&exit_message,
&exit.signature,
get_domain(&state.fork, exit.epoch, spec.domain_exit)
),
Error::BadProposerSlashing
);
state.initiate_validator_exit(exit.validator_index as usize);
}
debug!("State transition complete.");
Ok(())
}
pub fn validate_attestation(
state: &BeaconState,
attestation: &Attestation,
spec: &ChainSpec,
) -> Result<(), AttestationValidationError> {
validate_attestation_signature_optional(state, attestation, spec, true)
}
pub fn validate_attestation_without_signature(
state: &BeaconState,
attestation: &Attestation,
spec: &ChainSpec,
) -> Result<(), AttestationValidationError> {
validate_attestation_signature_optional(state, attestation, spec, false)
}
fn validate_attestation_signature_optional(
state: &BeaconState,
attestation: &Attestation,
spec: &ChainSpec,
verify_signature: bool,
) -> Result<(), AttestationValidationError> {
trace!(
"validate_attestation_signature_optional: attestation epoch: {}",
attestation.data.slot.epoch(spec.epoch_length)
);
ensure!(
attestation.data.slot + spec.min_attestation_inclusion_delay <= state.slot,
AttestationValidationError::IncludedTooEarly
);
ensure!(
attestation.data.slot + spec.epoch_length >= state.slot,
AttestationValidationError::IncludedTooLate
);
if attestation.data.slot >= state.current_epoch_start_slot(spec) {
ensure!(
attestation.data.justified_epoch == state.justified_epoch,
AttestationValidationError::WrongJustifiedSlot
);
} else {
ensure!(
attestation.data.justified_epoch == state.previous_justified_epoch,
AttestationValidationError::WrongJustifiedSlot
);
}
ensure!(
attestation.data.justified_block_root
== *state
.get_block_root(
attestation
.data
.justified_epoch
.start_slot(spec.epoch_length),
&spec
)
.ok_or(AttestationValidationError::NoBlockRoot)?,
AttestationValidationError::WrongJustifiedRoot
);
let potential_crosslink = Crosslink {
shard_block_root: attestation.data.shard_block_root,
epoch: attestation.data.slot.epoch(spec.epoch_length),
};
ensure!(
(attestation.data.latest_crosslink
== state.latest_crosslinks[attestation.data.shard as usize])
| (attestation.data.latest_crosslink == potential_crosslink),
AttestationValidationError::BadLatestCrosslinkRoot
);
if verify_signature {
let participants = state.get_attestation_participants(
&attestation.data,
&attestation.aggregation_bitfield,
spec,
)?;
trace!(
"slot: {}, shard: {}, participants: {:?}",
attestation.data.slot,
attestation.data.shard,
participants
);
let mut group_public_key = AggregatePublicKey::new();
for participant in participants {
group_public_key.add(&state.validator_registry[participant as usize].pubkey)
}
ensure!(
attestation.verify_signature(
&group_public_key,
PHASE_0_CUSTODY_BIT,
get_domain(
&state.fork,
attestation.data.slot.epoch(spec.epoch_length),
spec.domain_attestation,
)
),
AttestationValidationError::BadSignature
);
}
ensure!(
attestation.data.shard_block_root == spec.zero_hash,
AttestationValidationError::ShardBlockRootNotZero
);
Ok(())
}
fn get_domain(fork: &Fork, epoch: Epoch, domain_type: u64) -> u64 {
fork.get_domain(epoch, domain_type)
}
fn bls_verify(pubkey: &PublicKey, message: &[u8], signature: &Signature, domain: u64) -> bool {
signature.verify(message, domain, pubkey)
}
impl From<AttestationValidationError> for Error {
fn from(e: AttestationValidationError) -> Error {
Error::InvalidAttestation(e)
}
}
impl From<BeaconStateError> for Error {
fn from(e: BeaconStateError) -> Error {
Error::BeaconStateError(e)
}
}
impl From<SlotProcessingError> for Error {
fn from(e: SlotProcessingError) -> Error {
Error::SlotProcessingError(e)
}
}
impl From<BeaconStateError> for AttestationValidationError {
fn from(e: BeaconStateError) -> AttestationValidationError {
AttestationValidationError::BeaconStateError(e)
}
}

View File

@ -1,61 +0,0 @@
use super::Error;
use types::*;
macro_rules! ensure {
($condition: expr, $result: expr) => {
if !$condition {
return Err($result);
}
};
}
/// Returns `Ok(())` if some `AttesterSlashing` is valid to be included in some `BeaconState`,
/// otherwise returns an `Err`.
pub fn verify_slashable_attestation(
state: &mut BeaconState,
attester_slashing: &AttesterSlashing,
spec: &ChainSpec,
) -> Result<(), Error> {
let slashable_attestation_1 = &attester_slashing.slashable_attestation_1;
let slashable_attestation_2 = &attester_slashing.slashable_attestation_2;
ensure!(
slashable_attestation_1.data != slashable_attestation_2.data,
Error::BadAttesterSlashing
);
ensure!(
slashable_attestation_1.is_double_vote(slashable_attestation_2, spec)
| slashable_attestation_1.is_surround_vote(slashable_attestation_2, spec),
Error::BadAttesterSlashing
);
ensure!(
state.verify_slashable_attestation(&slashable_attestation_1, spec),
Error::BadAttesterSlashing
);
ensure!(
state.verify_slashable_attestation(&slashable_attestation_2, spec),
Error::BadAttesterSlashing
);
let mut slashable_indices = vec![];
for i in &slashable_attestation_1.validator_indices {
let validator = state
.validator_registry
.get(*i as usize)
.ok_or_else(|| Error::BadAttesterSlashing)?;
if slashable_attestation_1.validator_indices.contains(&i)
& !validator.is_penalized_at(state.current_epoch(spec))
{
slashable_indices.push(i);
}
}
ensure!(!slashable_indices.is_empty(), Error::BadAttesterSlashing);
for i in slashable_indices {
state.penalize_validator(*i as usize, spec)?;
}
Ok(())
}

View File

@ -1,723 +0,0 @@
use integer_sqrt::IntegerSquareRoot;
use log::{debug, trace};
use rayon::prelude::*;
use ssz::TreeHash;
use std::collections::{HashMap, HashSet};
use std::iter::FromIterator;
use types::{
validator_registry::get_active_validator_indices, BeaconState, BeaconStateError, ChainSpec,
Crosslink, Epoch, Hash256, InclusionError, PendingAttestation, RelativeEpoch,
};
mod tests;
macro_rules! safe_add_assign {
($a: expr, $b: expr) => {
$a = $a.saturating_add($b);
};
}
macro_rules! safe_sub_assign {
($a: expr, $b: expr) => {
$a = $a.saturating_sub($b);
};
}
#[derive(Debug, PartialEq)]
pub enum Error {
UnableToDetermineProducer,
NoBlockRoots,
BaseRewardQuotientIsZero,
NoRandaoSeed,
BeaconStateError(BeaconStateError),
InclusionError(InclusionError),
WinningRootError(WinningRootError),
}
#[derive(Debug, PartialEq)]
pub enum WinningRootError {
NoWinningRoot,
BeaconStateError(BeaconStateError),
}
#[derive(Clone)]
pub struct WinningRoot {
pub shard_block_root: Hash256,
pub attesting_validator_indices: Vec<usize>,
pub total_balance: u64,
pub total_attesting_balance: u64,
}
pub trait EpochProcessable {
fn per_epoch_processing(&mut self, spec: &ChainSpec) -> Result<(), Error>;
}
impl EpochProcessable for BeaconState {
// Cyclomatic complexity is ignored. It would be ideal to split this function apart; however, it
// remains monolithic to allow for easier spec updates. Once the spec is more stable we can
// optimise.
#[allow(clippy::cyclomatic_complexity)]
fn per_epoch_processing(&mut self, spec: &ChainSpec) -> Result<(), Error> {
let current_epoch = self.current_epoch(spec);
let previous_epoch = self.previous_epoch(spec);
let next_epoch = self.next_epoch(spec);
debug!(
"Starting per-epoch processing on epoch {}...",
self.current_epoch(spec)
);
// Ensure all of the caches are built.
self.build_epoch_cache(RelativeEpoch::Previous, spec)?;
self.build_epoch_cache(RelativeEpoch::Current, spec)?;
self.build_epoch_cache(RelativeEpoch::Next, spec)?;
/*
* Validators attesting during the current epoch.
*/
let active_validator_indices = get_active_validator_indices(
&self.validator_registry,
self.slot.epoch(spec.epoch_length),
);
let current_total_balance = self.get_total_balance(&active_validator_indices[..], spec);
trace!(
"{} validators with a total balance of {} wei.",
active_validator_indices.len(),
current_total_balance
);
let current_epoch_attestations: Vec<&PendingAttestation> = self
.latest_attestations
.par_iter()
.filter(|a| {
(a.data.slot / spec.epoch_length).epoch(spec.epoch_length)
== self.current_epoch(spec)
})
.collect();
trace!(
"Current epoch attestations: {}",
current_epoch_attestations.len()
);
let current_epoch_boundary_attestations: Vec<&PendingAttestation> =
current_epoch_attestations
.par_iter()
.filter(
|a| match self.get_block_root(self.current_epoch_start_slot(spec), spec) {
Some(block_root) => {
(a.data.epoch_boundary_root == *block_root)
&& (a.data.justified_epoch == self.justified_epoch)
}
None => unreachable!(),
},
)
.cloned()
.collect();
let current_epoch_boundary_attester_indices = self
.get_attestation_participants_union(&current_epoch_boundary_attestations[..], spec)?;
let current_epoch_boundary_attesting_balance =
self.get_total_balance(&current_epoch_boundary_attester_indices[..], spec);
trace!(
"Current epoch boundary attesters: {}",
current_epoch_boundary_attester_indices.len()
);
/*
* Validators attesting during the previous epoch
*/
/*
* Validators that made an attestation during the previous epoch
*/
let previous_epoch_attestations: Vec<&PendingAttestation> = self
.latest_attestations
.par_iter()
.filter(|a| {
//TODO: ensure these saturating subs are correct.
(a.data.slot / spec.epoch_length).epoch(spec.epoch_length)
== self.previous_epoch(spec)
})
.collect();
debug!(
"previous epoch attestations: {}",
previous_epoch_attestations.len()
);
let previous_epoch_attester_indices =
self.get_attestation_participants_union(&previous_epoch_attestations[..], spec)?;
let previous_total_balance = self.get_total_balance(
&get_active_validator_indices(&self.validator_registry, previous_epoch),
spec,
);
/*
* Validators targeting the previous justified epoch
*/
let previous_epoch_justified_attestations: Vec<&PendingAttestation> = {
let mut a: Vec<&PendingAttestation> = current_epoch_attestations
.iter()
.filter(|a| a.data.justified_epoch == self.previous_justified_epoch)
.cloned()
.collect();
let mut b: Vec<&PendingAttestation> = previous_epoch_attestations
.iter()
.filter(|a| a.data.justified_epoch == self.previous_justified_epoch)
.cloned()
.collect();
a.append(&mut b);
a
};
let previous_epoch_justified_attester_indices = self
.get_attestation_participants_union(&previous_epoch_justified_attestations[..], spec)?;
let previous_epoch_justified_attesting_balance =
self.get_total_balance(&previous_epoch_justified_attester_indices[..], spec);
/*
* Validators justifying the epoch boundary block at the start of the previous epoch
*/
let previous_epoch_boundary_attestations: Vec<&PendingAttestation> =
previous_epoch_justified_attestations
.iter()
.filter(
|a| match self.get_block_root(self.previous_epoch_start_slot(spec), spec) {
Some(block_root) => a.data.epoch_boundary_root == *block_root,
None => unreachable!(),
},
)
.cloned()
.collect();
let previous_epoch_boundary_attester_indices = self
.get_attestation_participants_union(&previous_epoch_boundary_attestations[..], spec)?;
let previous_epoch_boundary_attesting_balance =
self.get_total_balance(&previous_epoch_boundary_attester_indices[..], spec);
/*
* Validators attesting to the expected beacon chain head during the previous epoch.
*/
let previous_epoch_head_attestations: Vec<&PendingAttestation> =
previous_epoch_attestations
.iter()
.filter(|a| match self.get_block_root(a.data.slot, spec) {
Some(block_root) => a.data.beacon_block_root == *block_root,
None => unreachable!(),
})
.cloned()
.collect();
let previous_epoch_head_attester_indices =
self.get_attestation_participants_union(&previous_epoch_head_attestations[..], spec)?;
let previous_epoch_head_attesting_balance =
self.get_total_balance(&previous_epoch_head_attester_indices[..], spec);
debug!(
"previous_epoch_head_attester_balance of {} wei.",
previous_epoch_head_attesting_balance
);
/*
* Eth1 Data
*/
if self.next_epoch(spec) % spec.eth1_data_voting_period == 0 {
for eth1_data_vote in &self.eth1_data_votes {
if eth1_data_vote.vote_count * 2 > spec.eth1_data_voting_period {
self.latest_eth1_data = eth1_data_vote.eth1_data.clone();
}
}
self.eth1_data_votes = vec![];
}
/*
* Justification
*/
let mut new_justified_epoch = self.justified_epoch;
self.justification_bitfield <<= 1;
// If > 2/3 of the total balance attested to the previous epoch boundary
//
// - Set the 2nd bit of the bitfield.
// - Set the previous epoch to be justified.
if (3 * previous_epoch_boundary_attesting_balance) >= (2 * current_total_balance) {
self.justification_bitfield |= 2;
new_justified_epoch = previous_epoch;
trace!(">= 2/3 voted for previous epoch boundary");
}
// If > 2/3 of the total balance attested to the current epoch boundary
//
// - Set the 1st bit of the bitfield.
// - Set the current epoch to be justified.
if (3 * current_epoch_boundary_attesting_balance) >= (2 * current_total_balance) {
self.justification_bitfield |= 1;
new_justified_epoch = current_epoch;
trace!(">= 2/3 voted for current epoch boundary");
}
// If:
//
// - All three epochs prior to this epoch have been justified.
// - The previously justified epoch was three epochs ago.
//
// Then, set the finalized epoch to be three epochs ago.
if ((self.justification_bitfield >> 1) % 8 == 0b111)
& (self.previous_justified_epoch == previous_epoch - 2)
{
self.finalized_epoch = self.previous_justified_epoch;
trace!("epoch - 3 was finalized (1st condition).");
}
// If:
//
// - The two epochs prior to this epoch have both been justified.
// - The previous justified epoch was two epochs ago.
//
// Then, set the finalized epoch to two epochs ago.
if ((self.justification_bitfield >> 1) % 4 == 0b11)
& (self.previous_justified_epoch == previous_epoch - 1)
{
self.finalized_epoch = self.previous_justified_epoch;
trace!("epoch - 2 was finalized (2nd condition).");
}
// If:
//
// - This epoch and the two prior have been justified.
// - The presently justified epoch was two epochs ago.
//
// Then, set the finalized epoch to two epochs ago.
if (self.justification_bitfield % 8 == 0b111) & (self.justified_epoch == previous_epoch - 1)
{
self.finalized_epoch = self.justified_epoch;
trace!("epoch - 2 was finalized (3rd condition).");
}
// If:
//
// - This epoch and the epoch prior to it have been justified.
// - The presently justified epoch was the previous epoch.
//
// Then, set the finalized epoch to be the previous epoch.
if (self.justification_bitfield % 4 == 0b11) & (self.justified_epoch == previous_epoch) {
self.finalized_epoch = self.justified_epoch;
trace!("epoch - 1 was finalized (4th condition).");
}
self.previous_justified_epoch = self.justified_epoch;
self.justified_epoch = new_justified_epoch;
debug!(
"Finalized epoch {}, justified epoch {}.",
self.finalized_epoch, self.justified_epoch
);
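// Illustrative example (not in the original code): if the three most recent epochs each
// attracted >= 2/3 boundary attestations, the shifted bitfield ends in 0b111; provided
// `previous_justified_epoch == previous_epoch - 2`, the first finalisation rule above
// fires, i.e. the chain finalises the epoch three epochs behind the current one.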
/*
* Crosslinks
*/
// Cached for later lookups.
let mut winning_root_for_shards: HashMap<u64, Result<WinningRoot, WinningRootError>> =
HashMap::new();
// for slot in self.slot.saturating_sub(2 * spec.epoch_length)..self.slot {
for slot in self.previous_epoch(spec).slot_iter(spec.epoch_length) {
trace!(
"Finding winning root for slot: {} (epoch: {})",
slot,
slot.epoch(spec.epoch_length)
);
// Clone is used to remove the borrow. It becomes an issue later when trying to mutate
// `self.balances`.
let crosslink_committees_at_slot =
self.get_crosslink_committees_at_slot(slot, spec)?.clone();
for (crosslink_committee, shard) in crosslink_committees_at_slot {
let shard = shard as u64;
let winning_root = winning_root(
self,
shard,
&current_epoch_attestations,
&previous_epoch_attestations,
spec,
);
if let Ok(winning_root) = &winning_root {
let total_committee_balance =
self.get_total_balance(&crosslink_committee[..], spec);
if (3 * winning_root.total_attesting_balance) >= (2 * total_committee_balance) {
self.latest_crosslinks[shard as usize] = Crosslink {
epoch: current_epoch,
shard_block_root: winning_root.shard_block_root,
}
}
}
winning_root_for_shards.insert(shard, winning_root);
}
}
trace!(
"Found {} winning shard roots.",
winning_root_for_shards.len()
);
/*
* Rewards and Penalties
*/
let base_reward_quotient =
previous_total_balance.integer_sqrt() / spec.base_reward_quotient;
if base_reward_quotient == 0 {
return Err(Error::BaseRewardQuotientIsZero);
}
/*
* Justification and finalization
*/
let epochs_since_finality = next_epoch - self.finalized_epoch;
let previous_epoch_justified_attester_indices_hashset: HashSet<usize> =
HashSet::from_iter(previous_epoch_justified_attester_indices.iter().cloned());
let previous_epoch_boundary_attester_indices_hashset: HashSet<usize> =
HashSet::from_iter(previous_epoch_boundary_attester_indices.iter().cloned());
let previous_epoch_head_attester_indices_hashset: HashSet<usize> =
HashSet::from_iter(previous_epoch_head_attester_indices.iter().cloned());
let previous_epoch_attester_indices_hashset: HashSet<usize> =
HashSet::from_iter(previous_epoch_attester_indices.iter().cloned());
let active_validator_indices_hashset: HashSet<usize> =
HashSet::from_iter(active_validator_indices.iter().cloned());
debug!("previous epoch justified attesters: {}, previous epoch boundary attesters: {}, previous epoch head attesters: {}, previous epoch attesters: {}", previous_epoch_justified_attester_indices.len(), previous_epoch_boundary_attester_indices.len(), previous_epoch_head_attester_indices.len(), previous_epoch_attester_indices.len());
debug!("{} epochs since finality.", epochs_since_finality);
if epochs_since_finality <= 4 {
for index in 0..self.validator_balances.len() {
let base_reward = self.base_reward(index, base_reward_quotient, spec);
if previous_epoch_justified_attester_indices_hashset.contains(&index) {
safe_add_assign!(
self.validator_balances[index],
base_reward * previous_epoch_justified_attesting_balance
/ previous_total_balance
);
} else if active_validator_indices_hashset.contains(&index) {
safe_sub_assign!(self.validator_balances[index], base_reward);
}
if previous_epoch_boundary_attester_indices_hashset.contains(&index) {
safe_add_assign!(
self.validator_balances[index],
base_reward * previous_epoch_boundary_attesting_balance
/ previous_total_balance
);
} else if active_validator_indices_hashset.contains(&index) {
safe_sub_assign!(self.validator_balances[index], base_reward);
}
if previous_epoch_head_attester_indices_hashset.contains(&index) {
safe_add_assign!(
self.validator_balances[index],
base_reward * previous_epoch_head_attesting_balance
/ previous_total_balance
);
} else if active_validator_indices_hashset.contains(&index) {
safe_sub_assign!(self.validator_balances[index], base_reward);
}
}
for index in previous_epoch_attester_indices {
let base_reward = self.base_reward(index, base_reward_quotient, spec);
let inclusion_distance =
self.inclusion_distance(&previous_epoch_attestations, index, spec)?;
safe_add_assign!(
self.validator_balances[index],
base_reward * spec.min_attestation_inclusion_delay / inclusion_distance
)
}
} else {
for index in 0..self.validator_balances.len() {
let inactivity_penalty = self.inactivity_penalty(
index,
epochs_since_finality,
base_reward_quotient,
spec,
);
if active_validator_indices_hashset.contains(&index) {
if !previous_epoch_justified_attester_indices_hashset.contains(&index) {
safe_sub_assign!(self.validator_balances[index], inactivity_penalty);
}
if !previous_epoch_boundary_attester_indices_hashset.contains(&index) {
safe_sub_assign!(self.validator_balances[index], inactivity_penalty);
}
if !previous_epoch_head_attester_indices_hashset.contains(&index) {
safe_sub_assign!(self.validator_balances[index], inactivity_penalty);
}
if self.validator_registry[index].penalized_epoch <= current_epoch {
let base_reward = self.base_reward(index, base_reward_quotient, spec);
safe_sub_assign!(
self.validator_balances[index],
2 * inactivity_penalty + base_reward
);
}
}
}
for index in previous_epoch_attester_indices {
let base_reward = self.base_reward(index, base_reward_quotient, spec);
let inclusion_distance =
self.inclusion_distance(&previous_epoch_attestations, index, spec)?;
safe_sub_assign!(
self.validator_balances[index],
base_reward
- base_reward * spec.min_attestation_inclusion_delay / inclusion_distance
);
}
}
trace!("Processed validator justification and finalization rewards/penalities.");
/*
* Attestation inclusion
*/
for &index in &previous_epoch_attester_indices_hashset {
let inclusion_slot =
self.inclusion_slot(&previous_epoch_attestations[..], index, spec)?;
let proposer_index = self
.get_beacon_proposer_index(inclusion_slot, spec)
.map_err(|_| Error::UnableToDetermineProducer)?;
let base_reward = self.base_reward(proposer_index, base_reward_quotient, spec);
safe_add_assign!(
self.validator_balances[proposer_index],
base_reward / spec.includer_reward_quotient
);
}
trace!(
"Previous epoch attesters: {}.",
previous_epoch_attester_indices_hashset.len()
);
/*
* Crosslinks
*/
for slot in self.previous_epoch(spec).slot_iter(spec.epoch_length) {
// Clone is used to remove the borrow. It becomes an issue later when trying to mutate
// `self.balances`.
let crosslink_committees_at_slot =
self.get_crosslink_committees_at_slot(slot, spec)?.clone();
for (_crosslink_committee, shard) in crosslink_committees_at_slot {
let shard = shard as u64;
if let Some(Ok(winning_root)) = winning_root_for_shards.get(&shard) {
// TODO: remove the map.
let attesting_validator_indices: HashSet<usize> = HashSet::from_iter(
winning_root.attesting_validator_indices.iter().cloned(),
);
for index in 0..self.validator_balances.len() {
let base_reward = self.base_reward(index, base_reward_quotient, spec);
if attesting_validator_indices.contains(&index) {
safe_add_assign!(
self.validator_balances[index],
base_reward * winning_root.total_attesting_balance
/ winning_root.total_balance
);
} else {
safe_sub_assign!(self.validator_balances[index], base_reward);
}
}
for index in &winning_root.attesting_validator_indices {
let base_reward = self.base_reward(*index, base_reward_quotient, spec);
safe_add_assign!(
self.validator_balances[*index],
base_reward * winning_root.total_attesting_balance
/ winning_root.total_balance
);
}
}
}
}
/*
* Ejections
*/
self.process_ejections(spec);
/*
* Validator Registry
*/
self.previous_calculation_epoch = self.current_calculation_epoch;
self.previous_epoch_start_shard = self.current_epoch_start_shard;
debug!(
"setting previous_epoch_seed to : {}",
self.current_epoch_seed
);
self.previous_epoch_seed = self.current_epoch_seed;
let should_update_validator_registy = if self.finalized_epoch
> self.validator_registry_update_epoch
{
(0..self.get_current_epoch_committee_count(spec)).all(|i| {
let shard = (self.current_epoch_start_shard + i as u64) % spec.shard_count;
self.latest_crosslinks[shard as usize].epoch > self.validator_registry_update_epoch
})
} else {
false
};
if should_update_validator_registy {
trace!("updating validator registry.");
self.update_validator_registry(spec);
self.current_calculation_epoch = next_epoch;
self.current_epoch_start_shard = (self.current_epoch_start_shard
+ self.get_current_epoch_committee_count(spec) as u64)
% spec.shard_count;
self.current_epoch_seed = self.generate_seed(self.current_calculation_epoch, spec)?
} else {
trace!("not updating validator registry.");
let epochs_since_last_registry_update =
current_epoch - self.validator_registry_update_epoch;
if (epochs_since_last_registry_update > 1)
& epochs_since_last_registry_update.is_power_of_two()
{
self.current_calculation_epoch = next_epoch;
self.current_epoch_seed =
self.generate_seed(self.current_calculation_epoch, spec)?
}
}
self.process_penalties_and_exits(spec);
self.latest_index_roots[(next_epoch.as_usize() + spec.entry_exit_delay as usize)
% spec.latest_index_roots_length] = hash_tree_root(get_active_validator_indices(
&self.validator_registry,
next_epoch + Epoch::from(spec.entry_exit_delay),
));
self.latest_penalized_balances[next_epoch.as_usize() % spec.latest_penalized_exit_length] =
self.latest_penalized_balances
[current_epoch.as_usize() % spec.latest_penalized_exit_length];
self.latest_randao_mixes[next_epoch.as_usize() % spec.latest_randao_mixes_length] = self
.get_randao_mix(current_epoch, spec)
.and_then(|x| Some(*x))
.ok_or_else(|| Error::NoRandaoSeed)?;
self.latest_attestations = self
.latest_attestations
.iter()
.filter(|a| a.data.slot.epoch(spec.epoch_length) >= current_epoch)
.cloned()
.collect();
debug!("Epoch transition complete.");
Ok(())
}
}
fn hash_tree_root<T: TreeHash>(input: Vec<T>) -> Hash256 {
Hash256::from_slice(&input.hash_tree_root()[..])
}
fn winning_root(
state: &BeaconState,
shard: u64,
current_epoch_attestations: &[&PendingAttestation],
previous_epoch_attestations: &[&PendingAttestation],
spec: &ChainSpec,
) -> Result<WinningRoot, WinningRootError> {
let mut attestations = current_epoch_attestations.to_vec();
attestations.append(&mut previous_epoch_attestations.to_vec());
let mut candidates: HashMap<Hash256, WinningRoot> = HashMap::new();
let mut highest_seen_balance = 0;
for a in &attestations {
if a.data.shard != shard {
continue;
}
let shard_block_root = &a.data.shard_block_root;
if candidates.contains_key(shard_block_root) {
continue;
}
let attesting_validator_indices = attestations
.iter()
.try_fold::<_, _, Result<_, BeaconStateError>>(vec![], |mut acc, a| {
if (a.data.shard == shard) && (a.data.shard_block_root == *shard_block_root) {
acc.append(&mut state.get_attestation_participants(
&a.data,
&a.aggregation_bitfield,
spec,
)?);
}
Ok(acc)
})?;
let total_balance: u64 = attesting_validator_indices
.iter()
.fold(0, |acc, i| acc + state.get_effective_balance(*i, spec));
let total_attesting_balance: u64 = attesting_validator_indices
.iter()
.fold(0, |acc, i| acc + state.get_effective_balance(*i, spec));
if total_attesting_balance > highest_seen_balance {
highest_seen_balance = total_attesting_balance;
}
let candidate_root = WinningRoot {
shard_block_root: *shard_block_root,
attesting_validator_indices,
total_attesting_balance,
total_balance,
};
candidates.insert(*shard_block_root, candidate_root);
}
Ok(candidates
.iter()
.filter_map(|(_hash, candidate)| {
if candidate.total_attesting_balance == highest_seen_balance {
Some(candidate)
} else {
None
}
})
.min_by_key(|candidate| candidate.shard_block_root)
.ok_or_else(|| WinningRootError::NoWinningRoot)?
// TODO: avoid clone.
.clone())
}
impl From<InclusionError> for Error {
fn from(e: InclusionError) -> Error {
Error::InclusionError(e)
}
}
impl From<BeaconStateError> for Error {
fn from(e: BeaconStateError) -> Error {
Error::BeaconStateError(e)
}
}
impl From<BeaconStateError> for WinningRootError {
fn from(e: BeaconStateError) -> WinningRootError {
WinningRootError::BeaconStateError(e)
}
}

View File

@ -1,10 +1,13 @@
mod block_processable; #[macro_use]
mod epoch_processable; mod macros;
mod slot_processable;
pub use block_processable::{ pub mod per_block_processing;
validate_attestation, validate_attestation_without_signature, BlockProcessable, pub mod per_epoch_processing;
Error as BlockProcessingError, pub mod per_slot_processing;
pub use per_block_processing::{
errors::{BlockInvalid, BlockProcessingError},
per_block_processing, per_block_processing_without_verifying_block_signature,
}; };
pub use epoch_processable::{EpochProcessable, Error as EpochProcessingError}; pub use per_epoch_processing::{errors::EpochProcessingError, per_epoch_processing};
pub use slot_processable::{Error as SlotProcessingError, SlotProcessable}; pub use per_slot_processing::{per_slot_processing, Error as SlotProcessingError};
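For downstream callers, the re-export change above implies a migration from the removed traits to free functions. A rough sketch, for block processing only (the other entry points follow the same pattern):

use state_processing::{per_block_processing, BlockProcessingError};
use types::{BeaconBlock, BeaconState, ChainSpec};

// Previously: `state.per_block_processing(&block, &spec)?` via the `BlockProcessable` trait.
// Now the state is passed explicitly to a free function.
fn apply_block(
    state: &mut BeaconState,
    block: &BeaconBlock,
    spec: &ChainSpec,
) -> Result<(), BlockProcessingError> {
    per_block_processing(state, block, spec)
}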

View File

@ -0,0 +1,24 @@
macro_rules! verify {
($condition: expr, $result: expr) => {
if !$condition {
return Err(Error::Invalid($result));
}
};
}
macro_rules! invalid {
($result: expr) => {
return Err(Error::Invalid($result));
};
}
macro_rules! safe_add_assign {
($a: expr, $b: expr) => {
$a = $a.saturating_add($b);
};
}
macro_rules! safe_sub_assign {
($a: expr, $b: expr) => {
$a = $a.saturating_sub($b);
};
}

View File

@ -0,0 +1,386 @@
use self::verify_proposer_slashing::verify_proposer_slashing;
use errors::{BlockInvalid as Invalid, BlockProcessingError as Error, IntoWithIndex};
use hashing::hash;
use log::debug;
use ssz::{ssz_encode, SignedRoot, TreeHash};
use types::*;
pub use self::verify_attester_slashing::verify_attester_slashing;
pub use validate_attestation::{validate_attestation, validate_attestation_without_signature};
pub use verify_deposit::verify_deposit;
pub use verify_exit::verify_exit;
pub use verify_transfer::{execute_transfer, verify_transfer};
pub mod errors;
mod validate_attestation;
mod verify_attester_slashing;
mod verify_deposit;
mod verify_exit;
mod verify_proposer_slashing;
mod verify_slashable_attestation;
mod verify_transfer;
// Set to `true` to check the merkle proof that a deposit is in the eth1 deposit root.
//
// Presently disabled to make testing easier.
const VERIFY_DEPOSIT_MERKLE_PROOFS: bool = false;
/// Updates the state for a new block, whilst validating that the block is valid.
///
/// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise
/// returns an error describing why the block was invalid or how the function failed to execute.
///
/// Spec v0.4.0
pub fn per_block_processing(
state: &mut BeaconState,
block: &BeaconBlock,
spec: &ChainSpec,
) -> Result<(), Error> {
per_block_processing_signature_optional(state, block, true, spec)
}
/// Updates the state for a new block, whilst validating that the block is valid, without actually
/// checking the block proposer signature.
///
/// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise
/// returns an error describing why the block was invalid or how the function failed to execute.
///
/// Spec v0.4.0
pub fn per_block_processing_without_verifying_block_signature(
state: &mut BeaconState,
block: &BeaconBlock,
spec: &ChainSpec,
) -> Result<(), Error> {
per_block_processing_signature_optional(state, block, false, spec)
}
/// Updates the state for a new block, whilst validating that the block is valid, optionally
/// checking the block proposer signature.
///
/// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise
/// returns an error describing why the block was invalid or how the function failed to execute.
///
/// Spec v0.4.0
fn per_block_processing_signature_optional(
mut state: &mut BeaconState,
block: &BeaconBlock,
should_verify_block_signature: bool,
spec: &ChainSpec,
) -> Result<(), Error> {
// Verify that `block.slot == state.slot`.
verify!(block.slot == state.slot, Invalid::StateSlotMismatch);
// Ensure the current epoch cache is built.
state.build_epoch_cache(RelativeEpoch::Current, spec)?;
if should_verify_block_signature {
verify_block_signature(&state, &block, &spec)?;
}
process_randao(&mut state, &block, &spec)?;
process_eth1_data(&mut state, &block.eth1_data)?;
process_proposer_slashings(&mut state, &block.body.proposer_slashings[..], spec)?;
process_attester_slashings(&mut state, &block.body.attester_slashings[..], spec)?;
process_attestations(&mut state, &block.body.attestations[..], spec)?;
process_deposits(&mut state, &block.body.deposits[..], spec)?;
process_exits(&mut state, &block.body.voluntary_exits[..], spec)?;
process_transfers(&mut state, &block.body.transfers[..], spec)?;
debug!("per_block_processing complete.");
Ok(())
}
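/// Illustrative only (not part of this commit): a caller distinguishing an invalid block
/// from an internal failure, using the error types defined in `errors.rs` below.
pub fn example_apply_block(state: &mut BeaconState, block: &BeaconBlock, spec: &ChainSpec) -> bool {
    match per_block_processing(state, block, spec) {
        Ok(()) => true,
        // Validation ran to completion and found the block invalid.
        Err(Error::Invalid(reason)) => {
            debug!("Rejected invalid block: {:?}", reason);
            false
        }
        // Validity could not be determined (e.g. a cache or registry lookup failed).
        Err(Error::BeaconStateError(e)) => {
            debug!("BeaconStateError during block processing: {:?}", e);
            false
        }
    }
}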
/// Verifies the signature of a block.
///
/// Spec v0.4.0
pub fn verify_block_signature(
state: &BeaconState,
block: &BeaconBlock,
spec: &ChainSpec,
) -> Result<(), Error> {
let block_proposer =
&state.validator_registry[state.get_beacon_proposer_index(block.slot, spec)?];
let proposal = Proposal {
slot: block.slot,
shard: spec.beacon_chain_shard_number,
block_root: Hash256::from_slice(&block.signed_root()[..]),
signature: block.signature.clone(),
};
let domain = spec.get_domain(
block.slot.epoch(spec.slots_per_epoch),
Domain::Proposal,
&state.fork,
);
verify!(
proposal
.signature
.verify(&proposal.signed_root()[..], domain, &block_proposer.pubkey),
Invalid::BadSignature
);
Ok(())
}
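// For reference, `spec.get_domain` combines the fork version active at `epoch` with the
// domain type. A rough sketch of the v0.4.0 rule (an assumption for illustration; the
// actual `ChainSpec` method takes the `Domain` enum seen above rather than a raw integer):
//
//     fn get_domain(fork: &Fork, epoch: Epoch, domain_type: u64) -> u64 {
//         let version = if epoch < fork.epoch {
//             fork.previous_version
//         } else {
//             fork.current_version
//         };
//         (version << 32) + domain_type
//     }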
/// Verifies the `randao_reveal` against the block's proposer pubkey and updates
/// `state.latest_randao_mixes`.
///
/// Spec v0.4.0
pub fn process_randao(
state: &mut BeaconState,
block: &BeaconBlock,
spec: &ChainSpec,
) -> Result<(), Error> {
// Let `proposer = state.validator_registry[get_beacon_proposer_index(state, state.slot)]`.
let block_proposer =
&state.validator_registry[state.get_beacon_proposer_index(block.slot, spec)?];
// Verify that `bls_verify(pubkey=proposer.pubkey,
// message_hash=hash_tree_root(get_current_epoch(state)), signature=block.randao_reveal,
// domain=get_domain(state.fork, get_current_epoch(state), DOMAIN_RANDAO))`.
verify!(
block.randao_reveal.verify(
&state.current_epoch(spec).hash_tree_root()[..],
spec.get_domain(
block.slot.epoch(spec.slots_per_epoch),
Domain::Randao,
&state.fork
),
&block_proposer.pubkey
),
Invalid::BadRandaoSignature
);
// Update the state's RANDAO mix with the one revealed in the block.
update_randao(state, &block.randao_reveal, spec)?;
Ok(())
}
/// Update the `state.eth1_data_votes` based upon the `eth1_data` provided.
///
/// Spec v0.4.0
pub fn process_eth1_data(state: &mut BeaconState, eth1_data: &Eth1Data) -> Result<(), Error> {
// Either increment the eth1_data vote count, or add a new eth1_data.
let matching_eth1_vote_index = state
.eth1_data_votes
.iter()
.position(|vote| vote.eth1_data == *eth1_data);
if let Some(index) = matching_eth1_vote_index {
state.eth1_data_votes[index].vote_count += 1;
} else {
state.eth1_data_votes.push(Eth1DataVote {
eth1_data: eth1_data.clone(),
vote_count: 1,
});
}
Ok(())
}
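// Example (illustrative): starting from an empty `state.eth1_data_votes`, calling
// `process_eth1_data` twice with the same `Eth1Data` leaves a single
// `Eth1DataVote { eth1_data, vote_count: 2 }`, while a different `Eth1Data` appends a
// fresh entry with `vote_count: 1`.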
/// Updates the present randao mix.
///
/// Set `state.latest_randao_mixes[get_current_epoch(state) % LATEST_RANDAO_MIXES_LENGTH] =
/// xor(get_randao_mix(state, get_current_epoch(state)), hash(block.randao_reveal))`.
///
/// Spec v0.4.0
pub fn update_randao(
state: &mut BeaconState,
reveal: &Signature,
spec: &ChainSpec,
) -> Result<(), BeaconStateError> {
let hashed_reveal = {
let encoded_signature = ssz_encode(reveal);
Hash256::from_slice(&hash(&encoded_signature[..])[..])
};
let current_epoch = state.slot.epoch(spec.slots_per_epoch);
let current_mix = state
.get_randao_mix(current_epoch, spec)
.ok_or_else(|| BeaconStateError::InsufficientRandaoMixes)?;
let new_mix = *current_mix ^ hashed_reveal;
let index = current_epoch.as_usize() % spec.latest_randao_mixes_length;
if index < state.latest_randao_mixes.len() {
state.latest_randao_mixes[index] = new_mix;
Ok(())
} else {
Err(BeaconStateError::InsufficientRandaoMixes)
}
}
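// Worked example (illustrative): if the stored mix for the current epoch is `m` and the
// block reveals signature `s`, the new value written to
// `latest_randao_mixes[current_epoch % latest_randao_mixes_length]` is
// `m ^ hash(ssz_encode(s))`. XOR-folding each reveal into the mix, rather than
// overwriting it, means every block proposed in the epoch contributes to the final seed.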
/// Validates each `ProposerSlashing` and updates the state, short-circuiting on an invalid object.
///
/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
/// an `Err` describing the invalid object or cause of failure.
///
/// Spec v0.4.0
pub fn process_proposer_slashings(
state: &mut BeaconState,
proposer_slashings: &[ProposerSlashing],
spec: &ChainSpec,
) -> Result<(), Error> {
verify!(
proposer_slashings.len() as u64 <= spec.max_proposer_slashings,
Invalid::MaxProposerSlashingsExceeded
);
for (i, proposer_slashing) in proposer_slashings.iter().enumerate() {
verify_proposer_slashing(proposer_slashing, &state, spec)
.map_err(|e| e.into_with_index(i))?;
state.slash_validator(proposer_slashing.proposer_index as usize, spec)?;
}
Ok(())
}
/// Validates each `AttesterSlashing` and updates the state, short-circuiting on an invalid object.
///
/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
/// an `Err` describing the invalid object or cause of failure.
///
/// Spec v0.4.0
pub fn process_attester_slashings(
state: &mut BeaconState,
attester_slashings: &[AttesterSlashing],
spec: &ChainSpec,
) -> Result<(), Error> {
verify!(
attester_slashings.len() as u64 <= spec.max_attester_slashings,
Invalid::MaxAttesterSlashingsExceed
);
for (i, attester_slashing) in attester_slashings.iter().enumerate() {
let slashable_indices = verify_attester_slashing(&state, &attester_slashing, spec)
.map_err(|e| e.into_with_index(i))?;
for i in slashable_indices {
state.slash_validator(i as usize, spec)?;
}
}
Ok(())
}
/// Validates each `Attestation` and updates the state, short-circuiting on an invalid object.
///
/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
/// an `Err` describing the invalid object or cause of failure.
///
/// Spec v0.4.0
pub fn process_attestations(
state: &mut BeaconState,
attestations: &[Attestation],
spec: &ChainSpec,
) -> Result<(), Error> {
verify!(
attestations.len() as u64 <= spec.max_attestations,
Invalid::MaxAttestationsExceeded
);
for (i, attestation) in attestations.iter().enumerate() {
// Build the previous epoch cache only if required by an attestation.
if attestation.data.slot.epoch(spec.slots_per_epoch) == state.previous_epoch(spec) {
state.build_epoch_cache(RelativeEpoch::Previous, spec)?;
}
validate_attestation(state, attestation, spec).map_err(|e| e.into_with_index(i))?;
let pending_attestation = PendingAttestation {
data: attestation.data.clone(),
aggregation_bitfield: attestation.aggregation_bitfield.clone(),
custody_bitfield: attestation.custody_bitfield.clone(),
inclusion_slot: state.slot,
};
state.latest_attestations.push(pending_attestation);
}
Ok(())
}
/// Validates each `Deposit` and updates the state, short-circuiting on an invalid object.
///
/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
/// an `Err` describing the invalid object or cause of failure.
///
/// Spec v0.4.0
pub fn process_deposits(
state: &mut BeaconState,
deposits: &[Deposit],
spec: &ChainSpec,
) -> Result<(), Error> {
verify!(
deposits.len() as u64 <= spec.max_deposits,
Invalid::MaxDepositsExceeded
);
for (i, deposit) in deposits.iter().enumerate() {
verify_deposit(state, deposit, VERIFY_DEPOSIT_MERKLE_PROOFS, spec)
.map_err(|e| e.into_with_index(i))?;
state
.process_deposit(
deposit.deposit_data.deposit_input.pubkey.clone(),
deposit.deposit_data.amount,
deposit
.deposit_data
.deposit_input
.proof_of_possession
.clone(),
deposit.deposit_data.deposit_input.withdrawal_credentials,
None,
spec,
)
.map_err(|_| Error::Invalid(Invalid::DepositProcessingFailed(i)))?;
state.deposit_index += 1;
}
Ok(())
}
/// Validates each `Exit` and updates the state, short-circuiting on an invalid object.
///
/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
/// an `Err` describing the invalid object or cause of failure.
///
/// Spec v0.4.0
pub fn process_exits(
state: &mut BeaconState,
voluntary_exits: &[VoluntaryExit],
spec: &ChainSpec,
) -> Result<(), Error> {
verify!(
voluntary_exits.len() as u64 <= spec.max_voluntary_exits,
Invalid::MaxExitsExceeded
);
for (i, exit) in voluntary_exits.iter().enumerate() {
verify_exit(&state, exit, spec).map_err(|e| e.into_with_index(i))?;
state.initiate_validator_exit(exit.validator_index as usize);
}
Ok(())
}
/// Validates each `Transfer` and updates the state, short-circuiting on an invalid object.
///
/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
/// an `Err` describing the invalid object or cause of failure.
///
/// Spec v0.4.0
pub fn process_transfers(
state: &mut BeaconState,
transfers: &[Transfer],
spec: &ChainSpec,
) -> Result<(), Error> {
verify!(
transfers.len() as u64 <= spec.max_transfers,
Invalid::MaxTransfersExceed
);
for (i, transfer) in transfers.iter().enumerate() {
verify_transfer(&state, transfer, spec).map_err(|e| e.into_with_index(i))?;
execute_transfer(state, transfer, spec).map_err(|e| e.into_with_index(i))?;
}
Ok(())
}

View File

@ -0,0 +1,387 @@
use types::*;
macro_rules! impl_from_beacon_state_error {
($type: ident) => {
impl From<BeaconStateError> for $type {
fn from(e: BeaconStateError) -> $type {
$type::BeaconStateError(e)
}
}
};
}
macro_rules! impl_into_with_index_with_beacon_error {
($error_type: ident, $invalid_type: ident) => {
impl IntoWithIndex<BlockProcessingError> for $error_type {
fn into_with_index(self, i: usize) -> BlockProcessingError {
match self {
$error_type::Invalid(e) => {
BlockProcessingError::Invalid(BlockInvalid::$invalid_type(i, e))
}
$error_type::BeaconStateError(e) => BlockProcessingError::BeaconStateError(e),
}
}
}
};
}
macro_rules! impl_into_with_index_without_beacon_error {
($error_type: ident, $invalid_type: ident) => {
impl IntoWithIndex<BlockProcessingError> for $error_type {
fn into_with_index(self, i: usize) -> BlockProcessingError {
match self {
$error_type::Invalid(e) => {
BlockProcessingError::Invalid(BlockInvalid::$invalid_type(i, e))
}
}
}
}
};
}
/// A conversion that consumes `self` and adds an `index` variable to the resulting struct.
///
/// Used here to allow converting an error into an upstream error that points to the object that
/// caused the error. For example, pointing to the index of an attestation that caused the
/// `AttestationInvalid` error.
pub trait IntoWithIndex<T>: Sized {
fn into_with_index(self, index: usize) -> T;
}
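// For example (illustrative): if validating the third attestation in a block yields
// `AttestationValidationError::Invalid(AttestationInvalid::BadSignature)`, then calling
// `.into_with_index(2)` converts it into
// `BlockProcessingError::Invalid(BlockInvalid::AttestationInvalid(2, AttestationInvalid::BadSignature))`,
// which is what `process_attestations` returns via `map_err(|e| e.into_with_index(i))`.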
/*
* Block Validation
*/
/// The object is invalid or validation failed.
#[derive(Debug, PartialEq)]
pub enum BlockProcessingError {
/// Validation completed successfully and the object is invalid.
Invalid(BlockInvalid),
/// Encountered a `BeaconStateError` whilst attempting to determine validity.
BeaconStateError(BeaconStateError),
}
impl_from_beacon_state_error!(BlockProcessingError);
/// Describes why an object is invalid.
#[derive(Debug, PartialEq)]
pub enum BlockInvalid {
StateSlotMismatch,
BadSignature,
BadRandaoSignature,
MaxAttestationsExceeded,
MaxAttesterSlashingsExceed,
MaxProposerSlashingsExceeded,
MaxDepositsExceeded,
MaxExitsExceeded,
MaxTransfersExceed,
AttestationInvalid(usize, AttestationInvalid),
AttesterSlashingInvalid(usize, AttesterSlashingInvalid),
ProposerSlashingInvalid(usize, ProposerSlashingInvalid),
DepositInvalid(usize, DepositInvalid),
// TODO: merge this into the `DepositInvalid` error.
DepositProcessingFailed(usize),
ExitInvalid(usize, ExitInvalid),
TransferInvalid(usize, TransferInvalid),
}
impl Into<BlockProcessingError> for BlockInvalid {
fn into(self) -> BlockProcessingError {
BlockProcessingError::Invalid(self)
}
}
/*
* Attestation Validation
*/
/// The object is invalid or validation failed.
#[derive(Debug, PartialEq)]
pub enum AttestationValidationError {
/// Validation completed successfully and the object is invalid.
Invalid(AttestationInvalid),
/// Encountered a `BeaconStateError` whilst attempting to determine validity.
BeaconStateError(BeaconStateError),
}
/// Describes why an object is invalid.
#[derive(Debug, PartialEq)]
pub enum AttestationInvalid {
/// Attestation references a pre-genesis slot.
///
/// (genesis_slot, attestation_slot)
PreGenesis(Slot, Slot),
/// Attestation included before the inclusion delay.
///
/// (state_slot, inclusion_delay, attestation_slot)
IncludedTooEarly(Slot, u64, Slot),
/// Attestation slot is too far in the past to be included in a block.
///
/// (state_slot, attestation_slot)
IncludedTooLate(Slot, Slot),
/// Attestation justified epoch does not match the state's current or previous justified epoch.
///
/// (attestation_justified_epoch, state_epoch, used_previous_epoch)
WrongJustifiedEpoch(Epoch, Epoch, bool),
/// Attestation justified epoch root does not match root known to the state.
///
/// (state_justified_root, attestation_justified_root)
WrongJustifiedRoot(Hash256, Hash256),
/// Attestation crosslink root does not match the state crosslink root for the attestation's
/// slot.
BadLatestCrosslinkRoot,
/// The custody bitfield has some bits set `true`. This is not allowed in phase 0.
CustodyBitfieldHasSetBits,
/// There are no set bits on the attestation -- an attestation must be signed by at least one
/// validator.
AggregationBitfieldIsEmpty,
/// The custody bitfield length is not the smallest possible size to represent the committee.
///
/// (committee_len, bitfield_len)
BadCustodyBitfieldLength(usize, usize),
/// The aggregation bitfield length is not the smallest possible size to represent the committee.
///
/// (committee_len, bitfield_len)
BadAggregationBitfieldLength(usize, usize),
/// There was no known committee for the given shard in the given slot.
///
/// (attestation_data_shard, attestation_data_slot)
NoCommitteeForShard(u64, Slot),
/// The attestation signature verification failed.
BadSignature,
/// The shard block root was not set to zero. This is a phase 0 requirement.
ShardBlockRootNotZero,
}
impl_from_beacon_state_error!(AttestationValidationError);
impl_into_with_index_with_beacon_error!(AttestationValidationError, AttestationInvalid);
/*
* `AttesterSlashing` Validation
*/
/// The object is invalid or validation failed.
#[derive(Debug, PartialEq)]
pub enum AttesterSlashingValidationError {
/// Validation completed successfully and the object is invalid.
Invalid(AttesterSlashingInvalid),
/// Encountered a `BeaconStateError` whilst attempting to determine validity.
BeaconStateError(BeaconStateError),
}
/// Describes why an object is invalid.
#[derive(Debug, PartialEq)]
pub enum AttesterSlashingInvalid {
/// The attestation data is identical; an attestation cannot conflict with itself.
AttestationDataIdentical,
/// The attestations were not in conflict.
NotSlashable,
/// The first `SlashableAttestation` was invalid.
SlashableAttestation1Invalid(SlashableAttestationInvalid),
/// The second `SlashableAttestation` was invalid.
SlashableAttestation2Invalid(SlashableAttestationInvalid),
/// The validator index is unknown. One cannot slash one who does not exist.
UnknownValidator(u64),
/// There were no indices able to be slashed.
NoSlashableIndices,
}
impl_from_beacon_state_error!(AttesterSlashingValidationError);
impl_into_with_index_with_beacon_error!(AttesterSlashingValidationError, AttesterSlashingInvalid);
/*
* `SlashableAttestation` Validation
*/
/// The object is invalid or validation failed.
#[derive(Debug, PartialEq)]
pub enum SlashableAttestationValidationError {
/// Validation completed successfully and the object is invalid.
Invalid(SlashableAttestationInvalid),
}
/// Describes why an object is invalid.
#[derive(Debug, PartialEq)]
pub enum SlashableAttestationInvalid {
/// The custody bitfield has some bits set `true`. This is not allowed in phase 0.
CustodyBitfieldHasSetBits,
/// No validator indices were specified.
NoValidatorIndices,
/// The validator indices were not in increasing order.
///
/// The error occurred between the given `index` and `index + 1`.
BadValidatorIndicesOrdering(usize),
/// The custody bitfield length is not the smallest possible size to represent the validators.
///
/// (validators_len, bitfield_len)
BadCustodyBitfieldLength(usize, usize),
/// The number of slashable indices exceeds the global maximum.
///
/// (max_indices, indices_given)
MaxIndicesExceed(usize, usize),
/// The validator index is unknown. One cannot slash one who does not exist.
UnknownValidator(u64),
/// The slashable attestation aggregate signature was not valid.
BadSignature,
}
impl Into<SlashableAttestationInvalid> for SlashableAttestationValidationError {
fn into(self) -> SlashableAttestationInvalid {
match self {
SlashableAttestationValidationError::Invalid(e) => e,
}
}
}
/*
* `ProposerSlashing` Validation
*/
/// The object is invalid or validation failed.
#[derive(Debug, PartialEq)]
pub enum ProposerSlashingValidationError {
/// Validation completed successfully and the object is invalid.
Invalid(ProposerSlashingInvalid),
}
/// Describes why an object is invalid.
#[derive(Debug, PartialEq)]
pub enum ProposerSlashingInvalid {
/// The proposer index is not a known validator.
ProposerUnknown(u64),
/// The two proposals have different slots.
///
/// (proposal_1_slot, proposal_2_slot)
ProposalSlotMismatch(Slot, Slot),
/// The two proposals have different shards.
///
/// (proposal_1_shard, proposal_2_shard)
ProposalShardMismatch(u64, u64),
/// The two proposals have different block roots.
///
/// (proposal_1_root, proposal_2_root)
ProposalBlockRootMismatch(Hash256, Hash256),
/// The specified proposer has already been slashed.
ProposerAlreadySlashed,
/// The first proposal signature was invalid.
BadProposal1Signature,
/// The second proposal signature was invalid.
BadProposal2Signature,
}
impl_into_with_index_without_beacon_error!(
ProposerSlashingValidationError,
ProposerSlashingInvalid
);
/*
* `Deposit` Validation
*/
/// The object is invalid or validation failed.
#[derive(Debug, PartialEq)]
pub enum DepositValidationError {
/// Validation completed successfully and the object is invalid.
Invalid(DepositInvalid),
}
/// Describes why an object is invalid.
#[derive(Debug, PartialEq)]
pub enum DepositInvalid {
/// The deposit index does not match the state index.
///
/// (state_index, deposit_index)
BadIndex(u64, u64),
/// The specified `branch` and `index` did not form a valid proof that the deposit is included
/// in the eth1 deposit root.
BadMerkleProof,
}
impl_into_with_index_without_beacon_error!(DepositValidationError, DepositInvalid);
/*
* `Exit` Validation
*/
/// The object is invalid or validation failed.
#[derive(Debug, PartialEq)]
pub enum ExitValidationError {
/// Validation completed successfully and the object is invalid.
Invalid(ExitInvalid),
}
/// Describes why an object is invalid.
#[derive(Debug, PartialEq)]
pub enum ExitInvalid {
/// The specified validator is not in the state's validator registry.
ValidatorUnknown(u64),
/// The validator has already initiated an exit.
AlreadyExited,
/// The exit is for a future epoch.
///
/// (state_epoch, exit_epoch)
FutureEpoch(Epoch, Epoch),
/// The exit signature was not signed by the validator.
BadSignature,
}
impl_into_with_index_without_beacon_error!(ExitValidationError, ExitInvalid);
/*
* `Transfer` Validation
*/
/// The object is invalid or validation failed.
#[derive(Debug, PartialEq)]
pub enum TransferValidationError {
/// Validation completed successfully and the object is invalid.
Invalid(TransferInvalid),
/// Encountered a `BeaconStateError` whilst attempting to determine validity.
BeaconStateError(BeaconStateError),
}
/// Describes why an object is invalid.
#[derive(Debug, PartialEq)]
pub enum TransferInvalid {
/// The validator indicated by `transfer.from` is unknown.
FromValidatorUnknown(u64),
/// The validator indicated by `transfer.to` is unknown.
ToValidatorUnknown(u64),
/// The balance of `transfer.from` is insufficient.
///
/// (required, available)
FromBalanceInsufficient(u64, u64),
/// Adding `transfer.fee` to `transfer.amount` causes an overflow.
///
/// (transfer_fee, transfer_amount)
FeeOverflow(u64, u64),
/// This transfer would leave the `transfer.from` account with a balance satisfying
/// `0 < balance < min_deposit_amount`, which is not permitted.
///
/// (resulting_amount, min_deposit_amount)
InvalidResultingFromBalance(u64, u64),
/// The state slot does not match `transfer.slot`.
///
/// (state_slot, transfer_slot)
StateSlotMismatch(Slot, Slot),
/// The `transfer.from` validator has been activated and is not withdrawable.
///
/// (from_validator)
FromValidatorIneligableForTransfer(u64),
/// The validator's withdrawal credentials do not match those derived from `transfer.pubkey`.
///
/// (state_credentials, transfer_pubkey_credentials)
WithdrawalCredentialsMismatch(Hash256, Hash256),
/// The transfer was not signed by `transfer.pubkey`.
BadSignature,
/// Overflow when adding to `transfer.to` balance.
///
/// (to_balance, transfer_amount)
ToBalanceOverflow(u64, u64),
/// Overflow when adding to beacon proposer balance.
///
/// (proposer_balance, transfer_fee)
ProposerBalanceOverflow(u64, u64),
}
impl_from_beacon_state_error!(TransferValidationError);
impl_into_with_index_with_beacon_error!(TransferValidationError, TransferInvalid);

View File

@ -0,0 +1,255 @@
use super::errors::{AttestationInvalid as Invalid, AttestationValidationError as Error};
use ssz::TreeHash;
use types::beacon_state::helpers::*;
use types::*;
/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the
/// given state.
///
/// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity.
///
/// Spec v0.4.0
pub fn validate_attestation(
state: &BeaconState,
attestation: &Attestation,
spec: &ChainSpec,
) -> Result<(), Error> {
validate_attestation_signature_optional(state, attestation, spec, true)
}
/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the
/// given state, without validating the aggregate signature.
///
/// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity.
///
/// Spec v0.4.0
pub fn validate_attestation_without_signature(
state: &BeaconState,
attestation: &Attestation,
spec: &ChainSpec,
) -> Result<(), Error> {
validate_attestation_signature_optional(state, attestation, spec, false)
}
/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the
/// given state, optionally validating the aggregate signature.
///
/// Spec v0.4.0
fn validate_attestation_signature_optional(
state: &BeaconState,
attestation: &Attestation,
spec: &ChainSpec,
verify_signature: bool,
) -> Result<(), Error> {
// Verify that `attestation.data.slot >= GENESIS_SLOT`.
verify!(
attestation.data.slot >= spec.genesis_slot,
Invalid::PreGenesis(spec.genesis_slot, attestation.data.slot)
);
// Verify that `attestation.data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot`.
verify!(
attestation.data.slot + spec.min_attestation_inclusion_delay <= state.slot,
Invalid::IncludedTooEarly(
state.slot,
spec.min_attestation_inclusion_delay,
attestation.data.slot
)
);
// Verify that `state.slot < attestation.data.slot + SLOTS_PER_EPOCH`.
verify!(
state.slot < attestation.data.slot + spec.slots_per_epoch,
Invalid::IncludedTooLate(state.slot, attestation.data.slot)
);
// Verify that `attestation.data.justified_epoch` is equal to `state.justified_epoch` if
// `slot_to_epoch(attestation.data.slot + 1) >= get_current_epoch(state)`, otherwise
// `state.previous_justified_epoch`.
if (attestation.data.slot + 1).epoch(spec.slots_per_epoch) >= state.current_epoch(spec) {
verify!(
attestation.data.justified_epoch == state.justified_epoch,
Invalid::WrongJustifiedEpoch(
attestation.data.justified_epoch,
state.justified_epoch,
false
)
);
} else {
verify!(
attestation.data.justified_epoch == state.previous_justified_epoch,
Invalid::WrongJustifiedEpoch(
attestation.data.justified_epoch,
state.previous_justified_epoch,
true
)
);
}
// Verify that `attestation.data.justified_block_root` is equal to `get_block_root(state,
// get_epoch_start_slot(attestation.data.justified_epoch))`.
let justified_block_root = *state
.get_block_root(
attestation
.data
.justified_epoch
.start_slot(spec.slots_per_epoch),
&spec,
)
.ok_or(BeaconStateError::InsufficientBlockRoots)?;
verify!(
attestation.data.justified_block_root == justified_block_root,
Invalid::WrongJustifiedRoot(justified_block_root, attestation.data.justified_block_root)
);
// Verify that either:
//
// (i)`state.latest_crosslinks[attestation.data.shard] == attestation.data.latest_crosslink`,
//
// (ii) `state.latest_crosslinks[attestation.data.shard] ==
// Crosslink(crosslink_data_root=attestation.data.crosslink_data_root,
// epoch=slot_to_epoch(attestation.data.slot))`.
let potential_crosslink = Crosslink {
crosslink_data_root: attestation.data.crosslink_data_root,
epoch: attestation.data.slot.epoch(spec.slots_per_epoch),
};
verify!(
(attestation.data.latest_crosslink
== state.latest_crosslinks[attestation.data.shard as usize])
| (state.latest_crosslinks[attestation.data.shard as usize] == potential_crosslink),
Invalid::BadLatestCrosslinkRoot
);
// Get the committee for this attestation
let (committee, _shard) = state
.get_crosslink_committees_at_slot(attestation.data.slot, spec)?
.iter()
.find(|(_committee, shard)| *shard == attestation.data.shard)
.ok_or_else(|| {
Error::Invalid(Invalid::NoCommitteeForShard(
attestation.data.shard,
attestation.data.slot,
))
})?;
// Custody bitfield is all zeros (phase 0 requirement).
verify!(
attestation.custody_bitfield.num_set_bits() == 0,
Invalid::CustodyBitfieldHasSetBits
);
// Custody bitfield length is correct.
verify!(
verify_bitfield_length(&attestation.custody_bitfield, committee.len()),
Invalid::BadCustodyBitfieldLength(committee.len(), attestation.custody_bitfield.len())
);
// Aggregation bitfield isn't empty.
verify!(
attestation.aggregation_bitfield.num_set_bits() != 0,
Invalid::AggregationBitfieldIsEmpty
);
// Aggregation bitfield length is correct.
verify!(
verify_bitfield_length(&attestation.aggregation_bitfield, committee.len()),
Invalid::BadAggregationBitfieldLength(
committee.len(),
attestation.aggregation_bitfield.len()
)
);
if verify_signature {
let attestation_epoch = attestation.data.slot.epoch(spec.slots_per_epoch);
verify!(
verify_attestation_signature(
state,
committee,
attestation_epoch,
&attestation.custody_bitfield,
&attestation.data,
&attestation.aggregate_signature,
spec
),
Invalid::BadSignature
);
}
// [TO BE REMOVED IN PHASE 1] Verify that `attestation.data.crosslink_data_root == ZERO_HASH`.
verify!(
attestation.data.crosslink_data_root == spec.zero_hash,
Invalid::ShardBlockRootNotZero
);
Ok(())
}
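The three slot checks above pin an attestation to a fixed inclusion window. The standalone sketch below restates that window with plain integers; the inclusion delay of 4 slots and epoch length of 64 are illustrative values, not read from a real `ChainSpec`.

/// Mirrors the three `verify!` slot checks above for an attestation made at `attestation_slot`
/// when the state is at `state_slot`.
fn within_inclusion_window(
    attestation_slot: u64,
    state_slot: u64,
    genesis_slot: u64,
    min_inclusion_delay: u64,
    slots_per_epoch: u64,
) -> bool {
    attestation_slot >= genesis_slot
        && attestation_slot + min_inclusion_delay <= state_slot
        && state_slot < attestation_slot + slots_per_epoch
}

fn main() {
    let (genesis, delay, epoch_len) = (0, 4, 64); // illustrative parameters
    assert!(!within_inclusion_window(100, 103, genesis, delay, epoch_len)); // included too early
    assert!(within_inclusion_window(100, 104, genesis, delay, epoch_len)); // earliest valid slot
    assert!(within_inclusion_window(100, 163, genesis, delay, epoch_len)); // latest valid slot
    assert!(!within_inclusion_window(100, 164, genesis, delay, epoch_len)); // included too late
}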
/// Verifies an aggregate signature for some given `AttestationData`, returning `true` if the
/// `aggregate_signature` is valid.
///
/// Returns `false` if:
/// - `aggregate_signature` was not signed correctly.
/// - `custody_bitfield` does not have a bit for each index of `committee`.
/// - A `validator_index` in `committee` is not in `state.validator_registry`.
///
/// Spec v0.4.0
fn verify_attestation_signature(
state: &BeaconState,
committee: &[usize],
attestation_epoch: Epoch,
custody_bitfield: &Bitfield,
attestation_data: &AttestationData,
aggregate_signature: &AggregateSignature,
spec: &ChainSpec,
) -> bool {
let mut aggregate_pubs = vec![AggregatePublicKey::new(); 2];
let mut message_exists = vec![false; 2];
for (i, v) in committee.iter().enumerate() {
let custody_bit = match custody_bitfield.get(i) {
Ok(bit) => bit,
// Invalidate the signature if the custody bitfield is shorter than the committee.
Err(_) => return false,
};
message_exists[custody_bit as usize] = true;
match state.validator_registry.get(*v as usize) {
Some(validator) => {
aggregate_pubs[custody_bit as usize].add(&validator.pubkey);
}
// Invalidate signature if validator index is unknown.
None => return false,
};
}
// Message when custody bitfield is `false`
let message_0 = AttestationDataAndCustodyBit {
data: attestation_data.clone(),
custody_bit: false,
}
.hash_tree_root();
// Message when custody bitfield is `true`
let message_1 = AttestationDataAndCustodyBit {
data: attestation_data.clone(),
custody_bit: true,
}
.hash_tree_root();
let mut messages = vec![];
let mut keys = vec![];
// If any validator signed a message with a `false` custody bit.
if message_exists[0] {
messages.push(&message_0[..]);
keys.push(&aggregate_pubs[0]);
}
// If any validator signed a message with a `true` custody bit.
if message_exists[1] {
messages.push(&message_1[..]);
keys.push(&aggregate_pubs[1]);
}
let domain = spec.get_domain(attestation_epoch, Domain::Attestation, &state.fork);
aggregate_signature.verify_multiple(&messages[..], domain, &keys[..])
}

View File

@ -0,0 +1,49 @@
use super::errors::{AttesterSlashingInvalid as Invalid, AttesterSlashingValidationError as Error};
use super::verify_slashable_attestation::verify_slashable_attestation;
use types::*;
/// Indicates if an `AttesterSlashing` is valid to be included in a block in the current epoch of the given
/// state.
///
/// Returns `Ok(())` if the `AttesterSlashing` is valid, otherwise indicates the reason for invalidity.
///
/// Spec v0.4.0
pub fn verify_attester_slashing(
state: &BeaconState,
attester_slashing: &AttesterSlashing,
spec: &ChainSpec,
) -> Result<Vec<u64>, Error> {
let slashable_attestation_1 = &attester_slashing.slashable_attestation_1;
let slashable_attestation_2 = &attester_slashing.slashable_attestation_2;
verify!(
slashable_attestation_1.data != slashable_attestation_2.data,
Invalid::AttestationDataIdentical
);
verify!(
slashable_attestation_1.is_double_vote(slashable_attestation_2, spec)
| slashable_attestation_1.is_surround_vote(slashable_attestation_2, spec),
Invalid::NotSlashable
);
verify_slashable_attestation(state, &slashable_attestation_1, spec)
.map_err(|e| Error::Invalid(Invalid::SlashableAttestation1Invalid(e.into())))?;
verify_slashable_attestation(state, &slashable_attestation_2, spec)
.map_err(|e| Error::Invalid(Invalid::SlashableAttestation2Invalid(e.into())))?;
let mut slashable_indices = vec![];
for i in &slashable_attestation_1.validator_indices {
let validator = state
.validator_registry
.get(*i as usize)
.ok_or_else(|| Error::Invalid(Invalid::UnknownValidator(*i)))?;
// Only indices that appear in both attestations, and are not already slashed, are slashable.
if slashable_attestation_2.validator_indices.contains(&i) & !validator.slashed {
slashable_indices.push(*i);
}
}
verify!(!slashable_indices.is_empty(), Invalid::NoSlashableIndices);
Ok(slashable_indices)
}
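The double-vote and surround-vote conditions referenced above can be restated over plain (source, target) epoch pairs; the `Vote` struct and epoch numbers below are illustrative stand-ins rather than the crate's `SlashableAttestation` API.

/// The (source, target) epochs of a vote: `source_epoch` stands in for `data.justified_epoch`
/// and `target_epoch` for `slot_to_epoch(data.slot)`.
#[derive(Clone, Copy)]
struct Vote {
    source_epoch: u64,
    target_epoch: u64,
}

/// Two distinct votes for the same target epoch.
fn is_double_vote(a: Vote, b: Vote) -> bool {
    a.target_epoch == b.target_epoch
}

/// `a` surrounds `b`: `a` sources earlier and targets later than `b`.
fn is_surround_vote(a: Vote, b: Vote) -> bool {
    a.source_epoch < b.source_epoch && b.target_epoch < a.target_epoch
}

fn main() {
    let a = Vote { source_epoch: 3, target_epoch: 7 };
    let b = Vote { source_epoch: 4, target_epoch: 6 };
    assert!(is_surround_vote(a, b));
    assert!(is_double_vote(a, Vote { source_epoch: 2, target_epoch: 7 }));
}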

View File

@ -0,0 +1,73 @@
use super::errors::{DepositInvalid as Invalid, DepositValidationError as Error};
use hashing::hash;
use merkle_proof::verify_merkle_proof;
use ssz::ssz_encode;
use ssz_derive::Encode;
use types::*;
/// Indicates if a `Deposit` is valid to be included in a block in the current epoch of the given
/// state.
///
/// Returns `Ok(())` if the `Deposit` is valid, otherwise indicates the reason for invalidity.
///
/// Note: this function is incomplete.
///
/// Spec v0.4.0
pub fn verify_deposit(
state: &BeaconState,
deposit: &Deposit,
verify_merkle_branch: bool,
spec: &ChainSpec,
) -> Result<(), Error> {
verify!(
deposit.index == state.deposit_index,
Invalid::BadIndex(state.deposit_index, deposit.index)
);
if verify_merkle_branch {
verify!(
verify_deposit_merkle_proof(state, deposit, spec),
Invalid::BadMerkleProof
);
}
Ok(())
}
/// Verify that a deposit is included in the state's eth1 deposit root.
///
/// Spec v0.4.0
fn verify_deposit_merkle_proof(state: &BeaconState, deposit: &Deposit, spec: &ChainSpec) -> bool {
let leaf = hash(&get_serialized_deposit_data(deposit));
verify_merkle_proof(
Hash256::from_slice(&leaf),
&deposit.branch,
spec.deposit_contract_tree_depth as usize,
deposit.index as usize,
state.latest_eth1_data.deposit_root,
)
}
/// Helper struct for easily getting the serialized data generated by the deposit contract.
///
/// Spec v0.4.0
#[derive(Encode)]
struct SerializedDepositData {
amount: u64,
timestamp: u64,
input: DepositInput,
}
/// Return the serialized data generated by the deposit contract that is used to generate the
/// merkle proof.
///
/// Spec v0.4.0
fn get_serialized_deposit_data(deposit: &Deposit) -> Vec<u8> {
let serialized_deposit_data = SerializedDepositData {
amount: deposit.deposit_data.amount,
timestamp: deposit.deposit_data.timestamp,
input: deposit.deposit_data.deposit_input.clone(),
};
ssz_encode(&serialized_deposit_data)
}
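Independent of the real 32-byte hashing, the branch walk inside `verify_merkle_proof` can be sketched as follows: at each depth the current node is hashed on the left or right according to the corresponding bit of the leaf index. The two-input hash below is a toy stand-in (std's `DefaultHasher`) used only to keep the sketch self-contained.

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Toy 64-bit parent hash; the real tree hashes 32-byte nodes via the `hashing` crate.
fn parent(left: u64, right: u64) -> u64 {
    let mut s = DefaultHasher::new();
    (left, right).hash(&mut s);
    s.finish()
}

/// Folds a merkle branch from `leaf` at position `index` up to the root.
fn root_from_branch(leaf: u64, branch: &[u64], depth: usize, index: usize) -> u64 {
    let mut node = leaf;
    for i in 0..depth {
        node = if (index >> i) & 1 == 1 {
            parent(branch[i], node) // current node is a right child
        } else {
            parent(node, branch[i]) // current node is a left child
        };
    }
    node
}

fn main() {
    // Two-leaf tree: prove `leaf1` at index 1 with the single-element branch `[leaf0]`.
    let (leaf0, leaf1) = (11, 22);
    let root = parent(leaf0, leaf1);
    assert_eq!(root_from_branch(leaf1, &[leaf0], 1, 1), root);
}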

View File

@ -0,0 +1,42 @@
use super::errors::{ExitInvalid as Invalid, ExitValidationError as Error};
use ssz::SignedRoot;
use types::*;
/// Indicates if an `Exit` is valid to be included in a block in the current epoch of the given
/// state.
///
/// Returns `Ok(())` if the `Exit` is valid, otherwise indicates the reason for invalidity.
///
/// Spec v0.4.0
pub fn verify_exit(
state: &BeaconState,
exit: &VoluntaryExit,
spec: &ChainSpec,
) -> Result<(), Error> {
let validator = state
.validator_registry
.get(exit.validator_index as usize)
.ok_or_else(|| Error::Invalid(Invalid::ValidatorUnknown(exit.validator_index)))?;
verify!(
validator.exit_epoch
> state.get_delayed_activation_exit_epoch(state.current_epoch(spec), spec),
Invalid::AlreadyExited
);
verify!(
state.current_epoch(spec) >= exit.epoch,
Invalid::FutureEpoch(state.current_epoch(spec), exit.epoch)
);
let message = exit.signed_root();
let domain = spec.get_domain(exit.epoch, Domain::Exit, &state.fork);
verify!(
exit.signature
.verify(&message[..], domain, &validator.pubkey),
Invalid::BadSignature
);
Ok(())
}
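The `AlreadyExited` check above compares the validator's `exit_epoch` against the delayed activation/exit epoch, which spec v0.4.0 defines as `epoch + 1 + ACTIVATION_EXIT_DELAY`. A small sketch with an illustrative delay of 4 epochs:

/// The earliest epoch at which an activation or exit triggered in `epoch` can take effect.
fn delayed_activation_exit_epoch(epoch: u64, activation_exit_delay: u64) -> u64 {
    epoch + 1 + activation_exit_delay
}

fn main() {
    let current_epoch = 10;
    let threshold = delayed_activation_exit_epoch(current_epoch, 4); // 15
    // A validator whose exit_epoch is at or below the threshold has already begun exiting,
    // so a further exit for it is rejected as `AlreadyExited`.
    let exit_epoch = 12;
    assert!(exit_epoch <= threshold);
}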

View File

@ -0,0 +1,87 @@
use super::errors::{ProposerSlashingInvalid as Invalid, ProposerSlashingValidationError as Error};
use ssz::SignedRoot;
use types::*;
/// Indicates if a `ProposerSlashing` is valid to be included in a block in the current epoch of the given
/// state.
///
/// Returns `Ok(())` if the `ProposerSlashing` is valid, otherwise indicates the reason for invalidity.
///
/// Spec v0.4.0
pub fn verify_proposer_slashing(
proposer_slashing: &ProposerSlashing,
state: &BeaconState,
spec: &ChainSpec,
) -> Result<(), Error> {
let proposer = state
.validator_registry
.get(proposer_slashing.proposer_index as usize)
.ok_or_else(|| {
Error::Invalid(Invalid::ProposerUnknown(proposer_slashing.proposer_index))
})?;
verify!(
proposer_slashing.proposal_1.slot == proposer_slashing.proposal_2.slot,
Invalid::ProposalSlotMismatch(
proposer_slashing.proposal_1.slot,
proposer_slashing.proposal_2.slot
)
);
verify!(
proposer_slashing.proposal_1.shard == proposer_slashing.proposal_2.shard,
Invalid::ProposalShardMismatch(
proposer_slashing.proposal_1.shard,
proposer_slashing.proposal_2.shard
)
);
verify!(
proposer_slashing.proposal_1.block_root != proposer_slashing.proposal_2.block_root,
Invalid::ProposalBlockRootMismatch(
proposer_slashing.proposal_1.block_root,
proposer_slashing.proposal_2.block_root
)
);
verify!(!proposer.slashed, Invalid::ProposerAlreadySlashed);
verify!(
verify_proposal_signature(
&proposer_slashing.proposal_1,
&proposer.pubkey,
&state.fork,
spec
),
Invalid::BadProposal1Signature
);
verify!(
verify_proposal_signature(
&proposer_slashing.proposal_2,
&proposer.pubkey,
&state.fork,
spec
),
Invalid::BadProposal2Signature
);
Ok(())
}
/// Verifies the signature of a proposal.
///
/// Returns `true` if the signature is valid.
fn verify_proposal_signature(
proposal: &Proposal,
pubkey: &PublicKey,
fork: &Fork,
spec: &ChainSpec,
) -> bool {
let message = proposal.signed_root();
let domain = spec.get_domain(
proposal.slot.epoch(spec.slots_per_epoch),
Domain::Proposal,
fork,
);
proposal.signature.verify(&message[..], domain, pubkey)
}
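Signatures aside, the slashing conditions above reduce to three comparisons: the two proposals share a slot and a shard but commit to different block roots. Restated over plain values, with `u64`s standing in for `Hash256` roots:

/// True when two proposals constitute slashable evidence.
fn proposals_conflict(slots: (u64, u64), shards: (u64, u64), roots: (u64, u64)) -> bool {
    slots.0 == slots.1 && shards.0 == shards.1 && roots.0 != roots.1
}

fn main() {
    assert!(proposals_conflict((5, 5), (1, 1), (0xaa, 0xbb))); // same slot/shard, different roots
    assert!(!proposals_conflict((5, 5), (1, 1), (0xaa, 0xaa))); // identical proposals
    assert!(!proposals_conflict((5, 6), (1, 1), (0xaa, 0xbb))); // different slots
}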

View File

@ -0,0 +1,112 @@
use super::errors::{
SlashableAttestationInvalid as Invalid, SlashableAttestationValidationError as Error,
};
use ssz::TreeHash;
use types::beacon_state::helpers::verify_bitfield_length;
use types::*;
/// Indicates if a `SlashableAttestation` is valid to be included in a block in the current epoch of the given
/// state.
///
/// Returns `Ok(())` if the `SlashableAttestation` is valid, otherwise indicates the reason for invalidity.
///
/// Spec v0.4.0
pub fn verify_slashable_attestation(
state: &BeaconState,
slashable_attestation: &SlashableAttestation,
spec: &ChainSpec,
) -> Result<(), Error> {
if slashable_attestation.custody_bitfield.num_set_bits() > 0 {
invalid!(Invalid::CustodyBitfieldHasSetBits);
}
if slashable_attestation.validator_indices.is_empty() {
invalid!(Invalid::NoValidatorIndices);
}
for i in 0..(slashable_attestation.validator_indices.len() - 1) {
if slashable_attestation.validator_indices[i]
>= slashable_attestation.validator_indices[i + 1]
{
invalid!(Invalid::BadValidatorIndicesOrdering(i));
}
}
if !verify_bitfield_length(
&slashable_attestation.custody_bitfield,
slashable_attestation.validator_indices.len(),
) {
invalid!(Invalid::BadCustodyBitfieldLength(
slashable_attestation.validator_indices.len(),
slashable_attestation.custody_bitfield.len()
));
}
if slashable_attestation.validator_indices.len() > spec.max_indices_per_slashable_vote as usize
{
invalid!(Invalid::MaxIndicesExceed(
spec.max_indices_per_slashable_vote as usize,
slashable_attestation.validator_indices.len()
));
}
// TODO: this signature verification could likely be replaced with:
//
// super::validate_attestation::validate_attestation_signature(..)
let mut aggregate_pubs = vec![AggregatePublicKey::new(); 2];
let mut message_exists = vec![false; 2];
for (i, v) in slashable_attestation.validator_indices.iter().enumerate() {
let custody_bit = match slashable_attestation.custody_bitfield.get(i) {
Ok(bit) => bit,
// Cannot be reached: the bitfield length was verified against `validator_indices.len()` above.
Err(_) => unreachable!(),
};
message_exists[custody_bit as usize] = true;
match state.validator_registry.get(*v as usize) {
Some(validator) => {
aggregate_pubs[custody_bit as usize].add(&validator.pubkey);
}
None => invalid!(Invalid::UnknownValidator(*v)),
};
}
let message_0 = AttestationDataAndCustodyBit {
data: slashable_attestation.data.clone(),
custody_bit: false,
}
.hash_tree_root();
let message_1 = AttestationDataAndCustodyBit {
data: slashable_attestation.data.clone(),
custody_bit: true,
}
.hash_tree_root();
let mut messages = vec![];
let mut keys = vec![];
if message_exists[0] {
messages.push(&message_0[..]);
keys.push(&aggregate_pubs[0]);
}
if message_exists[1] {
messages.push(&message_1[..]);
keys.push(&aggregate_pubs[1]);
}
let domain = {
let epoch = slashable_attestation.data.slot.epoch(spec.slots_per_epoch);
spec.get_domain(epoch, Domain::Attestation, &state.fork)
};
verify!(
slashable_attestation
.aggregate_signature
.verify_multiple(&messages[..], domain, &keys[..]),
Invalid::BadSignature
);
Ok(())
}
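In isolation, the ordering rule enforced above requires validator indices to be strictly increasing, which also rules out duplicates:

/// True when every index is strictly greater than its predecessor.
fn indices_strictly_increasing(indices: &[u64]) -> bool {
    indices.windows(2).all(|w| w[0] < w[1])
}

fn main() {
    assert!(indices_strictly_increasing(&[1, 4, 9]));
    assert!(!indices_strictly_increasing(&[1, 4, 4])); // duplicate index
    assert!(!indices_strictly_increasing(&[4, 1, 9])); // out of order
}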

View File

@ -0,0 +1,135 @@
use super::errors::{TransferInvalid as Invalid, TransferValidationError as Error};
use bls::get_withdrawal_credentials;
use ssz::SignedRoot;
use types::*;
/// Indicates if a `Transfer` is valid to be included in a block in the current epoch of the given
/// state.
///
/// Returns `Ok(())` if the `Transfer` is valid, otherwise indicates the reason for invalidity.
///
/// Note: this function is incomplete.
///
/// Spec v0.4.0
pub fn verify_transfer(
state: &BeaconState,
transfer: &Transfer,
spec: &ChainSpec,
) -> Result<(), Error> {
let from_balance = *state
.validator_balances
.get(transfer.from as usize)
.ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.from)))?;
let total_amount = transfer
.amount
.checked_add(transfer.fee)
.ok_or_else(|| Error::Invalid(Invalid::FeeOverflow(transfer.amount, transfer.fee)))?;
verify!(
from_balance >= transfer.amount,
Invalid::FromBalanceInsufficient(transfer.amount, from_balance)
);
verify!(
from_balance >= transfer.fee,
Invalid::FromBalanceInsufficient(transfer.fee, from_balance)
);
verify!(
(from_balance == total_amount)
|| (from_balance >= (total_amount + spec.min_deposit_amount)),
Invalid::InvalidResultingFromBalance(from_balance - total_amount, spec.min_deposit_amount)
);
verify!(
state.slot == transfer.slot,
Invalid::StateSlotMismatch(state.slot, transfer.slot)
);
let from_validator = state
.validator_registry
.get(transfer.from as usize)
.ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.from)))?;
let epoch = state.slot.epoch(spec.slots_per_epoch);
verify!(
from_validator.is_withdrawable_at(epoch)
|| from_validator.activation_epoch == spec.far_future_epoch,
Invalid::FromValidatorIneligableForTransfer(transfer.from)
);
let transfer_withdrawal_credentials = Hash256::from_slice(
&get_withdrawal_credentials(&transfer.pubkey, spec.bls_withdrawal_prefix_byte)[..],
);
verify!(
from_validator.withdrawal_credentials == transfer_withdrawal_credentials,
Invalid::WithdrawalCredentialsMismatch(
from_validator.withdrawal_credentials,
transfer_withdrawal_credentials
)
);
let message = transfer.signed_root();
let domain = spec.get_domain(
transfer.slot.epoch(spec.slots_per_epoch),
Domain::Transfer,
&state.fork,
);
verify!(
transfer
.signature
.verify(&message[..], domain, &transfer.pubkey),
Invalid::BadSignature
);
Ok(())
}
/// Executes a transfer on the state.
///
/// Does not check that the transfer is valid; however, all balance arithmetic is checked for overflow.
///
/// Spec v0.4.0
pub fn execute_transfer(
state: &mut BeaconState,
transfer: &Transfer,
spec: &ChainSpec,
) -> Result<(), Error> {
let from_balance = *state
.validator_balances
.get(transfer.from as usize)
.ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.from)))?;
let to_balance = *state
.validator_balances
.get(transfer.to as usize)
.ok_or_else(|| Error::Invalid(Invalid::ToValidatorUnknown(transfer.to)))?;
let proposer_index = state.get_beacon_proposer_index(state.slot, spec)?;
let proposer_balance = state.validator_balances[proposer_index];
let total_amount = transfer
.amount
.checked_add(transfer.fee)
.ok_or_else(|| Error::Invalid(Invalid::FeeOverflow(transfer.amount, transfer.fee)))?;
state.validator_balances[transfer.from as usize] =
from_balance.checked_sub(total_amount).ok_or_else(|| {
Error::Invalid(Invalid::FromBalanceInsufficient(total_amount, from_balance))
})?;
state.validator_balances[transfer.to as usize] = to_balance
.checked_add(transfer.amount)
.ok_or_else(|| Error::Invalid(Invalid::ToBalanceOverflow(to_balance, transfer.amount)))?;
state.validator_balances[proposer_index] =
proposer_balance.checked_add(transfer.fee).ok_or_else(|| {
Error::Invalid(Invalid::ProposerBalanceOverflow(
proposer_balance,
transfer.fee,
))
})?;
Ok(())
}
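The balance rules enforced by `verify_transfer` can be restated over plain `u64` Gwei amounts; overflow of `amount + fee` is treated as invalid, mirroring the `checked_add` above. The amounts below are illustrative only.

/// True when `from_balance` can cover the transfer without leaving a dust balance
/// below `min_deposit`.
fn transfer_balance_ok(from_balance: u64, amount: u64, fee: u64, min_deposit: u64) -> bool {
    let total = match amount.checked_add(fee) {
        Some(t) => t,
        None => return false, // amount + fee overflows
    };
    from_balance >= amount
        && from_balance >= fee
        && (from_balance == total || from_balance >= total + min_deposit)
}

fn main() {
    let min_deposit = 1_000_000_000; // illustrative 1 ETH, in Gwei
    assert!(transfer_balance_ok(5_000_000_000, 4_000_000_000, 1_000_000_000, min_deposit)); // spends the balance exactly
    assert!(!transfer_balance_ok(5_000_000_000, 4_000_000_000, 500_000_000, min_deposit)); // would leave dust
    assert!(!transfer_balance_ok(5_000_000_000, u64::max_value(), 1, min_deposit)); // amount + fee overflows
}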

View File

@ -0,0 +1,491 @@
use attester_sets::AttesterSets;
use errors::EpochProcessingError as Error;
use inclusion_distance::{inclusion_distance, inclusion_slot};
use integer_sqrt::IntegerSquareRoot;
use log::debug;
use rayon::prelude::*;
use ssz::TreeHash;
use std::collections::{HashMap, HashSet};
use std::iter::FromIterator;
use types::{validator_registry::get_active_validator_indices, *};
use winning_root::{winning_root, WinningRoot};
pub mod attester_sets;
pub mod errors;
pub mod inclusion_distance;
pub mod tests;
pub mod winning_root;
pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> {
let current_epoch = state.current_epoch(spec);
let previous_epoch = state.previous_epoch(spec);
let next_epoch = state.next_epoch(spec);
debug!(
"Starting per-epoch processing on epoch {}...",
state.current_epoch(spec)
);
// Ensure all of the caches are built.
state.build_epoch_cache(RelativeEpoch::Previous, spec)?;
state.build_epoch_cache(RelativeEpoch::Current, spec)?;
state.build_epoch_cache(RelativeEpoch::Next, spec)?;
let attesters = AttesterSets::new(&state, spec)?;
let active_validator_indices = get_active_validator_indices(
&state.validator_registry,
state.slot.epoch(spec.slots_per_epoch),
);
let current_total_balance = state.get_total_balance(&active_validator_indices[..], spec);
let previous_total_balance = state.get_total_balance(
&get_active_validator_indices(&state.validator_registry, previous_epoch)[..],
spec,
);
process_eth1_data(state, spec);
process_justification(
state,
current_total_balance,
previous_total_balance,
attesters.previous_epoch_boundary.balance,
attesters.current_epoch_boundary.balance,
spec,
);
// Crosslinks
let winning_root_for_shards = process_crosslinks(state, spec)?;
// Rewards and Penalties
let active_validator_indices_hashset: HashSet<usize> =
HashSet::from_iter(active_validator_indices.iter().cloned());
process_rewards_and_penalities(
state,
active_validator_indices_hashset,
&attesters,
previous_total_balance,
&winning_root_for_shards,
spec,
)?;
// Ejections
state.process_ejections(spec);
// Validator Registry
process_validator_registry(state, spec)?;
// Final updates
let active_tree_root = get_active_validator_indices(
&state.validator_registry,
next_epoch + Epoch::from(spec.activation_exit_delay),
)
.hash_tree_root();
state.latest_active_index_roots[(next_epoch.as_usize()
+ spec.activation_exit_delay as usize)
% spec.latest_active_index_roots_length] = Hash256::from_slice(&active_tree_root[..]);
state.latest_slashed_balances[next_epoch.as_usize() % spec.latest_slashed_exit_length] =
state.latest_slashed_balances[current_epoch.as_usize() % spec.latest_slashed_exit_length];
state.latest_randao_mixes[next_epoch.as_usize() % spec.latest_randao_mixes_length] = state
.get_randao_mix(current_epoch, spec)
.cloned()
.ok_or_else(|| Error::NoRandaoSeed)?;
state.latest_attestations = state
.latest_attestations
.iter()
.filter(|a| a.data.slot.epoch(spec.slots_per_epoch) >= current_epoch)
.cloned()
.collect();
// Rotate the epoch caches to suit the epoch transition.
state.advance_caches();
debug!("Epoch transition complete.");
Ok(())
}
/// Spec v0.4.0
fn process_eth1_data(state: &mut BeaconState, spec: &ChainSpec) {
let next_epoch = state.next_epoch(spec);
let voting_period = spec.epochs_per_eth1_voting_period;
if next_epoch % voting_period == 0 {
for eth1_data_vote in &state.eth1_data_votes {
if eth1_data_vote.vote_count * 2 > voting_period {
state.latest_eth1_data = eth1_data_vote.eth1_data.clone();
}
}
state.eth1_data_votes = vec![];
}
}
/// Spec v0.4.0
fn process_justification(
state: &mut BeaconState,
current_total_balance: u64,
previous_total_balance: u64,
previous_epoch_boundary_attesting_balance: u64,
current_epoch_boundary_attesting_balance: u64,
spec: &ChainSpec,
) {
let previous_epoch = state.previous_epoch(spec);
let current_epoch = state.current_epoch(spec);
let mut new_justified_epoch = state.justified_epoch;
state.justification_bitfield <<= 1;
// If > 2/3 of the total balance attested to the previous epoch boundary
//
// - Set the 2nd bit of the bitfield.
// - Set the previous epoch to be justified.
if (3 * previous_epoch_boundary_attesting_balance) >= (2 * previous_total_balance) {
state.justification_bitfield |= 2;
new_justified_epoch = previous_epoch;
}
// If > 2/3 of the total balance attested to the current epoch boundary
//
// - Set the 1st bit of the bitfield.
// - Set the current epoch to be justified.
if (3 * current_epoch_boundary_attesting_balance) >= (2 * current_total_balance) {
state.justification_bitfield |= 1;
new_justified_epoch = current_epoch;
}
// If:
//
// - All three epochs prior to this epoch have been justified.
// - The previously justified epoch was three epochs ago.
//
// Then, set the finalized epoch to be three epochs ago.
if ((state.justification_bitfield >> 1) % 8 == 0b111)
& (state.previous_justified_epoch == previous_epoch - 2)
{
state.finalized_epoch = state.previous_justified_epoch;
}
// If:
//
// - Both of the two epochs prior to this one have been justified.
// - The previous justified epoch was two epochs ago.
//
// Then, set the finalized epoch to two epochs ago.
if ((state.justification_bitfield >> 1) % 4 == 0b11)
& (state.previous_justified_epoch == previous_epoch - 1)
{
state.finalized_epoch = state.previous_justified_epoch;
}
// If:
//
// - This epoch and the two prior have been justified.
// - The presently justified epoch was two epochs ago.
//
// Then, set the finalized epoch to two epochs ago.
if (state.justification_bitfield % 8 == 0b111) & (state.justified_epoch == previous_epoch - 1) {
state.finalized_epoch = state.justified_epoch;
}
// If:
//
// - This epoch and the epoch prior to it have been justified.
// - The presently justified epoch is the previous epoch.
//
// Then, set the finalized epoch to be the previous epoch.
if (state.justification_bitfield % 4 == 0b11) & (state.justified_epoch == previous_epoch) {
state.finalized_epoch = state.justified_epoch;
}
state.previous_justified_epoch = state.justified_epoch;
state.justified_epoch = new_justified_epoch;
}
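A worked trace of the bitfield arithmetic above, with illustrative epochs: the field is shifted once per transition, the low bits record which recent epoch boundaries reached a 2/3 attesting balance, and the finality rules test short runs of those bits.

fn main() {
    let mut bitfield: u64 = 0;

    // Transition out of epoch N: both the previous and current epoch boundaries hit 2/3.
    bitfield <<= 1;
    bitfield |= 2; // previous epoch justified
    bitfield |= 1; // current epoch justified
    assert_eq!(bitfield, 0b11);

    // Transition out of epoch N + 1: both boundaries hit 2/3 again.
    bitfield <<= 1;
    bitfield |= 2;
    bitfield |= 1;
    assert_eq!(bitfield, 0b111);

    // "This epoch and the epoch prior have been justified" -> the `% 4 == 0b11` rule.
    assert_eq!(bitfield % 4, 0b11);
    // "The two epochs prior to this one have been justified" -> the shifted `% 4 == 0b11` rule.
    assert_eq!((bitfield >> 1) % 4, 0b11);
}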
pub type WinningRootHashSet = HashMap<u64, WinningRoot>;
fn process_crosslinks(
state: &mut BeaconState,
spec: &ChainSpec,
) -> Result<WinningRootHashSet, Error> {
let current_epoch_attestations: Vec<&PendingAttestation> = state
.latest_attestations
.par_iter()
.filter(|a| a.data.slot.epoch(spec.slots_per_epoch) == state.current_epoch(spec))
.collect();
let previous_epoch_attestations: Vec<&PendingAttestation> = state
.latest_attestations
.par_iter()
.filter(|a| a.data.slot.epoch(spec.slots_per_epoch) == state.previous_epoch(spec))
.collect();
let mut winning_root_for_shards: WinningRootHashSet = HashMap::new();
let previous_and_current_epoch_slots: Vec<Slot> = state
.previous_epoch(spec)
.slot_iter(spec.slots_per_epoch)
.chain(state.current_epoch(spec).slot_iter(spec.slots_per_epoch))
.collect();
for slot in previous_and_current_epoch_slots {
// Clone removes the borrow, which would otherwise prevent mutating `state` inside the loop.
let crosslink_committees_at_slot =
state.get_crosslink_committees_at_slot(slot, spec)?.clone();
for (crosslink_committee, shard) in crosslink_committees_at_slot {
let shard = shard as u64;
let winning_root = winning_root(
state,
shard,
&current_epoch_attestations[..],
&previous_epoch_attestations[..],
spec,
)?;
if let Some(winning_root) = winning_root {
let total_committee_balance = state.get_total_balance(&crosslink_committee, spec);
// TODO: I think this has a bug.
if (3 * winning_root.total_attesting_balance) >= (2 * total_committee_balance) {
state.latest_crosslinks[shard as usize] = Crosslink {
epoch: state.current_epoch(spec),
crosslink_data_root: winning_root.crosslink_data_root,
}
}
winning_root_for_shards.insert(shard, winning_root);
}
}
}
Ok(winning_root_for_shards)
}
/// Spec v0.4.0
fn process_rewards_and_penalities(
state: &mut BeaconState,
active_validator_indices: HashSet<usize>,
attesters: &AttesterSets,
previous_total_balance: u64,
winning_root_for_shards: &WinningRootHashSet,
spec: &ChainSpec,
) -> Result<(), Error> {
let next_epoch = state.next_epoch(spec);
let previous_epoch_attestations: Vec<&PendingAttestation> = state
.latest_attestations
.par_iter()
.filter(|a| a.data.slot.epoch(spec.slots_per_epoch) == state.previous_epoch(spec))
.collect();
let base_reward_quotient = previous_total_balance.integer_sqrt() / spec.base_reward_quotient;
if base_reward_quotient == 0 {
return Err(Error::BaseRewardQuotientIsZero);
}
// Justification and finalization
let epochs_since_finality = next_epoch - state.finalized_epoch;
if epochs_since_finality <= 4 {
for index in 0..state.validator_balances.len() {
let base_reward = state.base_reward(index, base_reward_quotient, spec);
// Expected FFG source
if attesters.previous_epoch.indices.contains(&index) {
safe_add_assign!(
state.validator_balances[index],
base_reward * attesters.previous_epoch.balance / previous_total_balance
);
} else if active_validator_indices.contains(&index) {
safe_sub_assign!(state.validator_balances[index], base_reward);
}
// Expected FFG target
if attesters.previous_epoch_boundary.indices.contains(&index) {
safe_add_assign!(
state.validator_balances[index],
base_reward * attesters.previous_epoch_boundary.balance
/ previous_total_balance
);
} else if active_validator_indices.contains(&index) {
safe_sub_assign!(state.validator_balances[index], base_reward);
}
// Expected beacon chain head
if attesters.previous_epoch_head.indices.contains(&index) {
safe_add_assign!(
state.validator_balances[index],
base_reward * attesters.previous_epoch_head.balance / previous_total_balance
);
} else if active_validator_indices.contains(&index) {
safe_sub_assign!(state.validator_balances[index], base_reward);
}
}
// Inclusion distance
for &index in &attesters.previous_epoch.indices {
let base_reward = state.base_reward(index, base_reward_quotient, spec);
let inclusion_distance =
inclusion_distance(state, &previous_epoch_attestations, index, spec)?;
safe_add_assign!(
state.validator_balances[index],
base_reward * spec.min_attestation_inclusion_delay / inclusion_distance
)
}
} else {
for index in 0..state.validator_balances.len() {
let inactivity_penalty =
state.inactivity_penalty(index, epochs_since_finality, base_reward_quotient, spec);
if active_validator_indices.contains(&index) {
if !attesters.previous_epoch.indices.contains(&index) {
safe_sub_assign!(state.validator_balances[index], inactivity_penalty);
}
if !attesters.previous_epoch_boundary.indices.contains(&index) {
safe_sub_assign!(state.validator_balances[index], inactivity_penalty);
}
if !attesters.previous_epoch_head.indices.contains(&index) {
safe_sub_assign!(state.validator_balances[index], inactivity_penalty);
}
if state.validator_registry[index].slashed {
let base_reward = state.base_reward(index, base_reward_quotient, spec);
safe_sub_assign!(
state.validator_balances[index],
2 * inactivity_penalty + base_reward
);
}
}
}
for &index in &attesters.previous_epoch.indices {
let base_reward = state.base_reward(index, base_reward_quotient, spec);
let inclusion_distance =
inclusion_distance(state, &previous_epoch_attestations, index, spec)?;
safe_sub_assign!(
state.validator_balances[index],
base_reward
- base_reward * spec.min_attestation_inclusion_delay / inclusion_distance
);
}
}
// Attestation inclusion
for &index in &attesters.previous_epoch.indices {
let inclusion_slot = inclusion_slot(state, &previous_epoch_attestations[..], index, spec)?;
let proposer_index = state
.get_beacon_proposer_index(inclusion_slot, spec)
.map_err(|_| Error::UnableToDetermineProducer)?;
let base_reward = state.base_reward(proposer_index, base_reward_quotient, spec);
safe_add_assign!(
state.validator_balances[proposer_index],
base_reward / spec.attestation_inclusion_reward_quotient
);
}
// Crosslinks
for slot in state.previous_epoch(spec).slot_iter(spec.slots_per_epoch) {
// Clone removes the borrow which becomes an issue when mutating `state.validator_balances`.
let crosslink_committees_at_slot =
state.get_crosslink_committees_at_slot(slot, spec)?.clone();
for (crosslink_committee, shard) in crosslink_committees_at_slot {
let shard = shard as u64;
// Note: I'm a little uncertain of the logic here -- I am waiting for spec v0.5.0 to
// clear it up.
//
// What happens here is:
//
// - If there was some crosslink root elected by the super-majority of this committee,
// then we reward all who voted for that root and penalize all that did not.
// - However, if there _was not_ some super-majority-voted crosslink root, then penalize
// all the validators.
//
// I'm not quite sure that the second case (no super-majority crosslink) is correct.
if let Some(winning_root) = winning_root_for_shards.get(&shard) {
// The hash set de-duplicates and (hopefully) offers a speed improvement from
// faster lookups.
let attesting_validator_indices: HashSet<usize> =
HashSet::from_iter(winning_root.attesting_validator_indices.iter().cloned());
for &index in &crosslink_committee {
let base_reward = state.base_reward(index, base_reward_quotient, spec);
let total_balance = state.get_total_balance(&crosslink_committee, spec);
if attesting_validator_indices.contains(&index) {
safe_add_assign!(
state.validator_balances[index],
base_reward * winning_root.total_attesting_balance / total_balance
);
} else {
safe_sub_assign!(state.validator_balances[index], base_reward);
}
}
} else {
for &index in &crosslink_committee {
let base_reward = state.base_reward(index, base_reward_quotient, spec);
safe_sub_assign!(state.validator_balances[index], base_reward);
}
}
}
}
Ok(())
}
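The inclusion-distance scaling used above rewards an attester in proportion to how quickly their attestation was included, with the fastest possible inclusion (the minimum inclusion delay) earning the full base reward. The Gwei values below are illustrative, not derived from a real state.

/// The attester's reward for an attestation included `inclusion_distance` slots after it was made.
fn inclusion_reward(base_reward: u64, min_inclusion_delay: u64, inclusion_distance: u64) -> u64 {
    base_reward * min_inclusion_delay / inclusion_distance
}

fn main() {
    let (base_reward, min_delay) = (1_000, 4);
    assert_eq!(inclusion_reward(base_reward, min_delay, 4), 1_000); // included as early as possible
    assert_eq!(inclusion_reward(base_reward, min_delay, 8), 500); // half the reward at double the delay
    assert_eq!(inclusion_reward(base_reward, min_delay, 64), 62); // integer division rounds down
}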
/// Spec v0.4.0
fn process_validator_registry(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> {
let current_epoch = state.current_epoch(spec);
let next_epoch = state.next_epoch(spec);
state.previous_shuffling_epoch = state.current_shuffling_epoch;
state.previous_shuffling_start_shard = state.current_shuffling_start_shard;
state.previous_shuffling_seed = state.current_shuffling_seed;
let should_update_validator_registry = if state.finalized_epoch
> state.validator_registry_update_epoch
{
(0..state.get_current_epoch_committee_count(spec)).all(|i| {
let shard = (state.current_shuffling_start_shard + i as u64) % spec.shard_count;
state.latest_crosslinks[shard as usize].epoch > state.validator_registry_update_epoch
})
} else {
false
};
if should_update_validator_registry {
state.update_validator_registry(spec);
state.current_shuffling_epoch = next_epoch;
state.current_shuffling_start_shard = (state.current_shuffling_start_shard
+ state.get_current_epoch_committee_count(spec) as u64)
% spec.shard_count;
state.current_shuffling_seed = state.generate_seed(state.current_shuffling_epoch, spec)?
} else {
let epochs_since_last_registry_update =
current_epoch - state.validator_registry_update_epoch;
if (epochs_since_last_registry_update > 1)
& epochs_since_last_registry_update.is_power_of_two()
{
state.current_shuffling_epoch = next_epoch;
state.current_shuffling_seed =
state.generate_seed(state.current_shuffling_epoch, spec)?
}
}
state.process_slashings(spec);
state.process_exit_queue(spec);
Ok(())
}
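The `else` branch above re-seeds the shuffling on an exponential back-off when the registry has not been updated; in isolation the schedule looks like this:

/// True in the epochs (counted since the last registry update) where the shuffling is re-seeded.
fn should_reseed(epochs_since_last_update: u64) -> bool {
    epochs_since_last_update > 1 && epochs_since_last_update.is_power_of_two()
}

fn main() {
    let reseed_epochs: Vec<u64> = (0..20).filter(|e| should_reseed(*e)).collect();
    assert_eq!(reseed_epochs, vec![2, 4, 8, 16]);
}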

View File

@ -0,0 +1,98 @@
use std::collections::HashSet;
use types::*;
#[derive(Default)]
pub struct Attesters {
pub indices: HashSet<usize>,
pub balance: u64,
}
impl Attesters {
fn add(&mut self, additional_indices: &[usize], additional_balance: u64) {
self.indices.reserve(additional_indices.len());
for i in additional_indices {
self.indices.insert(*i);
}
self.balance = self.balance.saturating_add(additional_balance);
}
}
pub struct AttesterSets {
pub current_epoch: Attesters,
pub current_epoch_boundary: Attesters,
pub previous_epoch: Attesters,
pub previous_epoch_boundary: Attesters,
pub previous_epoch_head: Attesters,
}
impl AttesterSets {
pub fn new(state: &BeaconState, spec: &ChainSpec) -> Result<Self, BeaconStateError> {
let mut current_epoch = Attesters::default();
let mut current_epoch_boundary = Attesters::default();
let mut previous_epoch = Attesters::default();
let mut previous_epoch_boundary = Attesters::default();
let mut previous_epoch_head = Attesters::default();
for a in &state.latest_attestations {
let attesting_indices =
state.get_attestation_participants(&a.data, &a.aggregation_bitfield, spec)?;
let attesting_balance = state.get_total_balance(&attesting_indices, spec);
if is_from_epoch(a, state.current_epoch(spec), spec) {
current_epoch.add(&attesting_indices, attesting_balance);
if has_common_epoch_boundary_root(a, state, state.current_epoch(spec), spec)? {
current_epoch_boundary.add(&attesting_indices, attesting_balance);
}
} else if is_from_epoch(a, state.previous_epoch(spec), spec) {
previous_epoch.add(&attesting_indices, attesting_balance);
if has_common_epoch_boundary_root(a, state, state.previous_epoch(spec), spec)? {
previous_epoch_boundary.add(&attesting_indices, attesting_balance);
}
if has_common_beacon_block_root(a, state, spec)? {
previous_epoch_head.add(&attesting_indices, attesting_balance);
}
}
}
Ok(Self {
current_epoch,
current_epoch_boundary,
previous_epoch,
previous_epoch_boundary,
previous_epoch_head,
})
}
}
fn is_from_epoch(a: &PendingAttestation, epoch: Epoch, spec: &ChainSpec) -> bool {
a.data.slot.epoch(spec.slots_per_epoch) == epoch
}
fn has_common_epoch_boundary_root(
a: &PendingAttestation,
state: &BeaconState,
epoch: Epoch,
spec: &ChainSpec,
) -> Result<bool, BeaconStateError> {
let slot = epoch.start_slot(spec.slots_per_epoch);
let state_boundary_root = *state
.get_block_root(slot, spec)
.ok_or_else(|| BeaconStateError::InsufficientBlockRoots)?;
Ok(a.data.epoch_boundary_root == state_boundary_root)
}
fn has_common_beacon_block_root(
a: &PendingAttestation,
state: &BeaconState,
spec: &ChainSpec,
) -> Result<bool, BeaconStateError> {
let state_block_root = *state
.get_block_root(a.data.slot, spec)
.ok_or_else(|| BeaconStateError::InsufficientBlockRoots)?;
Ok(a.data.beacon_block_root == state_block_root)
}
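The epoch classification used by `is_from_epoch` reduces to integer division of the attestation slot; the sketch below uses an illustrative epoch length of 64.

/// slot_to_epoch, as plain integer division.
fn slot_to_epoch(slot: u64, slots_per_epoch: u64) -> u64 {
    slot / slots_per_epoch
}

fn main() {
    let slots_per_epoch = 64; // illustrative value
    assert_eq!(slot_to_epoch(0, slots_per_epoch), 0);
    assert_eq!(slot_to_epoch(63, slots_per_epoch), 0); // last slot of epoch 0
    assert_eq!(slot_to_epoch(64, slots_per_epoch), 1); // first slot of epoch 1
}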

View File

@ -0,0 +1,36 @@
use types::*;
#[derive(Debug, PartialEq)]
pub enum EpochProcessingError {
UnableToDetermineProducer,
NoBlockRoots,
BaseRewardQuotientIsZero,
NoRandaoSeed,
BeaconStateError(BeaconStateError),
InclusionError(InclusionError),
}
impl From<InclusionError> for EpochProcessingError {
fn from(e: InclusionError) -> EpochProcessingError {
EpochProcessingError::InclusionError(e)
}
}
impl From<BeaconStateError> for EpochProcessingError {
fn from(e: BeaconStateError) -> EpochProcessingError {
EpochProcessingError::BeaconStateError(e)
}
}
#[derive(Debug, PartialEq)]
pub enum InclusionError {
/// The validator did not participate in an attestation in this period.
NoAttestationsForValidator,
BeaconStateError(BeaconStateError),
}
impl From<BeaconStateError> for InclusionError {
fn from(e: BeaconStateError) -> InclusionError {
InclusionError::BeaconStateError(e)
}
}

View File

@ -0,0 +1,61 @@
use super::errors::InclusionError;
use types::*;
/// Returns the distance (in slots) between the slot of a validator's earliest-included
/// attestation and the slot at which it was included.
///
/// Note: In the spec this is defined "inline", not as a helper function.
///
/// Spec v0.4.0
pub fn inclusion_distance(
state: &BeaconState,
attestations: &[&PendingAttestation],
validator_index: usize,
spec: &ChainSpec,
) -> Result<u64, InclusionError> {
let attestation = earliest_included_attestation(state, attestations, validator_index, spec)?;
Ok((attestation.inclusion_slot - attestation.data.slot).as_u64())
}
/// Returns the inclusion slot of the earliest-included attestation for some validator.
///
/// Note: In the spec this is defined "inline", not as a helper function.
///
/// Spec v0.4.0
pub fn inclusion_slot(
state: &BeaconState,
attestations: &[&PendingAttestation],
validator_index: usize,
spec: &ChainSpec,
) -> Result<Slot, InclusionError> {
let attestation = earliest_included_attestation(state, attestations, validator_index, spec)?;
Ok(attestation.inclusion_slot)
}
/// Finds the earliest included attestation for some validator.
///
/// Note: In the spec this is defined "inline", not as a helper function.
///
/// Spec v0.4.0
fn earliest_included_attestation(
state: &BeaconState,
attestations: &[&PendingAttestation],
validator_index: usize,
spec: &ChainSpec,
) -> Result<PendingAttestation, InclusionError> {
let mut included_attestations = vec![];
for (i, a) in attestations.iter().enumerate() {
let participants =
state.get_attestation_participants(&a.data, &a.aggregation_bitfield, spec)?;
if participants.iter().any(|i| *i == validator_index) {
included_attestations.push(i);
}
}
let earliest_attestation_index = included_attestations
.iter()
.min_by_key(|i| attestations[**i].inclusion_slot)
.ok_or_else(|| InclusionError::NoAttestationsForValidator)?;
Ok(attestations[*earliest_attestation_index].clone())
}

View File

@ -1,5 +1,5 @@
#![cfg(test)]
-use crate::EpochProcessable;
+use crate::per_epoch_processing;
use env_logger::{Builder, Env};
use types::beacon_state::BeaconStateBuilder;
use types::*;
@ -17,5 +17,5 @@ fn runs_without_error() {
let mut state = builder.cloned_state();
let spec = &builder.spec;
-state.per_epoch_processing(spec).unwrap();
+per_epoch_processing(&mut state, spec).unwrap();
}

View File

@ -0,0 +1,118 @@
use std::collections::HashSet;
use std::iter::FromIterator;
use types::*;
#[derive(Clone)]
pub struct WinningRoot {
pub crosslink_data_root: Hash256,
pub attesting_validator_indices: Vec<usize>,
pub total_attesting_balance: u64,
}
impl WinningRoot {
/// Returns `true` if `self` is a "better" candidate than `other`.
///
/// A winning root is "better" than another if it has a higher `total_attesting_balance`. Ties
/// are broken by favouring the lower `crosslink_data_root` value.
///
/// Spec v0.4.0
pub fn is_better_than(&self, other: &Self) -> bool {
if self.total_attesting_balance > other.total_attesting_balance {
true
} else if self.total_attesting_balance == other.total_attesting_balance {
self.crosslink_data_root < other.crosslink_data_root
} else {
false
}
}
}
/// Returns the `crosslink_data_root` with the highest total attesting balance for the given shard.
/// Breaks ties by favouring the smaller `crosslink_data_root` hash.
///
/// The `WinningRoot` object also contains additional fields that are useful in later stages of
/// per-epoch processing.
///
/// Spec v0.4.0
pub fn winning_root(
state: &BeaconState,
shard: u64,
current_epoch_attestations: &[&PendingAttestation],
previous_epoch_attestations: &[&PendingAttestation],
spec: &ChainSpec,
) -> Result<Option<WinningRoot>, BeaconStateError> {
let mut winning_root: Option<WinningRoot> = None;
let crosslink_data_roots: HashSet<Hash256> = HashSet::from_iter(
previous_epoch_attestations
.iter()
.chain(current_epoch_attestations.iter())
.filter_map(|a| {
if a.data.shard == shard {
Some(a.data.crosslink_data_root)
} else {
None
}
}),
);
for crosslink_data_root in crosslink_data_roots {
let attesting_validator_indices = get_attesting_validator_indices(
state,
shard,
current_epoch_attestations,
previous_epoch_attestations,
&crosslink_data_root,
spec,
)?;
let total_attesting_balance: u64 = attesting_validator_indices
.iter()
.fold(0, |acc, i| acc + state.get_effective_balance(*i, spec));
let candidate = WinningRoot {
crosslink_data_root,
attesting_validator_indices,
total_attesting_balance,
};
if let Some(ref winner) = winning_root {
if candidate.is_better_than(&winner) {
winning_root = Some(candidate);
}
} else {
winning_root = Some(candidate);
}
}
Ok(winning_root)
}
/// Returns all indices which voted for a given crosslink. May contain duplicates.
///
/// Spec v0.4.0
fn get_attesting_validator_indices(
state: &BeaconState,
shard: u64,
current_epoch_attestations: &[&PendingAttestation],
previous_epoch_attestations: &[&PendingAttestation],
crosslink_data_root: &Hash256,
spec: &ChainSpec,
) -> Result<Vec<usize>, BeaconStateError> {
let mut indices = vec![];
for a in current_epoch_attestations
.iter()
.chain(previous_epoch_attestations.iter())
{
if (a.data.shard == shard) && (a.data.crosslink_data_root == *crosslink_data_root) {
indices.append(&mut state.get_attestation_participants(
&a.data,
&a.aggregation_bitfield,
spec,
)?);
}
}
Ok(indices)
}
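The comparison documented on `WinningRoot::is_better_than`, restated over plain `(total_attesting_balance, crosslink_data_root)` tuples with `u64` roots standing in for `Hash256`:

/// True when `candidate` beats `other`: more attesting balance wins, ties go to the lower root.
fn is_better_than(candidate: (u64, u64), other: (u64, u64)) -> bool {
    if candidate.0 > other.0 {
        true
    } else if candidate.0 == other.0 {
        candidate.1 < other.1
    } else {
        false
    }
}

fn main() {
    assert!(is_better_than((64, 0xbb), (32, 0xaa))); // more attesting balance wins
    assert!(is_better_than((64, 0xaa), (64, 0xbb))); // tie broken by the lower root
    assert!(!is_better_than((64, 0xbb), (64, 0xaa)));
}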

View File

@ -0,0 +1,58 @@
use crate::*;
use types::{BeaconState, BeaconStateError, ChainSpec, Hash256};
#[derive(Debug, PartialEq)]
pub enum Error {
BeaconStateError(BeaconStateError),
EpochProcessingError(EpochProcessingError),
}
/// Advances a state forward by one slot, performing per-epoch processing if required.
///
/// Spec v0.4.0
pub fn per_slot_processing(
state: &mut BeaconState,
previous_block_root: Hash256,
spec: &ChainSpec,
) -> Result<(), Error> {
if (state.slot + 1) % spec.slots_per_epoch == 0 {
per_epoch_processing(state, spec)?;
state.advance_caches();
}
state.slot += 1;
update_block_roots(state, previous_block_root, spec);
Ok(())
}
/// Updates the state's block roots as per-slot processing is performed.
///
/// Spec v0.4.0
pub fn update_block_roots(state: &mut BeaconState, previous_block_root: Hash256, spec: &ChainSpec) {
state.latest_block_roots[(state.slot.as_usize() - 1) % spec.latest_block_roots_length] =
previous_block_root;
if state.slot.as_usize() % spec.latest_block_roots_length == 0 {
let root = merkle_root(&state.latest_block_roots[..]);
state.batched_block_roots.push(root);
}
}
fn merkle_root(_input: &[Hash256]) -> Hash256 {
// TODO: implement correctly.
Hash256::zero()
}
impl From<BeaconStateError> for Error {
fn from(e: BeaconStateError) -> Error {
Error::BeaconStateError(e)
}
}
impl From<EpochProcessingError> for Error {
fn from(e: EpochProcessingError) -> Error {
Error::EpochProcessingError(e)
}
}
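The circular-buffer indexing used by `update_block_roots` writes the root of the block from slot `s - 1` into position `(s - 1) % LATEST_BLOCK_ROOTS_LENGTH` once the state has advanced to slot `s`. A sketch with an illustrative buffer length of 8 and fake `u64` roots:

fn main() {
    const LEN: usize = 8; // illustrative, not the spec's buffer length
    let mut latest_block_roots = [0u64; LEN];

    for slot in 1..=20u64 {
        // At the start of `slot`, record the root of the block from `slot - 1`.
        let previous_block_root = 100 + slot - 1; // fake root
        latest_block_roots[(slot as usize - 1) % LEN] = previous_block_root;
    }

    // After reaching slot 20, the buffer holds the roots for slots 12..=19.
    assert_eq!(latest_block_roots[19 % LEN], 119);
    assert_eq!(latest_block_roots[12 % LEN], 112);
}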

View File

@ -1,71 +0,0 @@
use crate::{EpochProcessable, EpochProcessingError};
use types::{BeaconState, BeaconStateError, ChainSpec, Hash256};
#[derive(Debug, PartialEq)]
pub enum Error {
BeaconStateError(BeaconStateError),
EpochProcessingError(EpochProcessingError),
}
pub trait SlotProcessable {
fn per_slot_processing(
&mut self,
previous_block_root: Hash256,
spec: &ChainSpec,
) -> Result<(), Error>;
}
impl SlotProcessable for BeaconState
where
BeaconState: EpochProcessable,
{
fn per_slot_processing(
&mut self,
previous_block_root: Hash256,
spec: &ChainSpec,
) -> Result<(), Error> {
if (self.slot + 1) % spec.epoch_length == 0 {
self.per_epoch_processing(spec)?;
self.advance_caches();
}
self.slot += 1;
self.latest_randao_mixes[self.slot.as_usize() % spec.latest_randao_mixes_length] =
self.latest_randao_mixes[(self.slot.as_usize() - 1) % spec.latest_randao_mixes_length];
// Block roots.
self.latest_block_roots[(self.slot.as_usize() - 1) % spec.latest_block_roots_length] =
previous_block_root;
if self.slot.as_usize() % spec.latest_block_roots_length == 0 {
let root = merkle_root(&self.latest_block_roots[..]);
self.batched_block_roots.push(root);
}
Ok(())
}
}
fn merkle_root(_input: &[Hash256]) -> Hash256 {
Hash256::zero()
}
impl From<BeaconStateError> for Error {
fn from(e: BeaconStateError) -> Error {
Error::BeaconStateError(e)
}
}
impl From<EpochProcessingError> for Error {
fn from(e: EpochProcessingError) -> Error {
Error::EpochProcessingError(e)
}
}
#[cfg(test)]
mod tests {
#[test]
fn it_works() {
assert_eq!(2 + 2, 4);
}
}

View File

@ -10,6 +10,7 @@ boolean-bitfield = { path = "../utils/boolean-bitfield" }
ethereum-types = "0.5"
hashing = { path = "../utils/hashing" }
honey-badger-split = { path = "../utils/honey-badger-split" }
+int_to_bytes = { path = "../utils/int_to_bytes" }
log = "0.4"
rayon = "1.0"
rand = "0.5.5"
@ -21,7 +22,6 @@ ssz = { path = "../utils/ssz" }
ssz_derive = { path = "../utils/ssz_derive" }
swap_or_not_shuffle = { path = "../utils/swap_or_not_shuffle" }
test_random_derive = { path = "../utils/test_random_derive" }
-int_to_bytes = { path = "../utils/int_to_bytes" }
[dev-dependencies]
env_logger = "0.6.0"

View File

@ -1,12 +1,15 @@
-use super::{AggregatePublicKey, AggregateSignature, AttestationData, Bitfield, Hash256};
+use super::{AggregateSignature, AttestationData, Bitfield};
use crate::test_utils::TestRandom;
use rand::RngCore;
use serde_derive::Serialize;
use ssz::TreeHash;
-use ssz_derive::{Decode, Encode, TreeHash};
+use ssz_derive::{Decode, Encode, SignedRoot, TreeHash};
use test_random_derive::TestRandom;
-#[derive(Debug, Clone, PartialEq, Serialize, Encode, Decode, TreeHash, TestRandom)]
+/// Details an attestation that can be slashable.
+///
+/// Spec v0.4.0
+#[derive(Debug, Clone, PartialEq, Serialize, Encode, Decode, TreeHash, TestRandom, SignedRoot)]
pub struct Attestation {
pub aggregation_bitfield: Bitfield,
pub data: AttestationData,
@ -14,32 +17,32 @@ pub struct Attestation {
pub aggregate_signature: AggregateSignature,
}
-impl Attestation {
-pub fn canonical_root(&self) -> Hash256 {
-Hash256::from_slice(&self.hash_tree_root()[..])
-}
-pub fn signable_message(&self, custody_bit: bool) -> Vec<u8> {
-self.data.signable_message(custody_bit)
-}
-pub fn verify_signature(
-&self,
-group_public_key: &AggregatePublicKey,
-custody_bit: bool,
-domain: u64,
-) -> bool {
-self.aggregate_signature.verify(
-&self.signable_message(custody_bit),
-domain,
-group_public_key,
-)
-}
-}
#[cfg(test)]
mod tests {
use super::*;
-ssz_tests!(Attestation);
+use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
+use ssz::{ssz_encode, Decodable, TreeHash};
+#[test]
+pub fn test_ssz_round_trip() {
+let mut rng = XorShiftRng::from_seed([42; 16]);
+let original = Attestation::random_for_test(&mut rng);
+let bytes = ssz_encode(&original);
+let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
+assert_eq!(original, decoded);
+}
+#[test]
+pub fn test_hash_tree_root_internal() {
+let mut rng = XorShiftRng::from_seed([42; 16]);
+let original = Attestation::random_for_test(&mut rng);
+let result = original.hash_tree_root_internal();
+assert_eq!(result.len(), 32);
+// TODO: Add further tests
+// https://github.com/sigp/lighthouse/issues/170
+}
}

View File

@ -1,31 +1,33 @@
use crate::test_utils::TestRandom;
-use crate::{AttestationDataAndCustodyBit, Crosslink, Epoch, Hash256, Slot};
+use crate::{Crosslink, Epoch, Hash256, Slot};
use rand::RngCore;
use serde_derive::Serialize;
use ssz::TreeHash;
-use ssz_derive::{Decode, Encode, TreeHash};
+use ssz_derive::{Decode, Encode, SignedRoot, TreeHash};
use test_random_derive::TestRandom;
-pub const SSZ_ATTESTION_DATA_LENGTH: usize = {
-8 + // slot
-8 + // shard
-32 + // beacon_block_hash
-32 + // epoch_boundary_root
-32 + // shard_block_hash
-32 + // latest_crosslink_hash
-8 + // justified_epoch
-32 // justified_block_root
-};
+/// The data upon which an attestation is based.
+///
+/// Spec v0.4.0
#[derive(
-Debug, Clone, PartialEq, Default, Serialize, Hash, Encode, Decode, TreeHash, TestRandom,
+Debug,
+Clone,
+PartialEq,
+Default,
+Serialize,
+Hash,
+Encode,
+Decode,
+TreeHash,
+TestRandom,
+SignedRoot,
)]
pub struct AttestationData {
pub slot: Slot,
pub shard: u64,
pub beacon_block_root: Hash256,
pub epoch_boundary_root: Hash256,
-pub shard_block_root: Hash256,
+pub crosslink_data_root: Hash256,
pub latest_crosslink: Crosslink,
pub justified_epoch: Epoch,
pub justified_block_root: Hash256,
@ -33,23 +35,32 @@ pub struct AttestationData {
impl Eq for AttestationData {}
-impl AttestationData {
-pub fn canonical_root(&self) -> Hash256 {
-Hash256::from_slice(&self.hash_tree_root()[..])
-}
-pub fn signable_message(&self, custody_bit: bool) -> Vec<u8> {
-let attestation_data_and_custody_bit = AttestationDataAndCustodyBit {
-data: self.clone(),
-custody_bit,
-};
-attestation_data_and_custody_bit.hash_tree_root()
-}
-}
#[cfg(test)]
mod tests {
use super::*;
-ssz_tests!(AttestationData);
+use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
+use ssz::{ssz_encode, Decodable, TreeHash};
+#[test]
+pub fn test_ssz_round_trip() {
+let mut rng = XorShiftRng::from_seed([42; 16]);
+let original = AttestationData::random_for_test(&mut rng);
+let bytes = ssz_encode(&original);
+let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
+assert_eq!(original, decoded);
+}
+#[test]
+pub fn test_hash_tree_root_internal() {
+let mut rng = XorShiftRng::from_seed([42; 16]);
+let original = AttestationData::random_for_test(&mut rng);
+let result = original.hash_tree_root_internal();
+assert_eq!(result.len(), 32);
+// TODO: Add further tests
+// https://github.com/sigp/lighthouse/issues/170
+}
}

View File

@ -4,6 +4,9 @@ use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz_derive::{Decode, Encode, TreeHash}; use ssz_derive::{Decode, Encode, TreeHash};
/// Used for pairing an attestation with a proof-of-custody.
///
/// Spec v0.4.0
#[derive(Debug, Clone, PartialEq, Default, Serialize, Encode, Decode, TreeHash)] #[derive(Debug, Clone, PartialEq, Default, Serialize, Encode, Decode, TreeHash)]
pub struct AttestationDataAndCustodyBit { pub struct AttestationDataAndCustodyBit {
pub data: AttestationData, pub data: AttestationData,
@ -14,8 +17,7 @@ impl<T: RngCore> TestRandom<T> for AttestationDataAndCustodyBit {
fn random_for_test(rng: &mut T) -> Self { fn random_for_test(rng: &mut T) -> Self {
Self { Self {
data: <_>::random_for_test(rng), data: <_>::random_for_test(rng),
// TODO: deal with bools custody_bit: <_>::random_for_test(rng),
custody_bit: false,
} }
} }
} }
@ -23,6 +25,31 @@ impl<T: RngCore> TestRandom<T> for AttestationDataAndCustodyBit {
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
ssz_tests!(AttestationDataAndCustodyBit); #[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = AttestationDataAndCustodyBit::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = AttestationDataAndCustodyBit::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }

View File

@ -8,6 +8,9 @@ mod builder;
pub use builder::AttesterSlashingBuilder; pub use builder::AttesterSlashingBuilder;
/// Two conflicting attestations.
///
/// Spec v0.4.0
#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] #[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)]
pub struct AttesterSlashing { pub struct AttesterSlashing {
pub slashable_attestation_1: SlashableAttestation, pub slashable_attestation_1: SlashableAttestation,
@ -17,6 +20,29 @@ pub struct AttesterSlashing {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
ssz_tests!(AttesterSlashing); #[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = AttesterSlashing::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = AttesterSlashing::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }

View File

@ -12,16 +12,12 @@ impl AttesterSlashingBuilder {
/// - `validator_index: u64` /// - `validator_index: u64`
/// - `message: &[u8]` /// - `message: &[u8]`
/// - `epoch: Epoch` /// - `epoch: Epoch`
/// - `domain: u64` /// - `domain: Domain`
/// ///
/// Where domain is a domain "constant" (e.g., `spec.domain_attestation`). /// Where domain is a domain "constant" (e.g., `spec.domain_attestation`).
pub fn double_vote<F>( pub fn double_vote<F>(validator_indices: &[u64], signer: F) -> AttesterSlashing
validator_indices: &[u64],
signer: F,
spec: &ChainSpec,
) -> AttesterSlashing
where where
F: Fn(u64, &[u8], Epoch, u64) -> Signature, F: Fn(u64, &[u8], Epoch, Domain) -> Signature,
{ {
let double_voted_slot = Slot::new(0); let double_voted_slot = Slot::new(0);
let shard = 0; let shard = 0;
@ -37,10 +33,10 @@ impl AttesterSlashingBuilder {
shard, shard,
beacon_block_root: hash_1, beacon_block_root: hash_1,
epoch_boundary_root: hash_1, epoch_boundary_root: hash_1,
shard_block_root: hash_1, crosslink_data_root: hash_1,
latest_crosslink: Crosslink { latest_crosslink: Crosslink {
epoch, epoch,
shard_block_root: hash_1, crosslink_data_root: hash_1,
}, },
justified_epoch, justified_epoch,
justified_block_root: hash_1, justified_block_root: hash_1,
@ -56,10 +52,10 @@ impl AttesterSlashingBuilder {
shard, shard,
beacon_block_root: hash_2, beacon_block_root: hash_2,
epoch_boundary_root: hash_2, epoch_boundary_root: hash_2,
shard_block_root: hash_2, crosslink_data_root: hash_2,
latest_crosslink: Crosslink { latest_crosslink: Crosslink {
epoch, epoch,
shard_block_root: hash_2, crosslink_data_root: hash_2,
}, },
justified_epoch, justified_epoch,
justified_block_root: hash_2, justified_block_root: hash_2,
@ -75,12 +71,7 @@ impl AttesterSlashingBuilder {
custody_bit: attestation.custody_bitfield.get(i).unwrap(), custody_bit: attestation.custody_bitfield.get(i).unwrap(),
}; };
let message = attestation_data_and_custody_bit.hash_tree_root(); let message = attestation_data_and_custody_bit.hash_tree_root();
let signature = signer( let signature = signer(*validator_index, &message[..], epoch, Domain::Attestation);
*validator_index,
&message[..],
epoch,
spec.domain_attestation,
);
attestation.aggregate_signature.add(&signature); attestation.aggregate_signature.add(&signature);
} }
}; };
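The signer callback now receives a `Domain` variant instead of a raw `u64`, and the builder no longer takes a `ChainSpec`. A hedged usage sketch; the `Signature::new(message, domain, secret_key)` constructor, the builder's public path, and the keypair lookup are assumptions here:

use bls::{SecretKey, Signature};
use types::*;

fn build_attester_slashing(secret_keys: &[SecretKey], spec: &ChainSpec, fork: &Fork) -> AttesterSlashing {
    let validator_indices: Vec<u64> = (0..secret_keys.len() as u64).collect();
    AttesterSlashingBuilder::double_vote(&validator_indices, |index, message, epoch, domain| {
        // Convert the `Domain` variant into the fork-aware u64 the signature scheme expects.
        let domain = spec.get_domain(epoch, domain, fork);
        Signature::new(message, domain, &secret_keys[index as usize])
    })
}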

View File

@ -1,21 +1,24 @@
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use crate::{BeaconBlockBody, ChainSpec, Eth1Data, Hash256, ProposalSignedData, Slot}; use crate::{BeaconBlockBody, ChainSpec, Eth1Data, Hash256, Slot};
use bls::Signature; use bls::Signature;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::TreeHash; use ssz::TreeHash;
use ssz_derive::{Decode, Encode, TreeHash}; use ssz_derive::{Decode, Encode, SignedRoot, TreeHash};
use test_random_derive::TestRandom; use test_random_derive::TestRandom;
#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] /// A block of the `BeaconChain`.
///
/// Spec v0.4.0
#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom, SignedRoot)]
pub struct BeaconBlock { pub struct BeaconBlock {
pub slot: Slot, pub slot: Slot,
pub parent_root: Hash256, pub parent_root: Hash256,
pub state_root: Hash256, pub state_root: Hash256,
pub randao_reveal: Signature, pub randao_reveal: Signature,
pub eth1_data: Eth1Data, pub eth1_data: Eth1Data,
pub signature: Signature,
pub body: BeaconBlockBody, pub body: BeaconBlockBody,
pub signature: Signature,
} }
impl BeaconBlock { impl BeaconBlock {
@ -36,34 +39,44 @@ impl BeaconBlock {
attester_slashings: vec![], attester_slashings: vec![],
attestations: vec![], attestations: vec![],
deposits: vec![], deposits: vec![],
exits: vec![], voluntary_exits: vec![],
transfers: vec![],
}, },
} }
} }
/// Returns the `hash_tree_root` of the block.
pub fn canonical_root(&self) -> Hash256 { pub fn canonical_root(&self) -> Hash256 {
Hash256::from_slice(&self.hash_tree_root()[..]) Hash256::from_slice(&self.hash_tree_root()[..])
} }
pub fn proposal_root(&self, spec: &ChainSpec) -> Hash256 {
let block_without_signature_root = {
let mut block_without_signature = self.clone();
block_without_signature.signature = spec.empty_signature.clone();
block_without_signature.canonical_root()
};
let proposal = ProposalSignedData {
slot: self.slot,
shard: spec.beacon_chain_shard_number,
block_root: block_without_signature_root,
};
Hash256::from_slice(&proposal.hash_tree_root()[..])
}
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
ssz_tests!(BeaconBlock); #[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = BeaconBlock::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = BeaconBlock::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }
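`proposal_root` is gone and `BeaconBlock` now derives `SignedRoot`. A hedged sketch of the replacement idea; it assumes, as the `Proposal` test later in this diff suggests, that `signed_root()` hashes every field except `signature`, and that block proposals are now expressed via the new `Proposal` type:

use bls::Signature;
use ssz::SignedRoot;
use types::{BeaconBlock, ChainSpec, Hash256, Proposal};

/// Sketch: wrap a block's pre-signing root in a `Proposal`.
fn block_proposal(block: &BeaconBlock, spec: &ChainSpec) -> Proposal {
    Proposal {
        slot: block.slot,
        shard: spec.beacon_chain_shard_number,
        block_root: Hash256::from_slice(&block.signed_root()[..]),
        signature: Signature::empty_signature(), // replaced by the proposer's real signature
    }
}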

View File

@ -1,22 +1,49 @@
use super::{Attestation, AttesterSlashing, Deposit, Exit, ProposerSlashing}; use super::{Attestation, AttesterSlashing, Deposit, ProposerSlashing, Transfer, VoluntaryExit};
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz_derive::{Decode, Encode, TreeHash}; use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom; use test_random_derive::TestRandom;
/// The body of a `BeaconChain` block, containing operations.
///
/// Spec v0.4.0
#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode, TreeHash, TestRandom)] #[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode, TreeHash, TestRandom)]
pub struct BeaconBlockBody { pub struct BeaconBlockBody {
pub proposer_slashings: Vec<ProposerSlashing>, pub proposer_slashings: Vec<ProposerSlashing>,
pub attester_slashings: Vec<AttesterSlashing>, pub attester_slashings: Vec<AttesterSlashing>,
pub attestations: Vec<Attestation>, pub attestations: Vec<Attestation>,
pub deposits: Vec<Deposit>, pub deposits: Vec<Deposit>,
pub exits: Vec<Exit>, pub voluntary_exits: Vec<VoluntaryExit>,
pub transfers: Vec<Transfer>,
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
ssz_tests!(BeaconBlockBody); #[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = BeaconBlockBody::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = BeaconBlockBody::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }

File diff suppressed because it is too large

View File

@ -137,16 +137,16 @@ impl BeaconStateBuilder {
pub fn teleport_to_end_of_epoch(&mut self, epoch: Epoch) { pub fn teleport_to_end_of_epoch(&mut self, epoch: Epoch) {
let state = self.state.as_mut().expect("Genesis required"); let state = self.state.as_mut().expect("Genesis required");
let slot = epoch.end_slot(self.spec.epoch_length); let slot = epoch.end_slot(self.spec.slots_per_epoch);
state.slot = slot; state.slot = slot;
state.validator_registry_update_epoch = epoch - 1; state.validator_registry_update_epoch = epoch - 1;
state.previous_calculation_epoch = epoch - 1; state.previous_shuffling_epoch = epoch - 1;
state.current_calculation_epoch = epoch; state.current_shuffling_epoch = epoch;
state.previous_epoch_seed = Hash256::from([0x01; 32]); state.previous_shuffling_seed = Hash256::from_low_u64_le(0);
state.current_epoch_seed = Hash256::from([0x02; 32]); state.current_shuffling_seed = Hash256::from_low_u64_le(1);
state.previous_justified_epoch = epoch - 2; state.previous_justified_epoch = epoch - 2;
state.justified_epoch = epoch - 1; state.justified_epoch = epoch - 1;
@ -171,11 +171,11 @@ impl BeaconStateBuilder {
let current_epoch = state.current_epoch(&self.spec); let current_epoch = state.current_epoch(&self.spec);
let previous_epoch = state.previous_epoch(&self.spec); let previous_epoch = state.previous_epoch(&self.spec);
let current_epoch_depth = let current_epoch_depth =
(state.slot - current_epoch.end_slot(self.spec.epoch_length)).as_usize(); (state.slot - current_epoch.end_slot(self.spec.slots_per_epoch)).as_usize();
let previous_epoch_slots = previous_epoch.slot_iter(self.spec.epoch_length); let previous_epoch_slots = previous_epoch.slot_iter(self.spec.slots_per_epoch);
let current_epoch_slots = current_epoch let current_epoch_slots = current_epoch
.slot_iter(self.spec.epoch_length) .slot_iter(self.spec.slots_per_epoch)
.take(current_epoch_depth); .take(current_epoch_depth);
for slot in previous_epoch_slots.chain(current_epoch_slots) { for slot in previous_epoch_slots.chain(current_epoch_slots) {
@ -219,7 +219,8 @@ fn committee_to_pending_attestation(
custody_bitfield.set(i, true); custody_bitfield.set(i, true);
} }
let is_previous_epoch = state.slot.epoch(spec.epoch_length) != slot.epoch(spec.epoch_length); let is_previous_epoch =
state.slot.epoch(spec.slots_per_epoch) != slot.epoch(spec.slots_per_epoch);
let justified_epoch = if is_previous_epoch { let justified_epoch = if is_previous_epoch {
state.previous_justified_epoch state.previous_justified_epoch
@ -229,16 +230,16 @@ fn committee_to_pending_attestation(
let epoch_boundary_root = if is_previous_epoch { let epoch_boundary_root = if is_previous_epoch {
*state *state
.get_block_root(previous_epoch.start_slot(spec.epoch_length), spec) .get_block_root(previous_epoch.start_slot(spec.slots_per_epoch), spec)
.unwrap() .unwrap()
} else { } else {
*state *state
.get_block_root(current_epoch.start_slot(spec.epoch_length), spec) .get_block_root(current_epoch.start_slot(spec.slots_per_epoch), spec)
.unwrap() .unwrap()
}; };
let justified_block_root = *state let justified_block_root = *state
.get_block_root(justified_epoch.start_slot(spec.epoch_length), &spec) .get_block_root(justified_epoch.start_slot(spec.slots_per_epoch), &spec)
.unwrap(); .unwrap();
PendingAttestation { PendingAttestation {
@ -248,10 +249,10 @@ fn committee_to_pending_attestation(
shard, shard,
beacon_block_root: *state.get_block_root(slot, spec).unwrap(), beacon_block_root: *state.get_block_root(slot, spec).unwrap(),
epoch_boundary_root, epoch_boundary_root,
shard_block_root: Hash256::zero(), crosslink_data_root: Hash256::zero(),
latest_crosslink: Crosslink { latest_crosslink: Crosslink {
epoch: slot.epoch(spec.epoch_length), epoch: slot.epoch(spec.slots_per_epoch),
shard_block_root: Hash256::zero(), crosslink_data_root: Hash256::zero(),
}, },
justified_epoch, justified_epoch,
justified_block_root, justified_block_root,
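A short worked example of the `is_previous_epoch` test used above, with the `few_validators()` numbers (slots_per_epoch = 8): if the state is at slot 17 (epoch 2) and the attested slot is 14 (epoch 1), the epochs differ, so the boundary root is taken at the previous epoch's start slot, 8; for an attested slot of 18 (also epoch 2) it is taken at the current epoch's start slot, 16. Plain-integer sketch:

// is_previous_epoch for (state_slot, attested_slot) with slots_per_epoch = 8:
//   (17, 14): 17 / 8 = 2, 14 / 8 = 1  -> true,  boundary slot = 1 * 8 = 8
//   (17, 18): 17 / 8 = 2, 18 / 8 = 2  -> false, boundary slot = 2 * 8 = 16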

View File

@ -32,14 +32,14 @@ impl EpochCache {
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<EpochCache, Error> { ) -> Result<EpochCache, Error> {
let mut epoch_committees: Vec<CrosslinkCommittees> = let mut epoch_committees: Vec<CrosslinkCommittees> =
Vec::with_capacity(spec.epoch_length as usize); Vec::with_capacity(spec.slots_per_epoch as usize);
let mut attestation_duty_map: AttestationDutyMap = HashMap::new(); let mut attestation_duty_map: AttestationDutyMap = HashMap::new();
let mut shard_committee_index_map: ShardCommitteeIndexMap = HashMap::new(); let mut shard_committee_index_map: ShardCommitteeIndexMap = HashMap::new();
let shuffling = let shuffling =
state.get_shuffling_for_slot(epoch.start_slot(spec.epoch_length), false, spec)?; state.get_shuffling_for_slot(epoch.start_slot(spec.slots_per_epoch), false, spec)?;
for (epoch_committeess_index, slot) in epoch.slot_iter(spec.epoch_length).enumerate() { for (epoch_committeess_index, slot) in epoch.slot_iter(spec.slots_per_epoch).enumerate() {
let slot_committees = state.calculate_crosslink_committees_at_slot( let slot_committees = state.calculate_crosslink_committees_at_slot(
slot, slot,
false, false,

View File

@ -0,0 +1,20 @@
use crate::*;
/// Verify ``bitfield`` against the ``committee_size``.
///
/// Titled `verify_bitfield` in the spec.
///
/// Spec v0.4.0
pub fn verify_bitfield_length(bitfield: &Bitfield, committee_size: usize) -> bool {
if bitfield.num_bytes() != ((committee_size + 7) / 8) {
return false;
}
for i in committee_size..(bitfield.num_bytes() * 8) {
if bitfield.get(i).expect("Impossible due to previous check.") {
return false;
}
}
true
}
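A worked example of the byte-length rule above, on plain integers; the padding loop then rejects any set bit at index >= committee_size:

/// Sketch of the expected byte count for a committee of the given size.
fn expected_bitfield_bytes(committee_size: usize) -> usize {
    (committee_size + 7) / 8
}

#[test]
fn bitfield_length_arithmetic() {
    assert_eq!(expected_bitfield_bytes(1), 1); // a single attester still needs a whole byte
    assert_eq!(expected_bitfield_bytes(8), 1); // exactly one byte, no padding bits
    assert_eq!(expected_bitfield_bytes(10), 2); // bits 10..=15 are padding and must stay unset
}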

View File

@ -3,6 +3,7 @@
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use crate::{BeaconState, ChainSpec}; use crate::{BeaconState, ChainSpec};
use ssz::{ssz_encode, Decodable};
#[test] #[test]
pub fn can_produce_genesis_block() { pub fn can_produce_genesis_block() {
@ -34,8 +35,8 @@ pub fn get_attestation_participants_consistency() {
for slot in state for slot in state
.slot .slot
.epoch(spec.epoch_length) .epoch(spec.slots_per_epoch)
.slot_iter(spec.epoch_length) .slot_iter(spec.slots_per_epoch)
{ {
let committees = state.get_crosslink_committees_at_slot(slot, &spec).unwrap(); let committees = state.get_crosslink_committees_at_slot(slot, &spec).unwrap();
@ -59,4 +60,25 @@ pub fn get_attestation_participants_consistency() {
} }
} }
ssz_tests!(BeaconState); #[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = BeaconState::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = BeaconState::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}

View File

@ -1,19 +0,0 @@
use super::SlashableVoteData;
use crate::test_utils::TestRandom;
use rand::RngCore;
use serde_derive::Serialize;
use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom;
#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)]
pub struct CasperSlashing {
pub slashable_vote_data_1: SlashableVoteData,
pub slashable_vote_data_2: SlashableVoteData,
}
#[cfg(test)]
mod tests {
use super::*;
ssz_tests!(CasperSlashing);
}

View File

@ -1,11 +1,20 @@
use crate::{Address, Epoch, Hash256, Slot}; use crate::{Address, Epoch, Fork, Hash256, Slot};
use bls::Signature; use bls::Signature;
const GWEI: u64 = 1_000_000_000; const GWEI: u64 = 1_000_000_000;
pub enum Domain {
Deposit,
Attestation,
Proposal,
Exit,
Randao,
Transfer,
}
/// Holds all the "constants" for a BeaconChain. /// Holds all the "constants" for a BeaconChain.
/// ///
/// Spec v0.2.0 /// Spec v0.4.0
#[derive(PartialEq, Debug, Clone)] #[derive(PartialEq, Debug, Clone)]
pub struct ChainSpec { pub struct ChainSpec {
/* /*
@ -16,7 +25,7 @@ pub struct ChainSpec {
pub max_balance_churn_quotient: u64, pub max_balance_churn_quotient: u64,
pub beacon_chain_shard_number: u64, pub beacon_chain_shard_number: u64,
pub max_indices_per_slashable_vote: u64, pub max_indices_per_slashable_vote: u64,
pub max_withdrawals_per_epoch: u64, pub max_exit_dequeues_per_epoch: u64,
pub shuffle_round_count: u8, pub shuffle_round_count: u8,
/* /*
@ -48,29 +57,30 @@ pub struct ChainSpec {
/* /*
* Time parameters * Time parameters
*/ */
pub slot_duration: u64, pub seconds_per_slot: u64,
pub min_attestation_inclusion_delay: u64, pub min_attestation_inclusion_delay: u64,
pub epoch_length: u64, pub slots_per_epoch: u64,
pub seed_lookahead: Epoch, pub min_seed_lookahead: Epoch,
pub entry_exit_delay: u64, pub activation_exit_delay: u64,
pub eth1_data_voting_period: u64, pub epochs_per_eth1_voting_period: u64,
pub min_validator_withdrawal_epochs: Epoch, pub min_validator_withdrawability_delay: Epoch,
/* /*
* State list lengths * State list lengths
*/ */
pub latest_block_roots_length: usize, pub latest_block_roots_length: usize,
pub latest_randao_mixes_length: usize, pub latest_randao_mixes_length: usize,
pub latest_index_roots_length: usize, pub latest_active_index_roots_length: usize,
pub latest_penalized_exit_length: usize, pub latest_slashed_exit_length: usize,
/* /*
* Reward and penalty quotients * Reward and penalty quotients
*/ */
pub base_reward_quotient: u64, pub base_reward_quotient: u64,
pub whistleblower_reward_quotient: u64, pub whistleblower_reward_quotient: u64,
pub includer_reward_quotient: u64, pub attestation_inclusion_reward_quotient: u64,
pub inactivity_penalty_quotient: u64, pub inactivity_penalty_quotient: u64,
pub min_penalty_quotient: u64,
/* /*
* Max operations per block * Max operations per block
@ -79,29 +89,63 @@ pub struct ChainSpec {
pub max_attester_slashings: u64, pub max_attester_slashings: u64,
pub max_attestations: u64, pub max_attestations: u64,
pub max_deposits: u64, pub max_deposits: u64,
pub max_exits: u64, pub max_voluntary_exits: u64,
pub max_transfers: u64,
/* /*
* Signature domains * Signature domains
*
* Fields should be private to prevent accessing a domain that hasn't been modified to suit
* some `Fork`.
*
* Use `ChainSpec::get_domain(..)` to access these values.
*/ */
pub domain_deposit: u64, domain_deposit: u64,
pub domain_attestation: u64, domain_attestation: u64,
pub domain_proposal: u64, domain_proposal: u64,
pub domain_exit: u64, domain_exit: u64,
pub domain_randao: u64, domain_randao: u64,
domain_transfer: u64,
} }
impl ChainSpec { impl ChainSpec {
/// Returns a `ChainSpec` compatible with the specification from Ethereum Foundation. /// Return the number of committees in one epoch.
/// ///
/// Of course, the actual foundation specs are unknown at this point so these are just a rough /// Spec v0.4.0
/// estimate. pub fn get_epoch_committee_count(&self, active_validator_count: usize) -> u64 {
std::cmp::max(
1,
std::cmp::min(
self.shard_count / self.slots_per_epoch,
active_validator_count as u64 / self.slots_per_epoch / self.target_committee_size,
),
) * self.slots_per_epoch
}
/// Get the domain number that represents the fork meta and signature domain.
/// ///
/// Spec v0.2.0 /// Spec v0.4.0
pub fn get_domain(&self, epoch: Epoch, domain: Domain, fork: &Fork) -> u64 {
let domain_constant = match domain {
Domain::Deposit => self.domain_deposit,
Domain::Attestation => self.domain_attestation,
Domain::Proposal => self.domain_proposal,
Domain::Exit => self.domain_exit,
Domain::Randao => self.domain_randao,
Domain::Transfer => self.domain_transfer,
};
let fork_version = fork.get_fork_version(epoch);
fork_version * u64::pow(2, 32) + domain_constant
}
/// Returns a `ChainSpec` compatible with the Ethereum Foundation specification.
///
/// Spec v0.4.0
pub fn foundation() -> Self { pub fn foundation() -> Self {
let genesis_slot = Slot::new(2_u64.pow(19)); let genesis_slot = Slot::new(2_u64.pow(32));
let epoch_length = 64; let slots_per_epoch = 64;
let genesis_epoch = genesis_slot.epoch(epoch_length); let genesis_epoch = genesis_slot.epoch(slots_per_epoch);
Self { Self {
/* /*
@ -112,7 +156,7 @@ impl ChainSpec {
max_balance_churn_quotient: 32, max_balance_churn_quotient: 32,
beacon_chain_shard_number: u64::max_value(), beacon_chain_shard_number: u64::max_value(),
max_indices_per_slashable_vote: 4_096, max_indices_per_slashable_vote: 4_096,
max_withdrawals_per_epoch: 4, max_exit_dequeues_per_epoch: 4,
shuffle_round_count: 90, shuffle_round_count: 90,
/* /*
@ -133,7 +177,7 @@ impl ChainSpec {
* Initial Values * Initial Values
*/ */
genesis_fork_version: 0, genesis_fork_version: 0,
genesis_slot: Slot::new(2_u64.pow(19)), genesis_slot,
genesis_epoch, genesis_epoch,
genesis_start_shard: 0, genesis_start_shard: 0,
far_future_epoch: Epoch::new(u64::max_value()), far_future_epoch: Epoch::new(u64::max_value()),
@ -144,29 +188,30 @@ impl ChainSpec {
/* /*
* Time parameters * Time parameters
*/ */
slot_duration: 6, seconds_per_slot: 6,
min_attestation_inclusion_delay: 4, min_attestation_inclusion_delay: 4,
epoch_length, slots_per_epoch,
seed_lookahead: Epoch::new(1), min_seed_lookahead: Epoch::new(1),
entry_exit_delay: 4, activation_exit_delay: 4,
eth1_data_voting_period: 16, epochs_per_eth1_voting_period: 16,
min_validator_withdrawal_epochs: Epoch::new(256), min_validator_withdrawability_delay: Epoch::new(256),
/* /*
* State list lengths * State list lengths
*/ */
latest_block_roots_length: 8_192, latest_block_roots_length: 8_192,
latest_randao_mixes_length: 8_192, latest_randao_mixes_length: 8_192,
latest_index_roots_length: 8_192, latest_active_index_roots_length: 8_192,
latest_penalized_exit_length: 8_192, latest_slashed_exit_length: 8_192,
/* /*
* Reward and penalty quotients * Reward and penalty quotients
*/ */
base_reward_quotient: 32, base_reward_quotient: 32,
whistleblower_reward_quotient: 512, whistleblower_reward_quotient: 512,
includer_reward_quotient: 8, attestation_inclusion_reward_quotient: 8,
inactivity_penalty_quotient: 16_777_216, inactivity_penalty_quotient: 16_777_216,
min_penalty_quotient: 32,
/* /*
* Max operations per block * Max operations per block
@ -175,7 +220,8 @@ impl ChainSpec {
max_attester_slashings: 1, max_attester_slashings: 1,
max_attestations: 128, max_attestations: 128,
max_deposits: 16, max_deposits: 16,
max_exits: 16, max_voluntary_exits: 16,
max_transfers: 16,
/* /*
* Signature domains * Signature domains
@ -185,25 +231,24 @@ impl ChainSpec {
domain_proposal: 2, domain_proposal: 2,
domain_exit: 3, domain_exit: 3,
domain_randao: 4, domain_randao: 4,
} domain_transfer: 5,
} }
} }
impl ChainSpec {
/// Returns a `ChainSpec` compatible with the specification suitable for 8 validators. /// Returns a `ChainSpec` compatible with the specification suitable for 8 validators.
/// ///
/// Spec v0.2.0 /// Spec v0.4.0
pub fn few_validators() -> Self { pub fn few_validators() -> Self {
let genesis_slot = Slot::new(2_u64.pow(19)); let genesis_slot = Slot::new(2_u64.pow(32));
let epoch_length = 8; let slots_per_epoch = 8;
let genesis_epoch = genesis_slot.epoch(epoch_length); let genesis_epoch = genesis_slot.epoch(slots_per_epoch);
Self { Self {
shard_count: 8, shard_count: 8,
target_committee_size: 1, target_committee_size: 1,
genesis_slot, genesis_slot,
genesis_epoch, genesis_epoch,
epoch_length, slots_per_epoch,
..ChainSpec::foundation() ..ChainSpec::foundation()
} }
} }
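A worked example of the new `get_epoch_committee_count` arithmetic, using the `few_validators()` numbers visible above (shard_count 8, target_committee_size 1, slots_per_epoch 8); test-style sketch:

#[test]
fn epoch_committee_count_examples() {
    let spec = ChainSpec::few_validators();
    // max(1, min(8 / 8, active / 8 / 1)) * 8
    assert_eq!(spec.get_epoch_committee_count(4), 8); // inner min() is 0, floored to 1
    assert_eq!(spec.get_epoch_committee_count(100), 8); // capped at shard_count / slots_per_epoch = 1
}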

View File

@ -5,27 +5,43 @@ use serde_derive::Serialize;
use ssz_derive::{Decode, Encode, TreeHash}; use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom; use test_random_derive::TestRandom;
/// Specifies the block hash for a shard at an epoch.
///
/// Spec v0.4.0
#[derive( #[derive(
Debug, Clone, PartialEq, Default, Serialize, Hash, Encode, Decode, TreeHash, TestRandom, Debug, Clone, PartialEq, Default, Serialize, Hash, Encode, Decode, TreeHash, TestRandom,
)] )]
pub struct Crosslink { pub struct Crosslink {
pub epoch: Epoch, pub epoch: Epoch,
pub shard_block_root: Hash256, pub crosslink_data_root: Hash256,
}
impl Crosslink {
/// Generates a new instance where `dynasty` and `hash` are both zero.
pub fn zero() -> Self {
Self {
epoch: Epoch::new(0),
shard_block_root: Hash256::zero(),
}
}
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
ssz_tests!(Crosslink); #[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Crosslink::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Crosslink::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }
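With `Crosslink::zero()` removed, an equivalent value can still be spelled out field by field (sketch using the renamed field):

use types::{Crosslink, Epoch, Hash256};

fn zero_crosslink() -> Crosslink {
    Crosslink {
        epoch: Epoch::new(0),
        crosslink_data_root: Hash256::zero(),
    }
}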

View File

@ -5,6 +5,9 @@ use serde_derive::Serialize;
use ssz_derive::{Decode, Encode, TreeHash}; use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom; use test_random_derive::TestRandom;
/// A deposit to potentially become a beacon chain validator.
///
/// Spec v0.4.0
#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] #[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)]
pub struct Deposit { pub struct Deposit {
pub branch: Vec<Hash256>, pub branch: Vec<Hash256>,
@ -15,6 +18,29 @@ pub struct Deposit {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
ssz_tests!(Deposit); #[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Deposit::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Deposit::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }

View File

@ -5,6 +5,9 @@ use serde_derive::Serialize;
use ssz_derive::{Decode, Encode, TreeHash}; use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom; use test_random_derive::TestRandom;
/// Data generated by the deposit contract.
///
/// Spec v0.4.0
#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] #[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)]
pub struct DepositData { pub struct DepositData {
pub amount: u64, pub amount: u64,
@ -15,6 +18,29 @@ pub struct DepositData {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
ssz_tests!(DepositData); #[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = DepositData::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = DepositData::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }

View File

@ -6,6 +6,9 @@ use serde_derive::Serialize;
use ssz_derive::{Decode, Encode, TreeHash}; use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom; use test_random_derive::TestRandom;
/// The data supplied by the user to the deposit contract.
///
/// Spec v0.4.0
#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] #[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)]
pub struct DepositInput { pub struct DepositInput {
pub pubkey: PublicKey, pub pubkey: PublicKey,
@ -16,6 +19,29 @@ pub struct DepositInput {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
ssz_tests!(DepositInput); #[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = DepositInput::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = DepositInput::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }

View File

@ -5,7 +5,9 @@ use serde_derive::Serialize;
use ssz_derive::{Decode, Encode, TreeHash}; use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom; use test_random_derive::TestRandom;
// Note: this is refer to as DepositRootVote in specs /// Contains data obtained from the Eth1 chain.
///
/// Spec v0.4.0
#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode, TreeHash, TestRandom)] #[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode, TreeHash, TestRandom)]
pub struct Eth1Data { pub struct Eth1Data {
pub deposit_root: Hash256, pub deposit_root: Hash256,
@ -15,6 +17,29 @@ pub struct Eth1Data {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
ssz_tests!(Eth1Data); #[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Eth1Data::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Eth1Data::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }

View File

@ -5,7 +5,9 @@ use serde_derive::Serialize;
use ssz_derive::{Decode, Encode, TreeHash}; use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom; use test_random_derive::TestRandom;
// Note: this is refer to as DepositRootVote in specs /// A summation of votes for some `Eth1Data`.
///
/// Spec v0.4.0
#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode, TreeHash, TestRandom)] #[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode, TreeHash, TestRandom)]
pub struct Eth1DataVote { pub struct Eth1DataVote {
pub eth1_data: Eth1Data, pub eth1_data: Eth1Data,
@ -15,6 +17,29 @@ pub struct Eth1DataVote {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
ssz_tests!(Eth1DataVote); #[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Eth1DataVote::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Eth1DataVote::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }

View File

@ -1,20 +0,0 @@
use crate::{test_utils::TestRandom, Epoch};
use bls::Signature;
use rand::RngCore;
use serde_derive::Serialize;
use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom;
#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)]
pub struct Exit {
pub epoch: Epoch,
pub validator_index: u64,
pub signature: Signature,
}
#[cfg(test)]
mod tests {
use super::*;
ssz_tests!(Exit);
}

View File

@ -4,6 +4,9 @@ use serde_derive::Serialize;
use ssz_derive::{Decode, Encode, TreeHash}; use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom; use test_random_derive::TestRandom;
/// Specifies a fork of the `BeaconChain`, to prevent replay attacks.
///
/// Spec v0.4.0
#[derive(Debug, Clone, PartialEq, Default, Serialize, Encode, Decode, TreeHash, TestRandom)] #[derive(Debug, Clone, PartialEq, Default, Serialize, Encode, Decode, TreeHash, TestRandom)]
pub struct Fork { pub struct Fork {
pub previous_version: u64, pub previous_version: u64,
@ -13,23 +16,42 @@ pub struct Fork {
impl Fork { impl Fork {
/// Return the fork version of the given ``epoch``. /// Return the fork version of the given ``epoch``.
///
/// Spec v0.4.0
pub fn get_fork_version(&self, epoch: Epoch) -> u64 { pub fn get_fork_version(&self, epoch: Epoch) -> u64 {
if epoch < self.epoch { if epoch < self.epoch {
return self.previous_version; return self.previous_version;
} }
self.current_version self.current_version
} }
/// Get the domain number that represents the fork meta and signature domain.
pub fn get_domain(&self, epoch: Epoch, domain_type: u64) -> u64 {
let fork_version = self.get_fork_version(epoch);
fork_version * u64::pow(2, 32) + domain_type
}
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
ssz_tests!(Fork); #[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Fork::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Fork::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }
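`Fork::get_domain` has moved to `ChainSpec::get_domain`; the fork now only supplies the version. A hedged sketch of the combined arithmetic, assuming `Fork`'s fields are all public as the diff suggests, and using `domain_randao = 4` from the foundation spec above:

use types::{ChainSpec, Domain, Epoch, Fork};

#[test]
fn fork_aware_domain() {
    let spec = ChainSpec::foundation();
    let fork = Fork { previous_version: 0, current_version: 1, epoch: Epoch::new(100) };

    // Strictly before the fork epoch: version 0, so the domain is just the constant.
    assert_eq!(spec.get_domain(Epoch::new(99), Domain::Randao, &fork), 4);
    // At and after the fork epoch: version 1, i.e. 1 * 2^32 + 4.
    assert_eq!(spec.get_domain(Epoch::new(100), Domain::Randao, &fork), (1u64 << 32) + 4);
}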

View File

@ -1,4 +1,3 @@
#[macro_use]
pub mod test_utils; pub mod test_utils;
pub mod attestation; pub mod attestation;
@ -8,7 +7,6 @@ pub mod attester_slashing;
pub mod beacon_block; pub mod beacon_block;
pub mod beacon_block_body; pub mod beacon_block_body;
pub mod beacon_state; pub mod beacon_state;
pub mod casper_slashing;
pub mod chain_spec; pub mod chain_spec;
pub mod crosslink; pub mod crosslink;
pub mod deposit; pub mod deposit;
@ -16,23 +14,22 @@ pub mod deposit_data;
pub mod deposit_input; pub mod deposit_input;
pub mod eth1_data; pub mod eth1_data;
pub mod eth1_data_vote; pub mod eth1_data_vote;
pub mod exit;
pub mod fork; pub mod fork;
pub mod free_attestation; pub mod free_attestation;
pub mod pending_attestation; pub mod pending_attestation;
pub mod proposal_signed_data; pub mod proposal;
pub mod proposer_slashing; pub mod proposer_slashing;
pub mod readers; pub mod readers;
pub mod shard_reassignment_record; pub mod shard_reassignment_record;
pub mod slashable_attestation; pub mod slashable_attestation;
pub mod slashable_vote_data; pub mod transfer;
pub mod voluntary_exit;
#[macro_use] #[macro_use]
pub mod slot_epoch_macros; pub mod slot_epoch_macros;
pub mod slot_epoch; pub mod slot_epoch;
pub mod slot_height; pub mod slot_height;
pub mod validator; pub mod validator;
pub mod validator_registry; pub mod validator_registry;
pub mod validator_registry_delta_block;
use ethereum_types::{H160, H256, U256}; use ethereum_types::{H160, H256, U256};
use std::collections::HashMap; use std::collections::HashMap;
@ -43,29 +40,25 @@ pub use crate::attestation_data_and_custody_bit::AttestationDataAndCustodyBit;
pub use crate::attester_slashing::AttesterSlashing; pub use crate::attester_slashing::AttesterSlashing;
pub use crate::beacon_block::BeaconBlock; pub use crate::beacon_block::BeaconBlock;
pub use crate::beacon_block_body::BeaconBlockBody; pub use crate::beacon_block_body::BeaconBlockBody;
pub use crate::beacon_state::{ pub use crate::beacon_state::{BeaconState, Error as BeaconStateError, RelativeEpoch};
BeaconState, Error as BeaconStateError, InclusionError, RelativeEpoch, pub use crate::chain_spec::{ChainSpec, Domain};
};
pub use crate::casper_slashing::CasperSlashing;
pub use crate::chain_spec::ChainSpec;
pub use crate::crosslink::Crosslink; pub use crate::crosslink::Crosslink;
pub use crate::deposit::Deposit; pub use crate::deposit::Deposit;
pub use crate::deposit_data::DepositData; pub use crate::deposit_data::DepositData;
pub use crate::deposit_input::DepositInput; pub use crate::deposit_input::DepositInput;
pub use crate::eth1_data::Eth1Data; pub use crate::eth1_data::Eth1Data;
pub use crate::eth1_data_vote::Eth1DataVote; pub use crate::eth1_data_vote::Eth1DataVote;
pub use crate::exit::Exit;
pub use crate::fork::Fork; pub use crate::fork::Fork;
pub use crate::free_attestation::FreeAttestation; pub use crate::free_attestation::FreeAttestation;
pub use crate::pending_attestation::PendingAttestation; pub use crate::pending_attestation::PendingAttestation;
pub use crate::proposal_signed_data::ProposalSignedData; pub use crate::proposal::Proposal;
pub use crate::proposer_slashing::ProposerSlashing; pub use crate::proposer_slashing::ProposerSlashing;
pub use crate::slashable_attestation::SlashableAttestation; pub use crate::slashable_attestation::SlashableAttestation;
pub use crate::slashable_vote_data::SlashableVoteData;
pub use crate::slot_epoch::{Epoch, Slot}; pub use crate::slot_epoch::{Epoch, Slot};
pub use crate::slot_height::SlotHeight; pub use crate::slot_height::SlotHeight;
pub use crate::validator::{StatusFlags as ValidatorStatusFlags, Validator}; pub use crate::transfer::Transfer;
pub use crate::validator_registry_delta_block::ValidatorRegistryDeltaBlock; pub use crate::validator::Validator;
pub use crate::voluntary_exit::VoluntaryExit;
pub type Hash256 = H256; pub type Hash256 = H256;
pub type Address = H160; pub type Address = H160;

View File

@ -5,6 +5,9 @@ use serde_derive::Serialize;
use ssz_derive::{Decode, Encode, TreeHash}; use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom; use test_random_derive::TestRandom;
/// An attestation that has been included in the state but not yet fully processed.
///
/// Spec v0.4.0
#[derive(Debug, Clone, PartialEq, Serialize, Encode, Decode, TreeHash, TestRandom)] #[derive(Debug, Clone, PartialEq, Serialize, Encode, Decode, TreeHash, TestRandom)]
pub struct PendingAttestation { pub struct PendingAttestation {
pub aggregation_bitfield: Bitfield, pub aggregation_bitfield: Bitfield,
@ -16,6 +19,29 @@ pub struct PendingAttestation {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
ssz_tests!(PendingAttestation); #[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = PendingAttestation::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = PendingAttestation::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }

View File

@ -0,0 +1,78 @@
use crate::test_utils::TestRandom;
use crate::{Hash256, Slot};
use bls::Signature;
use rand::RngCore;
use serde_derive::Serialize;
use ssz::TreeHash;
use ssz_derive::{Decode, Encode, SignedRoot, TreeHash};
use test_random_derive::TestRandom;
/// A proposal for some shard or beacon block.
///
/// Spec v0.4.0
#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom, SignedRoot)]
pub struct Proposal {
pub slot: Slot,
/// Shard number (spec.beacon_chain_shard_number for beacon chain)
pub shard: u64,
pub block_root: Hash256,
pub signature: Signature,
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, SignedRoot, TreeHash};
#[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Proposal::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Proposal::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
#[derive(TreeHash)]
struct SignedProposal {
pub slot: Slot,
pub shard: u64,
pub block_root: Hash256,
}
impl Into<SignedProposal> for Proposal {
fn into(self) -> SignedProposal {
SignedProposal {
slot: self.slot,
shard: self.shard,
block_root: self.block_root,
}
}
}
#[test]
pub fn test_signed_root() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Proposal::random_for_test(&mut rng);
let other: SignedProposal = original.clone().into();
assert_eq!(original.signed_root(), other.hash_tree_root());
}
}

View File

@ -1,6 +1,5 @@
use super::ProposalSignedData; use super::Proposal;
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use bls::Signature;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz_derive::{Decode, Encode, TreeHash}; use ssz_derive::{Decode, Encode, TreeHash};
@ -10,18 +9,42 @@ mod builder;
pub use builder::ProposerSlashingBuilder; pub use builder::ProposerSlashingBuilder;
/// Two conflicting proposals from the same proposer (validator).
///
/// Spec v0.4.0
#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] #[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)]
pub struct ProposerSlashing { pub struct ProposerSlashing {
pub proposer_index: u64, pub proposer_index: u64,
pub proposal_data_1: ProposalSignedData, pub proposal_1: Proposal,
pub proposal_signature_1: Signature, pub proposal_2: Proposal,
pub proposal_data_2: ProposalSignedData,
pub proposal_signature_2: Signature,
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
ssz_tests!(ProposerSlashing); #[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = ProposerSlashing::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = ProposerSlashing::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }

View File

@ -1,5 +1,5 @@
use crate::*; use crate::*;
use ssz::TreeHash; use ssz::SignedRoot;
/// Builds a `ProposerSlashing`. /// Builds a `ProposerSlashing`.
pub struct ProposerSlashingBuilder(); pub struct ProposerSlashingBuilder();
@ -12,48 +12,46 @@ impl ProposerSlashingBuilder {
/// - `validator_index: u64` /// - `validator_index: u64`
/// - `message: &[u8]` /// - `message: &[u8]`
/// - `epoch: Epoch` /// - `epoch: Epoch`
/// - `domain: u64` /// - `domain: Domain`
/// ///
/// Where domain is a domain "constant" (e.g., `spec.domain_attestation`). /// Where domain is a domain "constant" (e.g., `spec.domain_attestation`).
pub fn double_vote<F>(proposer_index: u64, signer: F, spec: &ChainSpec) -> ProposerSlashing pub fn double_vote<F>(proposer_index: u64, signer: F, spec: &ChainSpec) -> ProposerSlashing
where where
F: Fn(u64, &[u8], Epoch, u64) -> Signature, F: Fn(u64, &[u8], Epoch, Domain) -> Signature,
{ {
let slot = Slot::new(0); let slot = Slot::new(0);
let shard = 0; let shard = 0;
let proposal_data_1 = ProposalSignedData { let mut proposal_1 = Proposal {
slot, slot,
shard, shard,
block_root: Hash256::from_low_u64_le(1), block_root: Hash256::from_low_u64_le(1),
signature: Signature::empty_signature(),
}; };
let proposal_data_2 = ProposalSignedData { let mut proposal_2 = Proposal {
slot, slot,
shard, shard,
block_root: Hash256::from_low_u64_le(2), block_root: Hash256::from_low_u64_le(2),
signature: Signature::empty_signature(),
}; };
let proposal_signature_1 = { proposal_1.signature = {
let message = proposal_data_1.hash_tree_root(); let message = proposal_1.signed_root();
let epoch = slot.epoch(spec.epoch_length); let epoch = slot.epoch(spec.slots_per_epoch);
let domain = spec.domain_proposal; signer(proposer_index, &message[..], epoch, Domain::Proposal)
signer(proposer_index, &message[..], epoch, domain)
}; };
let proposal_signature_2 = { proposal_2.signature = {
let message = proposal_data_2.hash_tree_root(); let message = proposal_2.signed_root();
let epoch = slot.epoch(spec.epoch_length); let epoch = slot.epoch(spec.slots_per_epoch);
let domain = spec.domain_proposal; signer(proposer_index, &message[..], epoch, Domain::Proposal)
signer(proposer_index, &message[..], epoch, domain)
}; };
ProposerSlashing { ProposerSlashing {
proposer_index, proposer_index,
proposal_data_1, proposal_1,
proposal_signature_1, proposal_2,
proposal_data_2,
proposal_signature_2,
} }
} }
} }
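As with the attester-slashing builder, the signer callback here now receives a `Domain` variant and the signed message is the proposal's `signed_root()`. A hedged usage sketch (the `Signature::new` constructor and the builder's public path are assumptions):

use bls::{SecretKey, Signature};
use types::*;

fn build_proposer_slashing(proposer_index: u64, sk: &SecretKey, spec: &ChainSpec, fork: &Fork) -> ProposerSlashing {
    ProposerSlashingBuilder::double_vote(
        proposer_index,
        |_, message, epoch, domain| {
            let domain = spec.get_domain(epoch, domain, fork);
            Signature::new(message, domain, sk)
        },
        spec,
    )
}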

View File

@ -13,7 +13,6 @@ pub trait BeaconBlockReader: Debug + PartialEq {
fn slot(&self) -> Slot; fn slot(&self) -> Slot;
fn parent_root(&self) -> Hash256; fn parent_root(&self) -> Hash256;
fn state_root(&self) -> Hash256; fn state_root(&self) -> Hash256;
fn canonical_root(&self) -> Hash256;
fn into_beacon_block(self) -> Option<BeaconBlock>; fn into_beacon_block(self) -> Option<BeaconBlock>;
} }
@ -30,10 +29,6 @@ impl BeaconBlockReader for BeaconBlock {
self.state_root self.state_root
} }
fn canonical_root(&self) -> Hash256 {
self.canonical_root()
}
fn into_beacon_block(self) -> Option<BeaconBlock> { fn into_beacon_block(self) -> Option<BeaconBlock> {
Some(self) Some(self)
} }

View File

@ -1,4 +1,4 @@
use crate::{BeaconState, Hash256, Slot}; use crate::{BeaconState, Slot};
use std::fmt::Debug; use std::fmt::Debug;
/// The `BeaconStateReader` provides interfaces for reading a subset of fields of a `BeaconState`. /// The `BeaconStateReader` provides interfaces for reading a subset of fields of a `BeaconState`.
@ -11,7 +11,6 @@ use std::fmt::Debug;
/// "future proofing". /// "future proofing".
pub trait BeaconStateReader: Debug + PartialEq { pub trait BeaconStateReader: Debug + PartialEq {
fn slot(&self) -> Slot; fn slot(&self) -> Slot;
fn canonical_root(&self) -> Hash256;
fn into_beacon_state(self) -> Option<BeaconState>; fn into_beacon_state(self) -> Option<BeaconState>;
} }
@ -20,10 +19,6 @@ impl BeaconStateReader for BeaconState {
self.slot self.slot
} }
fn canonical_root(&self) -> Hash256 {
self.canonical_root()
}
fn into_beacon_state(self) -> Option<BeaconState> { fn into_beacon_state(self) -> Option<BeaconState> {
Some(self) Some(self)
} }

View File

@ -14,6 +14,29 @@ pub struct ShardReassignmentRecord {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
ssz_tests!(ShardReassignmentRecord); #[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = ShardReassignmentRecord::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = ShardReassignmentRecord::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }

View File

@ -1,11 +1,18 @@
use crate::{test_utils::TestRandom, AggregateSignature, AttestationData, Bitfield, ChainSpec}; use crate::{test_utils::TestRandom, AggregateSignature, AttestationData, Bitfield, ChainSpec};
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz_derive::{Decode, Encode, TreeHash}; use ssz::TreeHash;
use ssz_derive::{Decode, Encode, SignedRoot, TreeHash};
use test_random_derive::TestRandom; use test_random_derive::TestRandom;
#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] /// Details an attestation that can be slashable.
///
/// To be included in an `AttesterSlashing`.
///
/// Spec v0.4.0
#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom, SignedRoot)]
pub struct SlashableAttestation { pub struct SlashableAttestation {
/// Lists validator registry indices, not committee indices.
pub validator_indices: Vec<u64>, pub validator_indices: Vec<u64>,
pub data: AttestationData, pub data: AttestationData,
pub custody_bitfield: Bitfield, pub custody_bitfield: Bitfield,
@ -15,21 +22,21 @@ pub struct SlashableAttestation {
impl SlashableAttestation { impl SlashableAttestation {
/// Check if ``attestation_data_1`` and ``attestation_data_2`` have the same target. /// Check if ``attestation_data_1`` and ``attestation_data_2`` have the same target.
/// ///
/// Spec v0.3.0 /// Spec v0.4.0
pub fn is_double_vote(&self, other: &SlashableAttestation, spec: &ChainSpec) -> bool { pub fn is_double_vote(&self, other: &SlashableAttestation, spec: &ChainSpec) -> bool {
self.data.slot.epoch(spec.epoch_length) == other.data.slot.epoch(spec.epoch_length) self.data.slot.epoch(spec.slots_per_epoch) == other.data.slot.epoch(spec.slots_per_epoch)
} }
/// Check if ``attestation_data_1`` surrounds ``attestation_data_2``. /// Check if ``attestation_data_1`` surrounds ``attestation_data_2``.
/// ///
/// Spec v0.3.0 /// Spec v0.4.0
pub fn is_surround_vote(&self, other: &SlashableAttestation, spec: &ChainSpec) -> bool { pub fn is_surround_vote(&self, other: &SlashableAttestation, spec: &ChainSpec) -> bool {
let source_epoch_1 = self.data.justified_epoch; let source_epoch_1 = self.data.justified_epoch;
let source_epoch_2 = other.data.justified_epoch; let source_epoch_2 = other.data.justified_epoch;
let target_epoch_1 = self.data.slot.epoch(spec.epoch_length); let target_epoch_1 = self.data.slot.epoch(spec.slots_per_epoch);
let target_epoch_2 = other.data.slot.epoch(spec.epoch_length); let target_epoch_2 = other.data.slot.epoch(spec.slots_per_epoch);
(source_epoch_1 < source_epoch_2) && (target_epoch_2 < target_epoch_1) (source_epoch_1 < source_epoch_2) & (target_epoch_2 < target_epoch_1)
} }
} }
@ -39,6 +46,7 @@ mod tests {
use crate::chain_spec::ChainSpec; use crate::chain_spec::ChainSpec;
use crate::slot_epoch::{Epoch, Slot}; use crate::slot_epoch::{Epoch, Slot};
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
#[test] #[test]
pub fn test_is_double_vote_true() { pub fn test_is_double_vote_true() {
@ -112,7 +120,28 @@ mod tests {
); );
} }
ssz_tests!(SlashableAttestation); #[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = SlashableAttestation::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = SlashableAttestation::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
fn create_slashable_attestation( fn create_slashable_attestation(
slot_factor: u64, slot_factor: u64,
@ -122,7 +151,7 @@ mod tests {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let mut slashable_vote = SlashableAttestation::random_for_test(&mut rng); let mut slashable_vote = SlashableAttestation::random_for_test(&mut rng);
slashable_vote.data.slot = Slot::new(slot_factor * spec.epoch_length); slashable_vote.data.slot = Slot::new(slot_factor * spec.slots_per_epoch);
slashable_vote.data.justified_epoch = Epoch::new(justified_epoch); slashable_vote.data.justified_epoch = Epoch::new(justified_epoch);
slashable_vote slashable_vote
} }

View File

@ -35,8 +35,8 @@ impl Slot {
Slot(slot) Slot(slot)
} }
pub fn epoch(self, epoch_length: u64) -> Epoch { pub fn epoch(self, slots_per_epoch: u64) -> Epoch {
Epoch::from(self.0 / epoch_length) Epoch::from(self.0 / slots_per_epoch)
} }
pub fn height(self, genesis_slot: Slot) -> SlotHeight { pub fn height(self, genesis_slot: Slot) -> SlotHeight {
@ -57,24 +57,24 @@ impl Epoch {
Epoch(u64::max_value()) Epoch(u64::max_value())
} }
pub fn start_slot(self, epoch_length: u64) -> Slot { pub fn start_slot(self, slots_per_epoch: u64) -> Slot {
Slot::from(self.0.saturating_mul(epoch_length)) Slot::from(self.0.saturating_mul(slots_per_epoch))
} }
pub fn end_slot(self, epoch_length: u64) -> Slot { pub fn end_slot(self, slots_per_epoch: u64) -> Slot {
Slot::from( Slot::from(
self.0 self.0
.saturating_add(1) .saturating_add(1)
.saturating_mul(epoch_length) .saturating_mul(slots_per_epoch)
.saturating_sub(1), .saturating_sub(1),
) )
} }
pub fn slot_iter(&self, epoch_length: u64) -> SlotIter { pub fn slot_iter(&self, slots_per_epoch: u64) -> SlotIter {
SlotIter { SlotIter {
current_iteration: 0, current_iteration: 0,
epoch: self, epoch: self,
epoch_length, slots_per_epoch,
} }
} }
} }
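A short sketch of the arithmetic these renamed helpers implement (values assume slots_per_epoch = 8):

let slots_per_epoch = 8;
let epoch = Epoch::new(2);

assert_eq!(epoch.start_slot(slots_per_epoch), Slot::new(16)); // 2 * 8
assert_eq!(epoch.end_slot(slots_per_epoch), Slot::new(23));   // (2 + 1) * 8 - 1
assert_eq!(Slot::new(23).epoch(slots_per_epoch), Epoch::new(2));

// `slot_iter` yields every slot in the epoch, in order.
let slots: Vec<Slot> = epoch.slot_iter(slots_per_epoch).collect();
assert_eq!(slots.len(), 8);
assert_eq!(slots[0], Slot::new(16));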
@ -82,17 +82,17 @@ impl Epoch {
pub struct SlotIter<'a> { pub struct SlotIter<'a> {
current_iteration: u64, current_iteration: u64,
epoch: &'a Epoch, epoch: &'a Epoch,
epoch_length: u64, slots_per_epoch: u64,
} }
impl<'a> Iterator for SlotIter<'a> { impl<'a> Iterator for SlotIter<'a> {
type Item = Slot; type Item = Slot;
fn next(&mut self) -> Option<Slot> { fn next(&mut self) -> Option<Slot> {
if self.current_iteration >= self.epoch_length { if self.current_iteration >= self.slots_per_epoch {
None None
} else { } else {
let start_slot = self.epoch.start_slot(self.epoch_length); let start_slot = self.epoch.start_slot(self.slots_per_epoch);
let previous = self.current_iteration; let previous = self.current_iteration;
self.current_iteration += 1; self.current_iteration += 1;
Some(start_slot + previous) Some(start_slot + previous)
@ -103,6 +103,8 @@ impl<'a> Iterator for SlotIter<'a> {
#[cfg(test)] #[cfg(test)]
mod slot_tests { mod slot_tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode;
all_tests!(Slot); all_tests!(Slot);
} }
@ -110,23 +112,25 @@ mod slot_tests {
#[cfg(test)] #[cfg(test)]
mod epoch_tests { mod epoch_tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode;
all_tests!(Epoch); all_tests!(Epoch);
#[test] #[test]
fn slot_iter() { fn slot_iter() {
let epoch_length = 8; let slots_per_epoch = 8;
let epoch = Epoch::new(0); let epoch = Epoch::new(0);
let mut slots = vec![]; let mut slots = vec![];
for slot in epoch.slot_iter(epoch_length) { for slot in epoch.slot_iter(slots_per_epoch) {
slots.push(slot); slots.push(slot);
} }
assert_eq!(slots.len(), epoch_length as usize); assert_eq!(slots.len(), slots_per_epoch as usize);
for i in 0..epoch_length { for i in 0..slots_per_epoch {
assert_eq!(Slot::from(i), slots[i as usize]) assert_eq!(Slot::from(i), slots[i as usize])
} }
} }

View File

@ -20,26 +20,6 @@ macro_rules! impl_from_into_u64 {
}; };
} }
// need to truncate for some fork-choice algorithms
#[allow(unused_macros)]
macro_rules! impl_into_u32 {
($main: ident) => {
impl Into<u32> for $main {
fn into(self) -> u32 {
assert!(self.0 < u64::from(std::u32::MAX), "Lossy conversion to u32");
self.0 as u32
}
}
impl $main {
pub fn as_u32(&self) -> u32 {
assert!(self.0 < u64::from(std::u32::MAX), "Lossy conversion to u32");
self.0 as u32
}
}
};
}
macro_rules! impl_from_into_usize { macro_rules! impl_from_into_usize {
($main: ident) => { ($main: ident) => {
impl From<usize> for $main { impl From<usize> for $main {
@ -268,7 +248,7 @@ macro_rules! impl_common {
} }
// test macros // test macros
#[cfg(test)] #[allow(unused_macros)]
macro_rules! new_tests { macro_rules! new_tests {
($type: ident) => { ($type: ident) => {
#[test] #[test]
@ -280,7 +260,7 @@ macro_rules! new_tests {
}; };
} }
#[cfg(test)] #[allow(unused_macros)]
macro_rules! from_into_tests { macro_rules! from_into_tests {
($type: ident, $other: ident) => { ($type: ident, $other: ident) => {
#[test] #[test]
@ -306,7 +286,7 @@ macro_rules! from_into_tests {
}; };
} }
#[cfg(test)] #[allow(unused_macros)]
macro_rules! math_between_tests { macro_rules! math_between_tests {
($type: ident, $other: ident) => { ($type: ident, $other: ident) => {
#[test] #[test]
@ -454,7 +434,7 @@ macro_rules! math_between_tests {
}; };
} }
#[cfg(test)] #[allow(unused_macros)]
macro_rules! math_tests { macro_rules! math_tests {
($type: ident) => { ($type: ident) => {
#[test] #[test]
@ -548,7 +528,35 @@ macro_rules! math_tests {
}; };
} }
#[cfg(test)] #[allow(unused_macros)]
macro_rules! ssz_tests {
($type: ident) => {
#[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = $type::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = $type::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = $type::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
};
}
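Since the gate on these macros is now #[allow(unused_macros)] rather than #[cfg(test)], the call site is what provides the test gating; a sketch of a typical invocation, mirroring the per-type test modules elsewhere in this diff:

#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
    use ssz::{ssz_encode, Decodable, TreeHash};

    // Expands into `test_ssz_round_trip` and `test_hash_tree_root_internal`
    // for the named type.
    ssz_tests!(Slot);
}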
#[allow(unused_macros)]
macro_rules! all_tests { macro_rules! all_tests {
($type: ident) => { ($type: ident) => {
new_tests!($type); new_tests!($type);

View File

@ -23,8 +23,8 @@ impl SlotHeight {
Slot::from(self.0.saturating_add(genesis_slot.as_u64())) Slot::from(self.0.saturating_add(genesis_slot.as_u64()))
} }
pub fn epoch(self, genesis_slot: u64, epoch_length: u64) -> Epoch { pub fn epoch(self, genesis_slot: u64, slots_per_epoch: u64) -> Epoch {
Epoch::from(self.0.saturating_add(genesis_slot) / epoch_length) Epoch::from(self.0.saturating_add(genesis_slot) / slots_per_epoch)
} }
pub fn max_value() -> SlotHeight { pub fn max_value() -> SlotHeight {
@ -33,8 +33,11 @@ impl SlotHeight {
} }
#[cfg(test)] #[cfg(test)]
mod slot_height_tests { mod slot_height_tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode;
all_tests!(SlotHeight); all_tests!(SlotHeight);
} }

View File

@ -6,8 +6,6 @@ pub mod address;
pub mod aggregate_signature; pub mod aggregate_signature;
pub mod bitfield; pub mod bitfield;
pub mod hash256; pub mod hash256;
#[macro_use]
mod macros;
pub mod public_key; pub mod public_key;
pub mod secret_key; pub mod secret_key;
pub mod signature; pub mod signature;
@ -19,6 +17,12 @@ where
fn random_for_test(rng: &mut T) -> Self; fn random_for_test(rng: &mut T) -> Self;
} }
impl<T: RngCore> TestRandom<T> for bool {
fn random_for_test(rng: &mut T) -> Self {
(rng.next_u32() % 2) == 1
}
}

impl<T: RngCore> TestRandom<T> for u64 { impl<T: RngCore> TestRandom<T> for u64 {
fn random_for_test(rng: &mut T) -> Self { fn random_for_test(rng: &mut T) -> Self {
rng.next_u64() rng.next_u64()

View File

@ -0,0 +1,52 @@
use super::Slot;
use crate::test_utils::TestRandom;
use bls::{PublicKey, Signature};
use rand::RngCore;
use serde_derive::Serialize;
use ssz::TreeHash;
use ssz_derive::{Decode, Encode, SignedRoot, TreeHash};
use test_random_derive::TestRandom;
/// A transfer of balance from one validator to another, to be included in a block.
///
/// Spec v0.4.0
#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom, SignedRoot)]
pub struct Transfer {
pub from: u64,
pub to: u64,
pub amount: u64,
pub fee: u64,
pub slot: Slot,
pub pubkey: PublicKey,
pub signature: Signature,
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
#[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Transfer::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Transfer::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
}

View File

@ -1,56 +1,21 @@
use crate::{test_utils::TestRandom, Epoch, Hash256, PublicKey}; use crate::{test_utils::TestRandom, Epoch, Hash256, PublicKey};
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom;
const STATUS_FLAG_INITIATED_EXIT: u8 = 1; /// Information about a `BeaconChain` validator.
const STATUS_FLAG_WITHDRAWABLE: u8 = 2; ///
/// Spec v0.4.0
#[derive(Debug, PartialEq, Clone, Copy, Serialize)] #[derive(Debug, Clone, PartialEq, Serialize, Encode, Decode, TestRandom, TreeHash)]
pub enum StatusFlags {
InitiatedExit,
Withdrawable,
}
struct StatusFlagsDecodeError;
impl From<StatusFlagsDecodeError> for DecodeError {
fn from(_: StatusFlagsDecodeError) -> DecodeError {
DecodeError::Invalid
}
}
/// Handles the serialization logic for the `status_flags` field of the `Validator`.
fn status_flag_to_byte(flag: Option<StatusFlags>) -> u8 {
if let Some(flag) = flag {
match flag {
StatusFlags::InitiatedExit => STATUS_FLAG_INITIATED_EXIT,
StatusFlags::Withdrawable => STATUS_FLAG_WITHDRAWABLE,
}
} else {
0
}
}
/// Handles the deserialization logic for the `status_flags` field of the `Validator`.
fn status_flag_from_byte(flag: u8) -> Result<Option<StatusFlags>, StatusFlagsDecodeError> {
match flag {
0 => Ok(None),
1 => Ok(Some(StatusFlags::InitiatedExit)),
2 => Ok(Some(StatusFlags::Withdrawable)),
_ => Err(StatusFlagsDecodeError),
}
}
#[derive(Debug, Clone, PartialEq, Serialize)]
pub struct Validator { pub struct Validator {
pub pubkey: PublicKey, pub pubkey: PublicKey,
pub withdrawal_credentials: Hash256, pub withdrawal_credentials: Hash256,
pub activation_epoch: Epoch, pub activation_epoch: Epoch,
pub exit_epoch: Epoch, pub exit_epoch: Epoch,
pub withdrawal_epoch: Epoch, pub withdrawable_epoch: Epoch,
pub penalized_epoch: Epoch, pub initiated_exit: bool,
pub status_flags: Option<StatusFlags>, pub slashed: bool,
} }
impl Validator { impl Validator {
@ -64,14 +29,9 @@ impl Validator {
self.exit_epoch <= epoch self.exit_epoch <= epoch
} }
/// Returns `true` if the validator is considered penalized at some epoch. /// Returns `true` if the validator is able to withdraw at some epoch.
pub fn is_penalized_at(&self, epoch: Epoch) -> bool { pub fn is_withdrawable_at(&self, epoch: Epoch) -> bool {
self.penalized_epoch <= epoch self.withdrawable_epoch <= epoch
}
/// Returns `true` if the validator is considered penalized at some epoch.
pub fn has_initiated_exit(&self) -> bool {
self.status_flags == Some(StatusFlags::InitiatedExit)
} }
} }
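A minimal sketch of the renamed accessor, leaning on the Default impl shown just below (which initialises every epoch to the far-future value):

let mut validator = Validator::default();
validator.withdrawable_epoch = Epoch::new(30);

assert!(!validator.is_withdrawable_at(Epoch::new(29)));
assert!(validator.is_withdrawable_at(Epoch::new(30)));
assert!(validator.is_withdrawable_at(Epoch::new(31)));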
@ -83,85 +43,9 @@ impl Default for Validator {
withdrawal_credentials: Hash256::default(), withdrawal_credentials: Hash256::default(),
activation_epoch: Epoch::from(std::u64::MAX), activation_epoch: Epoch::from(std::u64::MAX),
exit_epoch: Epoch::from(std::u64::MAX), exit_epoch: Epoch::from(std::u64::MAX),
withdrawal_epoch: Epoch::from(std::u64::MAX), withdrawable_epoch: Epoch::from(std::u64::MAX),
penalized_epoch: Epoch::from(std::u64::MAX), initiated_exit: false,
status_flags: None, slashed: false,
}
}
}
impl<T: RngCore> TestRandom<T> for StatusFlags {
fn random_for_test(rng: &mut T) -> Self {
let options = vec![StatusFlags::InitiatedExit, StatusFlags::Withdrawable];
options[(rng.next_u32() as usize) % options.len()]
}
}
impl Encodable for Validator {
fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.pubkey);
s.append(&self.withdrawal_credentials);
s.append(&self.activation_epoch);
s.append(&self.exit_epoch);
s.append(&self.withdrawal_epoch);
s.append(&self.penalized_epoch);
s.append(&status_flag_to_byte(self.status_flags));
}
}
impl Decodable for Validator {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (pubkey, i) = <_>::ssz_decode(bytes, i)?;
let (withdrawal_credentials, i) = <_>::ssz_decode(bytes, i)?;
let (activation_epoch, i) = <_>::ssz_decode(bytes, i)?;
let (exit_epoch, i) = <_>::ssz_decode(bytes, i)?;
let (withdrawal_epoch, i) = <_>::ssz_decode(bytes, i)?;
let (penalized_epoch, i) = <_>::ssz_decode(bytes, i)?;
let (status_flags_byte, i): (u8, usize) = <_>::ssz_decode(bytes, i)?;
let status_flags = status_flag_from_byte(status_flags_byte)?;
Ok((
Self {
pubkey,
withdrawal_credentials,
activation_epoch,
exit_epoch,
withdrawal_epoch,
penalized_epoch,
status_flags,
},
i,
))
}
}
impl TreeHash for Validator {
fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![];
result.append(&mut self.pubkey.hash_tree_root_internal());
result.append(&mut self.withdrawal_credentials.hash_tree_root_internal());
result.append(&mut self.activation_epoch.hash_tree_root_internal());
result.append(&mut self.exit_epoch.hash_tree_root_internal());
result.append(&mut self.withdrawal_epoch.hash_tree_root_internal());
result.append(&mut self.penalized_epoch.hash_tree_root_internal());
result.append(
&mut u64::from(status_flag_to_byte(self.status_flags)).hash_tree_root_internal(),
);
hash(&result)
}
}
impl<T: RngCore> TestRandom<T> for Validator {
fn random_for_test(rng: &mut T) -> Self {
Self {
pubkey: <_>::random_for_test(rng),
withdrawal_credentials: <_>::random_for_test(rng),
activation_epoch: <_>::random_for_test(rng),
exit_epoch: <_>::random_for_test(rng),
withdrawal_epoch: <_>::random_for_test(rng),
penalized_epoch: <_>::random_for_test(rng),
status_flags: Some(<_>::random_for_test(rng)),
} }
} }
} }
@ -170,6 +54,18 @@ impl<T: RngCore> TestRandom<T> for Validator {
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
#[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Validator::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test] #[test]
fn test_validator_can_be_active() { fn test_validator_can_be_active() {
@ -194,5 +90,15 @@ mod tests {
} }
} }
ssz_tests!(Validator); #[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Validator::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }

View File

@ -4,6 +4,8 @@ use super::validator::*;
use crate::Epoch; use crate::Epoch;
/// Given an indexed sequence of `validators`, return the indices corresponding to validators that are active at `epoch`. /// Given an indexed sequence of `validators`, return the indices corresponding to validators that are active at `epoch`.
///
/// Spec v0.4.0
pub fn get_active_validator_indices(validators: &[Validator], epoch: Epoch) -> Vec<usize> { pub fn get_active_validator_indices(validators: &[Validator], epoch: Epoch) -> Vec<usize> {
validators validators
.iter() .iter()

View File

@ -0,0 +1,47 @@
use crate::{test_utils::TestRandom, Epoch};
use bls::Signature;
use rand::RngCore;
use serde_derive::Serialize;
use ssz::TreeHash;
use ssz_derive::{Decode, Encode, SignedRoot, TreeHash};
use test_random_derive::TestRandom;
/// An exit voluntarily submitted by a validator who wishes to withdraw.
///
/// Spec v0.4.0
#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom, SignedRoot)]
pub struct VoluntaryExit {
pub epoch: Epoch,
pub validator_index: u64,
pub signature: Signature,
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
#[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = VoluntaryExit::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = VoluntaryExit::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
}

View File

@ -17,6 +17,7 @@ pub use crate::signature::Signature;
pub const BLS_AGG_SIG_BYTE_SIZE: usize = 96; pub const BLS_AGG_SIG_BYTE_SIZE: usize = 96;
use hashing::hash;
use ssz::ssz_encode; use ssz::ssz_encode;
/// For some signature and public key, ensure that the signature message was the public key and it /// For some signature and public key, ensure that the signature message was the public key and it
@ -33,6 +34,15 @@ pub fn create_proof_of_possession(keypair: &Keypair) -> Signature {
Signature::new(&ssz_encode(&keypair.pk), 0, &keypair.sk) Signature::new(&ssz_encode(&keypair.pk), 0, &keypair.sk)
} }
/// Returns the withdrawal credentials for a given public key.
pub fn get_withdrawal_credentials(pubkey: &PublicKey, prefix_byte: u8) -> Vec<u8> {
let hashed = hash(&ssz_encode(pubkey));
let mut prefixed = vec![prefix_byte];
prefixed.extend_from_slice(&hashed[1..]);
prefixed
}
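A sketch of using the new helper (the 0x00 prefix byte here is illustrative, not a constant defined in this file):

let keypair = Keypair::random();
let credentials = get_withdrawal_credentials(&keypair.pk, 0x00);

// One prefix byte followed by the last 31 bytes of `hash(ssz_encode(pubkey))`.
assert_eq!(credentials.len(), 32);
assert_eq!(credentials[0], 0x00);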
pub fn bls_verify_aggregate( pub fn bls_verify_aggregate(
pubkey: &AggregatePublicKey, pubkey: &AggregatePublicKey,
message: &[u8], message: &[u8],

View File

@ -25,14 +25,14 @@ fn merkle_root_from_branch(leaf: H256, branch: &[H256], depth: usize, index: usi
let mut merkle_root = leaf.as_bytes().to_vec(); let mut merkle_root = leaf.as_bytes().to_vec();
for i in 0..depth { for (i, leaf) in branch.iter().enumerate().take(depth) {
let ith_bit = (index >> i) & 0x01; let ith_bit = (index >> i) & 0x01;
if ith_bit == 1 { if ith_bit == 1 {
let input = concat(branch[i].as_bytes().to_vec(), merkle_root); let input = concat(leaf.as_bytes().to_vec(), merkle_root);
merkle_root = hash(&input); merkle_root = hash(&input);
} else { } else {
let mut input = merkle_root; let mut input = merkle_root;
input.extend_from_slice(branch[i].as_bytes()); input.extend_from_slice(leaf.as_bytes());
merkle_root = hash(&input); merkle_root = hash(&input);
} }
} }
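To make the index bit-twiddling concrete: for depth = 2 and index = 2 (binary 10), bit 0 is 0 so the first sibling is appended on the right, and bit 1 is 1 so the second sibling is prepended on the left. A hand-unrolled sketch of the same computation, assuming a leaf and a two-element branch of H256 values are in scope:

// Level 0: ith_bit = (2 >> 0) & 1 = 0, so the sibling goes on the right.
let mut level_0 = leaf.as_bytes().to_vec();
level_0.extend_from_slice(branch[0].as_bytes());
let node = hash(&level_0);

// Level 1: ith_bit = (2 >> 1) & 1 = 1, so the sibling goes on the left.
let mut level_1 = branch[1].as_bytes().to_vec();
level_1.extend_from_slice(&node);
let root = hash(&level_1);

assert_eq!(root.len(), 32);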

View File

@ -12,6 +12,7 @@ extern crate ethereum_types;
pub mod decode; pub mod decode;
pub mod encode; pub mod encode;
mod signed_root;
pub mod tree_hash; pub mod tree_hash;
mod impl_decode; mod impl_decode;
@ -20,6 +21,7 @@ mod impl_tree_hash;
pub use crate::decode::{decode_ssz, decode_ssz_list, Decodable, DecodeError}; pub use crate::decode::{decode_ssz, decode_ssz_list, Decodable, DecodeError};
pub use crate::encode::{Encodable, SszStream}; pub use crate::encode::{Encodable, SszStream};
pub use crate::signed_root::SignedRoot;
pub use crate::tree_hash::{merkle_hash, TreeHash}; pub use crate::tree_hash::{merkle_hash, TreeHash};
pub use hashing::hash; pub use hashing::hash;

View File

@ -0,0 +1,5 @@
use crate::TreeHash;
pub trait SignedRoot: TreeHash {
fn signed_root(&self) -> Vec<u8>;
}
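In practice the signed root is the message that gets signed, while hash_tree_root still covers the whole object. A sketch of a call site, assuming a voluntary_exit, a domain value and a keypair in scope (none of which are defined in this file):

// Hypothetical call site: sign over everything except the signature field.
let message = voluntary_exit.signed_root();
let signature = Signature::new(&message, domain, &keypair.sk);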

View File

@ -158,3 +158,78 @@ pub fn ssz_tree_hash_derive(input: TokenStream) -> TokenStream {
}; };
output.into() output.into()
} }
/// Returns `true` if some `Ident` should be considered to be a signature type.
fn type_ident_is_signature(ident: &syn::Ident) -> bool {
match ident.to_string().as_ref() {
"Signature" => true,
"AggregateSignature" => true,
_ => false,
}
}
/// Takes a `Field` where the type (`ty`) portion is a path (e.g., `types::Signature`) and returns
/// the final `Ident` in that path.
///
/// E.g., for `types::Signature` returns `Signature`.
fn final_type_ident(field: &syn::Field) -> &syn::Ident {
match &field.ty {
syn::Type::Path(path) => &path.path.segments.last().unwrap().value().ident,
_ => panic!("ssz_derive only supports Path types."),
}
}
/// Implements `ssz::SignedRoot` for some `struct`, excluding the first field whose type is
/// "Signature" or "AggregateSignature" and every field that follows it.
///
/// See:
/// https://github.com/ethereum/eth2.0-specs/blob/master/specs/simple-serialize.md#signed-roots
///
/// This is a rather horrendous macro; it reads the type of each field as a string and decides
/// whether it is a signature by matching that string against "Signature" or "AggregateSignature". So,
/// it's important that you use those exact words as your type -- don't alias it to something else.
///
/// If you can think of a better way to do this, please make an issue!
///
/// Fields are processed in the order they are defined.
#[proc_macro_derive(SignedRoot)]
pub fn ssz_signed_root_derive(input: TokenStream) -> TokenStream {
let item = parse_macro_input!(input as DeriveInput);
let name = &item.ident;
let struct_data = match &item.data {
syn::Data::Struct(s) => s,
_ => panic!("ssz_derive only supports structs."),
};
let mut field_idents: Vec<&syn::Ident> = vec![];
for field in struct_data.fields.iter() {
let final_type_ident = final_type_ident(&field);
if type_ident_is_signature(final_type_ident) {
break;
} else {
let ident = field
.ident
.as_ref()
.expect("ssz_derive only supports named_struct fields.");
field_idents.push(ident);
}
}
let output = quote! {
impl ssz::SignedRoot for #name {
fn signed_root(&self) -> Vec<u8> {
let mut list: Vec<Vec<u8>> = Vec::new();
#(
list.push(self.#field_idents.hash_tree_root_internal());
)*
ssz::merkle_hash(&mut list)
}
}
};
output.into()
}
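For a concrete picture of the expansion, deriving SignedRoot on the VoluntaryExit struct added earlier in this diff would generate roughly the following (the field walk stops at signature because its type ident matches "Signature"):

impl ssz::SignedRoot for VoluntaryExit {
    fn signed_root(&self) -> Vec<u8> {
        let mut list: Vec<Vec<u8>> = Vec::new();
        // Fields before the signature, in declaration order.
        list.push(self.epoch.hash_tree_root_internal());
        list.push(self.validator_index.hash_tree_root_internal());
        ssz::merkle_hash(&mut list)
    }
}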

View File

@ -63,7 +63,8 @@ impl BeaconNode for BeaconBlockGrpcClient {
attester_slashings: vec![], attester_slashings: vec![],
attestations: vec![], attestations: vec![],
deposits: vec![], deposits: vec![],
exits: vec![], voluntary_exits: vec![],
transfers: vec![],
}, },
})) }))
} else { } else {

View File

@ -50,6 +50,9 @@ impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> BlockProducerServi
Ok(BlockProducerPollOutcome::ValidatorIsUnknown(slot)) => { Ok(BlockProducerPollOutcome::ValidatorIsUnknown(slot)) => {
error!(self.log, "The Beacon Node does not recognise the validator"; "slot" => slot) error!(self.log, "The Beacon Node does not recognise the validator"; "slot" => slot)
} }
Ok(BlockProducerPollOutcome::UnableToGetFork(slot)) => {
error!(self.log, "Unable to get a `Fork` struct to generate signature domains"; "slot" => slot)
}
}; };
std::thread::sleep(Duration::from_millis(self.poll_interval_millis)); std::thread::sleep(Duration::from_millis(self.poll_interval_millis));

View File

@ -1,7 +1,7 @@
use block_proposer::{DutiesReader, DutiesReaderError}; use block_proposer::{DutiesReader, DutiesReaderError};
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::RwLock; use std::sync::RwLock;
use types::{Epoch, Slot}; use types::{Epoch, Fork, Slot};
/// The information required for a validator to propose and attest during some epoch. /// The information required for a validator to propose and attest during some epoch.
/// ///
@ -32,14 +32,14 @@ pub enum EpochDutiesMapError {
/// Maps an `epoch` to some `EpochDuties` for a single validator. /// Maps an `epoch` to some `EpochDuties` for a single validator.
pub struct EpochDutiesMap { pub struct EpochDutiesMap {
pub epoch_length: u64, pub slots_per_epoch: u64,
pub map: RwLock<HashMap<Epoch, EpochDuties>>, pub map: RwLock<HashMap<Epoch, EpochDuties>>,
} }
impl EpochDutiesMap { impl EpochDutiesMap {
pub fn new(epoch_length: u64) -> Self { pub fn new(slots_per_epoch: u64) -> Self {
Self { Self {
epoch_length, slots_per_epoch,
map: RwLock::new(HashMap::new()), map: RwLock::new(HashMap::new()),
} }
} }
@ -67,7 +67,7 @@ impl EpochDutiesMap {
impl DutiesReader for EpochDutiesMap { impl DutiesReader for EpochDutiesMap {
fn is_block_production_slot(&self, slot: Slot) -> Result<bool, DutiesReaderError> { fn is_block_production_slot(&self, slot: Slot) -> Result<bool, DutiesReaderError> {
let epoch = slot.epoch(self.epoch_length); let epoch = slot.epoch(self.slots_per_epoch);
let map = self.map.read().map_err(|_| DutiesReaderError::Poisoned)?; let map = self.map.read().map_err(|_| DutiesReaderError::Poisoned)?;
let duties = map let duties = map
@ -75,6 +75,17 @@ impl DutiesReader for EpochDutiesMap {
.ok_or_else(|| DutiesReaderError::UnknownEpoch)?; .ok_or_else(|| DutiesReaderError::UnknownEpoch)?;
Ok(duties.is_block_production_slot(slot)) Ok(duties.is_block_production_slot(slot))
} }
fn fork(&self) -> Result<Fork, DutiesReaderError> {
// TODO: this is garbage data.
//
// It will almost certainly cause signatures to fail verification.
Ok(Fork {
previous_version: 0,
current_version: 0,
epoch: Epoch::new(0),
})
}
} }
// TODO: add tests. // TODO: add tests.

View File

@ -61,7 +61,7 @@ impl<T: SlotClock, U: BeaconNode> DutiesManager<T, U> {
.map_err(|_| Error::SlotClockError)? .map_err(|_| Error::SlotClockError)?
.ok_or(Error::SlotUnknowable)?; .ok_or(Error::SlotUnknowable)?;
let epoch = slot.epoch(self.spec.epoch_length); let epoch = slot.epoch(self.spec.slots_per_epoch);
if let Some(duties) = self.beacon_node.request_shuffling(epoch, &self.pubkey)? { if let Some(duties) = self.beacon_node.request_shuffling(epoch, &self.pubkey)? {
// If these duties were known, check to see if they're updates or identical. // If these duties were known, check to see if they're updates or identical.
@ -112,7 +112,7 @@ mod tests {
#[test] #[test]
pub fn polling() { pub fn polling() {
let spec = Arc::new(ChainSpec::foundation()); let spec = Arc::new(ChainSpec::foundation());
let duties_map = Arc::new(EpochDutiesMap::new(spec.epoch_length)); let duties_map = Arc::new(EpochDutiesMap::new(spec.slots_per_epoch));
let keypair = Keypair::random(); let keypair = Keypair::random();
let slot_clock = Arc::new(TestingSlotClock::new(0)); let slot_clock = Arc::new(TestingSlotClock::new(0));
let beacon_node = Arc::new(TestBeaconNode::default()); let beacon_node = Arc::new(TestBeaconNode::default());

View File

@ -111,13 +111,13 @@ fn main() {
let genesis_time = 1_549_935_547; let genesis_time = 1_549_935_547;
let slot_clock = { let slot_clock = {
info!(log, "Genesis time"; "unix_epoch_seconds" => genesis_time); info!(log, "Genesis time"; "unix_epoch_seconds" => genesis_time);
let clock = SystemTimeSlotClock::new(genesis_time, spec.slot_duration) let clock = SystemTimeSlotClock::new(genesis_time, spec.seconds_per_slot)
.expect("Unable to instantiate SystemTimeSlotClock."); .expect("Unable to instantiate SystemTimeSlotClock.");
Arc::new(clock) Arc::new(clock)
}; };
let poll_interval_millis = spec.slot_duration * 1000 / 10; // 10% epoch time precision. let poll_interval_millis = spec.seconds_per_slot * 1000 / 10; // 10% slot time precision.
info!(log, "Starting block producer service"; "polls_per_epoch" => spec.slot_duration * 1000 / poll_interval_millis); info!(log, "Starting block producer service"; "polls_per_epoch" => spec.seconds_per_slot * 1000 / poll_interval_millis);
/* /*
* Start threads. * Start threads.
@ -129,7 +129,7 @@ fn main() {
for keypair in keypairs { for keypair in keypairs {
info!(log, "Starting validator services"; "validator" => keypair.pk.concatenated_hex_id()); info!(log, "Starting validator services"; "validator" => keypair.pk.concatenated_hex_id());
let duties_map = Arc::new(EpochDutiesMap::new(spec.epoch_length)); let duties_map = Arc::new(EpochDutiesMap::new(spec.slots_per_epoch));
// Spawn a new thread to maintain the validator's `EpochDuties`. // Spawn a new thread to maintain the validator's `EpochDuties`.
let duties_manager_thread = { let duties_manager_thread = {